From 51b80be1f22bd2356b51265f63a7bdeb14788568 Mon Sep 17 00:00:00 2001 From: jason Date: Thu, 19 Feb 2026 11:57:54 +0000 Subject: [PATCH 01/13] feat: add kzg commitment types and shadow proof logging --- Cargo.lock | 2 + Cargo.toml | 4 +- .../src/chunk_ingress_service/chunks.rs | 71 +++++ .../chunk_ingress_service/ingress_proofs.rs | 1 + crates/types/Cargo.toml | 2 + crates/types/proptest-regressions/kzg.txt | 9 + crates/types/src/config/consensus.rs | 9 + crates/types/src/kzg.rs | 265 ++++++++++++++++++ crates/types/src/lib.rs | 1 + 9 files changed, 363 insertions(+), 1 deletion(-) create mode 100644 crates/types/proptest-regressions/kzg.txt create mode 100644 crates/types/src/kzg.rs diff --git a/Cargo.lock b/Cargo.lock index 65c33359a2..36fb6e264b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5476,11 +5476,13 @@ dependencies = [ "async-trait", "base58", "base64-url", + "blst", "borsh", "borsh-derive", "build-print", "bytemuck", "bytes", + "c-kzg", "chrono", "derive_more", "eyre", diff --git a/Cargo.toml b/Cargo.toml index 7b118e9fd3..5d9389993b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -151,6 +151,8 @@ chrono = { version = "0.4", features = ["serde"] } revm = { version = "34.0.0", default-features = false } revm-primitives = "22" +blst = "0.3.16" +c-kzg = { version = "2.1.5", features = ["ethereum_kzg_settings"] } alloy-dyn-abi = "1.5.6" alloy-evm = { version = "0.27.2", default-features = false } alloy-primitives = { version = "1.5.6", default-features = false, features = [ @@ -167,7 +169,7 @@ alloy-sol-types = { version = "1.5.6", default-features = false } alloy-consensus = { version = "1.7.3", default-features = false } alloy-contract = { version = "1.7.3", default-features = false } alloy-core = { version = "1.5.6", default-features = false } -alloy-eips = { version = "1.7.3", default-features = false } +alloy-eips = { version = "1.7.3", default-features = false, features = ["kzg"] } alloy-genesis = { version = "1.7.3", default-features = false } 
alloy-network = { version = "1.7.3", default-features = false } alloy-provider = { version = "1.7.3", default-features = false } diff --git a/crates/actors/src/chunk_ingress_service/chunks.rs b/crates/actors/src/chunk_ingress_service/chunks.rs index b1235127d2..0246118dd1 100644 --- a/crates/actors/src/chunk_ingress_service/chunks.rs +++ b/crates/actors/src/chunk_ingress_service/chunks.rs @@ -777,6 +777,7 @@ pub fn generate_ingress_proof( signer: IrysSigner, chain_id: ChainId, anchor: H256, + enable_shadow_kzg_logging: bool, ) -> eyre::Result { // load the chunks from the DB // TODO: for now we assume the chunks all all in the DB chunk cache @@ -853,9 +854,79 @@ pub fn generate_ingress_proof( db.update(|rw_tx| irys_database::store_ingress_proof_checked(rw_tx, &proof, &signer))??; + if enable_shadow_kzg_logging { + if let Err(e) = shadow_log_kzg_commitments(&db, data_root) { + warn!( + data_root = %data_root, + error = %e, + "[shadow-kzg] computation failed" + ); + } + } + Ok(proof) } +/// Compute KZG commitments in shadow mode: re-reads chunks from DB, computes +/// per-chunk KZG commitments, and logs results. Errors are informational only. +fn shadow_log_kzg_commitments(db: &DatabaseProvider, data_root: DataRoot) -> eyre::Result<()> { + use irys_types::kzg::{compute_chunk_commitment, default_kzg_settings}; + use std::time::Instant; + + let settings = default_kzg_settings(); + let start = Instant::now(); + + db.view_eyre(|tx| { + let mut dup_cursor = tx.cursor_dup_read::()?; + let dup_walker = dup_cursor.walk_dup(Some(data_root), None)?; + + for (i, entry) in dup_walker.into_iter().enumerate() { + let (_root_hash, index_entry) = entry?; + let chunk = tx + .get::(index_entry.meta.chunk_path_hash)? + .ok_or(eyre!("missing chunk for shadow KZG"))?; + let chunk_bin = chunk + .chunk + .ok_or(eyre!("missing chunk body for shadow KZG"))? 
+ .0; + + let chunk_start = Instant::now(); + match compute_chunk_commitment(&chunk_bin, settings) { + Ok(commitment) => { + let hex: String = commitment + .as_ref() + .iter() + .map(|b| format!("{b:02x}")) + .collect(); + info!( + data_root = %data_root, + chunk_index = i, + commitment = %hex, + chunk_time_ms = chunk_start.elapsed().as_millis(), + "[shadow-kzg] computed chunk commitment" + ); + } + Err(e) => { + warn!( + data_root = %data_root, + chunk_index = i, + error = %e, + "[shadow-kzg] chunk commitment failed" + ); + } + } + } + + info!( + data_root = %data_root, + total_time_ms = start.elapsed().as_millis(), + "[shadow-kzg] completed all chunk commitments" + ); + + Ok(()) + }) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs index b2fb3fdbb2..35604906b8 100644 --- a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs +++ b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs @@ -383,6 +383,7 @@ pub fn generate_and_store_ingress_proof( signer, chain_id, anchor, + config.consensus.enable_shadow_kzg_logging, ); let proof = match proof_res { diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index c0c4707be0..70a1ebf7ff 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -47,6 +47,8 @@ reth-ethereum-primitives.workspace = true reth-primitives-traits.workspace = true reth-chainspec.workspace = true alloy-eips.workspace = true +blst.workspace = true +c-kzg.workspace = true alloy-genesis.workspace = true reth-db.workspace = true reth-db-api.workspace = true diff --git a/crates/types/proptest-regressions/kzg.txt b/crates/types/proptest-regressions/kzg.txt new file mode 100644 index 0000000000..777f509732 --- /dev/null +++ b/crates/types/proptest-regressions/kzg.txt @@ -0,0 +1,9 @@ +# Seeds for failure cases proptest has generated in the past. 
It is +# automatically read and these particular cases re-run before any +# novel cases are generated. +# +# It is recommended to check this file in to source control so that +# everyone who runs the test benefits from these saved cases. +cc de642ea547c9b240d7fa5f2ffb00b7aa16acd1c4bbbb614e0b543f7c2be55dcf # shrinks to seed_a = 0, seed_b = 128 +cc f67367e695d6ce23719ff011b6ea0c128006178a0a7e8d2a29112d6c97a64de8 # shrinks to seed = 116 +cc b0a76d98e62da302af0d107e9d67a35aad82d6e07400acd291e28d2ce9397621 # shrinks to seed = 116 diff --git a/crates/types/src/config/consensus.rs b/crates/types/src/config/consensus.rs index 2d3f8e8837..c10bd24628 100644 --- a/crates/types/src/config/consensus.rs +++ b/crates/types/src/config/consensus.rs @@ -115,6 +115,12 @@ pub struct ConsensusConfig { #[serde(default = "default_disable_full_ingress_proof_validation")] pub enable_full_ingress_proof_validation: bool, + /// Enable shadow KZG commitment computation during ingress proof generation. + /// When enabled, KZG commitments are computed alongside V1 proofs and logged + /// for comparison, but do not affect consensus. 
+ #[serde(default)] + pub enable_shadow_kzg_logging: bool, + /// Target number of years data should be preserved on the network /// Determines long-term storage pricing and incentives pub safe_minimum_number_of_years: u64, @@ -615,6 +621,7 @@ impl ConsensusConfig { entropy_packing_iterations: 1_000_000, // Toggles full ingress proof validation on or off enable_full_ingress_proof_validation: false, + enable_shadow_kzg_logging: false, // Fee required to stake a mining address in Irys tokens stake_value: Amount::token(dec!(400_000)).expect("valid token amount"), // Base fee required for pledging a partition in Irys tokens @@ -753,6 +760,7 @@ impl ConsensusConfig { .expect("valid percentage"), minimum_term_fee_usd: Amount::token(dec!(0.01)).expect("valid token amount"), // $0.01 USD minimum enable_full_ingress_proof_validation: false, + enable_shadow_kzg_logging: false, max_future_timestamp_drift_millis: 15_000, // Hardfork configuration - testnet uses 1 proof for easier testing hardforks: IrysHardforkConfig { @@ -803,6 +811,7 @@ impl ConsensusConfig { .expect("valid percentage"), minimum_term_fee_usd: Amount::token(dec!(0.01)).expect("valid token amount"), // $0.01 USD minimum enable_full_ingress_proof_validation: false, + enable_shadow_kzg_logging: false, max_future_timestamp_drift_millis: 15_000, genesis: GenesisConfig { diff --git a/crates/types/src/kzg.rs b/crates/types/src/kzg.rs new file mode 100644 index 0000000000..95c3710a12 --- /dev/null +++ b/crates/types/src/kzg.rs @@ -0,0 +1,265 @@ +use alloy_eips::eip4844::env_settings::EnvKzgSettings; +use c_kzg::{Blob, KzgCommitment, KzgSettings}; +use openssl::sha; + +pub const BLOB_SIZE: usize = 131_072; // 128KB = 4096 * 32 bytes +pub const CHUNK_SIZE_FOR_KZG: usize = 262_144; // 256KB = 2 * BLOB_SIZE +pub const COMMITMENT_SIZE: usize = 48; // Compressed G1 point + +/// Returns a reference to the default (Ethereum mainnet) trusted setup KZG settings. +/// Lazily initialized on first call, thread-safe. 
+pub fn default_kzg_settings() -> &'static KzgSettings { + EnvKzgSettings::Default.get() +} + +/// Compute a KZG commitment for a single 128KB blob (4096 field elements). +/// +/// `data` must be exactly [`BLOB_SIZE`] bytes. If the data is shorter, the caller +/// must zero-pad it before calling this function. +pub fn compute_blob_commitment( + data: &[u8; BLOB_SIZE], + settings: &KzgSettings, +) -> eyre::Result { + let blob = Blob::new(*data); + settings + .blob_to_kzg_commitment(&blob) + .map_err(|e| eyre::eyre!("KZG blob commitment failed: {e}")) +} + +/// Aggregate two G1 commitments: C = C1 + r·C2 where r = SHA256(C1 || C2) +/// interpreted as a BLS12-381 scalar. +/// +/// Uses the `blst` library (transitive dependency via `c-kzg`) for elliptic +/// curve point operations on BLS12-381 G1. +pub fn aggregate_commitments( + c1: &KzgCommitment, + c2: &KzgCommitment, +) -> eyre::Result { + use blst::min_pk::PublicKey; + use blst::{blst_p1, blst_p1_affine, blst_scalar}; + + // Compute random challenge: r = SHA256(C1 || C2) + let mut hasher = sha::Sha256::new(); + hasher.update(c1.as_ref()); + hasher.update(c2.as_ref()); + let r_bytes = hasher.finish(); + + // Convert r to blst scalar (big-endian input) + let mut r_scalar = blst_scalar::default(); + unsafe { + blst::blst_scalar_from_bendian(&mut r_scalar, r_bytes.as_ptr()); + } + + // Decompress C1 and C2 from their 48-byte compressed G1 representations + let p1 = PublicKey::from_bytes(c1.as_ref()) + .map_err(|e| eyre::eyre!("failed to decompress C1: {e:?}"))?; + let p2 = PublicKey::from_bytes(c2.as_ref()) + .map_err(|e| eyre::eyre!("failed to decompress C2: {e:?}"))?; + + // Get affine points via From trait + let p1_affine: &blst_p1_affine = (&p1).into(); + let p2_affine: &blst_p1_affine = (&p2).into(); + + // Convert C2 to projective, then compute r·C2 + let mut p2_proj = blst_p1::default(); + let mut r_c2 = blst_p1::default(); + unsafe { + blst::blst_p1_from_affine(&mut p2_proj, p2_affine); + 
blst::blst_p1_mult(&mut r_c2, &p2_proj, r_scalar.b.as_ptr(), 256); + } + + // Compute C1 + r·C2 (using affine + projective variant) + let mut result = blst_p1::default(); + unsafe { + let mut c1_proj = blst_p1::default(); + blst::blst_p1_from_affine(&mut c1_proj, p1_affine); + blst::blst_p1_add(&mut result, &c1_proj, &r_c2); + } + + // Compress back to 48-byte representation + let mut compressed = [0_u8; COMMITMENT_SIZE]; + unsafe { + blst::blst_p1_compress(compressed.as_mut_ptr(), &result); + } + + Ok(KzgCommitment::from(compressed)) +} + +/// Compute the aggregated KZG commitment for a 256KB native Irys chunk. +/// +/// Splits the chunk into two 128KB halves, commits each half as a separate +/// blob, then aggregates: C = C1 + r·C2 where r = SHA256(C1 || C2). +/// +/// If `chunk_data` is shorter than [`CHUNK_SIZE_FOR_KZG`], it is zero-padded. +/// If it is longer, returns an error. +pub fn compute_chunk_commitment( + chunk_data: &[u8], + settings: &KzgSettings, +) -> eyre::Result { + if chunk_data.len() > CHUNK_SIZE_FOR_KZG { + return Err(eyre::eyre!( + "chunk data too large: {} bytes (max {})", + chunk_data.len(), + CHUNK_SIZE_FOR_KZG + )); + } + + // Zero-pad to 256KB + let mut padded = [0_u8; CHUNK_SIZE_FOR_KZG]; + padded[..chunk_data.len()].copy_from_slice(chunk_data); + + // Split into two 128KB halves + let (first_half, second_half) = padded.split_at(BLOB_SIZE); + let first_half: &[u8; BLOB_SIZE] = first_half + .try_into() + .expect("split_at guarantees BLOB_SIZE"); + let second_half: &[u8; BLOB_SIZE] = second_half + .try_into() + .expect("split_at guarantees BLOB_SIZE"); + + let c1 = compute_blob_commitment(first_half, settings)?; + let c2 = compute_blob_commitment(second_half, settings)?; + + aggregate_commitments(&c1, &c2) +} + +#[cfg(test)] +mod tests { + use super::*; + use proptest::prelude::*; + + fn kzg_settings() -> &'static KzgSettings { + default_kzg_settings() + } + + /// Helper to compare KzgCommitment values by their byte representation, + /// 
since the c-kzg type doesn't implement PartialEq. + fn commitment_bytes(c: &KzgCommitment) -> &[u8] { + c.as_ref() + } + + #[test] + fn commitment_size_is_48_bytes() { + let data = [0_u8; BLOB_SIZE]; + let commitment = compute_blob_commitment(&data, kzg_settings()).unwrap(); + assert_eq!(commitment.as_ref().len(), COMMITMENT_SIZE); + } + + #[test] + fn same_data_produces_same_commitment() { + let data = [42_u8; BLOB_SIZE]; + let c1 = compute_blob_commitment(&data, kzg_settings()).unwrap(); + let c2 = compute_blob_commitment(&data, kzg_settings()).unwrap(); + assert_eq!(commitment_bytes(&c1), commitment_bytes(&c2)); + } + + #[test] + fn different_data_produces_different_commitment() { + let data_a = [1_u8; BLOB_SIZE]; + let data_b = [2_u8; BLOB_SIZE]; + let c1 = compute_blob_commitment(&data_a, kzg_settings()).unwrap(); + let c2 = compute_blob_commitment(&data_b, kzg_settings()).unwrap(); + assert_ne!(commitment_bytes(&c1), commitment_bytes(&c2)); + } + + #[test] + fn chunk_commitment_deterministic() { + let data = vec![7_u8; CHUNK_SIZE_FOR_KZG]; + let c1 = compute_chunk_commitment(&data, kzg_settings()).unwrap(); + let c2 = compute_chunk_commitment(&data, kzg_settings()).unwrap(); + assert_eq!(commitment_bytes(&c1), commitment_bytes(&c2)); + } + + #[test] + fn chunk_commitment_different_data() { + let data_a = vec![1_u8; CHUNK_SIZE_FOR_KZG]; + let data_b = vec![2_u8; CHUNK_SIZE_FOR_KZG]; + let c1 = compute_chunk_commitment(&data_a, kzg_settings()).unwrap(); + let c2 = compute_chunk_commitment(&data_b, kzg_settings()).unwrap(); + assert_ne!(commitment_bytes(&c1), commitment_bytes(&c2)); + } + + #[test] + fn aggregate_commitment_produces_valid_point() { + let data_a = [1_u8; BLOB_SIZE]; + let data_b = [2_u8; BLOB_SIZE]; + let c1 = compute_blob_commitment(&data_a, kzg_settings()).unwrap(); + let c2 = compute_blob_commitment(&data_b, kzg_settings()).unwrap(); + let agg = aggregate_commitments(&c1, &c2).unwrap(); + + assert_eq!(agg.as_ref().len(), COMMITMENT_SIZE); + 
blst::min_pk::PublicKey::from_bytes(agg.as_ref()) + .expect("aggregate commitment should be a valid G1 point"); + } + + #[test] + fn zero_padded_blob_matches_single_commitment() { + // A chunk that fits in a single blob (≤128KB) should still produce a + // valid aggregated commitment. The second half is all zeros. + let small_data = vec![99_u8; BLOB_SIZE]; + let commitment = compute_chunk_commitment(&small_data, kzg_settings()).unwrap(); + + assert_eq!(commitment.as_ref().len(), COMMITMENT_SIZE); + blst::min_pk::PublicKey::from_bytes(commitment.as_ref()) + .expect("commitment should be a valid G1 point"); + } + + #[test] + fn partial_chunk_zero_padded() { + let small_data = vec![42_u8; 1000]; + let commitment = compute_chunk_commitment(&small_data, kzg_settings()).unwrap(); + assert_eq!(commitment.as_ref().len(), COMMITMENT_SIZE); + } + + #[test] + fn empty_chunk_produces_valid_commitment() { + let commitment = compute_chunk_commitment(&[], kzg_settings()).unwrap(); + assert_eq!(commitment.as_ref().len(), COMMITMENT_SIZE); + } + + #[test] + fn oversized_chunk_rejected() { + let oversized = vec![0_u8; CHUNK_SIZE_FOR_KZG + 1]; + let result = compute_chunk_commitment(&oversized, kzg_settings()); + assert!(result.is_err()); + } + + // BLS12-381 field modulus starts with 0x73; filling a blob with any byte + // >= 0x74 (116) makes each 32-byte field element exceed the modulus, + // causing C_KZG_BADARGS. Seeds must stay in 0..114 for uniform-fill blobs. + const MAX_VALID_SEED: u8 = 114; + + // KZG commitment computation is expensive (~150ms per blob in debug mode). + // Limit proptest cases to keep test runtime reasonable. + proptest! 
{ + #![proptest_config(ProptestConfig::with_cases(20))] + + #[test] + fn blob_commitment_roundtrip(seed in 0_u8..MAX_VALID_SEED) { + let data = [seed; BLOB_SIZE]; + let c1 = compute_blob_commitment(&data, kzg_settings()).unwrap(); + let c2 = compute_blob_commitment(&data, kzg_settings()).unwrap(); + prop_assert_eq!(commitment_bytes(&c1), commitment_bytes(&c2)); + } + + #[test] + fn chunk_commitment_roundtrip(seed in 0_u8..MAX_VALID_SEED) { + let data = vec![seed; CHUNK_SIZE_FOR_KZG]; + let c1 = compute_chunk_commitment(&data, kzg_settings()).unwrap(); + let c2 = compute_chunk_commitment(&data, kzg_settings()).unwrap(); + prop_assert_eq!(commitment_bytes(&c1), commitment_bytes(&c2)); + } + + #[test] + fn different_seeds_different_chunk_commitments( + seed_a in 0_u8..57, + seed_b in 57_u8..MAX_VALID_SEED, + ) { + let data_a = vec![seed_a; CHUNK_SIZE_FOR_KZG]; + let data_b = vec![seed_b; CHUNK_SIZE_FOR_KZG]; + let c1 = compute_chunk_commitment(&data_a, kzg_settings()).unwrap(); + let c2 = compute_chunk_commitment(&data_b, kzg_settings()).unwrap(); + prop_assert_ne!(commitment_bytes(&c1), commitment_bytes(&c2)); + } + } +} diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 266ace83bb..14a46a65d2 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -19,6 +19,7 @@ pub mod gossip; pub mod hardfork_config; pub mod ingress; pub mod irys; +pub mod kzg; pub mod ledger_expiry; mod merkle; pub mod partition; From caaeb3b680344b0b6ef7af389bd617d813b24c51 Mon Sep 17 00:00:00 2001 From: jason Date: Thu, 19 Feb 2026 13:56:31 +0000 Subject: [PATCH 02/13] feat(proof): add ingressproof v2 and composite commitments --- crates/actors/src/block_discovery.rs | 6 +- crates/actors/src/cache_service.rs | 28 +- .../src/chunk_ingress_service/chunks.rs | 3 +- .../chunk_ingress_service/ingress_proofs.rs | 182 +++++++------ crates/actors/src/mempool_service.rs | 15 +- crates/actors/src/mempool_service/facade.rs | 23 ++ crates/chain-tests/src/external/api.rs | 4 
+- .../src/external/programmable_data_basic.rs | 4 +- crates/chain-tests/src/utils.rs | 2 +- crates/chain-tests/src/validation/mod.rs | 4 +- crates/database/src/database.rs | 12 +- crates/p2p/src/gossip_data_handler.rs | 6 +- crates/p2p/src/server.rs | 4 +- crates/types/src/gossip.rs | 6 +- crates/types/src/ingress.rs | 253 +++++++++++++++--- crates/types/src/irys.rs | 2 +- crates/types/src/kzg.rs | 170 ++++++++++++ .../transaction_signing_versioned_tests.rs | 2 +- 18 files changed, 556 insertions(+), 170 deletions(-) diff --git a/crates/actors/src/block_discovery.rs b/crates/actors/src/block_discovery.rs index 5525a44912..535a36ae43 100644 --- a/crates/actors/src/block_discovery.rs +++ b/crates/actors/src/block_discovery.rs @@ -603,13 +603,13 @@ impl BlockDiscoveryServiceInner { })?; // Validate the anchors for proof in tx_proofs.iter() { - if !valid_ingress_anchor_blocks.contains(&proof.anchor) { + if !valid_ingress_anchor_blocks.contains(&proof.anchor()) { info!( "valid ingress anchor blocks: {:?}, bt_finished_height {} min_ingress_proof_anchor_height {} anchor {}, ID {}", &valid_ingress_anchor_blocks, &bt_finished_height, &min_ingress_proof_anchor_height, - &proof.anchor, + &proof.anchor(), &proof.id() ); return Err(BlockDiscoveryError::InvalidAnchor { @@ -617,7 +617,7 @@ impl BlockDiscoveryServiceInner { promotion_target_id: tx_header.id, id: proof.id(), }, - anchor: proof.anchor, + anchor: proof.anchor(), }); } } diff --git a/crates/actors/src/cache_service.rs b/crates/actors/src/cache_service.rs index a1cc1483ce..099cd3a267 100644 --- a/crates/actors/src/cache_service.rs +++ b/crates/actors/src/cache_service.rs @@ -581,11 +581,11 @@ impl InnerCacheTask { } } else { debug!( - ingress_proof.data_root = ?proof.data_root, + ingress_proof.data_root = ?proof.data_root(), "Skipping reanchoring of ingress proof due to REGENERATE_PROOFS = false" ); if let Err(e) = - ChunkIngressServiceInner::remove_ingress_proof(&self.db, proof.data_root) + 
ChunkIngressServiceInner::remove_ingress_proof(&self.db, proof.data_root()) { warn!(ingress_proof.data_root = ?proof, "Failed to remove ingress proof: {e}"); } @@ -598,26 +598,26 @@ impl InnerCacheTask { &self.block_tree_guard, &self.db, &self.config, - proof.data_root, + proof.data_root(), None, &self.gossip_broadcast, &self.cache_sender, ) { if error.is_benign() { - debug!(ingress_proof.data_root = ?proof.data_root, "Skipped ingress proof regeneration: {error}"); + debug!(ingress_proof.data_root = ?proof.data_root(), "Skipped ingress proof regeneration: {error}"); } else { - warn!(ingress_proof.data_root = ?proof.data_root, "Failed to regenerate ingress proof: {error}"); + warn!(ingress_proof.data_root = ?proof.data_root(), "Failed to regenerate ingress proof: {error}"); } } } else { debug!( - ingress_proof.data_root = ?proof.data_root, + ingress_proof.data_root = ?proof.data_root(), "Regeneration disabled, removing ingress proof for data root" ); if let Err(e) = - ChunkIngressServiceInner::remove_ingress_proof(&self.db, proof.data_root) + ChunkIngressServiceInner::remove_ingress_proof(&self.db, proof.data_root()) { - warn!(ingress_proof.data_root = ?proof.data_root, "Failed to remove ingress proof: {e}"); + warn!(ingress_proof.data_root = ?proof.data_root(), "Failed to remove ingress proof: {e}"); } } } @@ -1126,8 +1126,10 @@ mod tests { // Insert a (non-expired) ingress proof entry for the data root so pruning treats it as active db.update(|wtx| { - let mut ingress_proof = IngressProof::default(); - ingress_proof.data_root = tx_header.data_root; + let ingress_proof = IngressProof::V1(irys_types::ingress::IngressProofV1 { + data_root: tx_header.data_root, + ..Default::default() + }); irys_database::store_external_ingress_proof_checked( wtx, &ingress_proof, @@ -1631,8 +1633,10 @@ mod tests { let local_addr = signer.address(); db.update(|wtx| { - let mut ingress_proof = IngressProof::default(); - ingress_proof.data_root = tx_header.data_root; + let ingress_proof = 
IngressProof::V1(irys_types::ingress::IngressProofV1 { + data_root: tx_header.data_root, + ..Default::default() + }); irys_database::store_external_ingress_proof_checked( wtx, &ingress_proof, diff --git a/crates/actors/src/chunk_ingress_service/chunks.rs b/crates/actors/src/chunk_ingress_service/chunks.rs index 0246118dd1..29e465bda9 100644 --- a/crates/actors/src/chunk_ingress_service/chunks.rs +++ b/crates/actors/src/chunk_ingress_service/chunks.rs @@ -847,7 +847,8 @@ pub fn generate_ingress_proof( info!( "generated ingress proof {} for data root {}", - &proof.proof, &data_root + &proof.proof_id(), + &data_root ); assert_eq!(actual_data_size, size); assert_eq!(actual_chunk_count, expected_chunk_count); diff --git a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs index 35604906b8..a0b4d4fd38 100644 --- a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs +++ b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs @@ -63,14 +63,15 @@ impl IngressProofGenerationError { } impl ChunkIngressServiceInner { - #[tracing::instrument(level = "trace", skip_all, fields(data_root = %ingress_proof.data_root))] + #[tracing::instrument(level = "trace", skip_all, fields(data_root = %ingress_proof.data_root()))] pub(crate) fn handle_ingest_ingress_proof( &self, ingress_proof: IngressProof, ) -> Result<(), IngressProofError> { // Validate the proofs signature and basic details + let data_root_val = ingress_proof.data_root(); let address = ingress_proof - .pre_validate(&ingress_proof.data_root) + .pre_validate(&data_root_val) .map_err(|_| IngressProofError::InvalidSignature)?; // Reject proofs from addresses not staked or pending stake (spam protection) @@ -98,7 +99,7 @@ impl ChunkIngressServiceInner { if let Err(e) = res { tracing::error!( - ingress_proof.data_root = ?ingress_proof.data_root, + ingress_proof.data_root = ?ingress_proof.data_root(), "Failed to store ingress proof data root: {:?}", e ); @@ 
-106,7 +107,7 @@ impl ChunkIngressServiceInner { } let gossip_sender = &self.service_senders.gossip_broadcast; - let data_root = ingress_proof.data_root; + let data_root = ingress_proof.data_root(); let gossip_broadcast_message = GossipBroadcastMessageV2::from(ingress_proof); if let Err(error) = gossip_sender.send_traced(gossip_broadcast_message) { @@ -147,23 +148,21 @@ impl ChunkIngressServiceInner { })?; // TODO: add an ingress proof invalid LRU, like we have for txs + let anchor = ingress_proof.anchor(); let anchor_height = match crate::mempool_service::Inner::get_anchor_height_static( block_tree_read_guard, irys_db, - ingress_proof.anchor, + anchor, false, /* does not need to be canonical */ ) .map_err(|db_err| IngressProofError::DatabaseError(db_err.to_string()))? { Some(height) => height, None => { - // Unknown anchor - return Err(IngressProofError::InvalidAnchor(ingress_proof.anchor)); + return Err(IngressProofError::InvalidAnchor(anchor)); } }; - // check consensus config - let min_anchor_height = latest_height .saturating_sub(config.consensus.mempool.ingress_proof_anchor_expiry_depth as u64); @@ -172,9 +171,9 @@ impl ChunkIngressServiceInner { if too_old { warn!( "Ingress proof anchor {} has height {}, which is too old (min: {})", - ingress_proof.anchor, anchor_height, min_anchor_height + anchor, anchor_height, min_anchor_height ); - Err(IngressProofError::InvalidAnchor(ingress_proof.anchor)) + Err(IngressProofError::InvalidAnchor(anchor)) } else { Ok(()) } @@ -196,6 +195,22 @@ impl ChunkIngressServiceInner { Ok(()) } + /// Check whether this ingress proof's anchor is expired or otherwise invalid. + /// Returns a `ProofCheckResult` whose `expired_or_invalid` flag indicates expiry, along with the regeneration action to take. + /// This function DOES NOT delete the proof; deletion is performed exclusively by the cache service.
+ #[instrument(skip_all, fields(proof.data_root = ?ingress_proof.data_root()))] + pub(crate) fn is_ingress_proof_expired( + &self, + ingress_proof: &IngressProof, + ) -> ProofCheckResult { + Self::is_ingress_proof_expired_static( + &self.block_tree_read_guard, + &self.irys_db, + &self.config, + ingress_proof, + ) + } + pub(crate) fn is_ingress_proof_expired_static( block_tree_read_guard: &BlockTreeReadGuard, irys_db: &DatabaseProvider, @@ -211,7 +226,7 @@ impl ChunkIngressServiceInner { // Fully valid Ok(()) => { debug!( - ingress_proof.data_root = ?ingress_proof.data_root, + data_root = ?ingress_proof.data_root(), "Ingress proof anchor is valid" ); ProofCheckResult { @@ -219,67 +234,61 @@ impl ChunkIngressServiceInner { regeneration_action: RegenAction::DoNotRegenerate, } } - Err(e) => { - match e { - IngressProofError::InvalidAnchor(_block_hash) => { - warn!( - ingress_proof.data_root = ?ingress_proof.data_root, - ingress_proof.anchor = ?ingress_proof.anchor, - "Ingress proof anchor has an invalid anchor", - ); - // Prune, regenerate if not at capacity - ProofCheckResult { - expired_or_invalid: true, - regeneration_action: RegenAction::Reanchor, - } + Err(e) => match e { + IngressProofError::InvalidAnchor(_block_hash) => { + warn!( + data_root = ?ingress_proof.data_root(), + anchor = ?ingress_proof.anchor(), + "Ingress proof anchor has an invalid anchor", + ); + ProofCheckResult { + expired_or_invalid: true, + regeneration_action: RegenAction::Reanchor, } - IngressProofError::InvalidSignature => { - warn!( - ingress_proof.data_root = ?ingress_proof.data_root, - ingress_proof.anchor = ?ingress_proof.anchor, - "Ingress proof anchor has an invalid signature and is going to be pruned", - ); - // Fully regenerate - ProofCheckResult { - expired_or_invalid: true, - regeneration_action: RegenAction::Regenerate, - } + } + IngressProofError::InvalidSignature => { + warn!( + data_root = ?ingress_proof.data_root(), + anchor = ?ingress_proof.anchor(), + "Ingress proof anchor 
has an invalid signature and is going to be pruned", + ); + ProofCheckResult { + expired_or_invalid: true, + regeneration_action: RegenAction::Regenerate, } - IngressProofError::UnstakedAddress => { - warn!( - ingress_proof.data_root = ?ingress_proof.data_root, - ingress_proof.anchor = ?ingress_proof.anchor, - "Ingress proof has been created by an unstaked address and is going to be pruned", - ); - // Should not happen; prune, our own address should not be unstaked unexpectedly - ProofCheckResult { - expired_or_invalid: true, - regeneration_action: RegenAction::DoNotRegenerate, - } + } + IngressProofError::UnstakedAddress => { + warn!( + data_root = ?ingress_proof.data_root(), + anchor = ?ingress_proof.anchor(), + "Ingress proof has been created by an unstaked address and is going to be pruned", + ); + ProofCheckResult { + expired_or_invalid: true, + regeneration_action: RegenAction::DoNotRegenerate, } - IngressProofError::DatabaseError(message) => { - // Don't do anything, we don't know the proof status - error!( - ingress_proof.data_root = ?ingress_proof.data_root, - "Database error during ingress proof expiration validation: {}", message - ); - ProofCheckResult { - expired_or_invalid: false, - regeneration_action: RegenAction::DoNotRegenerate, - } + } + IngressProofError::DatabaseError(message) => { + error!( + data_root = ?ingress_proof.data_root(), + "Database error during ingress proof expiration validation: {}", message + ); + ProofCheckResult { + expired_or_invalid: false, + regeneration_action: RegenAction::DoNotRegenerate, } - IngressProofError::Other(reason_message) => { - error!( - ingress_proof.data_root = ?ingress_proof.data_root, - "Unexpected error during ingress proof expiration validation: {}", reason_message - ); - ProofCheckResult { - expired_or_invalid: false, - regeneration_action: RegenAction::DoNotRegenerate, - } + } + IngressProofError::Other(reason_message) => { + error!( + data_root = ?ingress_proof.data_root(), + "Unexpected error during 
ingress proof expiration validation: {}", reason_message + ); + ProofCheckResult { + expired_or_invalid: false, + regeneration_action: RegenAction::DoNotRegenerate, } } - } + }, } } } @@ -433,7 +442,7 @@ pub fn reanchor_and_store_ingress_proof( let (response_sender, response_receiver) = std::sync::mpsc::channel(); if let Err(err) = cache_sender.send_traced(CacheServiceAction::RequestIngressProofGenerationState { - data_root: proof.data_root, + data_root: proof.data_root(), response_sender, }) { @@ -449,23 +458,23 @@ pub fn reanchor_and_store_ingress_proof( })? }; + let data_root = proof.data_root(); + if is_already_generating { return Err(IngressProofGenerationError::AlreadyGenerating); } - if let Err(e) = cache_sender.send_traced(CacheServiceAction::NotifyProofGenerationStarted( - proof.data_root, - )) { - warn!(data_root = ?proof.data_root, "Failed to notify cache of proof generation start: {e}"); - } - if let Err(e) = - calculate_and_validate_data_size(db, proof.data_root, config.consensus.chunk_size) + cache_sender.send_traced(CacheServiceAction::NotifyProofGenerationStarted(data_root)) { + warn!(data_root = ?data_root, "Failed to notify cache of proof generation start: {e}"); + } + + if let Err(e) = calculate_and_validate_data_size(db, data_root, config.consensus.chunk_size) { if let Err(e) = cache_sender.send_traced( - CacheServiceAction::NotifyProofGenerationCompleted(proof.data_root), + CacheServiceAction::NotifyProofGenerationCompleted(data_root), ) { - warn!(data_root = ?proof.data_root, "Failed to notify cache of proof generation completion: {e}"); + warn!(data_root = ?data_root, "Failed to notify cache of proof generation completion: {e}"); } return Err(e); } @@ -476,22 +485,21 @@ pub fn reanchor_and_store_ingress_proof( .block_hash(); let mut proof = proof.clone(); - // Re-anchor and re-sign - proof.anchor = latest_anchor; + proof.set_anchor(latest_anchor); if let Err(e) = signer.sign_ingress_proof(&mut proof) { if let Err(e) = 
cache_sender.send_traced( - CacheServiceAction::NotifyProofGenerationCompleted(proof.data_root), + CacheServiceAction::NotifyProofGenerationCompleted(data_root), ) { - warn!(data_root = ?proof.data_root, "Failed to notify cache of proof generation completion: {e}"); + warn!(data_root = ?data_root, "Failed to notify cache of proof generation completion: {e}"); } return Err(IngressProofGenerationError::GenerationFailed(e.to_string())); } if let Err(e) = store_ingress_proof(db, &proof, signer) { if let Err(e) = cache_sender.send_traced( - CacheServiceAction::NotifyProofGenerationCompleted(proof.data_root), + CacheServiceAction::NotifyProofGenerationCompleted(data_root), ) { - warn!(data_root = ?proof.data_root, "Failed to notify cache of proof generation completion: {e}"); + warn!(data_root = ?data_root, "Failed to notify cache of proof generation completion: {e}"); } return Err(IngressProofGenerationError::GenerationFailed(e.to_string())); } @@ -499,9 +507,9 @@ pub fn reanchor_and_store_ingress_proof( gossip_ingress_proof(gossip_sender, &proof, block_tree_guard, db, config); if let Err(e) = cache_sender.send_traced(CacheServiceAction::NotifyProofGenerationCompleted( - proof.data_root, + data_root, )) { - warn!(data_root = ?proof.data_root, "Failed to notify cache of proof generation completion: {e}"); + warn!(data_root = ?data_root, "Failed to notify cache of proof generation completion: {e}"); } Ok(proof) } @@ -523,12 +531,12 @@ pub fn gossip_ingress_proof( Ok(()) => { let msg = GossipBroadcastMessageV2::from(ingress_proof.clone()); if let Err(e) = gossip_sender.send_traced(msg) { - tracing::error!(proof.data_root = ?ingress_proof.data_root, "Failed to gossip regenerated ingress proof: {e}"); + tracing::error!(proof.data_root = ?ingress_proof.data_root(), "Failed to gossip regenerated ingress proof: {e}"); } } Err(e) => { // Skip gossip; proof stored for potential later use/regeneration. 
- tracing::debug!(proof.data_root = ?ingress_proof.data_root, "Generated ingress proof anchor invalid (not gossiped): {e}"); + tracing::debug!(proof.data_root = ?ingress_proof.data_root(), "Generated ingress proof anchor invalid (not gossiped): {e}"); } } } diff --git a/crates/actors/src/mempool_service.rs b/crates/actors/src/mempool_service.rs index 4a161ca4db..a4cf9458d5 100644 --- a/crates/actors/src/mempool_service.rs +++ b/crates/actors/src/mempool_service.rs @@ -493,7 +493,7 @@ impl Inner { min_anchor_height: u64, ingress_proof: &IngressProof, ) -> eyre::Result { - let anchor = ingress_proof.anchor; + let anchor = ingress_proof.anchor(); let anchor_height = match self.get_anchor_height(anchor, true).map_err(|e| { TxIngressError::DatabaseError(format!( "Error getting anchor height for {}: {}", @@ -522,7 +522,8 @@ impl Inner { // TODO: recover the signer's address here? (or compute an ID) warn!( "ingress proof data_root {} signature {:?} anchor {anchor} has height {anchor_height}, which is too old compared to min height {min_anchor_height}", - &ingress_proof.data_root, &ingress_proof.signature + ingress_proof.data_root(), + ingress_proof.signature() ); Ok(false) } @@ -1376,17 +1377,13 @@ impl Inner { // Separate assigned and unassigned proofs let assigned_proof_set: HashSet<_> = assigned_proofs .iter() - .map(|p| &p.proof.0) // Use signature as unique identifier + .map(|p| p.proof_id().0) // Use proof ID as unique identifier .collect(); let unassigned_proofs: Vec = all_tx_proofs .iter() - .filter(|c| !assigned_proof_set.contains(&c.proof.proof.0)) - .filter(|c| { - // Filter out proofs from unstaked signers - epoch_snapshot.is_staked(c.address) - }) - .map(|c| c.proof.clone()) + .filter(|p| !assigned_proof_set.contains(&p.proof_id().0)) + .cloned() .collect(); // Build the final proof list diff --git a/crates/actors/src/mempool_service/facade.rs b/crates/actors/src/mempool_service/facade.rs index 1fad69e37f..4a31a43069 100644 --- 
a/crates/actors/src/mempool_service/facade.rs +++ b/crates/actors/src/mempool_service/facade.rs @@ -183,6 +183,29 @@ impl MempoolFacade for MempoolServiceFacadeImpl { oneshot_rx.await.expect("to process TxExistenceQuery") } + async fn handle_ingest_ingress_proof( + &self, + ingress_proof: IngressProof, + ) -> Result<(), IngressProofError> { + let (oneshot_tx, oneshot_rx) = tokio::sync::oneshot::channel(); + let data_root = ingress_proof.data_root(); + self.service + .send_traced(MempoolServiceMessage::IngestIngressProof( + ingress_proof, + oneshot_tx, + )) + .map_err(|_| { + IngressProofError::Other(format!( + "Error sending IngestIngressProof message for data_root {:?}", + data_root + )) + })?; + + oneshot_rx + .await + .expect("to process IngestIngressProof message") + } + async fn get_block_header( &self, block_hash: H256, diff --git a/crates/chain-tests/src/external/api.rs b/crates/chain-tests/src/external/api.rs index 936ab98f23..1cafe5cf40 100644 --- a/crates/chain-tests/src/external/api.rs +++ b/crates/chain-tests/src/external/api.rs @@ -112,9 +112,9 @@ async fn external_api() -> eyre::Result<()> { info!( "got ingress proof for data root {}", - &ingress_proof.proof.data_root + &ingress_proof.proof.data_root() ); - assert_eq!(&ingress_proof.proof.data_root, &recv_tx.data_root); + assert_eq!(&ingress_proof.proof.data_root(), &recv_tx.data_root); let id: String = tx_id.to_string(); diff --git a/crates/chain-tests/src/external/programmable_data_basic.rs b/crates/chain-tests/src/external/programmable_data_basic.rs index d858f7a01b..f572c6e71b 100644 --- a/crates/chain-tests/src/external/programmable_data_basic.rs +++ b/crates/chain-tests/src/external/programmable_data_basic.rs @@ -168,9 +168,9 @@ async fn test_programmable_data_basic_external() -> eyre::Result<()> { info!( "got ingress proof for data root {}", - &ingress_proof.proof.data_root + &ingress_proof.proof.data_root() ); - assert_eq!(&ingress_proof.proof.data_root, &recv_tx.data_root); + 
assert_eq!(&ingress_proof.proof.data_root(), &recv_tx.data_root); let id: String = tx_id.to_string(); diff --git a/crates/chain-tests/src/utils.rs b/crates/chain-tests/src/utils.rs index e0b719d45d..83f2b78b55 100644 --- a/crates/chain-tests/src/utils.rs +++ b/crates/chain-tests/src/utils.rs @@ -1449,7 +1449,7 @@ impl IrysNodeTest { if let Some(tx_proofs) = ingress_proofs_by_root.get(&tx_header.data_root) { if tx_proofs.len() >= num_proofs { for ingress_proof in tx_proofs.iter() { - assert_eq!(ingress_proof.proof.data_root, tx_header.data_root); + assert_eq!(ingress_proof.proof.data_root(), tx_header.data_root); tracing::info!( "proof {} signer: {}", ingress_proof.proof.id(), diff --git a/crates/chain-tests/src/validation/mod.rs b/crates/chain-tests/src/validation/mod.rs index 620b541a17..0234cb6191 100644 --- a/crates/chain-tests/src/validation/mod.rs +++ b/crates/chain-tests/src/validation/mod.rs @@ -943,8 +943,8 @@ async fn heavy_block_duplicate_ingress_proof_signers_gets_rejected() -> eyre::Re )?; // Verify both proofs have the same data_root and can recover the same signer - assert_eq!(proof1.data_root, data_root); - assert_eq!(proof2.data_root, data_root); + assert_eq!(proof1.data_root(), data_root); + assert_eq!(proof2.data_root(), data_root); assert_eq!(proof1.recover_signer()?, test_signer.address()); assert_eq!(proof2.recover_signer()?, test_signer.address()); diff --git a/crates/database/src/database.rs b/crates/database/src/database.rs index a1c9dfd33e..a48efb8027 100644 --- a/crates/database/src/database.rs +++ b/crates/database/src/database.rs @@ -482,12 +482,12 @@ pub fn store_ingress_proof_checked( signer: &IrysSigner, ) -> eyre::Result<()> { if tx - .get::(ingress_proof.data_root)? + .get::(ingress_proof.data_root())? 
.is_none() { return Err(eyre::eyre!( "Data root {} not found in CachedDataRoots", - ingress_proof.data_root + ingress_proof.data_root() )); } @@ -502,7 +502,7 @@ pub fn store_ingress_proof_checked( } tx.put::( - ingress_proof.data_root, + ingress_proof.data_root(), CompactCachedIngressProof(CachedIngressProof { address, proof: ingress_proof.clone(), @@ -517,12 +517,12 @@ pub fn store_external_ingress_proof_checked( address: IrysAddress, ) -> eyre::Result<()> { if tx - .get::(ingress_proof.data_root)? + .get::(ingress_proof.data_root())? .is_none() { return Err(eyre::eyre!( "Data root {} not found in CachedDataRoots", - ingress_proof.data_root + ingress_proof.data_root() )); } @@ -535,7 +535,7 @@ pub fn store_external_ingress_proof_checked( } tx.put::( - ingress_proof.data_root, + ingress_proof.data_root(), CompactCachedIngressProof(CachedIngressProof { address, proof: ingress_proof.clone(), diff --git a/crates/p2p/src/gossip_data_handler.rs b/crates/p2p/src/gossip_data_handler.rs index 4034c3c458..6227c7fb9c 100644 --- a/crates/p2p/src/gossip_data_handler.rs +++ b/crates/p2p/src/gossip_data_handler.rs @@ -250,11 +250,13 @@ where let source_miner_address = proof_request.miner_address; debug!( "Node {}: Gossip ingress_proof received from peer {}: {:?}", - self.gossip_client.mining_address, source_miner_address, proof_request.data.proof + self.gossip_client.mining_address, + source_miner_address, + proof_request.data.proof_id() ); let proof = proof_request.data; - let proof_hash = proof.proof; + let proof_hash = proof.proof_id(); let already_seen = self.cache.seen_ingress_proof_from_any_peer(&proof_hash)?; diff --git a/crates/p2p/src/server.rs b/crates/p2p/src/server.rs index b1f1f0c551..657408fa80 100644 --- a/crates/p2p/src/server.rs +++ b/crates/p2p/src/server.rs @@ -519,7 +519,7 @@ where ) -> HttpResponse { if !server.data_handler.sync_state.is_gossip_reception_enabled() { let node_id = server.data_handler.gossip_client.mining_address; - let data_root = 
proof_json.0.data.data_root; + let data_root = proof_json.0.data.data_root(); warn!( "Node {}: Gossip reception is disabled, ignoring the ingress proof for data_root: {:?}", node_id, data_root @@ -880,7 +880,7 @@ where ) -> HttpResponse { if !server.data_handler.sync_state.is_gossip_reception_enabled() { let node_id = server.data_handler.gossip_client.mining_address; - let data_root = proof_json.0.data.data_root; + let data_root = proof_json.0.data.data_root(); warn!( "Node {}: Gossip reception is disabled, ignoring the ingress proof for data_root: {:?}", node_id, data_root diff --git a/crates/types/src/gossip.rs b/crates/types/src/gossip.rs index ade713128a..da9bd00be4 100644 --- a/crates/types/src/gossip.rs +++ b/crates/types/src/gossip.rs @@ -124,7 +124,7 @@ pub mod v1 { Self::IngressProof(ingress_proof) => { format!( "ingress proof for data_root: {:?} from {:?}", - ingress_proof.data_root, + ingress_proof.data_root(), ingress_proof.recover_signer() ) } @@ -321,7 +321,7 @@ pub mod v2 { Self::IngressProof(ingress_proof) => { format!( "ingress proof for data_root: {:?} from {:?}", - ingress_proof.data_root, + ingress_proof.data_root(), ingress_proof.recover_signer() ) } @@ -423,7 +423,7 @@ impl GossipCacheKey { } pub fn ingress_proof(ingress_proof: &IngressProof) -> Self { - Self::IngressProof(ingress_proof.proof) + Self::IngressProof(ingress_proof.proof_id()) } } diff --git a/crates/types/src/ingress.rs b/crates/types/src/ingress.rs index ad6a14fdea..c96d1c08b2 100644 --- a/crates/types/src/ingress.rs +++ b/crates/types/src/ingress.rs @@ -1,4 +1,5 @@ use crate::irys::IrysSigner; +use crate::kzg::KzgCommitmentBytes; use crate::{ decode_rlp_version, encode_rlp_version, generate_data_root, generate_ingress_leaves, DataRoot, IrysAddress, IrysSignature, Node, Signable, VersionDiscriminant, Versioned, H256, @@ -14,7 +15,6 @@ use reth_db::DatabaseError; use reth_db_api::table::{Compress, Decompress}; use reth_primitives_traits::crypto::secp256k1::recover_signer; use 
serde::{Deserialize, Serialize}; -use std::ops::{Deref, DerefMut}; #[derive(Debug, Clone, PartialEq, IntegerTagged, Eq, Compact, Arbitrary)] #[repr(u8)] @@ -22,6 +22,8 @@ use std::ops::{Deref, DerefMut}; pub enum IngressProof { #[integer_tagged(version = 1)] V1(IngressProofV1) = 1, + #[integer_tagged(version = 2)] + V2(IngressProofV2) = 2, } impl Default for IngressProof { @@ -30,31 +32,12 @@ impl Default for IngressProof { } } -impl Deref for IngressProof { - type Target = IngressProofV1; - - fn deref(&self) -> &Self::Target { - match self { - Self::V1(inner) => inner, - } - } -} - -impl DerefMut for IngressProof { - fn deref_mut(&mut self) -> &mut Self::Target { - match self { - Self::V1(inner) => inner, - } - } -} - impl alloy_rlp::Encodable for IngressProof { fn encode(&self, out: &mut dyn BufMut) { let mut buf = Vec::new(); match self { - Self::V1(inner) => { - inner.encode(&mut buf); - } + Self::V1(inner) => inner.encode(&mut buf), + Self::V2(inner) => inner.encode(&mut buf), } encode_rlp_version(buf, self.version(), out); } @@ -70,6 +53,10 @@ impl alloy_rlp::Decodable for IngressProof { let inner = IngressProofV1::decode(inner_buf)?; Ok(Self::V1(inner)) } + 2 => { + let inner = IngressProofV2::decode(inner_buf)?; + Ok(Self::V2(inner)) + } _ => Err(alloy_rlp::Error::Custom("Unknown version")), } } @@ -85,24 +72,73 @@ impl VersionDiscriminant for IngressProof { fn version(&self) -> u8 { match self { Self::V1(_) => 1, + Self::V2(_) => 2, } } } impl IngressProof { + pub fn data_root(&self) -> H256 { + match self { + Self::V1(v1) => v1.data_root, + Self::V2(v2) => v2.data_root, + } + } + + pub fn chain_id(&self) -> ChainId { + match self { + Self::V1(v1) => v1.chain_id, + Self::V2(v2) => v2.chain_id, + } + } + + pub fn anchor(&self) -> H256 { + match self { + Self::V1(v1) => v1.anchor, + Self::V2(v2) => v2.anchor, + } + } + + pub fn signature(&self) -> &IrysSignature { + match self { + Self::V1(v1) => &v1.signature, + Self::V2(v2) => &v2.signature, + } + } + + 
pub fn signature_mut(&mut self) -> &mut IrysSignature { + match self { + Self::V1(v1) => &mut v1.signature, + Self::V2(v2) => &mut v2.signature, + } + } + + pub fn set_anchor(&mut self, anchor: H256) { + match self { + Self::V1(v1) => v1.anchor = anchor, + Self::V2(v2) => v2.anchor = anchor, + } + } + + /// Returns the V1 merkle proof hash, or V2 composite commitment. + /// Used as a unique proof identifier (e.g. for gossip deduplication). + pub fn proof_id(&self) -> H256 { + match self { + Self::V1(v1) => v1.proof, + Self::V2(v2) => v2.composite_commitment, + } + } + pub fn recover_signer(&self) -> eyre::Result { let prehash = self.signature_hash(); - self.signature.recover_signer(prehash) + self.signature().recover_signer(prehash) } /// Validates that the proof matches the provided data_root and recovers the signer address - /// This method ensures the proof is for the correct data_root before validating the signature pub fn pre_validate(&self, data_root: &H256) -> eyre::Result { - // Validate that the data_root matches - if self.data_root != *data_root { + if self.data_root() != *data_root { return Err(eyre::eyre!("Ingress proof data_root mismatch")); } - // Recover and return the signer address self.recover_signer() } @@ -145,6 +181,152 @@ impl Versioned for IngressProofV1 { const VERSION: u8 = 1; } +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Hash)] +#[repr(u8)] +pub enum DataSourceType { + #[default] + NativeData = 0, + EvmBlob = 1, +} + +impl DataSourceType { + pub fn from_u8(val: u8) -> Self { + match val { + 0 => Self::NativeData, + 1 => Self::EvmBlob, + _ => Self::NativeData, + } + } +} + +impl<'a> Arbitrary<'a> for DataSourceType { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + Ok(Self::from_u8(u.int_in_range(0..=1)?)) + } +} + +impl Compact for DataSourceType { + fn to_compact>(&self, buf: &mut B) -> usize { + buf.put_u8(*self as u8); + 1 + } + + fn from_compact(buf: &[u8], _len: usize) -> 
(Self, &[u8]) { + (Self::from_u8(buf[0]), &buf[1..]) + } +} + +#[derive(Debug, Default, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct IngressProofV2 { + pub signature: IrysSignature, + pub data_root: H256, + pub kzg_commitment: KzgCommitmentBytes, + pub composite_commitment: H256, + pub chain_id: ChainId, + pub anchor: H256, + pub source_type: DataSourceType, +} + +impl Compact for IngressProofV2 { + fn to_compact>(&self, buf: &mut B) -> usize { + let mut flags = 0_usize; + // signature has no flag — always present, written first + flags += self.signature.to_compact(buf); + flags += self.data_root.to_compact(buf); + flags += self.kzg_commitment.to_compact(buf); + flags += self.composite_commitment.to_compact(buf); + flags += self.chain_id.to_compact(buf); + flags += self.anchor.to_compact(buf); + flags += self.source_type.to_compact(buf); + flags + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (signature, buf) = IrysSignature::from_compact(buf, len); + let (data_root, buf) = H256::from_compact(buf, buf.len()); + let (kzg_commitment, buf) = KzgCommitmentBytes::from_compact(buf, buf.len()); + let (composite_commitment, buf) = H256::from_compact(buf, buf.len()); + let (chain_id, buf) = ChainId::from_compact(buf, buf.len()); + let (anchor, buf) = H256::from_compact(buf, buf.len()); + let (source_type, buf) = DataSourceType::from_compact(buf, buf.len()); + ( + Self { + signature, + data_root, + kzg_commitment, + composite_commitment, + chain_id, + anchor, + source_type, + }, + buf, + ) + } +} + +impl Arbitrary<'_> for IngressProofV2 { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + Ok(Self { + signature: u.arbitrary()?, + data_root: u.arbitrary()?, + kzg_commitment: u.arbitrary()?, + composite_commitment: u.arbitrary()?, + chain_id: u.arbitrary()?, + anchor: u.arbitrary()?, + source_type: u.arbitrary()?, + }) + } +} + +impl Versioned for IngressProofV2 { + const VERSION: u8 = 2; +} + +impl 
alloy_rlp::Encodable for IngressProofV2 { + fn encode(&self, out: &mut dyn BufMut) { + let header = alloy_rlp::Header { + list: true, + payload_length: self.data_root.length() + + self.kzg_commitment.length() + + self.composite_commitment.length() + + self.chain_id.length() + + self.anchor.length() + + (self.source_type as u8).length(), + }; + header.encode(out); + self.data_root.encode(out); + self.kzg_commitment.encode(out); + self.composite_commitment.encode(out); + self.chain_id.encode(out); + self.anchor.encode(out); + (self.source_type as u8).encode(out); + } +} + +impl alloy_rlp::Decodable for IngressProofV2 { + fn decode(buf: &mut &[u8]) -> Result { + let header = alloy_rlp::Header::decode(buf)?; + if !header.list { + return Err(alloy_rlp::Error::UnexpectedString); + } + let data_root = alloy_rlp::Decodable::decode(buf)?; + let kzg_commitment = alloy_rlp::Decodable::decode(buf)?; + let composite_commitment = alloy_rlp::Decodable::decode(buf)?; + let chain_id = alloy_rlp::Decodable::decode(buf)?; + let anchor = alloy_rlp::Decodable::decode(buf)?; + let source_type_u8: u8 = alloy_rlp::Decodable::decode(buf)?; + Ok(Self { + signature: Default::default(), + data_root, + kzg_commitment, + composite_commitment, + chain_id, + anchor, + source_type: DataSourceType::from_u8(source_type_u8), + }) + } +} + impl Compress for IngressProofV1 { type Compressed = Vec; fn compress_to_buf>(&self, buf: &mut B) { @@ -199,16 +381,15 @@ pub fn verify_ingress_proof>( chunks: impl IntoIterator, chain_id: ChainId, ) -> eyre::Result { - if chain_id != proof.chain_id { - return Ok(false); // Chain ID mismatch + if chain_id != proof.chain_id() { + return Ok(false); } - let sig = proof.signature.as_bytes(); + let sig = proof.signature().as_bytes(); let prehash = proof.signature_hash(); let recovered_address = recover_signer(&sig[..].try_into()?, prehash.into())?; - // re-compute the ingress proof & regular trees & roots let (proof_root, regular_root) = 
generate_ingress_proof_tree(chunks.into_iter().map(Ok), recovered_address.into(), true)?; @@ -218,18 +399,15 @@ pub fn verify_ingress_proof>( .id, ); - // re-compute the prehash (combining data_root, proof, and chain_id) - let new_prehash = IngressProof::V1(IngressProofV1 { signature: Default::default(), data_root, proof: H256(proof_root.id), chain_id, - anchor: proof.anchor, + anchor: proof.anchor(), }) .signature_hash(); - // make sure they match Ok(new_prehash == prehash) } @@ -403,7 +581,10 @@ mod tests { // Create a modified proof where we try to use testnet proof with mainnet chain_id let mut replay_attack_proof = testnet_proof; - replay_attack_proof.chain_id = mainnet_chain_id; + match &mut replay_attack_proof { + IngressProof::V1(v1) => v1.chain_id = mainnet_chain_id, + IngressProof::V2(v2) => v2.chain_id = mainnet_chain_id, + } // This should fail verification because the signature was created with testnet chain_id // but we're trying to verify it with mainnet chain_id diff --git a/crates/types/src/irys.rs b/crates/types/src/irys.rs index f7ed0b6703..7381a7028b 100644 --- a/crates/types/src/irys.rs +++ b/crates/types/src/irys.rs @@ -188,7 +188,7 @@ impl IrysSigner { pub fn sign_ingress_proof(&self, proof: &mut IngressProof) -> Result<()> { let prehash = proof.signature_hash(); let signature: Signature = self.signer.sign_prehash_recoverable(&prehash)?.into(); - proof.signature = IrysSignature::new(signature); + *proof.signature_mut() = IrysSignature::new(signature); Ok(()) } diff --git a/crates/types/src/kzg.rs b/crates/types/src/kzg.rs index 95c3710a12..985f035514 100644 --- a/crates/types/src/kzg.rs +++ b/crates/types/src/kzg.rs @@ -1,10 +1,135 @@ +use crate::{IrysAddress, H256}; use alloy_eips::eip4844::env_settings::EnvKzgSettings; +use bytes::BufMut; use c_kzg::{Blob, KzgCommitment, KzgSettings}; use openssl::sha; +use reth_codecs::Compact; +use serde::{Deserialize, Serialize}; pub const BLOB_SIZE: usize = 131_072; // 128KB = 4096 * 32 bytes pub const 
CHUNK_SIZE_FOR_KZG: usize = 262_144; // 256KB = 2 * BLOB_SIZE pub const COMMITMENT_SIZE: usize = 48; // Compressed G1 point +pub const DOMAIN_SEPARATOR: &[u8] = b"IRYS_KZG_INGRESS_V1"; + +/// A 48-byte KZG commitment (compressed BLS12-381 G1 point). +/// +/// Newtype wrapper around `[u8; 48]` providing the trait implementations +/// that raw arrays lack (serde for N>32, Default for N>32, Compact). +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +pub struct KzgCommitmentBytes(pub [u8; COMMITMENT_SIZE]); + +impl Default for KzgCommitmentBytes { + fn default() -> Self { + Self([0_u8; COMMITMENT_SIZE]) + } +} + +impl std::fmt::Debug for KzgCommitmentBytes { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "0x")?; + for byte in &self.0 { + write!(f, "{byte:02x}")?; + } + Ok(()) + } +} + +impl std::ops::Deref for KzgCommitmentBytes { + type Target = [u8; COMMITMENT_SIZE]; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef<[u8; COMMITMENT_SIZE]> for KzgCommitmentBytes { + fn as_ref(&self) -> &[u8; COMMITMENT_SIZE] { + &self.0 + } +} + +impl From<[u8; COMMITMENT_SIZE]> for KzgCommitmentBytes { + fn from(bytes: [u8; COMMITMENT_SIZE]) -> Self { + Self(bytes) + } +} + +impl From for [u8; COMMITMENT_SIZE] { + fn from(val: KzgCommitmentBytes) -> Self { + val.0 + } +} + +impl Serialize for KzgCommitmentBytes { + fn serialize(&self, serializer: S) -> Result { + if serializer.is_human_readable() { + let hex = alloy_primitives::hex::encode(self.0); + serializer.serialize_str(&format!("0x{hex}")) + } else { + serializer.serialize_bytes(&self.0) + } + } +} + +impl<'de> Deserialize<'de> for KzgCommitmentBytes { + fn deserialize>(deserializer: D) -> Result { + if deserializer.is_human_readable() { + let s = String::deserialize(deserializer)?; + let s = s.strip_prefix("0x").unwrap_or(&s); + let bytes = alloy_primitives::hex::decode(s).map_err(serde::de::Error::custom)?; + let arr: [u8; COMMITMENT_SIZE] = bytes.try_into().map_err(|v: Vec| { 
+ serde::de::Error::custom(format!( + "expected {COMMITMENT_SIZE} bytes, got {}", + v.len() + )) + })?; + Ok(Self(arr)) + } else { + let bytes = >::deserialize(deserializer)?; + let arr: [u8; COMMITMENT_SIZE] = bytes.try_into().map_err(|v: Vec| { + serde::de::Error::custom(format!( + "expected {COMMITMENT_SIZE} bytes, got {}", + v.len() + )) + })?; + Ok(Self(arr)) + } + } +} + +impl Compact for KzgCommitmentBytes { + fn to_compact>(&self, buf: &mut B) -> usize { + self.0.to_compact(buf) + } + + fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { + let (arr, rest) = <[u8; COMMITMENT_SIZE]>::from_compact(buf, len); + (Self(arr), rest) + } +} + +impl arbitrary::Arbitrary<'_> for KzgCommitmentBytes { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + let bytes: [u8; COMMITMENT_SIZE] = u.arbitrary()?; + Ok(Self(bytes)) + } +} + +impl alloy_rlp::Encodable for KzgCommitmentBytes { + fn encode(&self, out: &mut dyn BufMut) { + self.0.encode(out); + } + + fn length(&self) -> usize { + self.0.length() + } +} + +impl alloy_rlp::Decodable for KzgCommitmentBytes { + fn decode(buf: &mut &[u8]) -> Result { + let arr = <[u8; COMMITMENT_SIZE]>::decode(buf)?; + Ok(Self(arr)) + } +} /// Returns a reference to the default (Ethereum mainnet) trusted setup KZG settings. /// Lazily initialized on first call, thread-safe. @@ -123,6 +248,22 @@ pub fn compute_chunk_commitment( aggregate_commitments(&c1, &c2) } +/// Compute a composite commitment binding a KZG commitment to a signer's address. +/// +/// `composite = SHA256(DOMAIN_SEPARATOR || kzg_commitment || signer_address)` +/// +/// This prevents one signer from claiming another's KZG commitment as their own. 
+pub fn compute_composite_commitment( + kzg_commitment: &[u8; COMMITMENT_SIZE], + signer_address: &IrysAddress, +) -> H256 { + let mut hasher = sha::Sha256::new(); + hasher.update(DOMAIN_SEPARATOR); + hasher.update(kzg_commitment); + hasher.update(&signer_address.0 .0); + H256(hasher.finish()) +} + #[cfg(test)] mod tests { use super::*; @@ -224,6 +365,35 @@ mod tests { assert!(result.is_err()); } + #[test] + fn composite_commitment_deterministic() { + let kzg = [42_u8; COMMITMENT_SIZE]; + let addr = IrysAddress::from([1_u8; 20]); + let c1 = compute_composite_commitment(&kzg, &addr); + let c2 = compute_composite_commitment(&kzg, &addr); + assert_eq!(c1, c2); + } + + #[test] + fn composite_commitment_different_addresses() { + let kzg = [42_u8; COMMITMENT_SIZE]; + let addr1 = IrysAddress::from([1_u8; 20]); + let addr2 = IrysAddress::from([2_u8; 20]); + let c1 = compute_composite_commitment(&kzg, &addr1); + let c2 = compute_composite_commitment(&kzg, &addr2); + assert_ne!(c1, c2); + } + + #[test] + fn composite_commitment_different_kzg_commitments() { + let kzg1 = [1_u8; COMMITMENT_SIZE]; + let kzg2 = [2_u8; COMMITMENT_SIZE]; + let addr = IrysAddress::from([42_u8; 20]); + let c1 = compute_composite_commitment(&kzg1, &addr); + let c2 = compute_composite_commitment(&kzg2, &addr); + assert_ne!(c1, c2); + } + // BLS12-381 field modulus starts with 0x73; filling a blob with any byte // >= 0x74 (116) makes each 32-byte field element exceed the modulus, // causing C_KZG_BADARGS. Seeds must stay in 0..114 for uniform-fill blobs. 
diff --git a/crates/types/tests/transaction_signing_versioned_tests.rs b/crates/types/tests/transaction_signing_versioned_tests.rs index 1103413ff7..34fac375e0 100644 --- a/crates/types/tests/transaction_signing_versioned_tests.rs +++ b/crates/types/tests/transaction_signing_versioned_tests.rs @@ -114,6 +114,6 @@ fn ingress_proof_signing_uses_discriminant() { // Verify the signature is valid let sig_hash = proof.signature_hash(); assert!(proof - .signature + .signature() .validate_signature(sig_hash, signer.address())); } From c6a1f59d4ef9160f0eb50de8c79839d6da8bcaf9 Mon Sep 17 00:00:00 2001 From: jason Date: Thu, 19 Feb 2026 18:18:08 +0000 Subject: [PATCH 03/13] feat: kzg proof generation --- .../src/chunk_ingress_service/chunks.rs | 24 +++++++-- .../chunk_ingress_service/ingress_proofs.rs | 1 + crates/types/src/config/consensus.rs | 9 ++++ crates/types/src/ingress.rs | 48 +++++++++++++++++ crates/types/src/kzg.rs | 51 +++++++++++++++++++ 5 files changed, 128 insertions(+), 5 deletions(-) diff --git a/crates/actors/src/chunk_ingress_service/chunks.rs b/crates/actors/src/chunk_ingress_service/chunks.rs index 29e465bda9..677ce2d63f 100644 --- a/crates/actors/src/chunk_ingress_service/chunks.rs +++ b/crates/actors/src/chunk_ingress_service/chunks.rs @@ -778,6 +778,7 @@ pub fn generate_ingress_proof( chain_id: ChainId, anchor: H256, enable_shadow_kzg_logging: bool, + use_kzg_ingress_proofs: bool, ) -> eyre::Result { // load the chunks from the DB // TODO: for now we assume the chunks all all in the DB chunk cache @@ -837,10 +838,23 @@ pub fn generate_ingress_proof( Ok(chunk_bin) }); - // generate the ingress proof hash - let proof = irys_types::ingress::generate_ingress_proof( - &signer, data_root, iter, chain_id, anchor, - )?; + let proof = if use_kzg_ingress_proofs { + // V2: collect chunks for KZG commitment computation + let chunks: Vec> = iter.collect::>>()?; + irys_types::ingress::generate_ingress_proof_v2( + &signer, + data_root, + &chunks, + chain_id, + 
anchor, + irys_types::kzg::default_kzg_settings(), + )? + } else { + // V1: pass lazy iterator for SHA256 merkle proof + irys_types::ingress::generate_ingress_proof( + &signer, data_root, iter, chain_id, anchor, + )? + }; Ok((proof, total_data_size, chunk_count)) })?; @@ -855,7 +869,7 @@ pub fn generate_ingress_proof( db.update(|rw_tx| irys_database::store_ingress_proof_checked(rw_tx, &proof, &signer))??; - if enable_shadow_kzg_logging { + if enable_shadow_kzg_logging && !use_kzg_ingress_proofs { if let Err(e) = shadow_log_kzg_commitments(&db, data_root) { warn!( data_root = %data_root, diff --git a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs index a0b4d4fd38..44239d100b 100644 --- a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs +++ b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs @@ -393,6 +393,7 @@ pub fn generate_and_store_ingress_proof( chain_id, anchor, config.consensus.enable_shadow_kzg_logging, + config.consensus.use_kzg_ingress_proofs, ); let proof = match proof_res { diff --git a/crates/types/src/config/consensus.rs b/crates/types/src/config/consensus.rs index c10bd24628..e849496fc1 100644 --- a/crates/types/src/config/consensus.rs +++ b/crates/types/src/config/consensus.rs @@ -121,6 +121,12 @@ pub struct ConsensusConfig { #[serde(default)] pub enable_shadow_kzg_logging: bool, + /// Generate V2 (KZG-based) ingress proofs instead of V1 (SHA256 merkle only). + /// When enabled, new ingress proofs include a KZG commitment and composite + /// commitment binding the data to the signer's address. 
+ #[serde(default)] + pub use_kzg_ingress_proofs: bool, + /// Target number of years data should be preserved on the network /// Determines long-term storage pricing and incentives pub safe_minimum_number_of_years: u64, @@ -622,6 +628,7 @@ impl ConsensusConfig { // Toggles full ingress proof validation on or off enable_full_ingress_proof_validation: false, enable_shadow_kzg_logging: false, + use_kzg_ingress_proofs: false, // Fee required to stake a mining address in Irys tokens stake_value: Amount::token(dec!(400_000)).expect("valid token amount"), // Base fee required for pledging a partition in Irys tokens @@ -761,6 +768,7 @@ impl ConsensusConfig { minimum_term_fee_usd: Amount::token(dec!(0.01)).expect("valid token amount"), // $0.01 USD minimum enable_full_ingress_proof_validation: false, enable_shadow_kzg_logging: false, + use_kzg_ingress_proofs: false, max_future_timestamp_drift_millis: 15_000, // Hardfork configuration - testnet uses 1 proof for easier testing hardforks: IrysHardforkConfig { @@ -812,6 +820,7 @@ impl ConsensusConfig { minimum_term_fee_usd: Amount::token(dec!(0.01)).expect("valid token amount"), // $0.01 USD minimum enable_full_ingress_proof_validation: false, enable_shadow_kzg_logging: false, + use_kzg_ingress_proofs: false, max_future_timestamp_drift_millis: 15_000, genesis: GenesisConfig { diff --git a/crates/types/src/ingress.rs b/crates/types/src/ingress.rs index c96d1c08b2..dc63aa6e66 100644 --- a/crates/types/src/ingress.rs +++ b/crates/types/src/ingress.rs @@ -376,6 +376,54 @@ pub fn generate_ingress_proof>( Ok(proof) } +/// Generate a V2 ingress proof with KZG commitment for native Irys data. +/// +/// Unlike V1 which only hashes chunks into a merkle tree, V2 also computes +/// a KZG commitment over the chunk data and binds it to the signer's address +/// via a composite commitment. 
+pub fn generate_ingress_proof_v2( + signer: &IrysSigner, + data_root: DataRoot, + chunks: &[impl AsRef<[u8]>], + chain_id: u64, + anchor: H256, + kzg_settings: &c_kzg::KzgSettings, +) -> eyre::Result { + use crate::kzg::{ + aggregate_all_commitments, compute_chunk_commitment, compute_composite_commitment, + KzgCommitmentBytes, + }; + + // Step 1: Compute per-chunk KZG commitments and aggregate + let chunk_commitments: Vec = chunks + .iter() + .map(|chunk| compute_chunk_commitment(chunk.as_ref(), kzg_settings)) + .collect::>>()?; + + let aggregated = aggregate_all_commitments(&chunk_commitments)?; + let kzg_bytes: [u8; 48] = aggregated + .as_ref() + .try_into() + .map_err(|_| eyre::eyre!("KZG commitment is not 48 bytes"))?; + + // Step 2: Compute composite commitment binding KZG to signer + let composite = compute_composite_commitment(&kzg_bytes, &signer.address()); + + // Step 3: Build and sign the V2 proof + let mut proof = IngressProof::V2(IngressProofV2 { + signature: Default::default(), + data_root, + kzg_commitment: KzgCommitmentBytes::from(kzg_bytes), + composite_commitment: composite, + chain_id, + anchor, + source_type: DataSourceType::NativeData, + }); + + signer.sign_ingress_proof(&mut proof)?; + Ok(proof) +} + pub fn verify_ingress_proof>( proof: &IngressProof, chunks: impl IntoIterator, diff --git a/crates/types/src/kzg.rs b/crates/types/src/kzg.rs index 985f035514..f46b3c7b7c 100644 --- a/crates/types/src/kzg.rs +++ b/crates/types/src/kzg.rs @@ -248,6 +248,25 @@ pub fn compute_chunk_commitment( aggregate_commitments(&c1, &c2) } +/// Aggregate an arbitrary number of KZG commitments into a single commitment +/// via iterative pairwise aggregation: `C = aggregate(C_prev, C_next)`. +/// +/// Returns an error if `commitments` is empty. +/// For a single commitment, returns it unchanged. 
+pub fn aggregate_all_commitments(commitments: &[KzgCommitment]) -> eyre::Result { + match commitments.len() { + 0 => Err(eyre::eyre!("cannot aggregate zero commitments")), + 1 => Ok(commitments[0]), + _ => { + let mut acc = commitments[0]; + for c in &commitments[1..] { + acc = aggregate_commitments(&acc, c)?; + } + Ok(acc) + } + } +} + /// Compute a composite commitment binding a KZG commitment to a signer's address. /// /// `composite = SHA256(DOMAIN_SEPARATOR || kzg_commitment || signer_address)` @@ -394,6 +413,38 @@ mod tests { assert_ne!(c1, c2); } + #[test] + fn aggregate_all_single_commitment() { + let data = [1_u8; BLOB_SIZE]; + let c = compute_blob_commitment(&data, kzg_settings()).unwrap(); + let agg = aggregate_all_commitments(&[c]).unwrap(); + assert_eq!(commitment_bytes(&c), commitment_bytes(&agg)); + } + + #[test] + fn aggregate_all_empty_returns_error() { + assert!(aggregate_all_commitments(&[]).is_err()); + } + + #[test] + fn aggregate_all_deterministic() { + let c1 = compute_blob_commitment(&[1_u8; BLOB_SIZE], kzg_settings()).unwrap(); + let c2 = compute_blob_commitment(&[2_u8; BLOB_SIZE], kzg_settings()).unwrap(); + let c3 = compute_blob_commitment(&[3_u8; BLOB_SIZE], kzg_settings()).unwrap(); + let agg1 = aggregate_all_commitments(&[c1, c2, c3]).unwrap(); + let agg2 = aggregate_all_commitments(&[c1, c2, c3]).unwrap(); + assert_eq!(commitment_bytes(&agg1), commitment_bytes(&agg2)); + } + + #[test] + fn aggregate_all_order_matters() { + let c1 = compute_blob_commitment(&[1_u8; BLOB_SIZE], kzg_settings()).unwrap(); + let c2 = compute_blob_commitment(&[2_u8; BLOB_SIZE], kzg_settings()).unwrap(); + let agg_12 = aggregate_all_commitments(&[c1, c2]).unwrap(); + let agg_21 = aggregate_all_commitments(&[c2, c1]).unwrap(); + assert_ne!(commitment_bytes(&agg_12), commitment_bytes(&agg_21)); + } + // BLS12-381 field modulus starts with 0x73; filling a blob with any byte // >= 0x74 (116) makes each 32-byte field element exceed the modulus, // causing 
C_KZG_BADARGS. Seeds must stay in 0..114 for uniform-fill blobs. From 0c9e28013a8265d60f08d05b5c697bf6600cf6b0 Mon Sep 17 00:00:00 2001 From: jason Date: Fri, 20 Feb 2026 10:08:00 +0000 Subject: [PATCH 04/13] feat(proof): add V2 hardfork gating, verification, and tests --- crates/actors/src/block_validation.rs | 17 + .../chunk_ingress_service/ingress_proofs.rs | 25 ++ crates/p2p/src/types.rs | 5 + crates/types/src/config/consensus.rs | 17 + crates/types/src/ingress.rs | 302 ++++++++++++++++-- 5 files changed, 343 insertions(+), 23 deletions(-) diff --git a/crates/actors/src/block_validation.rs b/crates/actors/src/block_validation.rs index 6a3e9cac64..bd882e6d1e 100644 --- a/crates/actors/src/block_validation.rs +++ b/crates/actors/src/block_validation.rs @@ -74,6 +74,8 @@ pub enum PreValidationError { IngressProofsMissing, #[error("Invalid ingress proof signature: {0}")] IngressProofSignatureInvalid(String), + #[error("Rejected ingress proof version: {0}")] + IngressProofVersionRejected(String), #[error( "Invalid promotion, transaction {txid:?} data size {got:?} does not match confirmed data root size {expected:?}" )] @@ -712,6 +714,21 @@ pub async fn prevalidate_block( let tx_proofs = get_ingress_proofs(publish_ledger, &tx_header.id) .map_err(|_| PreValidationError::IngressProofsMissing)?; for proof in tx_proofs.iter() { + // Check proof version is accepted + match proof { + IngressProof::V2(_) if !config.consensus.accept_kzg_ingress_proofs => { + return Err(PreValidationError::IngressProofVersionRejected( + "V2 proofs not accepted".into(), + )); + } + IngressProof::V1(_) if config.consensus.require_kzg_ingress_proofs => { + return Err(PreValidationError::IngressProofVersionRejected( + "V1 proofs rejected (V2 required)".into(), + )); + } + _ => {} + } + proof .pre_validate(&tx_header.data_root) .map_err(|e| PreValidationError::IngressProofSignatureInvalid(e.to_string()))?; diff --git a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs 
b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs index 44239d100b..dfe0aef4c2 100644 --- a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs +++ b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs @@ -68,6 +68,21 @@ impl ChunkIngressServiceInner { &self, ingress_proof: IngressProof, ) -> Result<(), IngressProofError> { + // Check proof version is accepted by current config + match &ingress_proof { + IngressProof::V2(_) if !self.config.consensus.accept_kzg_ingress_proofs => { + return Err(IngressProofError::RejectedVersion( + "V2 proofs not accepted (accept_kzg_ingress_proofs = false)".into(), + )); + } + IngressProof::V1(_) if self.config.consensus.require_kzg_ingress_proofs => { + return Err(IngressProofError::RejectedVersion( + "V1 proofs rejected (require_kzg_ingress_proofs = true)".into(), + )); + } + _ => {} + } + // Validate the proofs signature and basic details let data_root_val = ingress_proof.data_root(); let address = ingress_proof @@ -278,6 +293,16 @@ impl ChunkIngressServiceInner { regeneration_action: RegenAction::DoNotRegenerate, } } + IngressProofError::RejectedVersion(reason) => { + warn!( + data_root = ?ingress_proof.data_root(), + "Ingress proof version rejected: {}", reason + ); + ProofCheckResult { + expired_or_invalid: true, + regeneration_action: RegenAction::DoNotRegenerate, + } + } IngressProofError::Other(reason_message) => { error!( data_root = ?ingress_proof.data_root(), diff --git a/crates/p2p/src/types.rs b/crates/p2p/src/types.rs index 05794f5f0a..e7eae414a5 100644 --- a/crates/p2p/src/types.rs +++ b/crates/p2p/src/types.rs @@ -58,6 +58,9 @@ impl From for GossipError { IngressProofError::InvalidAnchor(anchor) => { Self::InvalidData(InvalidDataError::IngressProofAnchor(anchor)) } + IngressProofError::RejectedVersion(reason) => { + Self::InvalidData(InvalidDataError::IngressProofVersionRejected(reason)) + } } } } @@ -182,6 +185,8 @@ pub enum InvalidDataError { IngressProofSignature, #[error("Invalid ingress 
proof anchor: {0}")] IngressProofAnchor(irys_types::BlockHash), + #[error("Rejected ingress proof version: {0}")] + IngressProofVersionRejected(String), #[error("Block body transactions do not match the header")] BlockBodyTransactionsMismatch, #[error("Invalid transaction version {version}, minimum required is {minimum}")] diff --git a/crates/types/src/config/consensus.rs b/crates/types/src/config/consensus.rs index e849496fc1..4e0d97353b 100644 --- a/crates/types/src/config/consensus.rs +++ b/crates/types/src/config/consensus.rs @@ -127,6 +127,17 @@ pub struct ConsensusConfig { #[serde(default)] pub use_kzg_ingress_proofs: bool, + /// Accept V2 (KZG-based) ingress proofs from peers. When false, V2 proofs + /// received via gossip or in blocks are rejected. Must be true when + /// `use_kzg_ingress_proofs` is true. + #[serde(default)] + pub accept_kzg_ingress_proofs: bool, + + /// Require V2 (KZG-based) ingress proofs exclusively. When true, V1 proofs + /// are rejected. Implies `accept_kzg_ingress_proofs = true`. 
+ #[serde(default)] + pub require_kzg_ingress_proofs: bool, + /// Target number of years data should be preserved on the network /// Determines long-term storage pricing and incentives pub safe_minimum_number_of_years: u64, @@ -629,6 +640,8 @@ impl ConsensusConfig { enable_full_ingress_proof_validation: false, enable_shadow_kzg_logging: false, use_kzg_ingress_proofs: false, + accept_kzg_ingress_proofs: false, + require_kzg_ingress_proofs: false, // Fee required to stake a mining address in Irys tokens stake_value: Amount::token(dec!(400_000)).expect("valid token amount"), // Base fee required for pledging a partition in Irys tokens @@ -769,6 +782,8 @@ impl ConsensusConfig { enable_full_ingress_proof_validation: false, enable_shadow_kzg_logging: false, use_kzg_ingress_proofs: false, + accept_kzg_ingress_proofs: false, + require_kzg_ingress_proofs: false, max_future_timestamp_drift_millis: 15_000, // Hardfork configuration - testnet uses 1 proof for easier testing hardforks: IrysHardforkConfig { @@ -821,6 +836,8 @@ impl ConsensusConfig { enable_full_ingress_proof_validation: false, enable_shadow_kzg_logging: false, use_kzg_ingress_proofs: false, + accept_kzg_ingress_proofs: false, + require_kzg_ingress_proofs: false, max_future_timestamp_drift_millis: 15_000, genesis: GenesisConfig { diff --git a/crates/types/src/ingress.rs b/crates/types/src/ingress.rs index dc63aa6e66..0525b5c603 100644 --- a/crates/types/src/ingress.rs +++ b/crates/types/src/ingress.rs @@ -433,30 +433,63 @@ pub fn verify_ingress_proof>( return Ok(false); } - let sig = proof.signature().as_bytes(); - let prehash = proof.signature_hash(); - - let recovered_address = recover_signer(&sig[..].try_into()?, prehash.into())?; - - let (proof_root, regular_root) = - generate_ingress_proof_tree(chunks.into_iter().map(Ok), recovered_address.into(), true)?; - - let data_root = H256( - regular_root - .ok_or_eyre("expected regular_root to be Some")? 
- .id, - ); + match proof { + IngressProof::V1(_) => { + let sig = proof.signature().as_bytes(); + let prehash = proof.signature_hash(); + let recovered_address = recover_signer(&sig[..].try_into()?, prehash.into())?; + + let (proof_root, regular_root) = generate_ingress_proof_tree( + chunks.into_iter().map(Ok), + recovered_address.into(), + true, + )?; + + let data_root = H256( + regular_root + .ok_or_eyre("expected regular_root to be Some")? + .id, + ); + + let new_prehash = IngressProof::V1(IngressProofV1 { + signature: Default::default(), + data_root, + proof: H256(proof_root.id), + chain_id, + anchor: proof.anchor(), + }) + .signature_hash(); - let new_prehash = IngressProof::V1(IngressProofV1 { - signature: Default::default(), - data_root, - proof: H256(proof_root.id), - chain_id, - anchor: proof.anchor(), - }) - .signature_hash(); + Ok(new_prehash == prehash) + } + IngressProof::V2(v2) => { + let sig = v2.signature.as_bytes(); + let prehash = proof.signature_hash(); + let recovered_address: IrysAddress = + recover_signer(&sig[..].try_into()?, prehash.into())?.into(); + + let settings = crate::kzg::default_kzg_settings(); + let chunks_vec: Vec<_> = chunks.into_iter().collect(); + let chunk_commitments: Vec = chunks_vec + .iter() + .map(|c| crate::kzg::compute_chunk_commitment(c.as_ref(), settings)) + .collect::>>()?; + + let aggregated = crate::kzg::aggregate_all_commitments(&chunk_commitments)?; + let kzg_bytes: [u8; 48] = aggregated + .as_ref() + .try_into() + .map_err(|_| eyre::eyre!("KZG commitment is not 48 bytes"))?; + + if kzg_bytes != v2.kzg_commitment.0 { + return Ok(false); + } - Ok(new_prehash == prehash) + let expected_composite = + crate::kzg::compute_composite_commitment(&kzg_bytes, &recovered_address); + Ok(expected_composite == v2.composite_commitment) + } + } } #[cfg(test)] @@ -471,7 +504,14 @@ mod tests { ConsensusConfig, IngressProof, H256, }; - use super::generate_ingress_proof; + use super::{generate_ingress_proof, 
generate_ingress_proof_v2}; + + /// Generate KZG-safe data: each 32-byte field element's first byte must be < 0x74. + /// Uses a simple fill value that satisfies the BLS12-381 modulus constraint. + fn kzg_safe_data(size: usize, fill: u8) -> Vec { + assert!(fill < 0x74, "fill byte must be < 0x74 for KZG safety"); + vec![fill; size] + } #[test] fn ingress_proof_rlp_roundtrip_test() { @@ -652,4 +692,220 @@ mod tests { Ok(()) } + + #[test] + fn v2_generate_and_verify_roundtrip() -> eyre::Result<()> { + let config = ConsensusConfig::testing(); + let chunk_size = config.chunk_size as usize; + let data_bytes = kzg_safe_data((chunk_size as f64 * 2.5).round() as usize, 42); + + let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; + let root = generate_data_root(leaves)?; + let data_root = H256(root.id); + + let signer = IrysSigner::random_signer(&config); + let chunks: Vec> = data_bytes.chunks(chunk_size).map(Vec::from).collect(); + + let chain_id = 1_u64; + let anchor = H256::random(); + let kzg_settings = crate::kzg::default_kzg_settings(); + + let proof = + generate_ingress_proof_v2(&signer, data_root, &chunks, chain_id, anchor, kzg_settings)?; + + assert!(matches!(proof, IngressProof::V2(_))); + assert!(verify_ingress_proof(&proof, chunks.iter(), chain_id)?); + + Ok(()) + } + + #[test] + fn v2_wrong_chunks_fails_verification() -> eyre::Result<()> { + let config = ConsensusConfig::testing(); + let chunk_size = config.chunk_size as usize; + let data_bytes = kzg_safe_data((chunk_size as f64 * 2.5).round() as usize, 42); + + let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; + let root = generate_data_root(leaves)?; + let data_root = H256(root.id); + + let signer = IrysSigner::random_signer(&config); + let chunks: Vec> = data_bytes.chunks(chunk_size).map(Vec::from).collect(); + + let chain_id = 1_u64; + let anchor = H256::random(); + let kzg_settings = crate::kzg::default_kzg_settings(); + + let proof 
= + generate_ingress_proof_v2(&signer, data_root, &chunks, chain_id, anchor, kzg_settings)?; + + // Tampered chunk: use a different safe fill value + let mut bad_chunks = chunks.clone(); + bad_chunks[0] = kzg_safe_data(bad_chunks[0].len(), 7); + assert!(!verify_ingress_proof(&proof, bad_chunks.iter(), chain_id)?); + + // Reversed chunks should fail (only if >1 chunk) + if chunks.len() > 1 { + let mut reversed = chunks; + reversed.reverse(); + assert!(!verify_ingress_proof(&proof, reversed.iter(), chain_id)?); + } + + Ok(()) + } + + #[test] + fn v2_wrong_chain_id_fails_verification() -> eyre::Result<()> { + let config = ConsensusConfig::testing(); + let chunk_size = config.chunk_size as usize; + let data_bytes = kzg_safe_data(chunk_size * 2, 42); + + let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; + let root = generate_data_root(leaves)?; + let data_root = H256(root.id); + + let signer = IrysSigner::random_signer(&config); + let chunks: Vec> = data_bytes.chunks(chunk_size).map(Vec::from).collect(); + + let anchor = H256::random(); + let kzg_settings = crate::kzg::default_kzg_settings(); + + let proof = + generate_ingress_proof_v2(&signer, data_root, &chunks, 1, anchor, kzg_settings)?; + + assert!(!verify_ingress_proof(&proof, chunks.iter(), 2)?); + + Ok(()) + } + + #[test] + fn v2_composite_commitment_binds_to_signer() -> eyre::Result<()> { + let config = ConsensusConfig::testing(); + let chunk_size = config.chunk_size as usize; + let data_bytes = kzg_safe_data(chunk_size * 2, 42); + + let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; + let root = generate_data_root(leaves)?; + let data_root = H256(root.id); + + let signer_a = IrysSigner::random_signer(&config); + let signer_b = IrysSigner::random_signer(&config); + let chunks: Vec> = data_bytes.chunks(chunk_size).map(Vec::from).collect(); + + let chain_id = 1_u64; + let anchor = H256::random(); + let kzg_settings = 
crate::kzg::default_kzg_settings(); + + let proof_a = generate_ingress_proof_v2( + &signer_a, + data_root, + &chunks, + chain_id, + anchor, + kzg_settings, + )?; + let proof_b = generate_ingress_proof_v2( + &signer_b, + data_root, + &chunks, + chain_id, + anchor, + kzg_settings, + )?; + + // Same data → same KZG commitment, but different composite commitments + let (kzg_a, composite_a) = match &proof_a { + IngressProof::V2(v2) => (v2.kzg_commitment, v2.composite_commitment), + _ => unreachable!(), + }; + let (kzg_b, composite_b) = match &proof_b { + IngressProof::V2(v2) => (v2.kzg_commitment, v2.composite_commitment), + _ => unreachable!(), + }; + + assert_eq!(kzg_a, kzg_b); + assert_ne!(composite_a, composite_b); + + assert!(verify_ingress_proof(&proof_a, chunks.iter(), chain_id)?); + assert!(verify_ingress_proof(&proof_b, chunks.iter(), chain_id)?); + + Ok(()) + } + + #[test] + fn v2_rlp_roundtrip() -> eyre::Result<()> { + use bytes::BytesMut; + + let config = ConsensusConfig::testing(); + let chunk_size = config.chunk_size as usize; + let data_bytes = kzg_safe_data(chunk_size, 42); + + let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; + let root = generate_data_root(leaves)?; + let data_root = H256(root.id); + + let signer = IrysSigner::random_signer(&config); + let chunks: Vec> = vec![data_bytes]; + let kzg_settings = crate::kzg::default_kzg_settings(); + + let original = generate_ingress_proof_v2( + &signer, + data_root, + &chunks, + 42, + H256::random(), + kzg_settings, + )?; + + let mut buf = BytesMut::new(); + alloy_rlp::Encodable::encode(&original, &mut buf); + let mut slice = buf.as_ref(); + let decoded = IngressProof::decode(&mut slice)?; + + match (&original, &decoded) { + (IngressProof::V2(orig), IngressProof::V2(dec)) => { + assert_eq!(orig.data_root, dec.data_root); + assert_eq!(orig.kzg_commitment, dec.kzg_commitment); + assert_eq!(orig.composite_commitment, dec.composite_commitment); + 
assert_eq!(orig.chain_id, dec.chain_id); + assert_eq!(orig.anchor, dec.anchor); + assert_eq!(orig.source_type, dec.source_type); + } + _ => panic!("expected V2 proofs"), + } + + Ok(()) + } + + #[test] + fn v2_tampered_kzg_commitment_fails() -> eyre::Result<()> { + let config = ConsensusConfig::testing(); + let chunk_size = config.chunk_size as usize; + let data_bytes = kzg_safe_data(chunk_size * 2, 42); + + let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; + let root = generate_data_root(leaves)?; + let data_root = H256(root.id); + + let signer = IrysSigner::random_signer(&config); + let chunks: Vec> = data_bytes.chunks(chunk_size).map(Vec::from).collect(); + + let kzg_settings = crate::kzg::default_kzg_settings(); + let mut proof = generate_ingress_proof_v2( + &signer, + data_root, + &chunks, + 1, + H256::random(), + kzg_settings, + )?; + + if let IngressProof::V2(ref mut v2) = proof { + v2.kzg_commitment.0[0] ^= 0xFF; + } + + assert!(!verify_ingress_proof(&proof, chunks.iter(), 1)?); + + Ok(()) + } } From 9c54c485d00dc5af86d49f973eba2a53bc69feeb Mon Sep 17 00:00:00 2001 From: jason Date: Fri, 20 Feb 2026 16:56:35 +0000 Subject: [PATCH 05/13] feat(blobs): add enable_blobs config gate for EIP-4844 support --- crates/actors/src/block_validation.rs | 81 +++++++++++++-------------- crates/irys-reth/src/evm.rs | 31 +++++----- crates/irys-reth/src/lib.rs | 38 ++++++++----- crates/reth-node-bridge/src/node.rs | 7 ++- crates/types/src/config/consensus.rs | 8 +++ 5 files changed, 95 insertions(+), 70 deletions(-) diff --git a/crates/actors/src/block_validation.rs b/crates/actors/src/block_validation.rs index bd882e6d1e..e14272afd8 100644 --- a/crates/actors/src/block_validation.rs +++ b/crates/actors/src/block_validation.rs @@ -1359,38 +1359,36 @@ pub async fn shadow_transactions_are_valid( "withdrawals must always be empty" ); - // Reject any blob gas usage in the payload - if payload_v3.blob_gas_used != 0 { - tracing::debug!( - 
block.hash = %block.block_hash, - block.evm_block_hash = %block.evm_block_hash, - payload.blob_gas_used = payload_v3.blob_gas_used, - "Rejecting block: blob_gas_used must be zero", - ); - eyre::bail!("block has non-zero blob_gas_used which is disabled"); - } - if payload_v3.excess_blob_gas != 0 { - tracing::debug!( - block.block_hash = %block.block_hash, - block.evm_block_hash = %block.evm_block_hash, - payload.excess_blob_gas = payload_v3.excess_blob_gas, - "Rejecting block: excess_blob_gas must be zero", - ); - eyre::bail!("block has non-zero excess_blob_gas which is disabled"); - } - - // Reject any block that carries blob sidecars (EIP-4844). - // We keep Cancun active but disable blobs/sidecars entirely. - if let Some(versioned_hashes) = sidecar.versioned_hashes() - && !versioned_hashes.is_empty() - { - tracing::debug!( - block.block_hash = %block.block_hash, - block.evm_block_hash = %block.evm_block_hash, - block.versioned_hashes_len = versioned_hashes.len(), - "Rejecting block: EIP-4844 blobs/sidecars are not supported", - ); - eyre::bail!("block contains EIP-4844 blobs/sidecars which are disabled"); + if !config.consensus.enable_blobs { + if payload_v3.blob_gas_used != 0 { + tracing::debug!( + block.hash = %block.block_hash, + block.evm_block_hash = %block.evm_block_hash, + payload.blob_gas_used = payload_v3.blob_gas_used, + "Rejecting block: blob_gas_used must be zero", + ); + eyre::bail!("block has non-zero blob_gas_used which is disabled"); + } + if payload_v3.excess_blob_gas != 0 { + tracing::debug!( + block.block_hash = %block.block_hash, + block.evm_block_hash = %block.evm_block_hash, + payload.excess_blob_gas = payload_v3.excess_blob_gas, + "Rejecting block: excess_blob_gas must be zero", + ); + eyre::bail!("block has non-zero excess_blob_gas which is disabled"); + } + if let Some(versioned_hashes) = sidecar.versioned_hashes() + && !versioned_hashes.is_empty() + { + tracing::debug!( + block.block_hash = %block.block_hash, + block.evm_block_hash = 
%block.evm_block_hash, + block.versioned_hashes_len = versioned_hashes.len(), + "Rejecting block: EIP-4844 blobs/sidecars are not supported", + ); + eyre::bail!("block contains EIP-4844 blobs/sidecars which are disabled"); + } } // Requests are disabled: reject if any present or if header-level requests hash is set. if let Some(requests) = sidecar.requests() @@ -1428,15 +1426,16 @@ pub async fn shadow_transactions_are_valid( eyre::bail!("block contains EIP-7685 requests_hash which is disabled"); } - // 2. Enforce that no EIP-4844 (blob) transactions are present in the block - for tx in evm_block.body.transactions.iter() { - if tx.is_eip4844() { - tracing::debug!( - block.block_hash = %block.block_hash, - block.evm_block_hash = %block.evm_block_hash, - "Rejecting block: contains EIP-4844 transaction which is disabled", - ); - eyre::bail!("block contains EIP-4844 transaction which is disabled"); + if !config.consensus.enable_blobs { + for tx in evm_block.body.transactions.iter() { + if tx.is_eip4844() { + tracing::debug!( + block.block_hash = %block.block_hash, + block.evm_block_hash = %block.evm_block_hash, + "Rejecting block: contains EIP-4844 transaction which is disabled", + ); + eyre::bail!("block contains EIP-4844 transaction which is disabled"); + } } } diff --git a/crates/irys-reth/src/evm.rs b/crates/irys-reth/src/evm.rs index 032c31e659..8ee5403dfd 100644 --- a/crates/irys-reth/src/evm.rs +++ b/crates/irys-reth/src/evm.rs @@ -173,7 +173,7 @@ where /// This factory produces [`IrysBlockExecutor`] instances that can handle both /// regular Ethereum transactions and Irys-specific shadow transactions. It wraps /// the standard Ethereum block executor factory with Irys-specific configuration. 
-#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone)] pub struct IrysBlockExecutorFactory { inner: EthBlockExecutorFactory, IrysEvmFactory>, } @@ -313,13 +313,14 @@ impl ConfigureEvm for IrysEvmConfig { } } -#[derive(Debug, Default, Clone, Copy)] -#[non_exhaustive] -pub struct IrysEvmFactory {} +#[derive(Debug, Clone, Copy)] +pub struct IrysEvmFactory { + enable_blobs: bool, +} impl IrysEvmFactory { - pub fn new() -> Self { - Self {} + pub fn new(enable_blobs: bool) -> Self { + Self { enable_blobs } } } @@ -345,6 +346,7 @@ impl EvmFactory for IrysEvmFactory { PrecompileSpecId::from_spec_id(spec_id), ))), false, + self.enable_blobs, ) } @@ -365,6 +367,7 @@ impl EvmFactory for IrysEvmFactory { PrecompileSpecId::from_spec_id(spec_id), ))), true, + self.enable_blobs, ) } } @@ -411,6 +414,7 @@ pub struct IrysEvm { EthFrame, >, inspect: bool, + enable_blobs: bool, state: revm_primitives::map::foldhash::HashMap, } @@ -428,10 +432,12 @@ impl IrysEvm { EthFrame, >, inspect: bool, + enable_blobs: bool, ) -> Self { Self { inner: evm, inspect, + enable_blobs, state: Default::default(), } } @@ -500,11 +506,10 @@ where } fn transact_raw(&mut self, tx: Self::Tx) -> Result { - // Reject blob-carrying transactions (EIP-4844) at execution time. - // We keep Cancun active but explicitly disable blobs/sidecars. 
- if !tx.blob_hashes.is_empty() - || tx.max_fee_per_blob_gas != 0 - || tx.tx_type == EIP4844_TX_TYPE_ID + if !self.enable_blobs + && (!tx.blob_hashes.is_empty() + || tx.max_fee_per_blob_gas != 0 + || tx.tx_type == EIP4844_TX_TYPE_ID) { tracing::debug!( tx.blob_hashes_len = tx.blob_hashes.len(), @@ -1330,7 +1335,7 @@ mod tests { #[test] fn evm_rejects_eip4844_blob_fields_in_transact_raw() { // Build minimal EVM env with Cancun spec enabled - let factory = IrysEvmFactory::new(); + let factory = IrysEvmFactory::new(false); let mut cfg_env = CfgEnv::default(); cfg_env.spec = SpecId::CANCUN; cfg_env.chain_id = 1; @@ -1367,7 +1372,7 @@ mod tests { /// Ensure a regular non-shadow, non-blob transaction executes successfully at the EVM layer. #[test] fn evm_processes_normal_tx_success() { - let factory = IrysEvmFactory::new(); + let factory = IrysEvmFactory::new(false); // Cancun spec, chain id 1, zero basefee and ample gas limit let mut cfg_env = CfgEnv::default(); diff --git a/crates/irys-reth/src/lib.rs b/crates/irys-reth/src/lib.rs index d741e90d26..f66ff77d08 100644 --- a/crates/irys-reth/src/lib.rs +++ b/crates/irys-reth/src/lib.rs @@ -99,8 +99,10 @@ pub fn compose_shadow_tx( } /// Type configuration for an Irys-Ethereum node. 
-#[derive(Debug, Clone, Default)] -pub struct IrysEthereumNode; +#[derive(Debug, Clone)] +pub struct IrysEthereumNode { + pub enable_blobs: bool, +} impl NodeTypes for IrysEthereumNode { type Primitives = EthPrimitives; @@ -132,8 +134,12 @@ impl IrysEthereumNode { { ComponentsBuilder::default() .node_types::() - .pool(IrysPoolBuilder::default()) - .executor(IrysExecutorBuilder) + .pool(IrysPoolBuilder { + enable_blobs: self.enable_blobs, + }) + .executor(IrysExecutorBuilder { + enable_blobs: self.enable_blobs, + }) .payload(IyrsPayloadServiceBuilder::new(IrysPayloadBuilderBuilder)) .network(EthereumNetworkBuilder::default()) .consensus(EthereumConsensusBuilder::default()) @@ -206,9 +212,10 @@ impl> DebugNode for IrysEthereumNode { } /// A custom pool builder for Irys shadow transaction validation and pool configuration. -#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub struct IrysPoolBuilder; +#[derive(Debug, Clone)] +pub struct IrysPoolBuilder { + pub enable_blobs: bool, +} /// Implement the [`PoolBuilder`] trait for the Irys pool builder /// @@ -276,6 +283,7 @@ where let validator = TransactionValidationTaskExecutor { validator: Arc::new(IrysShadowTxValidator { eth_tx_validator: validator.validator, + enable_blobs: self.enable_blobs, }), to_validation_task: validator.to_validation_task, }; @@ -309,6 +317,7 @@ where #[derive(Debug)] pub struct IrysShadowTxValidator { eth_tx_validator: Arc>, + enable_blobs: bool, } impl IrysShadowTxValidator @@ -341,8 +350,7 @@ where Ok(None) => {} } - // once we support blobs, we can start accepting eip4844 txs - if tx.is_eip4844() { + if !self.enable_blobs && tx.is_eip4844() { return Err(TransactionValidationOutcome::Invalid( tx, reth_transaction_pool::error::InvalidPoolTransactionError::Consensus( @@ -384,8 +392,10 @@ where } /// A regular ethereum evm and executor builder. 
-#[derive(Debug, Default, Clone, Copy)] -pub struct IrysExecutorBuilder; +#[derive(Debug, Clone, Copy)] +pub struct IrysExecutorBuilder { + pub enable_blobs: bool, +} impl ExecutorBuilder for IrysExecutorBuilder where @@ -398,7 +408,7 @@ where let evm_config = EthEvmConfig::new(ctx.chain_spec()); let spec = ctx.chain_spec(); - let evm_factory = IrysEvmFactory::new(); + let evm_factory = IrysEvmFactory::new(self.enable_blobs); let evm_config = evm::IrysEvmConfig { inner: evm_config, assembler: IrysBlockAssembler::new(ctx.chain_spec()), @@ -3383,7 +3393,9 @@ pub mod test_utils { node_exit_future: _, } = NodeBuilder::new(node_config.clone()) .testing_node(tasks.clone()) - .node(IrysEthereumNode) + .node(IrysEthereumNode { + enable_blobs: false, + }) .launch() .await?; diff --git a/crates/reth-node-bridge/src/node.rs b/crates/reth-node-bridge/src/node.rs index 7d41db37b1..0c6364c0d8 100644 --- a/crates/reth-node-bridge/src/node.rs +++ b/crates/reth-node-bridge/src/node.rs @@ -163,8 +163,7 @@ pub async fn run_node( reth_config.txpool.queued_max_count = subpool_max_tx_count; reth_config.txpool.queued_max_size = subpool_max_size_mb; - // important: keep blobs disabled in our mempool - reth_config.txpool.disable_blobs_support = true; + reth_config.txpool.disable_blobs_support = !node_config.consensus_config().enable_blobs; if cfg!(debug_assertions) { reth_config.engine.cross_block_cache_size = 10; @@ -219,7 +218,9 @@ pub async fn run_node( .with_launch_context(task_executor.clone()); let handle = builder - .node(IrysEthereumNode) + .node(IrysEthereumNode { + enable_blobs: node_config.consensus_config().enable_blobs, + }) .launch_with_debug_capabilities() .into_future() .in_current_span() diff --git a/crates/types/src/config/consensus.rs b/crates/types/src/config/consensus.rs index 4e0d97353b..c149bcca4d 100644 --- a/crates/types/src/config/consensus.rs +++ b/crates/types/src/config/consensus.rs @@ -138,6 +138,11 @@ pub struct ConsensusConfig { #[serde(default)] pub 
require_kzg_ingress_proofs: bool, + /// Enable EIP-4844 blob transaction support. When false, blob transactions + /// are rejected at the txpool, EVM execution, and block validation layers. + #[serde(default)] + pub enable_blobs: bool, + /// Target number of years data should be preserved on the network /// Determines long-term storage pricing and incentives pub safe_minimum_number_of_years: u64, @@ -642,6 +647,7 @@ impl ConsensusConfig { use_kzg_ingress_proofs: false, accept_kzg_ingress_proofs: false, require_kzg_ingress_proofs: false, + enable_blobs: false, // Fee required to stake a mining address in Irys tokens stake_value: Amount::token(dec!(400_000)).expect("valid token amount"), // Base fee required for pledging a partition in Irys tokens @@ -784,6 +790,7 @@ impl ConsensusConfig { use_kzg_ingress_proofs: false, accept_kzg_ingress_proofs: false, require_kzg_ingress_proofs: false, + enable_blobs: false, max_future_timestamp_drift_millis: 15_000, // Hardfork configuration - testnet uses 1 proof for easier testing hardforks: IrysHardforkConfig { @@ -838,6 +845,7 @@ impl ConsensusConfig { use_kzg_ingress_proofs: false, accept_kzg_ingress_proofs: false, require_kzg_ingress_proofs: false, + enable_blobs: false, max_future_timestamp_drift_millis: 15_000, genesis: GenesisConfig { From 69af4c8a280119ebc000a0bfee955212f9b5fa69 Mon Sep 17 00:00:00 2001 From: jason Date: Mon, 23 Feb 2026 11:06:03 +0000 Subject: [PATCH 06/13] feat(blobs): add blob extraction service and config coupling fix --- crates/actors/src/blob_extraction_service.rs | 161 ++++++++++++++++ crates/actors/src/block_producer.rs | 27 +++ crates/actors/src/lib.rs | 1 + crates/actors/src/mempool_service.rs | 65 +++++++ crates/actors/src/services.rs | 7 + crates/chain/src/chain.rs | 13 ++ crates/types/src/config/consensus.rs | 74 +++++++- crates/types/src/config/mod.rs | 15 +- crates/types/src/ingress.rs | 188 ++++++++++++++++++- 9 files changed, 542 insertions(+), 9 deletions(-) create mode 100644 
crates/actors/src/blob_extraction_service.rs diff --git a/crates/actors/src/blob_extraction_service.rs b/crates/actors/src/blob_extraction_service.rs new file mode 100644 index 0000000000..67bf0b8fd0 --- /dev/null +++ b/crates/actors/src/blob_extraction_service.rs @@ -0,0 +1,161 @@ +use irys_types::H256; +use reth::revm::primitives::B256; +use reth_transaction_pool::blobstore::{BlobStore, BlobStoreError}; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedReceiver; +use tracing::{debug, warn}; + +use crate::mempool_service::MempoolServiceMessage; + +/// Messages sent to the blob extraction service. +#[derive(Debug)] +pub enum BlobExtractionMessage { + /// Extract blobs from a locally-produced block containing EIP-4844 transactions. + ExtractBlobs { + block_hash: H256, + blob_tx_hashes: Vec, + }, +} + +/// Extracts EIP-4844 blob data from the Reth blob store after block production, +/// converts blobs into Irys chunks with IngressProofV2, and injects synthetic +/// data transactions into the mempool. 
+pub struct BlobExtractionService { + blob_store: S, + mempool_sender: tokio::sync::mpsc::UnboundedSender, + config: Arc, +} + +impl BlobExtractionService { + pub fn spawn_service( + blob_store: S, + mempool_sender: tokio::sync::mpsc::UnboundedSender, + config: Arc, + rx: UnboundedReceiver, + runtime_handle: tokio::runtime::Handle, + ) { + let service = Self { + blob_store, + mempool_sender, + config, + }; + + runtime_handle.spawn(service.start(rx)); + } + + async fn start(self, mut rx: UnboundedReceiver) { + debug!("Blob extraction service started"); + while let Some(msg) = rx.recv().await { + match msg { + BlobExtractionMessage::ExtractBlobs { + block_hash, + blob_tx_hashes, + } => { + if let Err(e) = self.handle_extract_blobs(block_hash, &blob_tx_hashes) { + warn!( + block.hash = %block_hash, + error = %e, + "Failed to extract blobs from block", + ); + } + } + } + } + debug!("Blob extraction service stopped"); + } + + fn handle_extract_blobs(&self, block_hash: H256, blob_tx_hashes: &[B256]) -> eyre::Result<()> { + use irys_types::ingress::generate_ingress_proof_v2_from_blob; + + let signer = self.config.irys_signer(); + let chain_id = self.config.consensus.chain_id; + let anchor: H256 = block_hash; + + let mut total_blobs = 0_u64; + + for tx_hash in blob_tx_hashes { + let sidecar_variant = match self.blob_store.get(*tx_hash) { + Ok(Some(s)) => s, + Ok(None) => { + warn!(tx.hash = %tx_hash, "Blob sidecar not found in store (may be pruned)"); + continue; + } + Err(BlobStoreError::Other(e)) => { + warn!(tx.hash = %tx_hash, error = %e, "Blob store error"); + continue; + } + Err(e) => { + warn!(tx.hash = %tx_hash, error = ?e, "Blob store error"); + continue; + } + }; + + let sidecar = match sidecar_variant.as_eip4844() { + Some(s) => s, + None => { + warn!(tx.hash = %tx_hash, "Sidecar is not EIP-4844 format, skipping"); + continue; + } + }; + + for (blob_idx, blob) in sidecar.blobs.iter().enumerate() { + let commitment_bytes: &[u8; 48] = 
sidecar.commitments[blob_idx].as_ref(); + + let proof = generate_ingress_proof_v2_from_blob( + &signer, + blob.as_ref(), + commitment_bytes, + chain_id, + anchor, + )?; + + let data_root = proof.data_root(); + + let tx_header = irys_types::transaction::DataTransactionHeader::V1( + irys_types::transaction::DataTransactionHeaderV1WithMetadata { + tx: irys_types::transaction::DataTransactionHeaderV1 { + id: H256::zero(), + anchor, + signer: signer.address(), + data_root, + data_size: blob.len() as u64, + header_size: 0, + term_fee: Default::default(), + perm_fee: None, + ledger_id: irys_types::block::DataLedger::Submit as u32, + chain_id, + signature: Default::default(), + bundle_format: None, + }, + metadata: irys_types::transaction::DataTransactionMetadata::new(), + }, + ); + + // Zero-pad blob to 256KB Irys chunk + let mut chunk_data = vec![0_u8; irys_types::kzg::CHUNK_SIZE_FOR_KZG]; + chunk_data[..blob.len()].copy_from_slice(blob.as_ref()); + + let _ = self + .mempool_sender + .send(MempoolServiceMessage::IngestBlobDerivedTx { + tx_header, + ingress_proof: proof, + chunk_data, + }); + + total_blobs += 1; + } + } + + if total_blobs > 0 { + debug!( + block.hash = %block_hash, + blobs.count = total_blobs, + txs.count = blob_tx_hashes.len(), + "Extracted blobs from block", + ); + } + + Ok(()) + } +} diff --git a/crates/actors/src/block_producer.rs b/crates/actors/src/block_producer.rs index 2d1cd718e5..9c7c632325 100644 --- a/crates/actors/src/block_producer.rs +++ b/crates/actors/src/block_producer.rs @@ -1,4 +1,5 @@ use crate::{ + blob_extraction_service::BlobExtractionMessage, block_discovery::{BlockDiscoveryError, BlockDiscoveryFacade as _, BlockDiscoveryFacadeImpl}, mempool_guard::MempoolReadGuard, mempool_service::{MempoolServiceMessage, MempoolTxs}, @@ -797,6 +798,32 @@ pub trait BlockProdStrategy { .broadcast_block(block, stats, ð_built_payload) .await?; let Some(block) = block else { return Ok(None) }; + // Extract blobs from any EIP-4844 transactions in the 
produced block + if self.inner().config.consensus.enable_blobs { + let blob_tx_hashes: Vec = eth_built_payload + .block() + .body() + .transactions + .iter() + .filter(|tx| tx.is_eip4844()) + .map(|tx| *tx.hash()) + .collect(); + + if !blob_tx_hashes.is_empty() { + debug!( + block.hash = %block.block_hash, + blob_txs = blob_tx_hashes.len(), + "Triggering blob extraction for EIP-4844 transactions", + ); + let _ = self.inner().service_senders.blob_extraction.send( + BlobExtractionMessage::ExtractBlobs { + block_hash: block.block_hash, + blob_tx_hashes, + }, + ); + } + } + Ok(Some((block, eth_built_payload))) } diff --git a/crates/actors/src/lib.rs b/crates/actors/src/lib.rs index 52b9660b5a..a0277d8280 100644 --- a/crates/actors/src/lib.rs +++ b/crates/actors/src/lib.rs @@ -1,3 +1,4 @@ +pub mod blob_extraction_service; pub mod block_discovery; pub mod block_migration_service; pub mod block_producer; diff --git a/crates/actors/src/mempool_service.rs b/crates/actors/src/mempool_service.rs index a4cf9458d5..a8ff7abcc1 100644 --- a/crates/actors/src/mempool_service.rs +++ b/crates/actors/src/mempool_service.rs @@ -251,6 +251,13 @@ pub enum MempoolServiceMessage { /// `GetDataTxs`) when possible, and avoid holding the guard across long‑running /// operations to prevent reducing mempool write throughput. GetReadGuard(oneshot::Sender), + /// Ingest a blob-derived data transaction with its pre-computed ingress proof + /// and zero-padded chunk data. Created by the blob extraction service. + IngestBlobDerivedTx { + tx_header: DataTransactionHeader, + ingress_proof: IngressProof, + chunk_data: Vec, + }, } impl MempoolServiceMessage { @@ -274,6 +281,7 @@ impl MempoolServiceMessage { Self::CloneStakeAndPledgeWhitelist(_) => "CloneStakeAndPledgeWhitelist", Self::GetMempoolStatus(_) => "GetMempoolStatus", Self::GetReadGuard(_) => "GetReadGuard", + Self::IngestBlobDerivedTx { .. 
} => "IngestBlobDerivedTx", } } } @@ -426,10 +434,67 @@ impl Inner { tracing::error!("response.send() error: {:?}", e); }; } + MempoolServiceMessage::IngestBlobDerivedTx { + tx_header, + ingress_proof, + chunk_data, + } => { + self.handle_ingest_blob_derived_tx(tx_header, ingress_proof, chunk_data) + .await; + } } Ok(()) } + async fn handle_ingest_blob_derived_tx( + &self, + tx_header: DataTransactionHeader, + ingress_proof: IngressProof, + chunk_data: Vec, + ) { + if matches!(&ingress_proof, IngressProof::V2(_)) + && !self.config.consensus.accept_kzg_ingress_proofs + { + warn!( + data_root = %tx_header.data_root, + "Dropping blob-derived tx: V2 proofs not accepted by config" + ); + return; + } + + let data_root = tx_header.data_root; + debug!( + data_root = %data_root, + data_size = tx_header.data_size, + chunk_data_len = chunk_data.len(), + "Ingesting blob-derived data transaction", + ); + + // 1. Cache the chunk data first (creates CachedDataRoots entry) + let chunk = UnpackedChunk { + data_root, + data_size: chunk_data.len() as u64, + data_path: Default::default(), + bytes: chunk_data.into(), + tx_offset: TxChunkOffset(0), + }; + if let Err(e) = self.handle_chunk_ingress_message(chunk).await { + warn!(data_root = %data_root, error = ?e, "Failed to cache blob chunk data"); + return; + } + + // 2. Store the data tx header via the gossip ingress path + if let Err(e) = self.handle_data_tx_ingress_message_gossip(tx_header).await { + warn!(data_root = %data_root, error = ?e, "Failed to ingest blob-derived data tx"); + return; + } + + // 3. 
Store the ingress proof + if let Err(e) = self.handle_ingest_ingress_proof(ingress_proof) { + warn!(data_root = %data_root, error = ?e, "Failed to store blob ingress proof"); + } + } + #[tracing::instrument(level = "trace", skip_all)] async fn handle_get_mempool_status(&self) -> Result { Ok(self diff --git a/crates/actors/src/services.rs b/crates/actors/src/services.rs index 290b5580e6..3fc78c3afd 100644 --- a/crates/actors/src/services.rs +++ b/crates/actors/src/services.rs @@ -1,3 +1,4 @@ +use crate::blob_extraction_service::BlobExtractionMessage; use crate::chunk_ingress_service::ChunkIngressMessage; use crate::mining_bus::{MiningBroadcastEvent, MiningBus}; use crate::{ @@ -104,6 +105,7 @@ pub struct ServiceReceivers { pub peer_events: broadcast::Receiver, pub peer_network: UnboundedReceiver, pub block_discovery: UnboundedReceiver>, + pub blob_extraction: UnboundedReceiver, pub packing: tokio::sync::mpsc::Receiver, } @@ -126,6 +128,7 @@ pub struct ServiceSendersInner { pub peer_events: broadcast::Sender, pub peer_network: PeerNetworkSender, pub block_discovery: UnboundedSender>, + pub blob_extraction: UnboundedSender, pub mining_bus: MiningBus, pub packing_sender: PackingSender, } @@ -161,6 +164,8 @@ impl ServiceSendersInner { let (peer_network_sender, peer_network_receiver) = tokio::sync::mpsc::unbounded_channel(); let (block_discovery_sender, block_discovery_receiver) = unbounded_channel::>(); + let (blob_extraction_sender, blob_extraction_receiver) = + unbounded_channel::(); let (packing_sender, packing_receiver) = PackingService::channel(5_000); let mining_bus = MiningBus::new(); @@ -182,6 +187,7 @@ impl ServiceSendersInner { peer_events: peer_events_sender, peer_network: PeerNetworkSender::new(peer_network_sender), block_discovery: block_discovery_sender, + blob_extraction: blob_extraction_sender, mining_bus, packing_sender, }; @@ -203,6 +209,7 @@ impl ServiceSendersInner { peer_events: peer_events_receiver, peer_network: peer_network_receiver, 
block_discovery: block_discovery_receiver, + blob_extraction: blob_extraction_receiver, packing: packing_receiver, }; (senders, receivers) diff --git a/crates/chain/src/chain.rs b/crates/chain/src/chain.rs index 7cfd7f7ae5..96bab8ba7e 100644 --- a/crates/chain/src/chain.rs +++ b/crates/chain/src/chain.rs @@ -6,6 +6,7 @@ use base58::ToBase58 as _; use eyre::Context as _; use futures::FutureExt as _; use irys_actors::{ + blob_extraction_service::BlobExtractionService, block_discovery::{ BlockDiscoveryFacadeImpl, BlockDiscoveryMessage, BlockDiscoveryService, BlockDiscoveryServiceInner, @@ -1597,6 +1598,18 @@ impl IrysNode { )?; let mempool_facade = MempoolServiceFacadeImpl::from(&service_senders); + // Spawn blob extraction service (when blobs are enabled) + if config.consensus.enable_blobs { + let blob_store = reth_node_adapter.inner.pool.blob_store().clone(); + BlobExtractionService::spawn_service( + blob_store, + service_senders.mempool.clone(), + Arc::new(config.clone()), + receivers.blob_extraction, + runtime_handle.clone(), + ); + } + // Get the mempool state to create the pledge provider let (tx, rx) = oneshot::channel(); service_senders diff --git a/crates/types/src/config/consensus.rs b/crates/types/src/config/consensus.rs index c149bcca4d..6db7b6a5b9 100644 --- a/crates/types/src/config/consensus.rs +++ b/crates/types/src/config/consensus.rs @@ -481,6 +481,29 @@ impl ConsensusConfig { // discrepancies when using GPU mining pub const CHUNK_SIZE: u64 = 256 * 1024; + /// Enforce logical implications between KZG/blob config flags. + /// Call before wrapping in `Arc` to fix contradictions early. 
+ pub fn normalize(&mut self) { + if self.enable_blobs && !self.accept_kzg_ingress_proofs { + tracing::warn!( + "enable_blobs=true requires accept_kzg_ingress_proofs=true, auto-enabling" + ); + self.accept_kzg_ingress_proofs = true; + } + if self.require_kzg_ingress_proofs && !self.accept_kzg_ingress_proofs { + tracing::warn!( + "require_kzg_ingress_proofs=true requires accept_kzg_ingress_proofs=true, auto-enabling" + ); + self.accept_kzg_ingress_proofs = true; + } + if self.use_kzg_ingress_proofs && !self.accept_kzg_ingress_proofs { + tracing::warn!( + "use_kzg_ingress_proofs=true requires accept_kzg_ingress_proofs=true, auto-enabling" + ); + self.accept_kzg_ingress_proofs = true; + } + } + // 20TB, with ~10% overhead, aligned to the nearest recall range (400 chunks) pub const CHUNKS_PER_PARTITION_20TB: u64 = 75_534_400; @@ -968,7 +991,6 @@ mod tests { let mut peer_config = ConsensusConfig::testing(); peer_config.expected_genesis_hash = Some(fake_hash); - // Simulate what Genesis node does at runtime genesis_config.expected_genesis_hash = Some(fake_hash); assert_eq!( @@ -980,11 +1002,6 @@ mod tests { #[test] fn test_consensus_hash_regression() { - // This test verifies that the hash of the testing config remains stable. 
- // If this test fails, it indicates a breaking change in either: - // - The ConsensusConfig structure or field order - // - The canonical JSON serialization implementation - // - The serde serialization of dependency types let config = ConsensusConfig::testing(); let expected_hash = H256::from_base58("FqweVVmuY7LZDbEduJ2Yf5HGkkYpP59xGfvKzzopCjVE"); assert_eq!( @@ -993,4 +1010,49 @@ mod tests { "Hash changed—this may indicate a breaking change in the consensus config or its dependencies" ); } + + #[test] + fn normalize_enable_blobs_forces_accept_kzg() { + let mut config = ConsensusConfig::testing(); + config.enable_blobs = true; + config.accept_kzg_ingress_proofs = false; + config.normalize(); + assert!(config.accept_kzg_ingress_proofs); + } + + #[test] + fn normalize_require_kzg_forces_accept_kzg() { + let mut config = ConsensusConfig::testing(); + config.require_kzg_ingress_proofs = true; + config.accept_kzg_ingress_proofs = false; + config.normalize(); + assert!(config.accept_kzg_ingress_proofs); + } + + #[test] + fn normalize_use_kzg_forces_accept_kzg() { + let mut config = ConsensusConfig::testing(); + config.use_kzg_ingress_proofs = true; + config.accept_kzg_ingress_proofs = false; + config.normalize(); + assert!(config.accept_kzg_ingress_proofs); + } + + #[test] + fn normalize_idempotent() { + let mut config = ConsensusConfig::testing(); + config.enable_blobs = true; + config.normalize(); + let snapshot_accept = config.accept_kzg_ingress_proofs; + config.normalize(); + assert_eq!(config.accept_kzg_ingress_proofs, snapshot_accept); + } + + #[test] + fn normalize_noop_when_consistent() { + let mut config = ConsensusConfig::testing(); + let before = config.accept_kzg_ingress_proofs; + config.normalize(); + assert_eq!(config.accept_kzg_ingress_proofs, before); + } } diff --git a/crates/types/src/config/mod.rs b/crates/types/src/config/mod.rs index 2c86aeec09..c248a45763 100644 --- a/crates/types/src/config/mod.rs +++ b/crates/types/src/config/mod.rs @@ -1,4 
+1,4 @@ -use eyre::ensure; +use eyre::{bail, ensure}; use serde::{Deserialize, Serialize}; use std::{ops::Deref, sync::Arc}; @@ -16,7 +16,8 @@ pub struct Config(Arc); impl Config { pub fn new(node_config: NodeConfig, peer_id: IrysPeerId) -> Self { - let consensus = node_config.consensus_config(); + let mut consensus = node_config.consensus_config(); + consensus.normalize(); Self(Arc::new(CombinedConfigInner { consensus, @@ -146,6 +147,16 @@ impl Config { "mempool.max_pending_chunk_items must be > 0 (a zero-capacity pending chunk cache would silently drop all pre-header chunks)" ); + if self.consensus.require_kzg_ingress_proofs && !self.consensus.accept_kzg_ingress_proofs { + bail!("require_kzg_ingress_proofs=true but accept_kzg_ingress_proofs=false — contradictory config"); + } + if self.consensus.enable_blobs && !self.consensus.accept_kzg_ingress_proofs { + bail!("enable_blobs=true but accept_kzg_ingress_proofs=false — blob V2 proofs would be rejected"); + } + if self.consensus.use_kzg_ingress_proofs && !self.consensus.accept_kzg_ingress_proofs { + bail!("use_kzg_ingress_proofs=true but accept_kzg_ingress_proofs=false — generated proofs would be rejected"); + } + Ok(()) } } diff --git a/crates/types/src/ingress.rs b/crates/types/src/ingress.rs index 0525b5c603..50b589d115 100644 --- a/crates/types/src/ingress.rs +++ b/crates/types/src/ingress.rs @@ -424,6 +424,56 @@ pub fn generate_ingress_proof_v2( Ok(proof) } +/// Generate a V2 ingress proof for blob-derived data (EIP-4844). +/// +/// The KZG commitment is taken directly from the blob transaction sidecar +/// rather than recomputed — the sidecar uses the same c-kzg trusted setup. +/// The blob data (128KB) is zero-padded to a 256KB Irys chunk for data_root +/// computation. 
+pub fn generate_ingress_proof_v2_from_blob( + signer: &IrysSigner, + blob_data: &[u8], + kzg_commitment: &[u8; 48], + chain_id: u64, + anchor: H256, +) -> eyre::Result { + use crate::kzg::{compute_composite_commitment, KzgCommitmentBytes, CHUNK_SIZE_FOR_KZG}; + + eyre::ensure!( + blob_data.len() <= CHUNK_SIZE_FOR_KZG, + "blob data exceeds max chunk size ({})", + CHUNK_SIZE_FOR_KZG, + ); + + // Zero-pad blob to 256KB Irys chunk size + let mut padded = vec![0_u8; CHUNK_SIZE_FOR_KZG]; + padded[..blob_data.len()].copy_from_slice(blob_data); + + // Compute data_root from the padded chunk using the standard merkle tree + let (leaves, _) = generate_ingress_leaves( + std::iter::once(Ok(padded.as_slice())), + signer.address(), + false, + )?; + let root = generate_data_root(leaves)?; + + // Composite commitment binds the sidecar KZG commitment to the signer + let composite = compute_composite_commitment(kzg_commitment, &signer.address()); + + let mut proof = IngressProof::V2(IngressProofV2 { + signature: Default::default(), + data_root: root.id.into(), + kzg_commitment: KzgCommitmentBytes::from(*kzg_commitment), + composite_commitment: composite, + chain_id, + anchor, + source_type: DataSourceType::EvmBlob, + }); + + signer.sign_ingress_proof(&mut proof)?; + Ok(proof) +} + pub fn verify_ingress_proof>( proof: &IngressProof, chunks: impl IntoIterator, @@ -504,7 +554,9 @@ mod tests { ConsensusConfig, IngressProof, H256, }; - use super::{generate_ingress_proof, generate_ingress_proof_v2}; + use super::{ + generate_ingress_proof, generate_ingress_proof_v2, generate_ingress_proof_v2_from_blob, + }; /// Generate KZG-safe data: each 32-byte field element's first byte must be < 0x74. /// Uses a simple fill value that satisfies the BLS12-381 modulus constraint. 
@@ -908,4 +960,138 @@ mod tests { Ok(()) } + + #[test] + fn v2_blob_generate_and_verify_roundtrip() -> eyre::Result<()> { + use crate::ingress::DataSourceType; + + let config = ConsensusConfig::testing(); + let signer = IrysSigner::random_signer(&config); + let chain_id = 1_u64; + let anchor = H256::random(); + + // Simulate a 128KB EIP-4844 blob with KZG-safe data + let blob_size = 131_072; + let blob_data = kzg_safe_data(blob_size, 42); + + // Compute a real KZG commitment from the blob data (zero-padded to 256KB) + let kzg_settings = crate::kzg::default_kzg_settings(); + let kzg_commitment = crate::kzg::compute_chunk_commitment(&blob_data, kzg_settings)?; + let commitment_bytes: [u8; 48] = kzg_commitment.as_ref().try_into().unwrap(); + + let proof = generate_ingress_proof_v2_from_blob( + &signer, + &blob_data, + &commitment_bytes, + chain_id, + anchor, + )?; + + // Source type must be EvmBlob + match &proof { + IngressProof::V2(v2) => { + assert_eq!(v2.source_type, DataSourceType::EvmBlob); + assert_eq!(v2.kzg_commitment.0, commitment_bytes); + } + _ => panic!("expected V2 proof"), + } + + // Verify with the zero-padded chunk (256KB) — the verifier recomputes + // the KZG commitment from the provided chunks + let mut padded = vec![0_u8; crate::kzg::CHUNK_SIZE_FOR_KZG]; + padded[..blob_data.len()].copy_from_slice(&blob_data); + + assert!(verify_ingress_proof(&proof, [padded.as_slice()], chain_id)?); + + Ok(()) + } + + #[test] + fn v2_blob_sidecar_commitment_preserved() -> eyre::Result<()> { + let config = ConsensusConfig::testing(); + let signer = IrysSigner::random_signer(&config); + + let blob_data = kzg_safe_data(131_072, 55); + + // Compute commitment from the blob data + let kzg_settings = crate::kzg::default_kzg_settings(); + let kzg_commitment = crate::kzg::compute_chunk_commitment(&blob_data, kzg_settings)?; + let commitment_bytes: [u8; 48] = kzg_commitment.as_ref().try_into().unwrap(); + + let proof = generate_ingress_proof_v2_from_blob( + &signer, + 
&blob_data, + &commitment_bytes, + 1, + H256::random(), + )?; + + // The KZG commitment in the proof must be exactly the one we provided + match &proof { + IngressProof::V2(v2) => assert_eq!(v2.kzg_commitment.0, commitment_bytes), + _ => panic!("expected V2 proof"), + } + + Ok(()) + } + + #[test] + fn v2_blob_wrong_data_fails_verification() -> eyre::Result<()> { + let config = ConsensusConfig::testing(); + let signer = IrysSigner::random_signer(&config); + let chain_id = 1_u64; + + let blob_data = kzg_safe_data(131_072, 42); + let kzg_settings = crate::kzg::default_kzg_settings(); + let kzg_commitment = crate::kzg::compute_chunk_commitment(&blob_data, kzg_settings)?; + let commitment_bytes: [u8; 48] = kzg_commitment.as_ref().try_into().unwrap(); + + let proof = generate_ingress_proof_v2_from_blob( + &signer, + &blob_data, + &commitment_bytes, + chain_id, + H256::random(), + )?; + + // Verify with different data (wrong fill value) — should fail + let bad_blob = kzg_safe_data(131_072, 7); + let mut bad_padded = vec![0_u8; crate::kzg::CHUNK_SIZE_FOR_KZG]; + bad_padded[..bad_blob.len()].copy_from_slice(&bad_blob); + + assert!(!verify_ingress_proof( + &proof, + [bad_padded.as_slice()], + chain_id + )?); + + Ok(()) + } + + #[test] + fn v2_blob_wrong_chain_id_fails() -> eyre::Result<()> { + let config = ConsensusConfig::testing(); + let signer = IrysSigner::random_signer(&config); + + let blob_data = kzg_safe_data(131_072, 42); + let kzg_settings = crate::kzg::default_kzg_settings(); + let kzg_commitment = crate::kzg::compute_chunk_commitment(&blob_data, kzg_settings)?; + let commitment_bytes: [u8; 48] = kzg_commitment.as_ref().try_into().unwrap(); + + let proof = generate_ingress_proof_v2_from_blob( + &signer, + &blob_data, + &commitment_bytes, + 1, + H256::random(), + )?; + + let mut padded = vec![0_u8; crate::kzg::CHUNK_SIZE_FOR_KZG]; + padded[..blob_data.len()].copy_from_slice(&blob_data); + + // Verify with wrong chain_id — should fail + 
assert!(!verify_ingress_proof(&proof, [padded.as_slice()], 2)?); + + Ok(()) + } } From e4e28ff234e56eb10f59dd24da7a609c9b6bf1a6 Mon Sep 17 00:00:00 2001 From: jason Date: Mon, 23 Feb 2026 11:47:58 +0000 Subject: [PATCH 07/13] chore(kzg): review comments --- crates/actors/src/blob_extraction_service.rs | 28 ++++++--- crates/actors/src/block_producer.rs | 6 +- .../src/chunk_ingress_service/chunks.rs | 16 ++--- crates/actors/src/mempool_service.rs | 14 +++-- crates/types/src/ingress.rs | 44 +++++++------- crates/types/src/kzg.rs | 59 ------------------- 6 files changed, 62 insertions(+), 105 deletions(-) diff --git a/crates/actors/src/blob_extraction_service.rs b/crates/actors/src/blob_extraction_service.rs index 67bf0b8fd0..75a5d6c689 100644 --- a/crates/actors/src/blob_extraction_service.rs +++ b/crates/actors/src/blob_extraction_service.rs @@ -67,6 +67,11 @@ impl BlobExtractionService { fn handle_extract_blobs(&self, block_hash: H256, blob_tx_hashes: &[B256]) -> eyre::Result<()> { use irys_types::ingress::generate_ingress_proof_v2_from_blob; + if !self.config.consensus.enable_blobs { + warn!("Received blob extraction request but blobs are disabled"); + return Ok(()); + } + let signer = self.config.irys_signer(); let chain_id = self.config.consensus.chain_id; let anchor: H256 = block_hash; @@ -111,6 +116,9 @@ impl BlobExtractionService { let data_root = proof.data_root(); + let blob_len = + u64::try_from(blob.len()).map_err(|_| eyre::eyre!("blob length overflow"))?; + let tx_header = irys_types::transaction::DataTransactionHeader::V1( irys_types::transaction::DataTransactionHeaderV1WithMetadata { tx: irys_types::transaction::DataTransactionHeaderV1 { @@ -118,7 +126,7 @@ impl BlobExtractionService { anchor, signer: signer.address(), data_root, - data_size: blob.len() as u64, + data_size: blob_len, header_size: 0, term_fee: Default::default(), perm_fee: None, @@ -131,17 +139,19 @@ impl BlobExtractionService { }, ); - // Zero-pad blob to 256KB Irys chunk let mut 
chunk_data = vec![0_u8; irys_types::kzg::CHUNK_SIZE_FOR_KZG]; chunk_data[..blob.len()].copy_from_slice(blob.as_ref()); - let _ = self - .mempool_sender - .send(MempoolServiceMessage::IngestBlobDerivedTx { - tx_header, - ingress_proof: proof, - chunk_data, - }); + if let Err(e) = + self.mempool_sender + .send(MempoolServiceMessage::IngestBlobDerivedTx { + tx_header, + ingress_proof: proof, + chunk_data, + }) + { + warn!(data_root = %data_root, error = %e, "Failed to send blob-derived tx to mempool"); + } total_blobs += 1; } diff --git a/crates/actors/src/block_producer.rs b/crates/actors/src/block_producer.rs index 9c7c632325..c8e51dda4f 100644 --- a/crates/actors/src/block_producer.rs +++ b/crates/actors/src/block_producer.rs @@ -815,12 +815,14 @@ pub trait BlockProdStrategy { blob_txs = blob_tx_hashes.len(), "Triggering blob extraction for EIP-4844 transactions", ); - let _ = self.inner().service_senders.blob_extraction.send( + if let Err(e) = self.inner().service_senders.blob_extraction.send( BlobExtractionMessage::ExtractBlobs { block_hash: block.block_hash, blob_tx_hashes, }, - ); + ) { + warn!(error = %e, "Failed to send blob extraction request"); + } } } diff --git a/crates/actors/src/chunk_ingress_service/chunks.rs b/crates/actors/src/chunk_ingress_service/chunks.rs index 677ce2d63f..115a7dde7d 100644 --- a/crates/actors/src/chunk_ingress_service/chunks.rs +++ b/crates/actors/src/chunk_ingress_service/chunks.rs @@ -839,7 +839,6 @@ pub fn generate_ingress_proof( }); let proof = if use_kzg_ingress_proofs { - // V2: collect chunks for KZG commitment computation let chunks: Vec> = iter.collect::>>()?; irys_types::ingress::generate_ingress_proof_v2( &signer, @@ -850,7 +849,6 @@ pub fn generate_ingress_proof( irys_types::kzg::default_kzg_settings(), )? } else { - // V1: pass lazy iterator for SHA256 merkle proof irys_types::ingress::generate_ingress_proof( &signer, data_root, iter, chain_id, anchor, )? 
@@ -908,11 +906,15 @@ fn shadow_log_kzg_commitments(db: &DatabaseProvider, data_root: DataRoot) -> eyr let chunk_start = Instant::now(); match compute_chunk_commitment(&chunk_bin, settings) { Ok(commitment) => { - let hex: String = commitment - .as_ref() - .iter() - .map(|b| format!("{b:02x}")) - .collect(); + let hex = + commitment + .as_ref() + .iter() + .fold(String::with_capacity(96), |mut s, b| { + use std::fmt::Write as _; + let _ = write!(s, "{b:02x}"); + s + }); info!( data_root = %data_root, chunk_index = i, diff --git a/crates/actors/src/mempool_service.rs b/crates/actors/src/mempool_service.rs index a8ff7abcc1..20540faf2d 100644 --- a/crates/actors/src/mempool_service.rs +++ b/crates/actors/src/mempool_service.rs @@ -463,17 +463,23 @@ impl Inner { } let data_root = tx_header.data_root; + let chunk_len = match u64::try_from(chunk_data.len()) { + Ok(len) => len, + Err(_) => { + warn!(data_root = %data_root, "Chunk data length overflows u64"); + return; + } + }; debug!( data_root = %data_root, data_size = tx_header.data_size, - chunk_data_len = chunk_data.len(), + chunk_data_len = chunk_len, "Ingesting blob-derived data transaction", ); - // 1. Cache the chunk data first (creates CachedDataRoots entry) let chunk = UnpackedChunk { data_root, - data_size: chunk_data.len() as u64, + data_size: chunk_len, data_path: Default::default(), bytes: chunk_data.into(), tx_offset: TxChunkOffset(0), @@ -483,13 +489,11 @@ impl Inner { return; } - // 2. Store the data tx header via the gossip ingress path if let Err(e) = self.handle_data_tx_ingress_message_gossip(tx_header).await { warn!(data_root = %data_root, error = ?e, "Failed to ingest blob-derived data tx"); return; } - // 3. 
Store the ingress proof if let Err(e) = self.handle_ingest_ingress_proof(ingress_proof) { warn!(data_root = %data_root, error = ?e, "Failed to store blob ingress proof"); } diff --git a/crates/types/src/ingress.rs b/crates/types/src/ingress.rs index 50b589d115..0e7d3b48d0 100644 --- a/crates/types/src/ingress.rs +++ b/crates/types/src/ingress.rs @@ -190,18 +190,18 @@ pub enum DataSourceType { } impl DataSourceType { - pub fn from_u8(val: u8) -> Self { + pub fn from_u8(val: u8) -> eyre::Result { match val { - 0 => Self::NativeData, - 1 => Self::EvmBlob, - _ => Self::NativeData, + 0 => Ok(Self::NativeData), + 1 => Ok(Self::EvmBlob), + _ => Err(eyre::eyre!("unknown DataSourceType discriminant: {val}")), } } } impl<'a> Arbitrary<'a> for DataSourceType { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - Ok(Self::from_u8(u.int_in_range(0..=1)?)) + Self::from_u8(u.int_in_range(0..=1)?).map_err(|_| arbitrary::Error::IncorrectFormat) } } @@ -212,7 +212,10 @@ impl Compact for DataSourceType { } fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { - (Self::from_u8(buf[0]), &buf[1..]) + // Compact deserialization: default to NativeData for forward compatibility + // with unknown discriminants in stored data + let source = Self::from_u8(buf[0]).unwrap_or_default(); + (source, &buf[1..]) } } @@ -229,16 +232,15 @@ pub struct IngressProofV2 { impl Compact for IngressProofV2 { fn to_compact>(&self, buf: &mut B) -> usize { - let mut flags = 0_usize; - // signature has no flag — always present, written first - flags += self.signature.to_compact(buf); - flags += self.data_root.to_compact(buf); - flags += self.kzg_commitment.to_compact(buf); - flags += self.composite_commitment.to_compact(buf); - flags += self.chain_id.to_compact(buf); - flags += self.anchor.to_compact(buf); - flags += self.source_type.to_compact(buf); - flags + let mut written = 0_usize; + written += self.signature.to_compact(buf); + written += self.data_root.to_compact(buf); + written 
+= self.kzg_commitment.to_compact(buf); + written += self.composite_commitment.to_compact(buf); + written += self.chain_id.to_compact(buf); + written += self.anchor.to_compact(buf); + written += self.source_type.to_compact(buf); + written } fn from_compact(buf: &[u8], len: usize) -> (Self, &[u8]) { @@ -282,6 +284,7 @@ impl Versioned for IngressProofV2 { const VERSION: u8 = 2; } +/// Signature excluded from RLP: this encoding is used for signature_hash computation impl alloy_rlp::Encodable for IngressProofV2 { fn encode(&self, out: &mut dyn BufMut) { let header = alloy_rlp::Header { @@ -322,7 +325,8 @@ impl alloy_rlp::Decodable for IngressProofV2 { composite_commitment, chain_id, anchor, - source_type: DataSourceType::from_u8(source_type_u8), + source_type: DataSourceType::from_u8(source_type_u8) + .map_err(|_| alloy_rlp::Error::Custom("unknown DataSourceType discriminant"))?, }) } } @@ -394,7 +398,6 @@ pub fn generate_ingress_proof_v2( KzgCommitmentBytes, }; - // Step 1: Compute per-chunk KZG commitments and aggregate let chunk_commitments: Vec = chunks .iter() .map(|chunk| compute_chunk_commitment(chunk.as_ref(), kzg_settings)) @@ -406,10 +409,8 @@ pub fn generate_ingress_proof_v2( .try_into() .map_err(|_| eyre::eyre!("KZG commitment is not 48 bytes"))?; - // Step 2: Compute composite commitment binding KZG to signer let composite = compute_composite_commitment(&kzg_bytes, &signer.address()); - // Step 3: Build and sign the V2 proof let mut proof = IngressProof::V2(IngressProofV2 { signature: Default::default(), data_root, @@ -445,11 +446,9 @@ pub fn generate_ingress_proof_v2_from_blob( CHUNK_SIZE_FOR_KZG, ); - // Zero-pad blob to 256KB Irys chunk size let mut padded = vec![0_u8; CHUNK_SIZE_FOR_KZG]; padded[..blob_data.len()].copy_from_slice(blob_data); - // Compute data_root from the padded chunk using the standard merkle tree let (leaves, _) = generate_ingress_leaves( std::iter::once(Ok(padded.as_slice())), signer.address(), @@ -457,7 +456,6 @@ pub fn 
generate_ingress_proof_v2_from_blob( )?; let root = generate_data_root(leaves)?; - // Composite commitment binds the sidecar KZG commitment to the signer let composite = compute_composite_commitment(kzg_commitment, &signer.address()); let mut proof = IngressProof::V2(IngressProofV2 { diff --git a/crates/types/src/kzg.rs b/crates/types/src/kzg.rs index f46b3c7b7c..5a88fc499c 100644 --- a/crates/types/src/kzg.rs +++ b/crates/types/src/kzg.rs @@ -229,7 +229,6 @@ pub fn compute_chunk_commitment( )); } - // Zero-pad to 256KB let mut padded = [0_u8; CHUNK_SIZE_FOR_KZG]; padded[..chunk_data.len()].copy_from_slice(chunk_data); @@ -298,47 +297,6 @@ mod tests { c.as_ref() } - #[test] - fn commitment_size_is_48_bytes() { - let data = [0_u8; BLOB_SIZE]; - let commitment = compute_blob_commitment(&data, kzg_settings()).unwrap(); - assert_eq!(commitment.as_ref().len(), COMMITMENT_SIZE); - } - - #[test] - fn same_data_produces_same_commitment() { - let data = [42_u8; BLOB_SIZE]; - let c1 = compute_blob_commitment(&data, kzg_settings()).unwrap(); - let c2 = compute_blob_commitment(&data, kzg_settings()).unwrap(); - assert_eq!(commitment_bytes(&c1), commitment_bytes(&c2)); - } - - #[test] - fn different_data_produces_different_commitment() { - let data_a = [1_u8; BLOB_SIZE]; - let data_b = [2_u8; BLOB_SIZE]; - let c1 = compute_blob_commitment(&data_a, kzg_settings()).unwrap(); - let c2 = compute_blob_commitment(&data_b, kzg_settings()).unwrap(); - assert_ne!(commitment_bytes(&c1), commitment_bytes(&c2)); - } - - #[test] - fn chunk_commitment_deterministic() { - let data = vec![7_u8; CHUNK_SIZE_FOR_KZG]; - let c1 = compute_chunk_commitment(&data, kzg_settings()).unwrap(); - let c2 = compute_chunk_commitment(&data, kzg_settings()).unwrap(); - assert_eq!(commitment_bytes(&c1), commitment_bytes(&c2)); - } - - #[test] - fn chunk_commitment_different_data() { - let data_a = vec![1_u8; CHUNK_SIZE_FOR_KZG]; - let data_b = vec![2_u8; CHUNK_SIZE_FOR_KZG]; - let c1 = 
compute_chunk_commitment(&data_a, kzg_settings()).unwrap(); - let c2 = compute_chunk_commitment(&data_b, kzg_settings()).unwrap(); - assert_ne!(commitment_bytes(&c1), commitment_bytes(&c2)); - } - #[test] fn aggregate_commitment_produces_valid_point() { let data_a = [1_u8; BLOB_SIZE]; @@ -384,15 +342,6 @@ mod tests { assert!(result.is_err()); } - #[test] - fn composite_commitment_deterministic() { - let kzg = [42_u8; COMMITMENT_SIZE]; - let addr = IrysAddress::from([1_u8; 20]); - let c1 = compute_composite_commitment(&kzg, &addr); - let c2 = compute_composite_commitment(&kzg, &addr); - assert_eq!(c1, c2); - } - #[test] fn composite_commitment_different_addresses() { let kzg = [42_u8; COMMITMENT_SIZE]; @@ -413,14 +362,6 @@ mod tests { assert_ne!(c1, c2); } - #[test] - fn aggregate_all_single_commitment() { - let data = [1_u8; BLOB_SIZE]; - let c = compute_blob_commitment(&data, kzg_settings()).unwrap(); - let agg = aggregate_all_commitments(&[c]).unwrap(); - assert_eq!(commitment_bytes(&c), commitment_bytes(&agg)); - } - #[test] fn aggregate_all_empty_returns_error() { assert!(aggregate_all_commitments(&[]).is_err()); From 0b5b85795d6cbbeb476452f7ab21fe88cd59ba21 Mon Sep 17 00:00:00 2001 From: jason Date: Mon, 23 Feb 2026 15:19:55 +0000 Subject: [PATCH 08/13] feat(custody): add KZG opening proof primitives and per-chunk commitment storage --- crates/actors/src/blob_extraction_service.rs | 15 +- crates/actors/src/block_validation.rs | 20 +- .../src/chunk_ingress_service/chunks.rs | 48 +- .../chunk_ingress_service/ingress_proofs.rs | 25 +- crates/actors/src/mempool_service.rs | 10 +- crates/chain/src/chain.rs | 5 +- crates/database/src/database.rs | 54 +- crates/database/src/tables.rs | 11 + crates/irys-reth/src/evm.rs | 1 - crates/types/src/config/consensus.rs | 52 +- crates/types/src/ingress.rs | 297 +++++------ crates/types/src/kzg.rs | 471 ++++++++++++++++-- 12 files changed, 722 insertions(+), 287 deletions(-) diff --git 
a/crates/actors/src/blob_extraction_service.rs b/crates/actors/src/blob_extraction_service.rs index 75a5d6c689..e9ff173790 100644 --- a/crates/actors/src/blob_extraction_service.rs +++ b/crates/actors/src/blob_extraction_service.rs @@ -1,16 +1,13 @@ use irys_types::H256; use reth::revm::primitives::B256; use reth_transaction_pool::blobstore::{BlobStore, BlobStoreError}; -use std::sync::Arc; use tokio::sync::mpsc::UnboundedReceiver; use tracing::{debug, warn}; use crate::mempool_service::MempoolServiceMessage; -/// Messages sent to the blob extraction service. #[derive(Debug)] pub enum BlobExtractionMessage { - /// Extract blobs from a locally-produced block containing EIP-4844 transactions. ExtractBlobs { block_hash: H256, blob_tx_hashes: Vec, @@ -23,14 +20,14 @@ pub enum BlobExtractionMessage { pub struct BlobExtractionService { blob_store: S, mempool_sender: tokio::sync::mpsc::UnboundedSender, - config: Arc, + config: irys_types::Config, } impl BlobExtractionService { pub fn spawn_service( blob_store: S, mempool_sender: tokio::sync::mpsc::UnboundedSender, - config: Arc, + config: irys_types::Config, rx: UnboundedReceiver, runtime_handle: tokio::runtime::Handle, ) { @@ -116,8 +113,8 @@ impl BlobExtractionService { let data_root = proof.data_root(); - let blob_len = - u64::try_from(blob.len()).map_err(|_| eyre::eyre!("blob length overflow"))?; + let chunk_size = u64::try_from(irys_types::kzg::CHUNK_SIZE_FOR_KZG) + .map_err(|_| eyre::eyre!("chunk size overflow"))?; let tx_header = irys_types::transaction::DataTransactionHeader::V1( irys_types::transaction::DataTransactionHeaderV1WithMetadata { @@ -126,11 +123,11 @@ impl BlobExtractionService { anchor, signer: signer.address(), data_root, - data_size: blob_len, + data_size: chunk_size, header_size: 0, term_fee: Default::default(), perm_fee: None, - ledger_id: irys_types::block::DataLedger::Submit as u32, + ledger_id: u32::from(irys_types::block::DataLedger::Submit), chain_id, signature: Default::default(), 
bundle_format: None, diff --git a/crates/actors/src/block_validation.rs b/crates/actors/src/block_validation.rs index e14272afd8..fca3f9bedb 100644 --- a/crates/actors/src/block_validation.rs +++ b/crates/actors/src/block_validation.rs @@ -714,20 +714,12 @@ pub async fn prevalidate_block( let tx_proofs = get_ingress_proofs(publish_ledger, &tx_header.id) .map_err(|_| PreValidationError::IngressProofsMissing)?; for proof in tx_proofs.iter() { - // Check proof version is accepted - match proof { - IngressProof::V2(_) if !config.consensus.accept_kzg_ingress_proofs => { - return Err(PreValidationError::IngressProofVersionRejected( - "V2 proofs not accepted".into(), - )); - } - IngressProof::V1(_) if config.consensus.require_kzg_ingress_proofs => { - return Err(PreValidationError::IngressProofVersionRejected( - "V1 proofs rejected (V2 required)".into(), - )); - } - _ => {} - } + proof + .check_version_accepted( + config.consensus.accept_kzg_ingress_proofs, + config.consensus.require_kzg_ingress_proofs, + ) + .map_err(|msg| PreValidationError::IngressProofVersionRejected(msg.into()))?; proof .pre_validate(&tx_header.data_root) diff --git a/crates/actors/src/chunk_ingress_service/chunks.rs b/crates/actors/src/chunk_ingress_service/chunks.rs index 115a7dde7d..681daa3dbe 100644 --- a/crates/actors/src/chunk_ingress_service/chunks.rs +++ b/crates/actors/src/chunk_ingress_service/chunks.rs @@ -787,7 +787,7 @@ pub fn generate_ingress_proof( let expected_chunk_count = data_size_to_chunk_count(size, chunk_size)?; - let (proof, actual_data_size, actual_chunk_count) = db.view_eyre(|tx| { + let (proof, per_chunk_commitments, actual_data_size, actual_chunk_count) = db.view_eyre(|tx| { let mut dup_cursor = tx.cursor_dup_read::()?; // start from first duplicate entry for this root_hash @@ -838,23 +838,25 @@ pub fn generate_ingress_proof( Ok(chunk_bin) }); - let proof = if use_kzg_ingress_proofs { + let (proof, per_chunk_commitments) = if use_kzg_ingress_proofs { let chunks: Vec> = 
iter.collect::>>()?; - irys_types::ingress::generate_ingress_proof_v2( + let (proof, per_chunk) = irys_types::ingress::generate_ingress_proof_v2( &signer, data_root, &chunks, chain_id, anchor, irys_types::kzg::default_kzg_settings(), - )? + )?; + (proof, Some(per_chunk)) } else { - irys_types::ingress::generate_ingress_proof( + let proof = irys_types::ingress::generate_ingress_proof( &signer, data_root, iter, chain_id, anchor, - )? + )?; + (proof, None) }; - Ok((proof, total_data_size, chunk_count)) + Ok((proof, per_chunk_commitments, total_data_size, chunk_count)) })?; info!( @@ -862,10 +864,33 @@ pub fn generate_ingress_proof( &proof.proof_id(), &data_root ); - assert_eq!(actual_data_size, size); - assert_eq!(actual_chunk_count, expected_chunk_count); + eyre::ensure!( + actual_data_size == size, + "data size mismatch: actual {actual_data_size} != expected {size}" + ); + eyre::ensure!( + actual_chunk_count == expected_chunk_count, + "chunk count mismatch: actual {actual_chunk_count} != expected {expected_chunk_count}" + ); + + db.update(|rw_tx| -> eyre::Result<()> { + irys_database::store_ingress_proof_checked(rw_tx, &proof, &signer)?; + + if let Some(ref per_chunk) = per_chunk_commitments { + let indexed: Vec<(u32, irys_types::kzg::KzgCommitmentBytes)> = per_chunk + .iter() + .enumerate() + .map(|(i, c)| { + let idx = + u32::try_from(i).map_err(|_| eyre::eyre!("chunk index exceeds u32"))?; + Ok((idx, *c)) + }) + .collect::>>()?; + irys_database::store_per_chunk_kzg_commitments(rw_tx, data_root, &indexed)?; + } - db.update(|rw_tx| irys_database::store_ingress_proof_checked(rw_tx, &proof, &signer))??; + Ok(()) + })??; if enable_shadow_kzg_logging && !use_kzg_ingress_proofs { if let Err(e) = shadow_log_kzg_commitments(&db, data_root) { @@ -912,7 +937,8 @@ fn shadow_log_kzg_commitments(db: &DatabaseProvider, data_root: DataRoot) -> eyr .iter() .fold(String::with_capacity(96), |mut s, b| { use std::fmt::Write as _; - let _ = write!(s, "{b:02x}"); + // write! 
to String is infallible + write!(s, "{b:02x}").expect("write to String cannot fail"); s }); info!( diff --git a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs index dfe0aef4c2..5db205c68a 100644 --- a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs +++ b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs @@ -68,20 +68,12 @@ impl ChunkIngressServiceInner { &self, ingress_proof: IngressProof, ) -> Result<(), IngressProofError> { - // Check proof version is accepted by current config - match &ingress_proof { - IngressProof::V2(_) if !self.config.consensus.accept_kzg_ingress_proofs => { - return Err(IngressProofError::RejectedVersion( - "V2 proofs not accepted (accept_kzg_ingress_proofs = false)".into(), - )); - } - IngressProof::V1(_) if self.config.consensus.require_kzg_ingress_proofs => { - return Err(IngressProofError::RejectedVersion( - "V1 proofs rejected (require_kzg_ingress_proofs = true)".into(), - )); - } - _ => {} - } + ingress_proof + .check_version_accepted( + self.config.consensus.accept_kzg_ingress_proofs, + self.config.consensus.require_kzg_ingress_proofs, + ) + .map_err(|msg| IngressProofError::RejectedVersion(msg.into()))?; // Validate the proofs signature and basic details let data_root_val = ingress_proof.data_root(); @@ -178,8 +170,9 @@ impl ChunkIngressServiceInner { } }; - let min_anchor_height = latest_height - .saturating_sub(config.consensus.mempool.ingress_proof_anchor_expiry_depth as u64); + let min_anchor_height = latest_height.saturating_sub(u64::from( + config.consensus.mempool.ingress_proof_anchor_expiry_depth, + )); let too_old = anchor_height < min_anchor_height; diff --git a/crates/actors/src/mempool_service.rs b/crates/actors/src/mempool_service.rs index 20540faf2d..9337c595ed 100644 --- a/crates/actors/src/mempool_service.rs +++ b/crates/actors/src/mempool_service.rs @@ -452,12 +452,14 @@ impl Inner { ingress_proof: IngressProof, chunk_data: Vec, 
) { - if matches!(&ingress_proof, IngressProof::V2(_)) - && !self.config.consensus.accept_kzg_ingress_proofs - { + if let Err(reason) = ingress_proof.check_version_accepted( + self.config.consensus.accept_kzg_ingress_proofs, + self.config.consensus.require_kzg_ingress_proofs, + ) { warn!( data_root = %tx_header.data_root, - "Dropping blob-derived tx: V2 proofs not accepted by config" + reason, + "Dropping blob-derived tx: proof version rejected by config" ); return; } diff --git a/crates/chain/src/chain.rs b/crates/chain/src/chain.rs index 96bab8ba7e..cf6f54b23f 100644 --- a/crates/chain/src/chain.rs +++ b/crates/chain/src/chain.rs @@ -1598,13 +1598,12 @@ impl IrysNode { )?; let mempool_facade = MempoolServiceFacadeImpl::from(&service_senders); - // Spawn blob extraction service (when blobs are enabled) if config.consensus.enable_blobs { let blob_store = reth_node_adapter.inner.pool.blob_store().clone(); BlobExtractionService::spawn_service( blob_store, - service_senders.mempool.clone(), - Arc::new(config.clone()), + service_senders.mempool.clone(), // clone: UnboundedSender is cheaply cloneable + config.clone(), // clone: Config is Arc-wrapped internally receivers.blob_extraction, runtime_handle.clone(), ); diff --git a/crates/database/src/database.rs b/crates/database/src/database.rs index a48efb8027..5e4e9fd1dd 100644 --- a/crates/database/src/database.rs +++ b/crates/database/src/database.rs @@ -6,14 +6,16 @@ use crate::db_cache::{ }; use crate::tables::{ CachedChunks, CachedChunksIndex, CachedDataRoots, CompactCachedIngressProof, - CompactLedgerIndexItem, IngressProofs, IrysBlockHeaders, IrysBlockIndexItems, IrysCommitments, - IrysDataTxHeaders, IrysPoAChunks, Metadata, MigratedBlockHashes, PeerListItems, + CompactLedgerIndexItem, CompactPerChunkCommitment, IngressProofs, IrysBlockHeaders, + IrysBlockIndexItems, IrysCommitments, IrysDataTxHeaders, IrysPoAChunks, Metadata, + MigratedBlockHashes, PeerListItems, PerChunkKzgCommitments, }; use 
crate::metadata::MetadataKey; use crate::reth_ext::IrysRethDatabaseEnvMetricsExt as _; use irys_types::ingress::CachedIngressProof; use irys_types::irys::IrysSigner; +use irys_types::kzg::{KzgCommitmentBytes, PerChunkCommitment}; use irys_types::{ BlockHash, BlockHeight, BlockIndexItem, ChunkPathHash, CommitmentTransaction, DataLedger, DataRoot, DataTransactionHeader, DatabaseProvider, H256, IngressProof, IrysAddress, @@ -733,6 +735,54 @@ pub fn database_schema_version(tx: &mut T) -> Result, Datab } } +pub fn get_peer_id(tx: &T) -> Result, DatabaseError> { + if let Some(bytes) = tx.get::(MetadataKey::PeerId)? { + let arr: [u8; 20] = bytes.as_slice().try_into().map_err(|_| { + DatabaseError::Other("PeerId metadata does not have exactly 20 bytes".to_string()) + })?; + + Ok(Some(IrysPeerId::from(arr))) + } else { + Ok(None) + } +} + +pub fn set_peer_id(tx: &T, peer_id: IrysPeerId) -> Result<(), DatabaseError> { + let bytes: [u8; 20] = peer_id.into(); + tx.put::(MetadataKey::PeerId, bytes.to_vec()) +} + +/// Store per-chunk KZG commitments for a data_root during V2 ingress proof generation. +pub fn store_per_chunk_kzg_commitments( + tx: &T, + data_root: DataRoot, + commitments: &[(u32, KzgCommitmentBytes)], +) -> eyre::Result<()> { + for &(chunk_index, commitment) in commitments { + tx.put::( + data_root, + CompactPerChunkCommitment(PerChunkCommitment { + chunk_index, + commitment, + }), + )?; + } + Ok(()) +} + +/// Retrieve a single per-chunk KZG commitment by data_root and chunk_index. +pub fn get_per_chunk_kzg_commitment( + tx: &T, + data_root: DataRoot, + chunk_index: u32, +) -> eyre::Result> { + let mut cursor = tx.cursor_dup_read::()?; + Ok(cursor + .seek_by_key_subkey(data_root, chunk_index)? 
+ .filter(|e| e.chunk_index == chunk_index) + .map(|e| e.commitment)) +} + #[cfg(test)] mod tests { use arbitrary::Arbitrary as _; diff --git a/crates/database/src/tables.rs b/crates/database/src/tables.rs index 09767e7a53..096a6023bc 100644 --- a/crates/database/src/tables.rs +++ b/crates/database/src/tables.rs @@ -6,6 +6,7 @@ use crate::{ submodule::tables::ChunkPathHashes, }; use irys_types::ingress::CachedIngressProof; +use irys_types::kzg::PerChunkCommitment; use irys_types::{ Base64, BlockHeight, DataLedger, IrysAddress, IrysPeerId, LedgerIndexItem, PeerListItemInner, }; @@ -87,6 +88,7 @@ add_wrapper_struct!((LedgerIndexItem, CompactLedgerIndexItem)); add_wrapper_struct!((CommitmentTransactionMetadata, CompactCommitmentTxMetadata)); add_wrapper_struct!((DataTransactionMetadata, CompactDataTxMetadata)); add_wrapper_struct!((CachedIngressProof, CompactCachedIngressProof)); +add_wrapper_struct!((PerChunkCommitment, CompactPerChunkCommitment)); impl_compression_for_compact!( CompactIrysBlockHeader, @@ -103,6 +105,7 @@ impl_compression_for_compact!( CompactBase64, CompactCachedIngressProof, CompactLedgerIndexItem, + CompactPerChunkCommitment, CompactCommitmentTxMetadata, CompactDataTxMetadata ); @@ -211,6 +214,14 @@ table CachedChunks { type Value = CachedChunk; } +/// Per-chunk KZG commitments stored during V2 ingress proof generation. +/// Used for custody proof verification (KZG opening proofs). +table PerChunkKzgCommitments { + type Key = DataRoot; + type Value = CompactPerChunkCommitment; + type SubKey = u32; +} + /// Indexes ingress proofs by DataRoot and Address table IngressProofs { type Key = DataRoot; diff --git a/crates/irys-reth/src/evm.rs b/crates/irys-reth/src/evm.rs index 8ee5403dfd..f08748b7bf 100644 --- a/crates/irys-reth/src/evm.rs +++ b/crates/irys-reth/src/evm.rs @@ -1334,7 +1334,6 @@ mod tests { /// Ensure EVM layer rejects EIP-4844 blob-carrying transactions regardless of mempool filters. 
#[test] fn evm_rejects_eip4844_blob_fields_in_transact_raw() { - // Build minimal EVM env with Cancun spec enabled let factory = IrysEvmFactory::new(false); let mut cfg_env = CfgEnv::default(); cfg_env.spec = SpecId::CANCUN; diff --git a/crates/types/src/config/consensus.rs b/crates/types/src/config/consensus.rs index 6db7b6a5b9..c40e795ae8 100644 --- a/crates/types/src/config/consensus.rs +++ b/crates/types/src/config/consensus.rs @@ -143,6 +143,18 @@ pub struct ConsensusConfig { #[serde(default)] pub enable_blobs: bool, + /// Enable custody proofs via KZG opening. Requires `accept_kzg_ingress_proofs`. + #[serde(default)] + pub enable_custody_proofs: bool, + + /// Number of chunks challenged per custody proof (K). Default: 20. + #[serde(default = "default_custody_challenge_count")] + pub custody_challenge_count: u32, + + /// Number of blocks a miner has to respond to a custody challenge. Default: 10. + #[serde(default = "default_custody_response_window")] + pub custody_response_window: u64, + /// Target number of years data should be preserved on the network /// Determines long-term storage pricing and incentives pub safe_minimum_number_of_years: u64, @@ -212,6 +224,14 @@ fn default_disable_full_ingress_proof_validation() -> bool { false } +fn default_custody_challenge_count() -> u32 { + 20 +} + +fn default_custody_response_window() -> u64 { + 10 +} + /// # Consensus Configuration Source /// /// Specifies where the node should obtain its consensus rules from. 
@@ -502,6 +522,12 @@ impl ConsensusConfig { ); self.accept_kzg_ingress_proofs = true; } + if self.enable_custody_proofs && !self.accept_kzg_ingress_proofs { + tracing::warn!( + "enable_custody_proofs=true requires accept_kzg_ingress_proofs=true, auto-enabling" + ); + self.accept_kzg_ingress_proofs = true; + } } // 20TB, with ~10% overhead, aligned to the nearest recall range (400 chunks) @@ -671,6 +697,9 @@ impl ConsensusConfig { accept_kzg_ingress_proofs: false, require_kzg_ingress_proofs: false, enable_blobs: false, + enable_custody_proofs: false, + custody_challenge_count: 20, + custody_response_window: 10, // Fee required to stake a mining address in Irys tokens stake_value: Amount::token(dec!(400_000)).expect("valid token amount"), // Base fee required for pledging a partition in Irys tokens @@ -814,6 +843,9 @@ impl ConsensusConfig { accept_kzg_ingress_proofs: false, require_kzg_ingress_proofs: false, enable_blobs: false, + enable_custody_proofs: false, + custody_challenge_count: 20, + custody_response_window: 10, max_future_timestamp_drift_millis: 15_000, // Hardfork configuration - testnet uses 1 proof for easier testing hardforks: IrysHardforkConfig { @@ -869,6 +901,9 @@ impl ConsensusConfig { accept_kzg_ingress_proofs: false, require_kzg_ingress_proofs: false, enable_blobs: false, + enable_custody_proofs: false, + custody_challenge_count: 20, + custody_response_window: 10, max_future_timestamp_drift_millis: 15_000, genesis: GenesisConfig { @@ -1039,20 +1074,11 @@ mod tests { } #[test] - fn normalize_idempotent() { - let mut config = ConsensusConfig::testing(); - config.enable_blobs = true; - config.normalize(); - let snapshot_accept = config.accept_kzg_ingress_proofs; - config.normalize(); - assert_eq!(config.accept_kzg_ingress_proofs, snapshot_accept); - } - - #[test] - fn normalize_noop_when_consistent() { + fn normalize_custody_proofs_forces_accept_kzg() { let mut config = ConsensusConfig::testing(); - let before = config.accept_kzg_ingress_proofs; + 
config.enable_custody_proofs = true; + config.accept_kzg_ingress_proofs = false; config.normalize(); - assert_eq!(config.accept_kzg_ingress_proofs, before); + assert!(config.accept_kzg_ingress_proofs); } } diff --git a/crates/types/src/ingress.rs b/crates/types/src/ingress.rs index 0e7d3b48d0..9bee0a7304 100644 --- a/crates/types/src/ingress.rs +++ b/crates/types/src/ingress.rs @@ -120,6 +120,19 @@ impl IngressProof { } } + /// Check if this proof version is accepted by the given config flags. + pub fn check_version_accepted( + &self, + accept_kzg: bool, + require_kzg: bool, + ) -> Result<(), &'static str> { + match self { + Self::V2(_) if !accept_kzg => Err("V2 proofs not accepted"), + Self::V1(_) if require_kzg => Err("V1 proofs rejected (V2 required)"), + _ => Ok(()), + } + } + /// Returns the V1 merkle proof hash, or V2 composite commitment. /// Used as a unique proof identifier (e.g. for gossip deduplication). pub fn proof_id(&self) -> H256 { @@ -392,7 +405,7 @@ pub fn generate_ingress_proof_v2( chain_id: u64, anchor: H256, kzg_settings: &c_kzg::KzgSettings, -) -> eyre::Result { +) -> eyre::Result<(IngressProof, Vec)> { use crate::kzg::{ aggregate_all_commitments, compute_chunk_commitment, compute_composite_commitment, KzgCommitmentBytes, @@ -403,6 +416,17 @@ pub fn generate_ingress_proof_v2( .map(|chunk| compute_chunk_commitment(chunk.as_ref(), kzg_settings)) .collect::>>()?; + let per_chunk_bytes: Vec = chunk_commitments + .iter() + .map(|c| { + let bytes: [u8; 48] = c + .as_ref() + .try_into() + .map_err(|_| eyre::eyre!("KZG commitment is not 48 bytes"))?; + Ok(KzgCommitmentBytes::from(bytes)) + }) + .collect::>>()?; + let aggregated = aggregate_all_commitments(&chunk_commitments)?; let kzg_bytes: [u8; 48] = aggregated .as_ref() @@ -422,7 +446,7 @@ pub fn generate_ingress_proof_v2( }); signer.sign_ingress_proof(&mut proof)?; - Ok(proof) + Ok((proof, per_chunk_bytes)) } /// Generate a V2 ingress proof for blob-derived data (EIP-4844). 
@@ -449,12 +473,16 @@ pub fn generate_ingress_proof_v2_from_blob( let mut padded = vec![0_u8; CHUNK_SIZE_FOR_KZG]; padded[..blob_data.len()].copy_from_slice(blob_data); - let (leaves, _) = generate_ingress_leaves( + // Use regular leaves (without signer) for data_root — consistent with native V2 path + let (_, regular_leaves) = generate_ingress_leaves( std::iter::once(Ok(padded.as_slice())), signer.address(), - false, + true, + )?; + let root = generate_data_root( + regular_leaves + .ok_or_eyre("generate_ingress_leaves with and_regular=true must return Some")?, )?; - let root = generate_data_root(leaves)?; let composite = compute_composite_commitment(kzg_commitment, &signer.address()); @@ -533,6 +561,20 @@ pub fn verify_ingress_proof>( return Ok(false); } + // Verify data_root matches the merkle root of the provided chunks + let (_, regular_leaves) = generate_ingress_leaves( + chunks_vec.iter().map(|c| Ok(c.as_ref())), + recovered_address, + true, + )?; + let computed_root = generate_data_root( + regular_leaves + .ok_or_eyre("generate_ingress_leaves with and_regular=true must return Some")?, + )?; + if H256(computed_root.id) != v2.data_root { + return Ok(false); + } + let expected_composite = crate::kzg::compute_composite_commitment(&kzg_bytes, &recovered_address); Ok(expected_composite == v2.composite_commitment) @@ -743,127 +785,104 @@ mod tests { Ok(()) } - #[test] - fn v2_generate_and_verify_roundtrip() -> eyre::Result<()> { - let config = ConsensusConfig::testing(); - let chunk_size = config.chunk_size as usize; - let data_bytes = kzg_safe_data((chunk_size as f64 * 2.5).round() as usize, 42); - - let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; - let root = generate_data_root(leaves)?; - let data_root = H256(root.id); + fn test_chunk_size() -> usize { + usize::try_from(ConsensusConfig::testing().chunk_size).expect("chunk_size fits in usize") + } - let signer = IrysSigner::random_signer(&config); - let chunks: Vec> = 
data_bytes.chunks(chunk_size).map(Vec::from).collect(); + struct V2TestSetup { + data_root: H256, + signer: IrysSigner, + chunks: Vec>, + chain_id: u64, + anchor: H256, + kzg_settings: &'static c_kzg::KzgSettings, + } - let chain_id = 1_u64; - let anchor = H256::random(); - let kzg_settings = crate::kzg::default_kzg_settings(); + impl V2TestSetup { + fn new(byte_count: usize) -> eyre::Result { + let config = ConsensusConfig::testing(); + let chunk_size = test_chunk_size(); + let data_bytes = kzg_safe_data(byte_count, 42); + let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; + let root = generate_data_root(leaves)?; + Ok(Self { + data_root: H256(root.id), + signer: IrysSigner::random_signer(&config), + chunks: data_bytes.chunks(chunk_size).map(Vec::from).collect(), + chain_id: 1, + anchor: H256::random(), + kzg_settings: crate::kzg::default_kzg_settings(), + }) + } - let proof = - generate_ingress_proof_v2(&signer, data_root, &chunks, chain_id, anchor, kzg_settings)?; + fn generate_proof(&self) -> eyre::Result { + let (proof, _per_chunk) = generate_ingress_proof_v2( + &self.signer, + self.data_root, + &self.chunks, + self.chain_id, + self.anchor, + self.kzg_settings, + )?; + Ok(proof) + } + } + #[test] + fn v2_generate_and_verify_roundtrip() -> eyre::Result<()> { + let cs = test_chunk_size(); + let s = V2TestSetup::new(cs * 5 / 2)?; + let proof = s.generate_proof()?; assert!(matches!(proof, IngressProof::V2(_))); - assert!(verify_ingress_proof(&proof, chunks.iter(), chain_id)?); - + assert!(verify_ingress_proof(&proof, s.chunks.iter(), s.chain_id)?); Ok(()) } #[test] fn v2_wrong_chunks_fails_verification() -> eyre::Result<()> { - let config = ConsensusConfig::testing(); - let chunk_size = config.chunk_size as usize; - let data_bytes = kzg_safe_data((chunk_size as f64 * 2.5).round() as usize, 42); - - let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; - let root = generate_data_root(leaves)?; 
- let data_root = H256(root.id); + let cs = test_chunk_size(); + let s = V2TestSetup::new(cs * 5 / 2)?; + let proof = s.generate_proof()?; - let signer = IrysSigner::random_signer(&config); - let chunks: Vec> = data_bytes.chunks(chunk_size).map(Vec::from).collect(); - - let chain_id = 1_u64; - let anchor = H256::random(); - let kzg_settings = crate::kzg::default_kzg_settings(); - - let proof = - generate_ingress_proof_v2(&signer, data_root, &chunks, chain_id, anchor, kzg_settings)?; - - // Tampered chunk: use a different safe fill value - let mut bad_chunks = chunks.clone(); + let mut bad_chunks = s.chunks.clone(); // clone: need original for reversed test bad_chunks[0] = kzg_safe_data(bad_chunks[0].len(), 7); - assert!(!verify_ingress_proof(&proof, bad_chunks.iter(), chain_id)?); + assert!(!verify_ingress_proof( + &proof, + bad_chunks.iter(), + s.chain_id + )?); - // Reversed chunks should fail (only if >1 chunk) - if chunks.len() > 1 { - let mut reversed = chunks; + if s.chunks.len() > 1 { + let mut reversed = s.chunks; reversed.reverse(); - assert!(!verify_ingress_proof(&proof, reversed.iter(), chain_id)?); + assert!(!verify_ingress_proof(&proof, reversed.iter(), s.chain_id)?); } - Ok(()) } #[test] fn v2_wrong_chain_id_fails_verification() -> eyre::Result<()> { - let config = ConsensusConfig::testing(); - let chunk_size = config.chunk_size as usize; - let data_bytes = kzg_safe_data(chunk_size * 2, 42); - - let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; - let root = generate_data_root(leaves)?; - let data_root = H256(root.id); - - let signer = IrysSigner::random_signer(&config); - let chunks: Vec> = data_bytes.chunks(chunk_size).map(Vec::from).collect(); - - let anchor = H256::random(); - let kzg_settings = crate::kzg::default_kzg_settings(); - - let proof = - generate_ingress_proof_v2(&signer, data_root, &chunks, 1, anchor, kzg_settings)?; - - assert!(!verify_ingress_proof(&proof, chunks.iter(), 2)?); - + let s = 
V2TestSetup::new(test_chunk_size() * 2)?; + let proof = s.generate_proof()?; + assert!(!verify_ingress_proof(&proof, s.chunks.iter(), 2)?); Ok(()) } #[test] fn v2_composite_commitment_binds_to_signer() -> eyre::Result<()> { - let config = ConsensusConfig::testing(); - let chunk_size = config.chunk_size as usize; - let data_bytes = kzg_safe_data(chunk_size * 2, 42); + let s = V2TestSetup::new(test_chunk_size() * 2)?; + let signer_b = IrysSigner::random_signer(&ConsensusConfig::testing()); - let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; - let root = generate_data_root(leaves)?; - let data_root = H256(root.id); - - let signer_a = IrysSigner::random_signer(&config); - let signer_b = IrysSigner::random_signer(&config); - let chunks: Vec> = data_bytes.chunks(chunk_size).map(Vec::from).collect(); - - let chain_id = 1_u64; - let anchor = H256::random(); - let kzg_settings = crate::kzg::default_kzg_settings(); - - let proof_a = generate_ingress_proof_v2( - &signer_a, - data_root, - &chunks, - chain_id, - anchor, - kzg_settings, - )?; - let proof_b = generate_ingress_proof_v2( + let proof_a = s.generate_proof()?; + let (proof_b, _) = generate_ingress_proof_v2( &signer_b, - data_root, - &chunks, - chain_id, - anchor, - kzg_settings, + s.data_root, + &s.chunks, + s.chain_id, + s.anchor, + s.kzg_settings, )?; - // Same data → same KZG commitment, but different composite commitments let (kzg_a, composite_a) = match &proof_a { IngressProof::V2(v2) => (v2.kzg_commitment, v2.composite_commitment), _ => unreachable!(), @@ -875,10 +894,8 @@ mod tests { assert_eq!(kzg_a, kzg_b); assert_ne!(composite_a, composite_b); - - assert!(verify_ingress_proof(&proof_a, chunks.iter(), chain_id)?); - assert!(verify_ingress_proof(&proof_b, chunks.iter(), chain_id)?); - + assert!(verify_ingress_proof(&proof_a, s.chunks.iter(), s.chain_id)?); + assert!(verify_ingress_proof(&proof_b, s.chunks.iter(), s.chain_id)?); Ok(()) } @@ -886,26 +903,8 @@ mod tests { fn 
v2_rlp_roundtrip() -> eyre::Result<()> { use bytes::BytesMut; - let config = ConsensusConfig::testing(); - let chunk_size = config.chunk_size as usize; - let data_bytes = kzg_safe_data(chunk_size, 42); - - let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; - let root = generate_data_root(leaves)?; - let data_root = H256(root.id); - - let signer = IrysSigner::random_signer(&config); - let chunks: Vec> = vec![data_bytes]; - let kzg_settings = crate::kzg::default_kzg_settings(); - - let original = generate_ingress_proof_v2( - &signer, - data_root, - &chunks, - 42, - H256::random(), - kzg_settings, - )?; + let s = V2TestSetup::new(test_chunk_size())?; + let original = s.generate_proof()?; let mut buf = BytesMut::new(); alloy_rlp::Encodable::encode(&original, &mut buf); @@ -923,39 +922,18 @@ mod tests { } _ => panic!("expected V2 proofs"), } - Ok(()) } #[test] fn v2_tampered_kzg_commitment_fails() -> eyre::Result<()> { - let config = ConsensusConfig::testing(); - let chunk_size = config.chunk_size as usize; - let data_bytes = kzg_safe_data(chunk_size * 2, 42); - - let leaves = generate_leaves(vec![data_bytes.clone()].into_iter().map(Ok), chunk_size)?; - let root = generate_data_root(leaves)?; - let data_root = H256(root.id); - - let signer = IrysSigner::random_signer(&config); - let chunks: Vec> = data_bytes.chunks(chunk_size).map(Vec::from).collect(); - - let kzg_settings = crate::kzg::default_kzg_settings(); - let mut proof = generate_ingress_proof_v2( - &signer, - data_root, - &chunks, - 1, - H256::random(), - kzg_settings, - )?; + let s = V2TestSetup::new(test_chunk_size() * 2)?; + let mut proof = s.generate_proof()?; if let IngressProof::V2(ref mut v2) = proof { v2.kzg_commitment.0[0] ^= 0xFF; } - - assert!(!verify_ingress_proof(&proof, chunks.iter(), 1)?); - + assert!(!verify_ingress_proof(&proof, s.chunks.iter(), s.chain_id)?); Ok(()) } @@ -1004,35 +982,6 @@ mod tests { Ok(()) } - #[test] - fn 
v2_blob_sidecar_commitment_preserved() -> eyre::Result<()> { - let config = ConsensusConfig::testing(); - let signer = IrysSigner::random_signer(&config); - - let blob_data = kzg_safe_data(131_072, 55); - - // Compute commitment from the blob data - let kzg_settings = crate::kzg::default_kzg_settings(); - let kzg_commitment = crate::kzg::compute_chunk_commitment(&blob_data, kzg_settings)?; - let commitment_bytes: [u8; 48] = kzg_commitment.as_ref().try_into().unwrap(); - - let proof = generate_ingress_proof_v2_from_blob( - &signer, - &blob_data, - &commitment_bytes, - 1, - H256::random(), - )?; - - // The KZG commitment in the proof must be exactly the one we provided - match &proof { - IngressProof::V2(v2) => assert_eq!(v2.kzg_commitment.0, commitment_bytes), - _ => panic!("expected V2 proof"), - } - - Ok(()) - } - #[test] fn v2_blob_wrong_data_fails_verification() -> eyre::Result<()> { let config = ConsensusConfig::testing(); diff --git a/crates/types/src/kzg.rs b/crates/types/src/kzg.rs index 5a88fc499c..211c68384f 100644 --- a/crates/types/src/kzg.rs +++ b/crates/types/src/kzg.rs @@ -6,9 +6,11 @@ use openssl::sha; use reth_codecs::Compact; use serde::{Deserialize, Serialize}; -pub const BLOB_SIZE: usize = 131_072; // 128KB = 4096 * 32 bytes -pub const CHUNK_SIZE_FOR_KZG: usize = 262_144; // 256KB = 2 * BLOB_SIZE -pub const COMMITMENT_SIZE: usize = 48; // Compressed G1 point +pub const BLOB_SIZE: usize = 131_072; +pub const CHUNK_SIZE_FOR_KZG: usize = 2 * BLOB_SIZE; +pub const COMMITMENT_SIZE: usize = 48; +pub const PROOF_SIZE: usize = 48; +pub const SCALAR_SIZE: usize = 32; pub const DOMAIN_SEPARATOR: &[u8] = b"IRYS_KZG_INGRESS_V1"; /// A 48-byte KZG commitment (compressed BLS12-381 G1 point). 
@@ -62,8 +64,10 @@ impl From for [u8; COMMITMENT_SIZE] { impl Serialize for KzgCommitmentBytes { fn serialize(&self, serializer: S) -> Result { if serializer.is_human_readable() { - let hex = alloy_primitives::hex::encode(self.0); - serializer.serialize_str(&format!("0x{hex}")) + let mut s = String::with_capacity(2 + COMMITMENT_SIZE * 2); + s.push_str("0x"); + s.push_str(&alloy_primitives::hex::encode(self.0)); + serializer.serialize_str(&s) } else { serializer.serialize_bytes(&self.0) } @@ -72,26 +76,22 @@ impl Serialize for KzgCommitmentBytes { impl<'de> Deserialize<'de> for KzgCommitmentBytes { fn deserialize>(deserializer: D) -> Result { + fn bytes_to_commitment( + bytes: Vec, + ) -> Result<[u8; COMMITMENT_SIZE], E> { + bytes.try_into().map_err(|v: Vec| { + E::custom(format!("expected {COMMITMENT_SIZE} bytes, got {}", v.len())) + }) + } + if deserializer.is_human_readable() { let s = String::deserialize(deserializer)?; let s = s.strip_prefix("0x").unwrap_or(&s); let bytes = alloy_primitives::hex::decode(s).map_err(serde::de::Error::custom)?; - let arr: [u8; COMMITMENT_SIZE] = bytes.try_into().map_err(|v: Vec| { - serde::de::Error::custom(format!( - "expected {COMMITMENT_SIZE} bytes, got {}", - v.len() - )) - })?; - Ok(Self(arr)) + Ok(Self(bytes_to_commitment::(bytes)?)) } else { let bytes = >::deserialize(deserializer)?; - let arr: [u8; COMMITMENT_SIZE] = bytes.try_into().map_err(|v: Vec| { - serde::de::Error::custom(format!( - "expected {COMMITMENT_SIZE} bytes, got {}", - v.len() - )) - })?; - Ok(Self(arr)) + Ok(Self(bytes_to_commitment::(bytes)?)) } } } @@ -131,6 +131,23 @@ impl alloy_rlp::Decodable for KzgCommitmentBytes { } } +/// A single chunk's KZG commitment stored during ingress. +/// Maps (data_root, chunk_index) → KzgCommitmentBytes in the database. 
+#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, Compact)] +pub struct PerChunkCommitment { + pub chunk_index: u32, + pub commitment: KzgCommitmentBytes, +} + +impl arbitrary::Arbitrary<'_> for PerChunkCommitment { + fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result { + Ok(Self { + chunk_index: u.arbitrary()?, + commitment: u.arbitrary()?, + }) + } +} + /// Returns a reference to the default (Ethereum mainnet) trusted setup KZG settings. /// Lazily initialized on first call, thread-safe. pub fn default_kzg_settings() -> &'static KzgSettings { @@ -163,46 +180,49 @@ pub fn aggregate_commitments( use blst::min_pk::PublicKey; use blst::{blst_p1, blst_p1_affine, blst_scalar}; - // Compute random challenge: r = SHA256(C1 || C2) let mut hasher = sha::Sha256::new(); hasher.update(c1.as_ref()); hasher.update(c2.as_ref()); let r_bytes = hasher.finish(); - // Convert r to blst scalar (big-endian input) let mut r_scalar = blst_scalar::default(); + // SAFETY: `r_bytes` is a 32-byte SHA256 digest; `blst_scalar_from_bendian` reads + // exactly 32 bytes from the pointer, which is within bounds. unsafe { blst::blst_scalar_from_bendian(&mut r_scalar, r_bytes.as_ptr()); } - // Decompress C1 and C2 from their 48-byte compressed G1 representations let p1 = PublicKey::from_bytes(c1.as_ref()) .map_err(|e| eyre::eyre!("failed to decompress C1: {e:?}"))?; let p2 = PublicKey::from_bytes(c2.as_ref()) .map_err(|e| eyre::eyre!("failed to decompress C2: {e:?}"))?; - // Get affine points via From trait let p1_affine: &blst_p1_affine = (&p1).into(); let p2_affine: &blst_p1_affine = (&p2).into(); - // Convert C2 to projective, then compute r·C2 let mut p2_proj = blst_p1::default(); let mut r_c2 = blst_p1::default(); + // SAFETY: All blst_p1 types are initialized via `default()`. `blst_p1_from_affine` + // converts a valid affine point (from `PublicKey::from_bytes` which validated the + // curve point) to projective form. 
`blst_p1_mult` multiplies a valid projective + // point by a 256-bit scalar — both inputs are well-formed. unsafe { blst::blst_p1_from_affine(&mut p2_proj, p2_affine); blst::blst_p1_mult(&mut r_c2, &p2_proj, r_scalar.b.as_ptr(), 256); } - // Compute C1 + r·C2 (using affine + projective variant) let mut result = blst_p1::default(); + // SAFETY: `c1_proj` is initialised from a validated affine point. `r_c2` is the + // result of a valid scalar multiplication. `blst_p1_add` adds two projective points. unsafe { let mut c1_proj = blst_p1::default(); blst::blst_p1_from_affine(&mut c1_proj, p1_affine); blst::blst_p1_add(&mut result, &c1_proj, &r_c2); } - // Compress back to 48-byte representation let mut compressed = [0_u8; COMMITMENT_SIZE]; + // SAFETY: `result` is a valid projective G1 point from the addition above. + // `compressed` is a 48-byte buffer matching the compressed G1 point size. unsafe { blst::blst_p1_compress(compressed.as_mut_ptr(), &result); } @@ -232,7 +252,6 @@ pub fn compute_chunk_commitment( let mut padded = [0_u8; CHUNK_SIZE_FOR_KZG]; padded[..chunk_data.len()].copy_from_slice(chunk_data); - // Split into two 128KB halves let (first_half, second_half) = padded.split_at(BLOB_SIZE); let first_half: &[u8; BLOB_SIZE] = first_half .try_into() @@ -282,6 +301,231 @@ pub fn compute_composite_commitment( H256(hasher.finish()) } +// --------------------------------------------------------------------------- +// BLS12-381 scalar field (Fr) arithmetic helpers +// --------------------------------------------------------------------------- + +/// Convert 32 big-endian bytes into a BLS12-381 scalar field element. +/// The input is automatically reduced modulo the scalar field order. +fn fr_from_bytes(bytes: &[u8; SCALAR_SIZE]) -> blst::blst_fr { + let mut scalar = blst::blst_scalar::default(); + let mut fr = blst::blst_fr::default(); + // SAFETY: `bytes` is exactly 32 bytes; `blst_scalar_from_bendian` reads 32 bytes + // from the pointer. 
`blst_fr_from_scalar` reduces modulo the field order. + unsafe { + blst::blst_scalar_from_bendian(&mut scalar, bytes.as_ptr()); + blst::blst_fr_from_scalar(&mut fr, &scalar); + } + fr +} + +/// Convert a BLS12-381 scalar field element back to 32 big-endian bytes. +fn fr_to_bytes(fr: &blst::blst_fr) -> [u8; SCALAR_SIZE] { + let mut scalar = blst::blst_scalar::default(); + let mut bytes = [0_u8; SCALAR_SIZE]; + // SAFETY: `blst_scalar_from_fr` writes a valid scalar from the field element. + // `blst_bendian_from_scalar` writes exactly 32 bytes. + unsafe { + blst::blst_scalar_from_fr(&mut scalar, fr); + blst::blst_bendian_from_scalar(bytes.as_mut_ptr(), &scalar); + } + bytes +} + +/// Add two BLS12-381 scalars (mod field order). Inputs/outputs are big-endian. +pub fn bls_fr_add(a: &[u8; SCALAR_SIZE], b: &[u8; SCALAR_SIZE]) -> [u8; SCALAR_SIZE] { + let fr_a = fr_from_bytes(a); + let fr_b = fr_from_bytes(b); + let mut result = blst::blst_fr::default(); + // SAFETY: All `blst_fr` values are initialized. `blst_fr_add` computes + // the modular sum of two valid field elements. + unsafe { + blst::blst_fr_add(&mut result, &fr_a, &fr_b); + } + fr_to_bytes(&result) +} + +/// Multiply two BLS12-381 scalars (mod field order). Inputs/outputs are big-endian. +pub fn bls_fr_mul(a: &[u8; SCALAR_SIZE], b: &[u8; SCALAR_SIZE]) -> [u8; SCALAR_SIZE] { + let fr_a = fr_from_bytes(a); + let fr_b = fr_from_bytes(b); + let mut result = blst::blst_fr::default(); + // SAFETY: Both `blst_fr` values are initialized. `blst_fr_mul` computes + // the modular product of two valid field elements. + unsafe { + blst::blst_fr_mul(&mut result, &fr_a, &fr_b); + } + fr_to_bytes(&result) +} + +/// Compute P1 + scalar·P2 for two compressed BLS12-381 G1 points. +/// +/// Both `p1_bytes` and `p2_bytes` are 48-byte compressed G1 points. +/// `scalar_bytes` is a 32-byte big-endian scalar. 
+pub fn g1_add_scaled( + p1_bytes: &[u8; PROOF_SIZE], + p2_bytes: &[u8; PROOF_SIZE], + scalar_bytes: &[u8; SCALAR_SIZE], +) -> eyre::Result<[u8; PROOF_SIZE]> { + use blst::min_pk::PublicKey; + use blst::{blst_p1, blst_p1_affine, blst_scalar}; + + let mut r_scalar = blst_scalar::default(); + // SAFETY: `scalar_bytes` is exactly 32 bytes. + unsafe { + blst::blst_scalar_from_bendian(&mut r_scalar, scalar_bytes.as_ptr()); + } + + let p1 = PublicKey::from_bytes(p1_bytes) + .map_err(|e| eyre::eyre!("failed to decompress P1: {e:?}"))?; + let p2 = PublicKey::from_bytes(p2_bytes) + .map_err(|e| eyre::eyre!("failed to decompress P2: {e:?}"))?; + + let p1_affine: &blst_p1_affine = (&p1).into(); + let p2_affine: &blst_p1_affine = (&p2).into(); + + let mut p2_proj = blst_p1::default(); + let mut r_p2 = blst_p1::default(); + // SAFETY: All blst_p1 types are initialized via `default()`. `blst_p1_from_affine` + // converts a validated affine point to projective. `blst_p1_mult` multiplies a + // valid projective point by a 256-bit scalar. + unsafe { + blst::blst_p1_from_affine(&mut p2_proj, p2_affine); + blst::blst_p1_mult(&mut r_p2, &p2_proj, r_scalar.b.as_ptr(), 256); + } + + let mut result = blst_p1::default(); + // SAFETY: Both projective points are valid (from validated affine points + // and scalar multiplication). `blst_p1_add` adds two projective points. + unsafe { + let mut p1_proj = blst_p1::default(); + blst::blst_p1_from_affine(&mut p1_proj, p1_affine); + blst::blst_p1_add(&mut result, &p1_proj, &r_p2); + } + + let mut compressed = [0_u8; PROOF_SIZE]; + // SAFETY: `result` is a valid projective G1 point. `compressed` is a 48-byte + // buffer matching compressed G1 point size. 
+ unsafe { + blst::blst_p1_compress(compressed.as_mut_ptr(), &result); + } + + Ok(compressed) +} + +// --------------------------------------------------------------------------- +// KZG opening proof functions +// --------------------------------------------------------------------------- + +/// Compute a KZG opening proof for a 256KB chunk at evaluation point `z`. +/// +/// Splits the chunk into two 128KB halves (same scheme as `compute_chunk_commitment`), +/// computes per-half KZG proofs, then aggregates: +/// - `π = π1 + r·π2` (G1 point addition) +/// - `y = y1 + r·y2` (scalar field addition) +/// where `r = SHA256(C1 || C2)` and `C1, C2` are the per-half commitments. +/// +/// Returns `(proof_bytes, evaluation_bytes)` = (π, y). +pub fn compute_chunk_opening_proof( + chunk_data: &[u8], + z_bytes: &[u8; SCALAR_SIZE], + settings: &KzgSettings, +) -> eyre::Result<([u8; PROOF_SIZE], [u8; SCALAR_SIZE])> { + if chunk_data.len() > CHUNK_SIZE_FOR_KZG { + return Err(eyre::eyre!( + "chunk data too large: {} bytes (max {})", + chunk_data.len(), + CHUNK_SIZE_FOR_KZG + )); + } + + let mut padded = [0_u8; CHUNK_SIZE_FOR_KZG]; + padded[..chunk_data.len()].copy_from_slice(chunk_data); + + let (first_half, second_half) = padded.split_at(BLOB_SIZE); + let first_half: &[u8; BLOB_SIZE] = first_half + .try_into() + .expect("split_at guarantees BLOB_SIZE"); + let second_half: &[u8; BLOB_SIZE] = second_half + .try_into() + .expect("split_at guarantees BLOB_SIZE"); + + let blob1 = Blob::new(*first_half); + let blob2 = Blob::new(*second_half); + + // Per-half commitments needed for aggregation scalar r + let c1 = compute_blob_commitment(first_half, settings)?; + let c2 = compute_blob_commitment(second_half, settings)?; + + // r = SHA256(C1 || C2) — same derivation as aggregate_commitments + let mut hasher = sha::Sha256::new(); + hasher.update(c1.as_ref()); + hasher.update(c2.as_ref()); + let r_bytes = hasher.finish(); + + // Compute KZG opening proofs for each half + let z = 
c_kzg::Bytes32::new(*z_bytes); + let (proof1, y1) = settings + .compute_kzg_proof(&blob1, &z) + .map_err(|e| eyre::eyre!("KZG proof computation failed for first half: {e}"))?; + let (proof2, y2) = settings + .compute_kzg_proof(&blob2, &z) + .map_err(|e| eyre::eyre!("KZG proof computation failed for second half: {e}"))?; + + // Aggregate proof: π = π1 + r·π2 + let proof1_bytes: [u8; PROOF_SIZE] = *proof1.to_bytes().as_ref(); + let proof2_bytes: [u8; PROOF_SIZE] = *proof2.to_bytes().as_ref(); + let aggregated_proof = g1_add_scaled(&proof1_bytes, &proof2_bytes, &r_bytes)?; + + // Aggregate evaluation: y = y1 + r·y2 + let y1_bytes: [u8; SCALAR_SIZE] = *y1.as_ref(); + let y2_bytes: [u8; SCALAR_SIZE] = *y2.as_ref(); + let r_y2 = bls_fr_mul(&y2_bytes, &r_bytes); + let aggregated_y = bls_fr_add(&y1_bytes, &r_y2); + + Ok((aggregated_proof, aggregated_y)) +} + +/// Verify a KZG opening proof against a commitment. +/// +/// Checks that `p(z) = y` using the provided proof, where `p` is the polynomial +/// committed to by `commitment`. +pub fn verify_chunk_opening_proof( + commitment: &KzgCommitmentBytes, + z_bytes: &[u8; SCALAR_SIZE], + y_bytes: &[u8; SCALAR_SIZE], + proof_bytes: &[u8; PROOF_SIZE], + settings: &KzgSettings, +) -> eyre::Result { + let commitment_48 = c_kzg::Bytes48::new(commitment.0); + let z = c_kzg::Bytes32::new(*z_bytes); + let y = c_kzg::Bytes32::new(*y_bytes); + let proof_48 = c_kzg::Bytes48::new(*proof_bytes); + + settings + .verify_kzg_proof(&commitment_48, &z, &y, &proof_48) + .map_err(|e| eyre::eyre!("KZG proof verification failed: {e}")) +} + +// --------------------------------------------------------------------------- +// Challenge point derivation +// --------------------------------------------------------------------------- + +/// Derive a BLS12-381 field element from a challenge seed and chunk offset. +/// +/// Used as the evaluation point `z` for custody opening proofs. 
+/// Result is `SHA256(challenge_seed || chunk_offset_le)` reduced modulo +/// the BLS12-381 scalar field order. +pub fn derive_challenge_point(challenge_seed: &H256, chunk_offset: u32) -> [u8; SCALAR_SIZE] { + let mut hasher = sha::Sha256::new(); + hasher.update(&challenge_seed.0); + hasher.update(&chunk_offset.to_le_bytes()); + let hash = hasher.finish(); + + // Reduce modulo BLS12-381 scalar field order via blst + fr_to_bytes(&fr_from_bytes(&hash)) +} + #[cfg(test)] mod tests { use super::*; @@ -322,19 +566,6 @@ mod tests { .expect("commitment should be a valid G1 point"); } - #[test] - fn partial_chunk_zero_padded() { - let small_data = vec![42_u8; 1000]; - let commitment = compute_chunk_commitment(&small_data, kzg_settings()).unwrap(); - assert_eq!(commitment.as_ref().len(), COMMITMENT_SIZE); - } - - #[test] - fn empty_chunk_produces_valid_commitment() { - let commitment = compute_chunk_commitment(&[], kzg_settings()).unwrap(); - assert_eq!(commitment.as_ref().len(), COMMITMENT_SIZE); - } - #[test] fn oversized_chunk_rejected() { let oversized = vec![0_u8; CHUNK_SIZE_FOR_KZG + 1]; @@ -423,5 +654,165 @@ mod tests { let c2 = compute_chunk_commitment(&data_b, kzg_settings()).unwrap(); prop_assert_ne!(commitment_bytes(&c1), commitment_bytes(&c2)); } + + #[test] + fn opening_proof_roundtrip(seed in 0_u8..MAX_VALID_SEED) { + let data = vec![seed; CHUNK_SIZE_FOR_KZG]; + let settings = kzg_settings(); + let commitment = compute_chunk_commitment(&data, settings).unwrap(); + let commitment_bytes_val = KzgCommitmentBytes::from( + <[u8; COMMITMENT_SIZE]>::try_from(commitment.as_ref()).unwrap(), + ); + + let z = derive_challenge_point(&H256::from([seed; 32]), 0); + let (proof, y) = compute_chunk_opening_proof(&data, &z, settings).unwrap(); + let ok = verify_chunk_opening_proof( + &commitment_bytes_val, &z, &y, &proof, settings, + ).unwrap(); + prop_assert!(ok); + } + } + + // -- BLS scalar field arithmetic ------------------------------------------- + + #[test] + fn 
bls_fr_add_identity() { + let zero = [0_u8; SCALAR_SIZE]; + let a = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 42, + ]; + assert_eq!(bls_fr_add(&a, &zero), a); + } + + #[test] + fn bls_fr_mul_identity() { + let one = { + let mut b = [0_u8; SCALAR_SIZE]; + b[SCALAR_SIZE - 1] = 1; + b + }; + let a = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 42, + ]; + assert_eq!(bls_fr_mul(&a, &one), a); + } + + #[test] + fn bls_fr_mul_zero() { + let zero = [0_u8; SCALAR_SIZE]; + let a = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 42, + ]; + assert_eq!(bls_fr_mul(&a, &zero), zero); + } + + #[test] + fn g1_add_scaled_valid_points() { + let data1 = [1_u8; BLOB_SIZE]; + let data2 = [2_u8; BLOB_SIZE]; + let c1 = compute_blob_commitment(&data1, kzg_settings()).unwrap(); + let c2 = compute_blob_commitment(&data2, kzg_settings()).unwrap(); + let p1: [u8; PROOF_SIZE] = c1.as_ref().try_into().unwrap(); + let p2: [u8; PROOF_SIZE] = c2.as_ref().try_into().unwrap(); + let scalar = { + let mut s = [0_u8; SCALAR_SIZE]; + s[SCALAR_SIZE - 1] = 1; + s + }; + let result = g1_add_scaled(&p1, &p2, &scalar).unwrap(); + // p1 + 1*p2 should be a valid G1 point + blst::min_pk::PublicKey::from_bytes(&result).expect("result should be a valid G1 point"); + } + + // -- Opening proof tests --------------------------------------------------- + + #[test] + fn opening_proof_wrong_data_fails() { + let data = vec![42_u8; CHUNK_SIZE_FOR_KZG]; + let settings = kzg_settings(); + let commitment = compute_chunk_commitment(&data, settings).unwrap(); + let commitment_bytes_val = KzgCommitmentBytes::from( + <[u8; COMMITMENT_SIZE]>::try_from(commitment.as_ref()).unwrap(), + ); + + let z = derive_challenge_point(&H256::from([1_u8; 32]), 0); + let (_proof, _y) = compute_chunk_opening_proof(&data, &z, settings).unwrap(); + + // Compute proof for different data + 
let bad_data = vec![7_u8; CHUNK_SIZE_FOR_KZG]; + let (bad_proof, bad_y) = compute_chunk_opening_proof(&bad_data, &z, settings).unwrap(); + + // Verify with original commitment but bad proof/y — should fail + let ok = + verify_chunk_opening_proof(&commitment_bytes_val, &z, &bad_y, &bad_proof, settings) + .unwrap(); + assert!(!ok); + } + + #[test] + fn opening_proof_wrong_z_fails() { + // Non-constant data: vary each 32-byte field element so the + // polynomial is non-trivial and p(z1) != p(z2). + let mut data = vec![0_u8; CHUNK_SIZE_FOR_KZG]; + for (i, chunk) in data.chunks_mut(SCALAR_SIZE).enumerate() { + let val = u8::try_from(i % usize::from(MAX_VALID_SEED)).unwrap_or(0); + chunk[1] = val; // byte 0 stays 0 (< 0x74), byte 1 varies + } + + let settings = kzg_settings(); + let commitment = compute_chunk_commitment(&data, settings).unwrap(); + let commitment_bytes_val = KzgCommitmentBytes::from( + <[u8; COMMITMENT_SIZE]>::try_from(commitment.as_ref()).unwrap(), + ); + + let z1 = derive_challenge_point(&H256::from([1_u8; 32]), 0); + let (proof, y) = compute_chunk_opening_proof(&data, &z1, settings).unwrap(); + + // Verify with a different z — should fail + let z2 = derive_challenge_point(&H256::from([2_u8; 32]), 0); + let ok = + verify_chunk_opening_proof(&commitment_bytes_val, &z2, &y, &proof, settings).unwrap(); + assert!(!ok); + } + + // -- Challenge point derivation tests -------------------------------------- + + #[test] + fn derive_challenge_point_deterministic() { + let seed = H256::from([42_u8; 32]); + let z1 = derive_challenge_point(&seed, 0); + let z2 = derive_challenge_point(&seed, 0); + assert_eq!(z1, z2); + } + + #[test] + fn derive_challenge_point_different_offsets() { + let seed = H256::from([42_u8; 32]); + let z0 = derive_challenge_point(&seed, 0); + let z1 = derive_challenge_point(&seed, 1); + assert_ne!(z0, z1); + } + + #[test] + fn derive_challenge_point_different_seeds() { + let z1 = derive_challenge_point(&H256::from([1_u8; 32]), 0); + let z2 = 
derive_challenge_point(&H256::from([2_u8; 32]), 0); + assert_ne!(z1, z2); + } + + #[test] + fn derive_challenge_point_valid_field_element() { + // BLS12-381 scalar field order (big-endian) + let bls_order: [u8; 32] = [ + 0x73, 0xed, 0xa7, 0x53, 0x29, 0x9d, 0x7d, 0x48, 0x33, 0x39, 0xd8, 0x08, 0x09, 0xa1, + 0xd8, 0x05, 0x53, 0xbd, 0xa4, 0x02, 0xff, 0xfe, 0x5b, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0x00, 0x00, 0x00, 0x01, + ]; + let z = derive_challenge_point(&H256::from([0xff_u8; 32]), 0); + // z must be strictly less than the field order + assert!(z < bls_order); } } From b0eea5be329370beabfcb2978328416f1c0f4eeb Mon Sep 17 00:00:00 2001 From: jason Date: Mon, 23 Feb 2026 16:20:32 +0000 Subject: [PATCH 09/13] feat(custody): add custody proof types, shadow tx, and block validation --- crates/actors/src/block_validation.rs | 59 +++++ crates/irys-reth/src/evm.rs | 24 +- crates/irys-reth/src/shadow_tx.rs | 107 +++++++++ crates/types/src/config/mod.rs | 3 + crates/types/src/custody.rs | 333 ++++++++++++++++++++++++++ crates/types/src/lib.rs | 1 + 6 files changed, 526 insertions(+), 1 deletion(-) create mode 100644 crates/types/src/custody.rs diff --git a/crates/actors/src/block_validation.rs b/crates/actors/src/block_validation.rs index fca3f9bedb..6396f5fe33 100644 --- a/crates/actors/src/block_validation.rs +++ b/crates/actors/src/block_validation.rs @@ -3062,6 +3062,65 @@ fn get_submit_ledger_slot_addresses( num_addresses_per_slot } +/// Verify custody proofs included in a block. +/// +/// Returns `Ok(())` if all proofs are valid or custody proofs are disabled. +/// This function is not yet wired into `validate_block()` — that happens once +/// blocks carry custody proofs via gossip (Phase 3). 
+pub fn validate_custody_proofs( + custody_proofs: &[irys_types::custody::CustodyProof], + consensus_config: &ConsensusConfig, + db: &DatabaseProvider, +) -> eyre::Result<()> { + if !consensus_config.enable_custody_proofs { + return Ok(()); + } + + let kzg_settings = irys_types::kzg::default_kzg_settings(); + let tx = db.tx()?; + for proof in custody_proofs { + let result = irys_types::custody::verify_custody_proof( + proof, + |data_root, chunk_index| { + irys_database::get_per_chunk_kzg_commitment(&tx, data_root, chunk_index) + }, + kzg_settings, + consensus_config.custody_challenge_count, + )?; + + match result { + irys_types::custody::CustodyVerificationResult::Valid => {} + irys_types::custody::CustodyVerificationResult::InvalidOpeningCount { + expected, + got, + } => { + eyre::bail!( + "custody proof for miner {:?} partition {:?}: expected {expected} openings, got {got}", + proof.challenged_miner, + proof.partition_hash, + ); + } + irys_types::custody::CustodyVerificationResult::MissingCommitment { + data_root, + chunk_index, + } => { + eyre::bail!( + "custody proof for miner {:?}: missing commitment for data_root={data_root:?} chunk_index={chunk_index}", + proof.challenged_miner, + ); + } + irys_types::custody::CustodyVerificationResult::InvalidProof { chunk_offset } => { + eyre::bail!( + "custody proof for miner {:?}: invalid KZG opening at chunk_offset={chunk_offset}", + proof.challenged_miner, + ); + } + } + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/irys-reth/src/evm.rs b/crates/irys-reth/src/evm.rs index f08748b7bf..d157f6f521 100644 --- a/crates/irys-reth/src/evm.rs +++ b/crates/irys-reth/src/evm.rs @@ -1184,7 +1184,6 @@ where )) } shadow_tx::TransactionPacket::UpdateRewardAddress(update_reward_address_debit) => { - // Fee-only via priority fee (already processed). Emit a log only. 
let log = Self::create_shadow_log( update_reward_address_debit.target, vec![topic], @@ -1197,6 +1196,29 @@ where let execution_result = Self::create_success_result(log); Ok((Err(execution_result), target)) } + shadow_tx::TransactionPacket::CustodyPenalty(penalty) => { + let log = Self::create_shadow_log( + penalty.target, + vec![topic], + vec![ + DynSolValue::Uint(penalty.amount, 256), + DynSolValue::Address(penalty.target), + ], + ); + let target = penalty.target; + let balance_decrement = shadow_tx::BalanceDecrement { + amount: penalty.amount, + target: penalty.target, + irys_ref: penalty.partition_hash, + }; + let res = self.handle_balance_decrement(log, tx_hash, &balance_decrement)?; + Ok(( + res.map(|(plain_account, execution_result)| { + (plain_account, execution_result, true) + }), + target, + )) + } }, } } diff --git a/crates/irys-reth/src/shadow_tx.rs b/crates/irys-reth/src/shadow_tx.rs index 1f53fe089a..30531160a0 100644 --- a/crates/irys-reth/src/shadow_tx.rs +++ b/crates/irys-reth/src/shadow_tx.rs @@ -75,6 +75,8 @@ pub enum TransactionPacket { UnstakeRefund(BalanceIncrement), /// Update reward address at inclusion: fee-only via priority fee. No amount in packet; log-only. UpdateRewardAddress(UpdateRewardAddressDebit), + /// Custody penalty: slash pledge deposit when miner fails custody challenge. 
+ CustodyPenalty(CustodyPenaltyPacket), } #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, arbitrary::Arbitrary)] @@ -144,6 +146,7 @@ impl TransactionPacket { Self::IngressProofReward(inc) => Some(inc.target), Self::PermFeeRefund(inc) => Some(inc.target), Self::UpdateRewardAddress(dec) => Some(dec.target), + Self::CustodyPenalty(p) => Some(p.target), } } } @@ -177,6 +180,8 @@ pub mod shadow_tx_topics { LazyLock::new(|| keccak256("SHADOW_TX_PERM_FEE_REFUND")); pub static UPDATE_REWARD_ADDRESS: LazyLock> = LazyLock::new(|| keccak256("SHADOW_TX_UPDATE_REWARD_ADDRESS")); + pub static CUSTODY_PENALTY: LazyLock> = + LazyLock::new(|| keccak256("SHADOW_TX_CUSTODY_PENALTY")); } impl ShadowTransaction { @@ -250,6 +255,7 @@ impl TransactionPacket { Self::IngressProofReward(_) => *INGRESS_PROOF_REWARD, Self::PermFeeRefund(_) => *PERM_FEE_REFUND, Self::UpdateRewardAddress(_) => *UPDATE_REWARD_ADDRESS, + Self::CustodyPenalty(_) => *CUSTODY_PENALTY, } } } @@ -267,6 +273,7 @@ pub const PERM_FEE_REFUND_ID: u8 = 0x09; pub const UNPLEDGE_REFUND_ID: u8 = 0x0A; pub const UNSTAKE_DEBIT_ID: u8 = 0x0B; pub const UPDATE_REWARD_ADDRESS_ID: u8 = 0x0C; +pub const CUSTODY_PENALTY_ID: u8 = 0x0D; /// Discriminants for EitherIncrementOrDecrement pub const EITHER_INCREMENT_ID: u8 = 0x01; @@ -362,6 +369,10 @@ impl BorshSerialize for TransactionPacket { writer.write_all(&[UPDATE_REWARD_ADDRESS_ID])?; inner.serialize(writer) } + Self::CustodyPenalty(inner) => { + writer.write_all(&[CUSTODY_PENALTY_ID])?; + inner.serialize(writer) + } } } } @@ -393,6 +404,9 @@ impl BorshDeserialize for TransactionPacket { UPDATE_REWARD_ADDRESS_ID => { Self::UpdateRewardAddress(UpdateRewardAddressDebit::deserialize_reader(reader)?) } + CUSTODY_PENALTY_ID => { + Self::CustodyPenalty(CustodyPenaltyPacket::deserialize_reader(reader)?) 
+ } _ => { return Err(borsh::io::Error::new( borsh::io::ErrorKind::InvalidData, @@ -574,6 +588,56 @@ impl BorshDeserialize for UnpledgeDebit { } } +/// Custody penalty: slash pledge deposit when miner fails custody challenge. +#[derive( + serde::Deserialize, + serde::Serialize, + Debug, + Clone, + PartialEq, + Eq, + PartialOrd, + Ord, + Default, + arbitrary::Arbitrary, +)] +pub struct CustodyPenaltyPacket { + /// Amount to deduct from the penalized miner. + pub amount: U256, + /// Address of the penalized miner. + pub target: Address, + /// Partition hash identifying which partition failed the custody challenge. + pub partition_hash: FixedBytes<32>, +} + +impl BorshSerialize for CustodyPenaltyPacket { + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + writer.write_all(&self.amount.to_be_bytes::<32>())?; + writer.write_all(self.target.as_slice())?; + writer.write_all(self.partition_hash.as_slice())?; + Ok(()) + } +} + +impl BorshDeserialize for CustodyPenaltyPacket { + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let mut amount_buf = [0_u8; 32]; + reader.read_exact(&mut amount_buf)?; + let amount = U256::from_be_bytes(amount_buf); + let mut addr = [0_u8; 20]; + reader.read_exact(&mut addr)?; + let target = Address::from_slice(&addr); + let mut hash_buf = [0_u8; 32]; + reader.read_exact(&mut hash_buf)?; + let partition_hash = FixedBytes::<32>::from_slice(&hash_buf); + Ok(Self { + amount, + target, + partition_hash, + }) + } +} + impl BorshSerialize for UnstakeDebit { fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { writer.write_all(self.target.as_slice())?; @@ -939,6 +1003,11 @@ mod tests { target: test_address, irys_ref: test_ref, }), + TransactionPacket::CustodyPenalty(CustodyPenaltyPacket { + amount: U256::from(1234_u64), + target: test_address, + partition_hash: test_ref, + }), ]; for packet in packets { @@ -958,6 +1027,44 @@ mod tests { } } + #[test] + fn custody_penalty_roundtrip() { + let solution_hash = 
FixedBytes::<32>::from_slice(&[0xaa; 32]); + let tx = ShadowTransaction::new_v1( + TransactionPacket::CustodyPenalty(CustodyPenaltyPacket { + amount: U256::from(5000_u64), + target: Address::repeat_byte(0x55), + partition_hash: FixedBytes::<32>::from_slice(&[0x77; 32]), + }), + solution_hash, + ); + let mut buf = Vec::new(); + tx.serialize(&mut buf).unwrap(); + let decoded = ShadowTransaction::deserialize_reader(&mut &buf[..]).unwrap(); + assert_eq!(decoded, tx); + } + + #[test] + fn custody_penalty_fee_payer() { + let target = Address::repeat_byte(0x55); + let packet = TransactionPacket::CustodyPenalty(CustodyPenaltyPacket { + amount: U256::from(1_u64), + target, + partition_hash: FixedBytes::<32>::ZERO, + }); + assert_eq!(packet.fee_payer_address(), Some(target)); + } + + #[test] + fn custody_penalty_topic() { + let packet = TransactionPacket::CustodyPenalty(CustodyPenaltyPacket { + amount: U256::from(1_u64), + target: Address::ZERO, + partition_hash: FixedBytes::<32>::ZERO, + }); + assert_eq!(packet.topic(), keccak256("SHADOW_TX_CUSTODY_PENALTY")); + } + /// Test backward compatibility detection - old format without solution hash should fail #[test] fn reject_old_format_without_solution_hash() { diff --git a/crates/types/src/config/mod.rs b/crates/types/src/config/mod.rs index c248a45763..477a33b9d8 100644 --- a/crates/types/src/config/mod.rs +++ b/crates/types/src/config/mod.rs @@ -156,6 +156,9 @@ impl Config { if self.consensus.use_kzg_ingress_proofs && !self.consensus.accept_kzg_ingress_proofs { bail!("use_kzg_ingress_proofs=true but accept_kzg_ingress_proofs=false — generated proofs would be rejected"); } + if self.consensus.enable_custody_proofs && !self.consensus.accept_kzg_ingress_proofs { + bail!("enable_custody_proofs=true but accept_kzg_ingress_proofs=false — custody proofs require KZG commitments"); + } Ok(()) } diff --git a/crates/types/src/custody.rs b/crates/types/src/custody.rs new file mode 100644 index 0000000000..4fef2f7357 --- /dev/null +++ 
b/crates/types/src/custody.rs @@ -0,0 +1,333 @@ +use crate::kzg::{verify_chunk_opening_proof, KzgCommitmentBytes, PROOF_SIZE, SCALAR_SIZE}; +use crate::{IrysAddress, H256}; +use alloy_primitives::FixedBytes; +use c_kzg::KzgSettings; +use openssl::sha; +use serde::{Deserialize, Serialize}; + +/// A custody challenge targeting a specific miner's partition. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct CustodyChallenge { + pub challenged_miner: IrysAddress, + pub partition_hash: H256, + pub challenge_seed: H256, + pub challenge_block_height: u64, +} + +/// A single KZG opening for one challenged chunk. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct CustodyOpening { + /// Partition-relative chunk offset + pub chunk_offset: u32, + /// Which data_root owns this chunk + pub data_root: H256, + /// Chunk index within the data_root's transaction + pub tx_chunk_index: u32, + /// Evaluation point z (32-byte BLS12-381 scalar) + pub evaluation_point: FixedBytes, + /// Evaluation value y = p(z) (32-byte BLS12-381 scalar) + pub evaluation_value: FixedBytes, + /// Opening proof pi (48-byte compressed G1 point) + pub opening_proof: FixedBytes, +} + +/// A custody proof responding to a challenge. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct CustodyProof { + pub challenged_miner: IrysAddress, + pub partition_hash: H256, + pub challenge_seed: H256, + pub openings: Vec, +} + +/// Result of verifying a custody proof. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CustodyVerificationResult { + Valid, + InvalidOpeningCount { expected: u32, got: u32 }, + MissingCommitment { data_root: H256, chunk_index: u32 }, + InvalidProof { chunk_offset: u32 }, +} + +/// Derive challenge seed from VDF output and partition hash. 
+/// +/// `challenge_seed = SHA256(vdf_output || partition_hash)` +pub fn derive_challenge_seed(vdf_output: &[u8; 32], partition_hash: &H256) -> H256 { + let mut hasher = sha::Sha256::new(); + hasher.update(vdf_output); + hasher.update(&partition_hash.0); + H256(hasher.finish()) +} + +/// Select K challenged chunk offsets from a challenge seed. +/// +/// `offset_j = hash_to_u64(SHA256(challenge_seed || j_le)) % num_chunks` +pub fn select_challenged_offsets( + challenge_seed: &H256, + k: u32, + num_chunks_in_partition: u64, +) -> Vec { + (0..k) + .map(|j| { + let mut hasher = sha::Sha256::new(); + hasher.update(&challenge_seed.0); + hasher.update(&j.to_le_bytes()); + let hash = hasher.finish(); + + // Interpret first 8 bytes as little-endian u64 + let val = u64::from_le_bytes( + hash[..8] + .try_into() + .expect("SHA256 output is at least 8 bytes"), + ); + // Safe truncation: partition offsets fit in u32 + u32::try_from(val % num_chunks_in_partition) + .expect("num_chunks_in_partition fits in u32 range for offset") + }) + .collect() +} + +/// Verify all openings in a custody proof against stored per-chunk commitments. +/// +/// `get_commitment` retrieves the KZG commitment for a given (data_root, chunk_index) +/// from the database. Returns `Ok(None)` if the commitment is not found. +pub fn verify_custody_proof( + proof: &CustodyProof, + get_commitment: impl Fn(H256, u32) -> eyre::Result>, + kzg_settings: &KzgSettings, + expected_challenge_count: u32, +) -> eyre::Result { + let got = u32::try_from(proof.openings.len()) + .map_err(|_| eyre::eyre!("opening count exceeds u32"))?; + if got != expected_challenge_count { + return Ok(CustodyVerificationResult::InvalidOpeningCount { + expected: expected_challenge_count, + got, + }); + } + + for opening in &proof.openings { + let commitment = match get_commitment(opening.data_root, opening.tx_chunk_index)? 
{ + Some(c) => c, + None => { + return Ok(CustodyVerificationResult::MissingCommitment { + data_root: opening.data_root, + chunk_index: opening.tx_chunk_index, + }); + } + }; + + let valid = verify_chunk_opening_proof( + &commitment, + opening.evaluation_point.as_ref(), + opening.evaluation_value.as_ref(), + opening.opening_proof.as_ref(), + kzg_settings, + )?; + + if !valid { + return Ok(CustodyVerificationResult::InvalidProof { + chunk_offset: opening.chunk_offset, + }); + } + } + + Ok(CustodyVerificationResult::Valid) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::kzg::{ + compute_chunk_commitment, compute_chunk_opening_proof, default_kzg_settings, + derive_challenge_point, CHUNK_SIZE_FOR_KZG, COMMITMENT_SIZE, + }; + + #[test] + fn derive_challenge_seed_deterministic() { + let vdf = [42_u8; 32]; + let partition = H256::from([7_u8; 32]); + let s1 = derive_challenge_seed(&vdf, &partition); + let s2 = derive_challenge_seed(&vdf, &partition); + assert_eq!(s1, s2); + } + + #[test] + fn derive_challenge_seed_different_inputs() { + let vdf_a = [1_u8; 32]; + let vdf_b = [2_u8; 32]; + let partition = H256::from([7_u8; 32]); + let s1 = derive_challenge_seed(&vdf_a, &partition); + let s2 = derive_challenge_seed(&vdf_b, &partition); + assert_ne!(s1, s2); + } + + #[test] + fn select_challenged_offsets_returns_k() { + let seed = H256::from([42_u8; 32]); + let offsets = select_challenged_offsets(&seed, 20, 1000); + assert_eq!(offsets.len(), 20); + } + + #[test] + fn select_challenged_offsets_within_bounds() { + let seed = H256::from([42_u8; 32]); + let num_chunks = 500_u64; + let offsets = select_challenged_offsets(&seed, 20, num_chunks); + for &offset in &offsets { + assert!(u64::from(offset) < num_chunks); + } + } + + #[test] + fn select_challenged_offsets_different_seeds() { + let offsets_a = select_challenged_offsets(&H256::from([1_u8; 32]), 20, 10_000); + let offsets_b = select_challenged_offsets(&H256::from([2_u8; 32]), 20, 10_000); + assert_ne!(offsets_a, 
offsets_b); + } + + #[test] + fn verify_custody_proof_roundtrip() { + let settings = default_kzg_settings(); + let chunk_data = vec![42_u8; CHUNK_SIZE_FOR_KZG]; + let commitment = compute_chunk_commitment(&chunk_data, settings).unwrap(); + let commitment_bytes = KzgCommitmentBytes::from( + <[u8; COMMITMENT_SIZE]>::try_from(commitment.as_ref()).unwrap(), + ); + + let challenge_seed = H256::from([99_u8; 32]); + let chunk_offset = 5_u32; + let z = derive_challenge_point(&challenge_seed, chunk_offset); + let (proof_bytes, y_bytes) = + compute_chunk_opening_proof(&chunk_data, &z, settings).unwrap(); + + let opening = CustodyOpening { + chunk_offset, + data_root: H256::from([1_u8; 32]), + tx_chunk_index: 0, + evaluation_point: FixedBytes::from(z), + evaluation_value: FixedBytes::from(y_bytes), + opening_proof: FixedBytes::from(proof_bytes), + }; + + let proof = CustodyProof { + challenged_miner: IrysAddress::from([0xAA_u8; 20]), + partition_hash: H256::from([0xBB_u8; 32]), + challenge_seed, + openings: vec![opening], + }; + + // clone: commitment_bytes is Copy but stored for closure capture + let result = verify_custody_proof( + &proof, + |_data_root, _chunk_index| Ok(Some(commitment_bytes)), + settings, + 1, + ) + .unwrap(); + + assert_eq!(result, CustodyVerificationResult::Valid); + } + + #[test] + fn verify_custody_proof_wrong_proof_fails() { + let settings = default_kzg_settings(); + let chunk_data = vec![42_u8; CHUNK_SIZE_FOR_KZG]; + let commitment = compute_chunk_commitment(&chunk_data, settings).unwrap(); + let commitment_bytes = KzgCommitmentBytes::from( + <[u8; COMMITMENT_SIZE]>::try_from(commitment.as_ref()).unwrap(), + ); + + let challenge_seed = H256::from([99_u8; 32]); + let chunk_offset = 5_u32; + let z = derive_challenge_point(&challenge_seed, chunk_offset); + + // Generate proof for different data + let bad_data = vec![7_u8; CHUNK_SIZE_FOR_KZG]; + let (bad_proof, bad_y) = compute_chunk_opening_proof(&bad_data, &z, settings).unwrap(); + + let opening = 
CustodyOpening { + chunk_offset, + data_root: H256::from([1_u8; 32]), + tx_chunk_index: 0, + evaluation_point: FixedBytes::from(z), + evaluation_value: FixedBytes::from(bad_y), + opening_proof: FixedBytes::from(bad_proof), + }; + + let proof = CustodyProof { + challenged_miner: IrysAddress::from([0xAA_u8; 20]), + partition_hash: H256::from([0xBB_u8; 32]), + challenge_seed, + openings: vec![opening], + }; + + let result = verify_custody_proof( + &proof, + |_data_root, _chunk_index| Ok(Some(commitment_bytes)), + settings, + 1, + ) + .unwrap(); + + assert_eq!( + result, + CustodyVerificationResult::InvalidProof { chunk_offset: 5 } + ); + } + + #[test] + fn verify_custody_proof_missing_commitment() { + let settings = default_kzg_settings(); + let challenge_seed = H256::from([99_u8; 32]); + let data_root = H256::from([1_u8; 32]); + + let opening = CustodyOpening { + chunk_offset: 5, + data_root, + tx_chunk_index: 0, + evaluation_point: FixedBytes::ZERO, + evaluation_value: FixedBytes::ZERO, + opening_proof: FixedBytes::ZERO, + }; + + let proof = CustodyProof { + challenged_miner: IrysAddress::from([0xAA_u8; 20]), + partition_hash: H256::from([0xBB_u8; 32]), + challenge_seed, + openings: vec![opening], + }; + + let result = verify_custody_proof(&proof, |_dr, _ci| Ok(None), settings, 1).unwrap(); + + assert_eq!( + result, + CustodyVerificationResult::MissingCommitment { + data_root, + chunk_index: 0, + } + ); + } + + #[test] + fn verify_custody_proof_wrong_opening_count() { + let settings = default_kzg_settings(); + + let proof = CustodyProof { + challenged_miner: IrysAddress::from([0xAA_u8; 20]), + partition_hash: H256::from([0xBB_u8; 32]), + challenge_seed: H256::from([99_u8; 32]), + openings: vec![], + }; + + let result = verify_custody_proof(&proof, |_dr, _ci| Ok(None), settings, 5).unwrap(); + + assert_eq!( + result, + CustodyVerificationResult::InvalidOpeningCount { + expected: 5, + got: 0, + } + ); + } +} diff --git a/crates/types/src/lib.rs 
b/crates/types/src/lib.rs index 14a46a65d2..2888452fb2 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -14,6 +14,7 @@ pub mod commitment_v1; pub mod commitment_v2; pub mod config; pub mod conversions; +pub mod custody; pub mod difficulty_adjustment_config; pub mod gossip; pub mod hardfork_config; From 33a014295c5904a0fd414f66e5f57a832c469e10 Mon Sep 17 00:00:00 2001 From: jason Date: Mon, 23 Feb 2026 16:55:06 +0000 Subject: [PATCH 10/13] feat(custody): add custody proof service with P2P gossip support --- crates/actors/src/custody_proof_service.rs | 221 +++++++++++++++++++++ crates/actors/src/lib.rs | 1 + crates/actors/src/services.rs | 7 + crates/chain/src/chain.rs | 11 + crates/p2p/src/cache.rs | 15 ++ crates/p2p/src/gossip_client.rs | 9 + crates/p2p/src/server.rs | 50 ++++- crates/p2p/src/types.rs | 2 + crates/types/src/gossip.rs | 11 +- 9 files changed, 320 insertions(+), 7 deletions(-) create mode 100644 crates/actors/src/custody_proof_service.rs diff --git a/crates/actors/src/custody_proof_service.rs b/crates/actors/src/custody_proof_service.rs new file mode 100644 index 0000000000..48208e4911 --- /dev/null +++ b/crates/actors/src/custody_proof_service.rs @@ -0,0 +1,221 @@ +use irys_domain::StorageModulesReadGuard; +use irys_types::custody::{ + select_challenged_offsets, CustodyChallenge, CustodyOpening, CustodyProof, +}; +use irys_types::kzg::{compute_chunk_opening_proof, default_kzg_settings, derive_challenge_point}; +use irys_types::v2::{GossipBroadcastMessageV2, GossipDataV2}; +use irys_types::{Config, GossipCacheKey, PartitionChunkOffset}; +use reth::revm::primitives::FixedBytes; +use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; +use tracing::{debug, warn}; + +#[derive(Debug)] +pub enum CustodyProofMessage { + Challenge(CustodyChallenge), +} + +pub struct CustodyProofService { + config: Config, + storage_modules_guard: StorageModulesReadGuard, + gossip_sender: UnboundedSender, +} + +impl CustodyProofService { + pub fn 
spawn_service( + config: Config, + storage_modules_guard: StorageModulesReadGuard, + gossip_sender: UnboundedSender, + rx: UnboundedReceiver, + runtime_handle: tokio::runtime::Handle, + ) { + let service = Self { + config, + storage_modules_guard, + gossip_sender, + }; + + runtime_handle.spawn(service.start(rx)); + } + + async fn start(self, mut rx: UnboundedReceiver) { + debug!("Custody proof service started"); + while let Some(msg) = rx.recv().await { + match msg { + CustodyProofMessage::Challenge(challenge) => { + if let Err(e) = self.handle_challenge(&challenge) { + warn!( + partition.hash = %challenge.partition_hash, + error = %e, + "Failed to handle custody challenge", + ); + } + } + } + } + debug!("Custody proof service stopped"); + } + + fn handle_challenge(&self, challenge: &CustodyChallenge) -> eyre::Result<()> { + let storage_modules = self.storage_modules_guard.read(); + let sm = storage_modules + .iter() + .find(|sm| sm.partition_hash() == Some(challenge.partition_hash)); + + let sm = match sm { + Some(sm) => sm, + None => { + debug!( + partition.hash = %challenge.partition_hash, + "No local storage module for challenged partition, skipping", + ); + return Ok(()); + } + }; + + let offsets = select_challenged_offsets( + &challenge.challenge_seed, + self.config.consensus.custody_challenge_count, + self.config.consensus.num_chunks_in_partition, + ); + + let kzg_settings = default_kzg_settings(); + let chunk_size = usize::try_from(self.config.consensus.chunk_size) + .map_err(|_| eyre::eyre!("chunk_size overflow"))?; + + let mut openings = Vec::with_capacity(offsets.len()); + + for offset in offsets { + let partition_offset = PartitionChunkOffset::from(offset); + + let packed_chunk = match sm.generate_full_chunk(partition_offset)? 
{ + Some(c) => c, + None => { + warn!( + partition.hash = %challenge.partition_hash, + chunk.offset = offset, + "Chunk not found at challenged offset, skipping proof generation", + ); + return Ok(()); + } + }; + + let unpacked = irys_packing::unpack( + &packed_chunk, + self.config.consensus.entropy_packing_iterations, + chunk_size, + self.config.consensus.chain_id, + ); + + let z = derive_challenge_point(&challenge.challenge_seed, offset); + let (proof_bytes, y_bytes) = + compute_chunk_opening_proof(&unpacked.bytes.0, &z, kzg_settings)?; + + openings.push(CustodyOpening { + chunk_offset: offset, + data_root: packed_chunk.data_root, + tx_chunk_index: *packed_chunk.tx_offset, + evaluation_point: FixedBytes::from(z), + evaluation_value: FixedBytes::from(y_bytes), + opening_proof: FixedBytes::from(proof_bytes), + }); + } + + let proof = CustodyProof { + challenged_miner: challenge.challenged_miner, + partition_hash: challenge.partition_hash, + challenge_seed: challenge.challenge_seed, + openings, + }; + + debug!( + partition.hash = %proof.partition_hash, + openings.count = proof.openings.len(), + "Generated custody proof", + ); + + let key = GossipCacheKey::CustodyProof(proof.partition_hash); + let msg = GossipBroadcastMessageV2::new(key, GossipDataV2::CustodyProof(proof)); + + if let Err(e) = self.gossip_sender.send(msg) { + warn!(error = %e, "Failed to send custody proof to gossip broadcast"); + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use irys_types::{Config, IrysAddress, NodeConfig, H256}; + use std::sync::{Arc, RwLock}; + use tokio::sync::mpsc::unbounded_channel; + + fn test_config_with_custody() -> Config { + let mut node_config = NodeConfig::testing(); + let consensus = node_config.consensus.get_mut(); + consensus.enable_custody_proofs = true; + consensus.accept_kzg_ingress_proofs = true; + consensus.custody_challenge_count = 3; + Config::new_with_random_peer_id(node_config) + } + + fn empty_storage_guard() -> 
irys_domain::StorageModulesReadGuard { + irys_domain::StorageModulesReadGuard::new(Arc::new(RwLock::new(Vec::new()))) + } + + #[test] + fn handle_challenge_unknown_partition_returns_ok() { + let config = test_config_with_custody(); + let (gossip_tx, mut gossip_rx) = unbounded_channel(); + let service = CustodyProofService { + config, + storage_modules_guard: empty_storage_guard(), + gossip_sender: gossip_tx, + }; + + let challenge = CustodyChallenge { + challenged_miner: IrysAddress::from([0xAA; 20]), + partition_hash: H256::from([0xBB; 32]), + challenge_seed: H256::from([0xCC; 32]), + challenge_block_height: 100, + }; + + let result = service.handle_challenge(&challenge); + assert!(result.is_ok()); + + // No proof should have been gossiped + assert!(gossip_rx.try_recv().is_err()); + } + + #[tokio::test] + async fn service_spawns_and_shuts_down() { + let config = test_config_with_custody(); + let (gossip_tx, _gossip_rx) = unbounded_channel(); + let (challenge_tx, challenge_rx) = unbounded_channel(); + + CustodyProofService::spawn_service( + config, + empty_storage_guard(), + gossip_tx, + challenge_rx, + tokio::runtime::Handle::current(), + ); + + // Send a challenge for an unknown partition + challenge_tx + .send(CustodyProofMessage::Challenge(CustodyChallenge { + challenged_miner: IrysAddress::from([0xAA; 20]), + partition_hash: H256::from([0xBB; 32]), + challenge_seed: H256::from([0xCC; 32]), + challenge_block_height: 100, + })) + .unwrap(); + + // Drop sender to trigger service shutdown + drop(challenge_tx); + + // Give the service time to process and shut down + tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; + } +} diff --git a/crates/actors/src/lib.rs b/crates/actors/src/lib.rs index a0277d8280..096c47d81a 100644 --- a/crates/actors/src/lib.rs +++ b/crates/actors/src/lib.rs @@ -9,6 +9,7 @@ pub mod cache_service; pub mod chunk_ingress_service; pub mod chunk_migration_service; pub mod commitment_refunds; +pub mod custody_proof_service; pub mod 
data_sync_service; pub mod mempool_guard; pub mod mempool_service; diff --git a/crates/actors/src/services.rs b/crates/actors/src/services.rs index 3fc78c3afd..b2fd5cd0d4 100644 --- a/crates/actors/src/services.rs +++ b/crates/actors/src/services.rs @@ -1,5 +1,6 @@ use crate::blob_extraction_service::BlobExtractionMessage; use crate::chunk_ingress_service::ChunkIngressMessage; +use crate::custody_proof_service::CustodyProofMessage; use crate::mining_bus::{MiningBroadcastEvent, MiningBus}; use crate::{ DataSyncServiceMessage, StorageModuleServiceMessage, @@ -106,6 +107,7 @@ pub struct ServiceReceivers { pub peer_network: UnboundedReceiver, pub block_discovery: UnboundedReceiver>, pub blob_extraction: UnboundedReceiver, + pub custody_proof: UnboundedReceiver, pub packing: tokio::sync::mpsc::Receiver, } @@ -129,6 +131,7 @@ pub struct ServiceSendersInner { pub peer_network: PeerNetworkSender, pub block_discovery: UnboundedSender>, pub blob_extraction: UnboundedSender, + pub custody_proof: UnboundedSender, pub mining_bus: MiningBus, pub packing_sender: PackingSender, } @@ -166,6 +169,8 @@ impl ServiceSendersInner { unbounded_channel::>(); let (blob_extraction_sender, blob_extraction_receiver) = unbounded_channel::(); + let (custody_proof_sender, custody_proof_receiver) = + unbounded_channel::(); let (packing_sender, packing_receiver) = PackingService::channel(5_000); let mining_bus = MiningBus::new(); @@ -188,6 +193,7 @@ impl ServiceSendersInner { peer_network: PeerNetworkSender::new(peer_network_sender), block_discovery: block_discovery_sender, blob_extraction: blob_extraction_sender, + custody_proof: custody_proof_sender, mining_bus, packing_sender, }; @@ -210,6 +216,7 @@ impl ServiceSendersInner { peer_network: peer_network_receiver, block_discovery: block_discovery_receiver, blob_extraction: blob_extraction_receiver, + custody_proof: custody_proof_receiver, packing: packing_receiver, }; (senders, receivers) diff --git a/crates/chain/src/chain.rs 
b/crates/chain/src/chain.rs index cf6f54b23f..76e4ebb31e 100644 --- a/crates/chain/src/chain.rs +++ b/crates/chain/src/chain.rs @@ -17,6 +17,7 @@ use irys_actors::{ cache_service::ChunkCacheService, chunk_fetcher::{ChunkFetcherFactory, HttpChunkFetcher}, chunk_migration_service::ChunkMigrationService, + custody_proof_service::CustodyProofService, mempool_guard::MempoolReadGuard, mempool_service::MempoolServiceMessage, mempool_service::{MempoolService, MempoolServiceFacadeImpl}, @@ -1609,6 +1610,16 @@ impl IrysNode { ); } + if config.consensus.enable_custody_proofs { + CustodyProofService::spawn_service( + config.clone(), // clone: Config is Arc-wrapped internally + storage_modules_guard.clone(), // clone: Arc-based read guard + service_senders.gossip_broadcast.clone(), // clone: UnboundedSender is cheaply cloneable + receivers.custody_proof, + runtime_handle.clone(), + ); + } + // Get the mempool state to create the pledge provider let (tx, rx) = oneshot::channel(); service_senders diff --git a/crates/p2p/src/cache.rs b/crates/p2p/src/cache.rs index fafb4a7da8..68ea783fdc 100644 --- a/crates/p2p/src/cache.rs +++ b/crates/p2p/src/cache.rs @@ -23,6 +23,7 @@ pub struct GossipCache { blocks: Cache>>>, payloads: Cache>>>, ingress_proofs: Cache>>>, + custody_proofs: Cache>>>, } impl Default for GossipCache { @@ -40,6 +41,7 @@ impl GossipCache { blocks: Cache::builder().time_to_live(GOSSIP_CACHE_TTL).build(), payloads: Cache::builder().time_to_live(GOSSIP_CACHE_TTL).build(), ingress_proofs: Cache::builder().time_to_live(GOSSIP_CACHE_TTL).build(), + custody_proofs: Cache::builder().time_to_live(GOSSIP_CACHE_TTL).build(), } } @@ -119,6 +121,14 @@ impl GossipCache { }); peer_set.write().unwrap().insert(peer_id); } + GossipCacheKey::CustodyProof(partition_hash) => { + let peer_set = self.custody_proofs.get(&partition_hash).unwrap_or_else(|| { + let new_set = Arc::new(RwLock::new(HashSet::new())); + self.custody_proofs.insert(partition_hash, new_set.clone()); + new_set + }); + 
peer_set.write().unwrap().insert(peer_id); + } } Ok(()) } @@ -153,6 +163,11 @@ impl GossipCache { .get(proof_hash) .map(|arc| arc.read().unwrap().clone()) .unwrap_or_default(), + GossipCacheKey::CustodyProof(partition_hash) => self + .custody_proofs + .get(partition_hash) + .map(|arc| arc.read().unwrap().clone()) + .unwrap_or_default(), }; Ok(result) diff --git a/crates/p2p/src/gossip_client.rs b/crates/p2p/src/gossip_client.rs index 7afc5c1409..0eb53cae36 100644 --- a/crates/p2p/src/gossip_client.rs +++ b/crates/p2p/src/gossip_client.rs @@ -1078,6 +1078,15 @@ impl GossipClient { ) .await } + GossipDataV2::CustodyProof(custody_proof) => { + self.send_data_internal( + &peer.address.gossip, + GossipRoutes::CustodyProof, + custody_proof, + ProtocolVersion::V2, + ) + .await + } } } diff --git a/crates/p2p/src/server.rs b/crates/p2p/src/server.rs index 657408fa80..563fb396d3 100644 --- a/crates/p2p/src/server.rs +++ b/crates/p2p/src/server.rs @@ -20,10 +20,10 @@ use irys_domain::{get_node_info, PeerList, ScoreDecreaseReason}; use irys_types::v1::GossipDataRequestV1; use irys_types::v2::GossipDataRequestV2; use irys_types::{ - parse_user_agent, BlockBody, BlockIndexQuery, CommitmentTransaction, DataTransactionHeader, - GossipRequest, GossipRequestV2, HandshakeRequest, HandshakeRequestV2, HandshakeResponseV1, - HandshakeResponseV2, IngressProof, IrysAddress, IrysBlockHeader, IrysPeerId, PeerListItem, - PeerScore, ProtocolVersion, UnpackedChunk, + custody::CustodyProof, parse_user_agent, BlockBody, BlockIndexQuery, CommitmentTransaction, + DataTransactionHeader, GossipRequest, GossipRequestV2, HandshakeRequest, HandshakeRequestV2, + HandshakeResponseV1, HandshakeResponseV2, IngressProof, IrysAddress, IrysBlockHeader, + IrysPeerId, PeerListItem, PeerScore, ProtocolVersion, UnpackedChunk, }; use rand::prelude::SliceRandom as _; use reth::builder::Block as _; @@ -916,6 +916,44 @@ where HttpResponse::Ok().json(GossipResponse::Accepted(())) } + #[expect( + clippy::unused_async, 
+ reason = "Actix-web handler signature requires handlers to be async" + )] + async fn handle_custody_proof_v2( + server: Data, + proof_json: web::Json>, + req: actix_web::HttpRequest, + ) -> HttpResponse { + if !server.data_handler.sync_state.is_gossip_reception_enabled() { + return HttpResponse::Ok().json(GossipResponse::<()>::Rejected( + RejectionReason::GossipDisabled, + )); + } + + let v2_request = proof_json.0; + let source_peer_id = v2_request.peer_id; + let source_miner_address = v2_request.miner_address; + + match Self::check_peer_v2( + &server.peer_list, + &req, + source_peer_id, + source_miner_address, + ) { + Ok(_) => {} + Err(error_response) => return error_response, + }; + server.peer_list.set_is_online(&source_miner_address, true); + + debug!( + partition.hash = %v2_request.data.partition_hash, + "Received custody proof via gossip (handler stub)", + ); + + HttpResponse::Ok().json(GossipResponse::Accepted(())) + } + // ============================================================================ // End V2 Handlers // ============================================================================ @@ -1477,6 +1515,10 @@ where GossipRoutes::IngressProof.as_str(), web::post().to(Self::handle_ingress_proof_v2), ) + .route( + GossipRoutes::CustodyProof.as_str(), + web::post().to(Self::handle_custody_proof_v2), + ) .route( GossipRoutes::ExecutionPayload.as_str(), web::post().to(Self::handle_execution_payload_v2), diff --git a/crates/p2p/src/types.rs b/crates/p2p/src/types.rs index e7eae414a5..fe09467956 100644 --- a/crates/p2p/src/types.rs +++ b/crates/p2p/src/types.rs @@ -271,6 +271,7 @@ pub enum GossipRoutes { Block, BlockBody, IngressProof, + CustodyProof, ExecutionPayload, GetData, PullData, @@ -293,6 +294,7 @@ impl GossipRoutes { Self::Block => "/block", Self::BlockBody => "/block_body", Self::IngressProof => "/ingress_proof", + Self::CustodyProof => "/custody_proof", Self::ExecutionPayload => "/execution_payload", Self::GetData => "/get_data", 
Self::PullData => "/pull_data", diff --git a/crates/types/src/gossip.rs b/crates/types/src/gossip.rs index da9bd00be4..d98c425645 100644 --- a/crates/types/src/gossip.rs +++ b/crates/types/src/gossip.rs @@ -160,8 +160,8 @@ pub mod v1 { pub mod v2 { use crate::{ - BlockBody, BlockHash, ChunkPathHash, CommitmentTransaction, DataTransactionHeader, - GossipCacheKey, IngressProof, IrysBlockHeader, UnpackedChunk, H256, + custody::CustodyProof, BlockBody, BlockHash, ChunkPathHash, CommitmentTransaction, + DataTransactionHeader, GossipCacheKey, IngressProof, IrysBlockHeader, UnpackedChunk, H256, }; use alloy_primitives::B256; use reth_ethereum_primitives::Block; @@ -249,6 +249,7 @@ pub mod v2 { BlockBody(Arc), ExecutionPayload(Block), IngressProof(IngressProof), + CustodyProof(CustodyProof), } impl From> for GossipDataV2 { @@ -291,7 +292,7 @@ pub mod v2 { Self::IngressProof(ingress_proof) => { Some(super::v1::GossipDataV1::IngressProof(ingress_proof.clone())) } - Self::BlockBody(_) => None, // BlockBody does not exist in v1 + Self::BlockBody(_) | Self::CustodyProof(_) => None, } } @@ -325,6 +326,9 @@ pub mod v2 { ingress_proof.recover_signer() ) } + Self::CustodyProof(proof) => { + format!("custody proof for partition {}", proof.partition_hash) + } } } } @@ -399,6 +403,7 @@ pub enum GossipCacheKey { Block(BlockHash), ExecutionPayload(B256), IngressProof(H256), + CustodyProof(H256), } impl GossipCacheKey { From 80832aa4ce177125b796d8d00d7004cd4860f37e Mon Sep 17 00:00:00 2001 From: jason Date: Mon, 23 Feb 2026 17:52:17 +0000 Subject: [PATCH 11/13] feat(custody): review comments --- crates/actors/src/blob_extraction_service.rs | 115 ++++---- crates/actors/src/block_validation.rs | 10 + .../src/chunk_ingress_service/chunks.rs | 41 ++- .../chunk_ingress_service/ingress_proofs.rs | 3 +- crates/actors/src/custody_proof_service.rs | 35 +-- crates/database/src/database.rs | 2 - crates/database/src/tables.rs | 2 - crates/irys-reth/src/shadow_tx.rs | 25 -- crates/p2p/src/server.rs | 
17 +- crates/types/src/config/consensus.rs | 59 ++-- crates/types/src/config/mod.rs | 36 ++- crates/types/src/custody.rs | 195 +++++++------ crates/types/src/ingress.rs | 11 +- crates/types/src/kzg.rs | 273 +++--------------- 14 files changed, 324 insertions(+), 500 deletions(-) diff --git a/crates/actors/src/blob_extraction_service.rs b/crates/actors/src/blob_extraction_service.rs index e9ff173790..10ca0c54bc 100644 --- a/crates/actors/src/blob_extraction_service.rs +++ b/crates/actors/src/blob_extraction_service.rs @@ -1,6 +1,6 @@ use irys_types::H256; use reth::revm::primitives::B256; -use reth_transaction_pool::blobstore::{BlobStore, BlobStoreError}; +use reth_transaction_pool::blobstore::BlobStore; use tokio::sync::mpsc::UnboundedReceiver; use tracing::{debug, warn}; @@ -62,8 +62,6 @@ impl BlobExtractionService { } fn handle_extract_blobs(&self, block_hash: H256, blob_tx_hashes: &[B256]) -> eyre::Result<()> { - use irys_types::ingress::generate_ingress_proof_v2_from_blob; - if !self.config.consensus.enable_blobs { warn!("Received blob extraction request but blobs are disabled"); return Ok(()); @@ -72,7 +70,6 @@ impl BlobExtractionService { let signer = self.config.irys_signer(); let chain_id = self.config.consensus.chain_id; let anchor: H256 = block_hash; - let mut total_blobs = 0_u64; for tx_hash in blob_tx_hashes { @@ -82,10 +79,6 @@ impl BlobExtractionService { warn!(tx.hash = %tx_hash, "Blob sidecar not found in store (may be pruned)"); continue; } - Err(BlobStoreError::Other(e)) => { - warn!(tx.hash = %tx_hash, error = %e, "Blob store error"); - continue; - } Err(e) => { warn!(tx.hash = %tx_hash, error = ?e, "Blob store error"); continue; @@ -101,55 +94,13 @@ impl BlobExtractionService { }; for (blob_idx, blob) in sidecar.blobs.iter().enumerate() { - let commitment_bytes: &[u8; 48] = sidecar.commitments[blob_idx].as_ref(); - - let proof = generate_ingress_proof_v2_from_blob( + self.process_single_blob( &signer, blob.as_ref(), - commitment_bytes, + 
sidecar.commitments[blob_idx].as_ref(), chain_id, anchor, )?; - - let data_root = proof.data_root(); - - let chunk_size = u64::try_from(irys_types::kzg::CHUNK_SIZE_FOR_KZG) - .map_err(|_| eyre::eyre!("chunk size overflow"))?; - - let tx_header = irys_types::transaction::DataTransactionHeader::V1( - irys_types::transaction::DataTransactionHeaderV1WithMetadata { - tx: irys_types::transaction::DataTransactionHeaderV1 { - id: H256::zero(), - anchor, - signer: signer.address(), - data_root, - data_size: chunk_size, - header_size: 0, - term_fee: Default::default(), - perm_fee: None, - ledger_id: u32::from(irys_types::block::DataLedger::Submit), - chain_id, - signature: Default::default(), - bundle_format: None, - }, - metadata: irys_types::transaction::DataTransactionMetadata::new(), - }, - ); - - let mut chunk_data = vec![0_u8; irys_types::kzg::CHUNK_SIZE_FOR_KZG]; - chunk_data[..blob.len()].copy_from_slice(blob.as_ref()); - - if let Err(e) = - self.mempool_sender - .send(MempoolServiceMessage::IngestBlobDerivedTx { - tx_header, - ingress_proof: proof, - chunk_data, - }) - { - warn!(data_root = %data_root, error = %e, "Failed to send blob-derived tx to mempool"); - } - total_blobs += 1; } } @@ -165,4 +116,64 @@ impl BlobExtractionService { Ok(()) } + + fn process_single_blob( + &self, + signer: &irys_types::irys::IrysSigner, + blob_data: &[u8], + commitment_bytes: &[u8; 48], + chain_id: u64, + anchor: H256, + ) -> eyre::Result<()> { + use irys_types::ingress::generate_ingress_proof_v2_from_blob; + + let proof = generate_ingress_proof_v2_from_blob( + signer, + blob_data, + commitment_bytes, + chain_id, + anchor, + )?; + + let data_root = proof.data_root(); + + let chunk_size = u64::try_from(irys_types::kzg::CHUNK_SIZE_FOR_KZG) + .map_err(|_| eyre::eyre!("chunk size overflow"))?; + + let tx_header = irys_types::transaction::DataTransactionHeader::V1( + irys_types::transaction::DataTransactionHeaderV1WithMetadata { + tx: irys_types::transaction::DataTransactionHeaderV1 { + 
id: H256::zero(), + anchor, + signer: signer.address(), + data_root, + data_size: chunk_size, + header_size: 0, + term_fee: Default::default(), + perm_fee: None, + ledger_id: u32::from(irys_types::block::DataLedger::Submit), + chain_id, + signature: Default::default(), + bundle_format: None, + }, + metadata: irys_types::transaction::DataTransactionMetadata::new(), + }, + ); + + let mut chunk_data = vec![0_u8; irys_types::kzg::CHUNK_SIZE_FOR_KZG]; + chunk_data[..blob_data.len()].copy_from_slice(blob_data); + + if let Err(e) = self + .mempool_sender + .send(MempoolServiceMessage::IngestBlobDerivedTx { + tx_header, + ingress_proof: proof, + chunk_data, + }) + { + warn!(data_root = %data_root, error = %e, "Failed to send blob-derived tx to mempool"); + } + + Ok(()) + } } diff --git a/crates/actors/src/block_validation.rs b/crates/actors/src/block_validation.rs index 6396f5fe33..6c90b50354 100644 --- a/crates/actors/src/block_validation.rs +++ b/crates/actors/src/block_validation.rs @@ -3086,6 +3086,7 @@ pub fn validate_custody_proofs( }, kzg_settings, consensus_config.custody_challenge_count, + consensus_config.num_chunks_in_partition, )?; match result { @@ -3100,6 +3101,15 @@ pub fn validate_custody_proofs( proof.partition_hash, ); } + irys_types::custody::CustodyVerificationResult::InvalidOffset { + chunk_offset, + expected, + } => { + eyre::bail!( + "custody proof for miner {:?}: offset mismatch at chunk_offset={chunk_offset}, expected={expected}", + proof.challenged_miner, + ); + } irys_types::custody::CustodyVerificationResult::MissingCommitment { data_root, chunk_index, diff --git a/crates/actors/src/chunk_ingress_service/chunks.rs b/crates/actors/src/chunk_ingress_service/chunks.rs index 681daa3dbe..47866354e7 100644 --- a/crates/actors/src/chunk_ingress_service/chunks.rs +++ b/crates/actors/src/chunk_ingress_service/chunks.rs @@ -780,22 +780,12 @@ pub fn generate_ingress_proof( enable_shadow_kzg_logging: bool, use_kzg_ingress_proofs: bool, ) -> eyre::Result { - 
// load the chunks from the DB - // TODO: for now we assume the chunks all all in the DB chunk cache - // in future, we'll need access to whatever unified storage provider API we have to get chunks - // regardless of actual location - let expected_chunk_count = data_size_to_chunk_count(size, chunk_size)?; let (proof, per_chunk_commitments, actual_data_size, actual_chunk_count) = db.view_eyre(|tx| { let mut dup_cursor = tx.cursor_dup_read::()?; - // start from first duplicate entry for this root_hash let dup_walker = dup_cursor.walk_dup(Some(data_root), None)?; - - // we need to validate that the index is valid - // we do this by constructing a set over the chunk hashes, checking if we've seen this hash before - // if we have, we *must* error let mut set = HashSet::::new(); let mut chunk_count: u32 = 0; @@ -817,7 +807,6 @@ pub fn generate_ingress_proof( } set.insert(chunk_path_hash); - // TODO: add code to read from ChunkProvider once it can read through CachedChunks & we have a nice system for unpacking chunks on-demand let chunk = tx .get::(index_entry.meta.chunk_path_hash)? 
.ok_or(eyre!( @@ -873,10 +862,32 @@ pub fn generate_ingress_proof( "chunk count mismatch: actual {actual_chunk_count} != expected {expected_chunk_count}" ); + store_proof_and_commitments( + &db, + &proof, + per_chunk_commitments.as_deref(), + data_root, + &signer, + enable_shadow_kzg_logging, + use_kzg_ingress_proofs, + )?; + + Ok(proof) +} + +fn store_proof_and_commitments( + db: &DatabaseProvider, + proof: &IngressProof, + per_chunk_commitments: Option<&[irys_types::kzg::KzgCommitmentBytes]>, + data_root: DataRoot, + signer: &IrysSigner, + enable_shadow_kzg_logging: bool, + use_kzg_ingress_proofs: bool, +) -> eyre::Result<()> { db.update(|rw_tx| -> eyre::Result<()> { - irys_database::store_ingress_proof_checked(rw_tx, &proof, &signer)?; + irys_database::store_ingress_proof_checked(rw_tx, proof, signer)?; - if let Some(ref per_chunk) = per_chunk_commitments { + if let Some(per_chunk) = per_chunk_commitments { let indexed: Vec<(u32, irys_types::kzg::KzgCommitmentBytes)> = per_chunk .iter() .enumerate() @@ -893,7 +904,7 @@ pub fn generate_ingress_proof( })??; if enable_shadow_kzg_logging && !use_kzg_ingress_proofs { - if let Err(e) = shadow_log_kzg_commitments(&db, data_root) { + if let Err(e) = shadow_log_kzg_commitments(db, data_root) { warn!( data_root = %data_root, error = %e, @@ -902,7 +913,7 @@ pub fn generate_ingress_proof( } } - Ok(proof) + Ok(()) } /// Compute KZG commitments in shadow mode: re-reads chunks from DB, computes diff --git a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs index 5db205c68a..e8d032bfd8 100644 --- a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs +++ b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs @@ -392,7 +392,6 @@ pub fn generate_and_store_ingress_proof( return Err(IngressProofGenerationError::AlreadyGenerating); } - // Notify start of proof generation if let Err(e) = 
cache_sender.send_traced(CacheServiceAction::NotifyProofGenerationStarted(data_root)) { @@ -503,7 +502,7 @@ pub fn reanchor_and_store_ingress_proof( .get_latest_canonical_entry() .block_hash(); - let mut proof = proof.clone(); + let mut proof = proof.clone(); // clone: need owned value for set_anchor + sign mutation proof.set_anchor(latest_anchor); if let Err(e) = signer.sign_ingress_proof(&mut proof) { if let Err(e) = cache_sender.send_traced( diff --git a/crates/actors/src/custody_proof_service.rs b/crates/actors/src/custody_proof_service.rs index 48208e4911..f10a59003d 100644 --- a/crates/actors/src/custody_proof_service.rs +++ b/crates/actors/src/custody_proof_service.rs @@ -76,7 +76,7 @@ impl CustodyProofService { &challenge.challenge_seed, self.config.consensus.custody_challenge_count, self.config.consensus.num_chunks_in_partition, - ); + )?; let kzg_settings = default_kzg_settings(); let chunk_size = usize::try_from(self.config.consensus.chunk_size) @@ -183,39 +183,6 @@ mod tests { let result = service.handle_challenge(&challenge); assert!(result.is_ok()); - - // No proof should have been gossiped assert!(gossip_rx.try_recv().is_err()); } - - #[tokio::test] - async fn service_spawns_and_shuts_down() { - let config = test_config_with_custody(); - let (gossip_tx, _gossip_rx) = unbounded_channel(); - let (challenge_tx, challenge_rx) = unbounded_channel(); - - CustodyProofService::spawn_service( - config, - empty_storage_guard(), - gossip_tx, - challenge_rx, - tokio::runtime::Handle::current(), - ); - - // Send a challenge for an unknown partition - challenge_tx - .send(CustodyProofMessage::Challenge(CustodyChallenge { - challenged_miner: IrysAddress::from([0xAA; 20]), - partition_hash: H256::from([0xBB; 32]), - challenge_seed: H256::from([0xCC; 32]), - challenge_block_height: 100, - })) - .unwrap(); - - // Drop sender to trigger service shutdown - drop(challenge_tx); - - // Give the service time to process and shut down - 
tokio::time::sleep(tokio::time::Duration::from_millis(50)).await; - } } diff --git a/crates/database/src/database.rs b/crates/database/src/database.rs index 5e4e9fd1dd..8f729d9616 100644 --- a/crates/database/src/database.rs +++ b/crates/database/src/database.rs @@ -752,7 +752,6 @@ pub fn set_peer_id(tx: &T, peer_id: IrysPeerId) -> Result<(), Databa tx.put::(MetadataKey::PeerId, bytes.to_vec()) } -/// Store per-chunk KZG commitments for a data_root during V2 ingress proof generation. pub fn store_per_chunk_kzg_commitments( tx: &T, data_root: DataRoot, @@ -770,7 +769,6 @@ pub fn store_per_chunk_kzg_commitments( Ok(()) } -/// Retrieve a single per-chunk KZG commitment by data_root and chunk_index. pub fn get_per_chunk_kzg_commitment( tx: &T, data_root: DataRoot, diff --git a/crates/database/src/tables.rs b/crates/database/src/tables.rs index 096a6023bc..43f3d46cc4 100644 --- a/crates/database/src/tables.rs +++ b/crates/database/src/tables.rs @@ -214,8 +214,6 @@ table CachedChunks { type Value = CachedChunk; } -/// Per-chunk KZG commitments stored during V2 ingress proof generation. -/// Used for custody proof verification (KZG opening proofs). table PerChunkKzgCommitments { type Key = DataRoot; type Value = CompactPerChunkCommitment; diff --git a/crates/irys-reth/src/shadow_tx.rs b/crates/irys-reth/src/shadow_tx.rs index 30531160a0..616e33f939 100644 --- a/crates/irys-reth/src/shadow_tx.rs +++ b/crates/irys-reth/src/shadow_tx.rs @@ -588,7 +588,6 @@ impl BorshDeserialize for UnpledgeDebit { } } -/// Custody penalty: slash pledge deposit when miner fails custody challenge. #[derive( serde::Deserialize, serde::Serialize, @@ -602,11 +601,8 @@ impl BorshDeserialize for UnpledgeDebit { arbitrary::Arbitrary, )] pub struct CustodyPenaltyPacket { - /// Amount to deduct from the penalized miner. pub amount: U256, - /// Address of the penalized miner. pub target: Address, - /// Partition hash identifying which partition failed the custody challenge. 
pub partition_hash: FixedBytes<32>, } @@ -1044,27 +1040,6 @@ mod tests { assert_eq!(decoded, tx); } - #[test] - fn custody_penalty_fee_payer() { - let target = Address::repeat_byte(0x55); - let packet = TransactionPacket::CustodyPenalty(CustodyPenaltyPacket { - amount: U256::from(1_u64), - target, - partition_hash: FixedBytes::<32>::ZERO, - }); - assert_eq!(packet.fee_payer_address(), Some(target)); - } - - #[test] - fn custody_penalty_topic() { - let packet = TransactionPacket::CustodyPenalty(CustodyPenaltyPacket { - amount: U256::from(1_u64), - target: Address::ZERO, - partition_hash: FixedBytes::<32>::ZERO, - }); - assert_eq!(packet.topic(), keccak256("SHADOW_TX_CUSTODY_PENALTY")); - } - /// Test backward compatibility detection - old format without solution hash should fail #[test] fn reject_old_format_without_solution_hash() { diff --git a/crates/p2p/src/server.rs b/crates/p2p/src/server.rs index 563fb396d3..e4a18dc093 100644 --- a/crates/p2p/src/server.rs +++ b/crates/p2p/src/server.rs @@ -925,6 +925,12 @@ where proof_json: web::Json>, req: actix_web::HttpRequest, ) -> HttpResponse { + if !server.data_handler.config.consensus.enable_custody_proofs { + return HttpResponse::Ok().json(GossipResponse::<()>::Rejected( + RejectionReason::GossipDisabled, + )); + } + if !server.data_handler.sync_state.is_gossip_reception_enabled() { return HttpResponse::Ok().json(GossipResponse::<()>::Rejected( RejectionReason::GossipDisabled, @@ -946,9 +952,18 @@ where }; server.peer_list.set_is_online(&source_miner_address, true); + let cache_key = irys_types::GossipCacheKey::CustodyProof(v2_request.data.partition_hash); + if let Err(e) = server + .data_handler + .cache + .record_seen(source_peer_id, cache_key) + { + warn!(error = ?e, "Failed to record custody proof in gossip cache"); + } + debug!( partition.hash = %v2_request.data.partition_hash, - "Received custody proof via gossip (handler stub)", + "Received custody proof via gossip", ); 
HttpResponse::Ok().json(GossipResponse::Accepted(())) diff --git a/crates/types/src/config/consensus.rs b/crates/types/src/config/consensus.rs index c40e795ae8..06b94aac97 100644 --- a/crates/types/src/config/consensus.rs +++ b/crates/types/src/config/consensus.rs @@ -121,37 +121,28 @@ pub struct ConsensusConfig { #[serde(default)] pub enable_shadow_kzg_logging: bool, - /// Generate V2 (KZG-based) ingress proofs instead of V1 (SHA256 merkle only). - /// When enabled, new ingress proofs include a KZG commitment and composite - /// commitment binding the data to the signer's address. + /// Use V2 proofs for new transactions. #[serde(default)] pub use_kzg_ingress_proofs: bool, - /// Accept V2 (KZG-based) ingress proofs from peers. When false, V2 proofs - /// received via gossip or in blocks are rejected. Must be true when - /// `use_kzg_ingress_proofs` is true. + /// Accept V2 proofs from peers. #[serde(default)] pub accept_kzg_ingress_proofs: bool, - /// Require V2 (KZG-based) ingress proofs exclusively. When true, V1 proofs - /// are rejected. Implies `accept_kzg_ingress_proofs = true`. + /// Reject V1 proofs. Implies `accept_kzg_ingress_proofs`. #[serde(default)] pub require_kzg_ingress_proofs: bool, - /// Enable EIP-4844 blob transaction support. When false, blob transactions - /// are rejected at the txpool, EVM execution, and block validation layers. #[serde(default)] pub enable_blobs: bool, - /// Enable custody proofs via KZG opening. Requires `accept_kzg_ingress_proofs`. + /// Requires `accept_kzg_ingress_proofs`. #[serde(default)] pub enable_custody_proofs: bool, - /// Number of chunks challenged per custody proof (K). Default: 20. #[serde(default = "default_custody_challenge_count")] pub custody_challenge_count: u32, - /// Number of blocks a miner has to respond to a custody challenge. Default: 10. 
#[serde(default = "default_custody_response_window")] pub custody_response_window: u64, @@ -504,29 +495,25 @@ impl ConsensusConfig { /// Enforce logical implications between KZG/blob config flags. /// Call before wrapping in `Arc` to fix contradictions early. pub fn normalize(&mut self) { - if self.enable_blobs && !self.accept_kzg_ingress_proofs { - tracing::warn!( - "enable_blobs=true requires accept_kzg_ingress_proofs=true, auto-enabling" - ); - self.accept_kzg_ingress_proofs = true; - } - if self.require_kzg_ingress_proofs && !self.accept_kzg_ingress_proofs { - tracing::warn!( - "require_kzg_ingress_proofs=true requires accept_kzg_ingress_proofs=true, auto-enabling" - ); - self.accept_kzg_ingress_proofs = true; - } - if self.use_kzg_ingress_proofs && !self.accept_kzg_ingress_proofs { - tracing::warn!( - "use_kzg_ingress_proofs=true requires accept_kzg_ingress_proofs=true, auto-enabling" - ); - self.accept_kzg_ingress_proofs = true; - } - if self.enable_custody_proofs && !self.accept_kzg_ingress_proofs { - tracing::warn!( - "enable_custody_proofs=true requires accept_kzg_ingress_proofs=true, auto-enabling" - ); - self.accept_kzg_ingress_proofs = true; + for flag_name in [ + "enable_blobs", + "require_kzg_ingress_proofs", + "use_kzg_ingress_proofs", + "enable_custody_proofs", + ] { + let flag_set = match flag_name { + "enable_blobs" => self.enable_blobs, + "require_kzg_ingress_proofs" => self.require_kzg_ingress_proofs, + "use_kzg_ingress_proofs" => self.use_kzg_ingress_proofs, + "enable_custody_proofs" => self.enable_custody_proofs, + _ => unreachable!(), + }; + if flag_set && !self.accept_kzg_ingress_proofs { + tracing::warn!( + "{flag_name}=true requires accept_kzg_ingress_proofs=true, auto-enabling" + ); + self.accept_kzg_ingress_proofs = true; + } } } diff --git a/crates/types/src/config/mod.rs b/crates/types/src/config/mod.rs index 477a33b9d8..7fa9c555f6 100644 --- a/crates/types/src/config/mod.rs +++ b/crates/types/src/config/mod.rs @@ -147,17 +147,31 @@ 
impl Config { "mempool.max_pending_chunk_items must be > 0 (a zero-capacity pending chunk cache would silently drop all pre-header chunks)" ); - if self.consensus.require_kzg_ingress_proofs && !self.consensus.accept_kzg_ingress_proofs { - bail!("require_kzg_ingress_proofs=true but accept_kzg_ingress_proofs=false — contradictory config"); - } - if self.consensus.enable_blobs && !self.consensus.accept_kzg_ingress_proofs { - bail!("enable_blobs=true but accept_kzg_ingress_proofs=false — blob V2 proofs would be rejected"); - } - if self.consensus.use_kzg_ingress_proofs && !self.consensus.accept_kzg_ingress_proofs { - bail!("use_kzg_ingress_proofs=true but accept_kzg_ingress_proofs=false — generated proofs would be rejected"); - } - if self.consensus.enable_custody_proofs && !self.consensus.accept_kzg_ingress_proofs { - bail!("enable_custody_proofs=true but accept_kzg_ingress_proofs=false — custody proofs require KZG commitments"); + for (flag_name, flag_set, reason) in [ + ( + "require_kzg_ingress_proofs", + self.consensus.require_kzg_ingress_proofs, + "contradictory config", + ), + ( + "enable_blobs", + self.consensus.enable_blobs, + "blob V2 proofs would be rejected", + ), + ( + "use_kzg_ingress_proofs", + self.consensus.use_kzg_ingress_proofs, + "generated proofs would be rejected", + ), + ( + "enable_custody_proofs", + self.consensus.enable_custody_proofs, + "custody proofs require KZG commitments", + ), + ] { + if flag_set && !self.consensus.accept_kzg_ingress_proofs { + bail!("{flag_name}=true but accept_kzg_ingress_proofs=false — {reason}"); + } } Ok(()) diff --git a/crates/types/src/custody.rs b/crates/types/src/custody.rs index 4fef2f7357..1a7d8e551b 100644 --- a/crates/types/src/custody.rs +++ b/crates/types/src/custody.rs @@ -5,7 +5,6 @@ use c_kzg::KzgSettings; use openssl::sha; use serde::{Deserialize, Serialize}; -/// A custody challenge targeting a specific miner's partition. 
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct CustodyChallenge { pub challenged_miner: IrysAddress, @@ -14,24 +13,16 @@ pub struct CustodyChallenge { pub challenge_block_height: u64, } -/// A single KZG opening for one challenged chunk. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct CustodyOpening { - /// Partition-relative chunk offset pub chunk_offset: u32, - /// Which data_root owns this chunk pub data_root: H256, - /// Chunk index within the data_root's transaction pub tx_chunk_index: u32, - /// Evaluation point z (32-byte BLS12-381 scalar) pub evaluation_point: FixedBytes, - /// Evaluation value y = p(z) (32-byte BLS12-381 scalar) pub evaluation_value: FixedBytes, - /// Opening proof pi (48-byte compressed G1 point) pub opening_proof: FixedBytes, } -/// A custody proof responding to a challenge. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct CustodyProof { pub challenged_miner: IrysAddress, @@ -40,17 +31,15 @@ pub struct CustodyProof { pub openings: Vec, } -/// Result of verifying a custody proof. #[derive(Debug, Clone, PartialEq, Eq)] pub enum CustodyVerificationResult { Valid, InvalidOpeningCount { expected: u32, got: u32 }, + InvalidOffset { chunk_offset: u32, expected: u32 }, MissingCommitment { data_root: H256, chunk_index: u32 }, InvalidProof { chunk_offset: u32 }, } -/// Derive challenge seed from VDF output and partition hash. -/// /// `challenge_seed = SHA256(vdf_output || partition_hash)` pub fn derive_challenge_seed(vdf_output: &[u8; 32], partition_hash: &H256) -> H256 { let mut hasher = sha::Sha256::new(); @@ -59,14 +48,16 @@ pub fn derive_challenge_seed(vdf_output: &[u8; 32], partition_hash: &H256) -> H2 H256(hasher.finish()) } -/// Select K challenged chunk offsets from a challenge seed. 
-/// /// `offset_j = hash_to_u64(SHA256(challenge_seed || j_le)) % num_chunks` pub fn select_challenged_offsets( challenge_seed: &H256, k: u32, num_chunks_in_partition: u64, -) -> Vec { +) -> eyre::Result> { + eyre::ensure!( + num_chunks_in_partition <= u64::from(u32::MAX), + "num_chunks_in_partition ({num_chunks_in_partition}) exceeds u32 range" + ); (0..k) .map(|j| { let mut hasher = sha::Sha256::new(); @@ -74,21 +65,14 @@ pub fn select_challenged_offsets( hasher.update(&j.to_le_bytes()); let hash = hasher.finish(); - // Interpret first 8 bytes as little-endian u64 - let val = u64::from_le_bytes( - hash[..8] - .try_into() - .expect("SHA256 output is at least 8 bytes"), - ); - // Safe truncation: partition offsets fit in u32 - u32::try_from(val % num_chunks_in_partition) - .expect("num_chunks_in_partition fits in u32 range for offset") + let val = u64::from_le_bytes([ + hash[0], hash[1], hash[2], hash[3], hash[4], hash[5], hash[6], hash[7], + ]); + Ok(u32::try_from(val % num_chunks_in_partition)?) }) .collect() } -/// Verify all openings in a custody proof against stored per-chunk commitments. -/// /// `get_commitment` retrieves the KZG commitment for a given (data_root, chunk_index) /// from the database. Returns `Ok(None)` if the commitment is not found. 
pub fn verify_custody_proof( @@ -96,6 +80,7 @@ pub fn verify_custody_proof( get_commitment: impl Fn(H256, u32) -> eyre::Result>, kzg_settings: &KzgSettings, expected_challenge_count: u32, + num_chunks_in_partition: u64, ) -> eyre::Result { let got = u32::try_from(proof.openings.len()) .map_err(|_| eyre::eyre!("opening count exceeds u32"))?; @@ -106,7 +91,20 @@ pub fn verify_custody_proof( }); } - for opening in &proof.openings { + let expected_offsets = select_challenged_offsets( + &proof.challenge_seed, + expected_challenge_count, + num_chunks_in_partition, + )?; + + for (opening, &expected_offset) in proof.openings.iter().zip(expected_offsets.iter()) { + if opening.chunk_offset != expected_offset { + return Ok(CustodyVerificationResult::InvalidOffset { + chunk_offset: opening.chunk_offset, + expected: expected_offset, + }); + } + let commitment = match get_commitment(opening.data_root, opening.tx_chunk_index)? { Some(c) => c, None => { @@ -143,49 +141,47 @@ mod tests { derive_challenge_point, CHUNK_SIZE_FOR_KZG, COMMITMENT_SIZE, }; - #[test] - fn derive_challenge_seed_deterministic() { - let vdf = [42_u8; 32]; - let partition = H256::from([7_u8; 32]); - let s1 = derive_challenge_seed(&vdf, &partition); - let s2 = derive_challenge_seed(&vdf, &partition); - assert_eq!(s1, s2); - } + const TEST_NUM_CHUNKS: u64 = 1000; - #[test] - fn derive_challenge_seed_different_inputs() { - let vdf_a = [1_u8; 32]; - let vdf_b = [2_u8; 32]; - let partition = H256::from([7_u8; 32]); - let s1 = derive_challenge_seed(&vdf_a, &partition); - let s2 = derive_challenge_seed(&vdf_b, &partition); - assert_ne!(s1, s2); + fn test_proof(challenge_seed: H256, openings: Vec) -> CustodyProof { + CustodyProof { + challenged_miner: IrysAddress::from([0xAA_u8; 20]), + partition_hash: H256::from([0xBB_u8; 32]), + challenge_seed, + openings, + } } #[test] fn select_challenged_offsets_returns_k() { let seed = H256::from([42_u8; 32]); - let offsets = select_challenged_offsets(&seed, 20, 1000); + let 
offsets = select_challenged_offsets(&seed, 20, TEST_NUM_CHUNKS).unwrap(); assert_eq!(offsets.len(), 20); } #[test] fn select_challenged_offsets_within_bounds() { let seed = H256::from([42_u8; 32]); - let num_chunks = 500_u64; - let offsets = select_challenged_offsets(&seed, 20, num_chunks); + let offsets = select_challenged_offsets(&seed, 20, 500).unwrap(); for &offset in &offsets { - assert!(u64::from(offset) < num_chunks); + assert!(u64::from(offset) < 500); } } #[test] fn select_challenged_offsets_different_seeds() { - let offsets_a = select_challenged_offsets(&H256::from([1_u8; 32]), 20, 10_000); - let offsets_b = select_challenged_offsets(&H256::from([2_u8; 32]), 20, 10_000); + let offsets_a = select_challenged_offsets(&H256::from([1_u8; 32]), 20, 10_000).unwrap(); + let offsets_b = select_challenged_offsets(&H256::from([2_u8; 32]), 20, 10_000).unwrap(); assert_ne!(offsets_a, offsets_b); } + #[test] + fn select_challenged_offsets_rejects_oversized_partition() { + let seed = H256::from([42_u8; 32]); + let result = select_challenged_offsets(&seed, 1, u64::from(u32::MAX) + 1); + assert!(result.is_err()); + } + #[test] fn verify_custody_proof_roundtrip() { let settings = default_kzg_settings(); @@ -196,7 +192,9 @@ mod tests { ); let challenge_seed = H256::from([99_u8; 32]); - let chunk_offset = 5_u32; + let expected_offsets = + select_challenged_offsets(&challenge_seed, 1, TEST_NUM_CHUNKS).unwrap(); + let chunk_offset = expected_offsets[0]; let z = derive_challenge_point(&challenge_seed, chunk_offset); let (proof_bytes, y_bytes) = compute_chunk_opening_proof(&chunk_data, &z, settings).unwrap(); @@ -210,19 +208,12 @@ mod tests { opening_proof: FixedBytes::from(proof_bytes), }; - let proof = CustodyProof { - challenged_miner: IrysAddress::from([0xAA_u8; 20]), - partition_hash: H256::from([0xBB_u8; 32]), - challenge_seed, - openings: vec![opening], - }; - - // clone: commitment_bytes is Copy but stored for closure capture let result = verify_custody_proof( - &proof, 
+ &test_proof(challenge_seed, vec![opening]), |_data_root, _chunk_index| Ok(Some(commitment_bytes)), settings, 1, + TEST_NUM_CHUNKS, ) .unwrap(); @@ -239,10 +230,11 @@ mod tests { ); let challenge_seed = H256::from([99_u8; 32]); - let chunk_offset = 5_u32; + let expected_offsets = + select_challenged_offsets(&challenge_seed, 1, TEST_NUM_CHUNKS).unwrap(); + let chunk_offset = expected_offsets[0]; let z = derive_challenge_point(&challenge_seed, chunk_offset); - // Generate proof for different data let bad_data = vec![7_u8; CHUNK_SIZE_FOR_KZG]; let (bad_proof, bad_y) = compute_chunk_opening_proof(&bad_data, &z, settings).unwrap(); @@ -255,24 +247,53 @@ mod tests { opening_proof: FixedBytes::from(bad_proof), }; - let proof = CustodyProof { - challenged_miner: IrysAddress::from([0xAA_u8; 20]), - partition_hash: H256::from([0xBB_u8; 32]), - challenge_seed, - openings: vec![opening], + let result = verify_custody_proof( + &test_proof(challenge_seed, vec![opening]), + |_data_root, _chunk_index| Ok(Some(commitment_bytes)), + settings, + 1, + TEST_NUM_CHUNKS, + ) + .unwrap(); + + assert_eq!( + result, + CustodyVerificationResult::InvalidProof { chunk_offset } + ); + } + + #[test] + fn verify_custody_proof_wrong_offset_fails() { + let settings = default_kzg_settings(); + let challenge_seed = H256::from([99_u8; 32]); + let expected_offsets = + select_challenged_offsets(&challenge_seed, 1, TEST_NUM_CHUNKS).unwrap(); + + let wrong_offset = expected_offsets[0].wrapping_add(1); + let opening = CustodyOpening { + chunk_offset: wrong_offset, + data_root: H256::from([1_u8; 32]), + tx_chunk_index: 0, + evaluation_point: FixedBytes::ZERO, + evaluation_value: FixedBytes::ZERO, + opening_proof: FixedBytes::ZERO, }; let result = verify_custody_proof( - &proof, - |_data_root, _chunk_index| Ok(Some(commitment_bytes)), + &test_proof(challenge_seed, vec![opening]), + |_dr, _ci| Ok(Some(KzgCommitmentBytes::from([0_u8; COMMITMENT_SIZE]))), settings, 1, + TEST_NUM_CHUNKS, ) .unwrap(); 
assert_eq!( result, - CustodyVerificationResult::InvalidProof { chunk_offset: 5 } + CustodyVerificationResult::InvalidOffset { + chunk_offset: wrong_offset, + expected: expected_offsets[0], + } ); } @@ -280,10 +301,12 @@ mod tests { fn verify_custody_proof_missing_commitment() { let settings = default_kzg_settings(); let challenge_seed = H256::from([99_u8; 32]); + let expected_offsets = + select_challenged_offsets(&challenge_seed, 1, TEST_NUM_CHUNKS).unwrap(); let data_root = H256::from([1_u8; 32]); let opening = CustodyOpening { - chunk_offset: 5, + chunk_offset: expected_offsets[0], data_root, tx_chunk_index: 0, evaluation_point: FixedBytes::ZERO, @@ -291,14 +314,14 @@ mod tests { opening_proof: FixedBytes::ZERO, }; - let proof = CustodyProof { - challenged_miner: IrysAddress::from([0xAA_u8; 20]), - partition_hash: H256::from([0xBB_u8; 32]), - challenge_seed, - openings: vec![opening], - }; - - let result = verify_custody_proof(&proof, |_dr, _ci| Ok(None), settings, 1).unwrap(); + let result = verify_custody_proof( + &test_proof(challenge_seed, vec![opening]), + |_dr, _ci| Ok(None), + settings, + 1, + TEST_NUM_CHUNKS, + ) + .unwrap(); assert_eq!( result, @@ -313,14 +336,14 @@ mod tests { fn verify_custody_proof_wrong_opening_count() { let settings = default_kzg_settings(); - let proof = CustodyProof { - challenged_miner: IrysAddress::from([0xAA_u8; 20]), - partition_hash: H256::from([0xBB_u8; 32]), - challenge_seed: H256::from([99_u8; 32]), - openings: vec![], - }; - - let result = verify_custody_proof(&proof, |_dr, _ci| Ok(None), settings, 5).unwrap(); + let result = verify_custody_proof( + &test_proof(H256::from([99_u8; 32]), vec![]), + |_dr, _ci| Ok(None), + settings, + 5, + TEST_NUM_CHUNKS, + ) + .unwrap(); assert_eq!( result, diff --git a/crates/types/src/ingress.rs b/crates/types/src/ingress.rs index 9bee0a7304..38d0ad697e 100644 --- a/crates/types/src/ingress.rs +++ b/crates/types/src/ingress.rs @@ -120,7 +120,6 @@ impl IngressProof { } } - /// Check if 
this proof version is accepted by the given config flags. pub fn check_version_accepted( &self, accept_kzg: bool, @@ -133,8 +132,7 @@ impl IngressProof { } } - /// Returns the V1 merkle proof hash, or V2 composite commitment. - /// Used as a unique proof identifier (e.g. for gossip deduplication). + /// Unique identifier for gossip deduplication. pub fn proof_id(&self) -> H256 { match self { Self::V1(v1) => v1.proof, @@ -393,11 +391,7 @@ pub fn generate_ingress_proof>( Ok(proof) } -/// Generate a V2 ingress proof with KZG commitment for native Irys data. -/// -/// Unlike V1 which only hashes chunks into a merkle tree, V2 also computes -/// a KZG commitment over the chunk data and binds it to the signer's address -/// via a composite commitment. +/// Generates KZG commitment over chunks and binds it to signer via composite commitment. pub fn generate_ingress_proof_v2( signer: &IrysSigner, data_root: DataRoot, @@ -561,7 +555,6 @@ pub fn verify_ingress_proof>( return Ok(false); } - // Verify data_root matches the merkle root of the provided chunks let (_, regular_leaves) = generate_ingress_leaves( chunks_vec.iter().map(|c| Ok(c.as_ref())), recovered_address, diff --git a/crates/types/src/kzg.rs b/crates/types/src/kzg.rs index 211c68384f..7e2c50eb8f 100644 --- a/crates/types/src/kzg.rs +++ b/crates/types/src/kzg.rs @@ -13,10 +13,6 @@ pub const PROOF_SIZE: usize = 48; pub const SCALAR_SIZE: usize = 32; pub const DOMAIN_SEPARATOR: &[u8] = b"IRYS_KZG_INGRESS_V1"; -/// A 48-byte KZG commitment (compressed BLS12-381 G1 point). -/// -/// Newtype wrapper around `[u8; 48]` providing the trait implementations -/// that raw arrays lack (serde for N>32, Default for N>32, Compact). #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub struct KzgCommitmentBytes(pub [u8; COMMITMENT_SIZE]); @@ -131,8 +127,6 @@ impl alloy_rlp::Decodable for KzgCommitmentBytes { } } -/// A single chunk's KZG commitment stored during ingress. 
-/// Maps (data_root, chunk_index) → KzgCommitmentBytes in the database. #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, Compact)] pub struct PerChunkCommitment { pub chunk_index: u32, @@ -148,8 +142,6 @@ impl arbitrary::Arbitrary<'_> for PerChunkCommitment { } } -/// Returns a reference to the default (Ethereum mainnet) trusted setup KZG settings. -/// Lazily initialized on first call, thread-safe. pub fn default_kzg_settings() -> &'static KzgSettings { EnvKzgSettings::Default.get() } @@ -168,79 +160,31 @@ pub fn compute_blob_commitment( .map_err(|e| eyre::eyre!("KZG blob commitment failed: {e}")) } -/// Aggregate two G1 commitments: C = C1 + r·C2 where r = SHA256(C1 || C2) -/// interpreted as a BLS12-381 scalar. -/// -/// Uses the `blst` library (transitive dependency via `c-kzg`) for elliptic -/// curve point operations on BLS12-381 G1. +/// Aggregate two G1 commitments: C = C1 + r·C2 where r = SHA256(C1 || C2). pub fn aggregate_commitments( c1: &KzgCommitment, c2: &KzgCommitment, ) -> eyre::Result { - use blst::min_pk::PublicKey; - use blst::{blst_p1, blst_p1_affine, blst_scalar}; - let mut hasher = sha::Sha256::new(); hasher.update(c1.as_ref()); hasher.update(c2.as_ref()); let r_bytes = hasher.finish(); - let mut r_scalar = blst_scalar::default(); - // SAFETY: `r_bytes` is a 32-byte SHA256 digest; `blst_scalar_from_bendian` reads - // exactly 32 bytes from the pointer, which is within bounds. - unsafe { - blst::blst_scalar_from_bendian(&mut r_scalar, r_bytes.as_ptr()); - } - - let p1 = PublicKey::from_bytes(c1.as_ref()) - .map_err(|e| eyre::eyre!("failed to decompress C1: {e:?}"))?; - let p2 = PublicKey::from_bytes(c2.as_ref()) - .map_err(|e| eyre::eyre!("failed to decompress C2: {e:?}"))?; - - let p1_affine: &blst_p1_affine = (&p1).into(); - let p2_affine: &blst_p1_affine = (&p2).into(); - - let mut p2_proj = blst_p1::default(); - let mut r_c2 = blst_p1::default(); - // SAFETY: All blst_p1 types are initialized via `default()`. 
`blst_p1_from_affine` - // converts a valid affine point (from `PublicKey::from_bytes` which validated the - // curve point) to projective form. `blst_p1_mult` multiplies a valid projective - // point by a 256-bit scalar — both inputs are well-formed. - unsafe { - blst::blst_p1_from_affine(&mut p2_proj, p2_affine); - blst::blst_p1_mult(&mut r_c2, &p2_proj, r_scalar.b.as_ptr(), 256); - } - - let mut result = blst_p1::default(); - // SAFETY: `c1_proj` is initialised from a validated affine point. `r_c2` is the - // result of a valid scalar multiplication. `blst_p1_add` adds two projective points. - unsafe { - let mut c1_proj = blst_p1::default(); - blst::blst_p1_from_affine(&mut c1_proj, p1_affine); - blst::blst_p1_add(&mut result, &c1_proj, &r_c2); - } - - let mut compressed = [0_u8; COMMITMENT_SIZE]; - // SAFETY: `result` is a valid projective G1 point from the addition above. - // `compressed` is a 48-byte buffer matching the compressed G1 point size. - unsafe { - blst::blst_p1_compress(compressed.as_mut_ptr(), &result); - } + let c1_bytes: &[u8; COMMITMENT_SIZE] = c1 + .as_ref() + .try_into() + .map_err(|_| eyre::eyre!("commitment size mismatch"))?; + let c2_bytes: &[u8; COMMITMENT_SIZE] = c2 + .as_ref() + .try_into() + .map_err(|_| eyre::eyre!("commitment size mismatch"))?; + let compressed = g1_add_scaled(c1_bytes, c2_bytes, &r_bytes)?; Ok(KzgCommitment::from(compressed)) } -/// Compute the aggregated KZG commitment for a 256KB native Irys chunk. -/// -/// Splits the chunk into two 128KB halves, commits each half as a separate -/// blob, then aggregates: C = C1 + r·C2 where r = SHA256(C1 || C2). -/// -/// If `chunk_data` is shorter than [`CHUNK_SIZE_FOR_KZG`], it is zero-padded. -/// If it is longer, returns an error. -pub fn compute_chunk_commitment( - chunk_data: &[u8], - settings: &KzgSettings, -) -> eyre::Result { +/// Zero-pad chunk data and split into two `BLOB_SIZE` halves. 
+fn pad_and_split_chunk(chunk_data: &[u8]) -> eyre::Result<([u8; BLOB_SIZE], [u8; BLOB_SIZE])> { if chunk_data.len() > CHUNK_SIZE_FOR_KZG { return Err(eyre::eyre!( "chunk data too large: {} bytes (max {})", @@ -252,17 +196,32 @@ pub fn compute_chunk_commitment( let mut padded = [0_u8; CHUNK_SIZE_FOR_KZG]; padded[..chunk_data.len()].copy_from_slice(chunk_data); - let (first_half, second_half) = padded.split_at(BLOB_SIZE); - let first_half: &[u8; BLOB_SIZE] = first_half + // split_at at BLOB_SIZE on a CHUNK_SIZE_FOR_KZG (2*BLOB_SIZE) array + // always yields two BLOB_SIZE slices, so try_into is infallible here. + let (first, second) = padded.split_at(BLOB_SIZE); + let first: [u8; BLOB_SIZE] = first .try_into() - .expect("split_at guarantees BLOB_SIZE"); - let second_half: &[u8; BLOB_SIZE] = second_half + .map_err(|_| eyre::eyre!("split invariant"))?; + let second: [u8; BLOB_SIZE] = second .try_into() - .expect("split_at guarantees BLOB_SIZE"); - - let c1 = compute_blob_commitment(first_half, settings)?; - let c2 = compute_blob_commitment(second_half, settings)?; + .map_err(|_| eyre::eyre!("split invariant"))?; + Ok((first, second)) +} +/// Compute the aggregated KZG commitment for a 256KB native Irys chunk. +/// +/// Splits the chunk into two 128KB halves, commits each half as a separate +/// blob, then aggregates: C = C1 + r·C2 where r = SHA256(C1 || C2). +/// +/// If `chunk_data` is shorter than [`CHUNK_SIZE_FOR_KZG`], it is zero-padded. +/// If it is longer, returns an error. 
+pub fn compute_chunk_commitment( + chunk_data: &[u8], + settings: &KzgSettings, +) -> eyre::Result { + let (first_half, second_half) = pad_and_split_chunk(chunk_data)?; + let c1 = compute_blob_commitment(&first_half, settings)?; + let c2 = compute_blob_commitment(&second_half, settings)?; aggregate_commitments(&c1, &c2) } @@ -301,17 +260,14 @@ pub fn compute_composite_commitment( H256(hasher.finish()) } -// --------------------------------------------------------------------------- -// BLS12-381 scalar field (Fr) arithmetic helpers -// --------------------------------------------------------------------------- +// SAFETY for all blst FFI calls in this module: All blst types are initialized via +// `default()` or `from_bytes()`. Buffer sizes are guaranteed by Rust's type system +// (fixed-size arrays). Affine points are validated by `PublicKey::from_bytes` before +// conversion to projective form. Scalars are read from exactly-sized byte arrays. -/// Convert 32 big-endian bytes into a BLS12-381 scalar field element. -/// The input is automatically reduced modulo the scalar field order. fn fr_from_bytes(bytes: &[u8; SCALAR_SIZE]) -> blst::blst_fr { let mut scalar = blst::blst_scalar::default(); let mut fr = blst::blst_fr::default(); - // SAFETY: `bytes` is exactly 32 bytes; `blst_scalar_from_bendian` reads 32 bytes - // from the pointer. `blst_fr_from_scalar` reduces modulo the field order. unsafe { blst::blst_scalar_from_bendian(&mut scalar, bytes.as_ptr()); blst::blst_fr_from_scalar(&mut fr, &scalar); @@ -319,12 +275,9 @@ fn fr_from_bytes(bytes: &[u8; SCALAR_SIZE]) -> blst::blst_fr { fr } -/// Convert a BLS12-381 scalar field element back to 32 big-endian bytes. fn fr_to_bytes(fr: &blst::blst_fr) -> [u8; SCALAR_SIZE] { let mut scalar = blst::blst_scalar::default(); let mut bytes = [0_u8; SCALAR_SIZE]; - // SAFETY: `blst_scalar_from_fr` writes a valid scalar from the field element. - // `blst_bendian_from_scalar` writes exactly 32 bytes. 
unsafe { blst::blst_scalar_from_fr(&mut scalar, fr); blst::blst_bendian_from_scalar(bytes.as_mut_ptr(), &scalar); @@ -332,26 +285,20 @@ fn fr_to_bytes(fr: &blst::blst_fr) -> [u8; SCALAR_SIZE] { bytes } -/// Add two BLS12-381 scalars (mod field order). Inputs/outputs are big-endian. pub fn bls_fr_add(a: &[u8; SCALAR_SIZE], b: &[u8; SCALAR_SIZE]) -> [u8; SCALAR_SIZE] { let fr_a = fr_from_bytes(a); let fr_b = fr_from_bytes(b); let mut result = blst::blst_fr::default(); - // SAFETY: All `blst_fr` values are initialized. `blst_fr_add` computes - // the modular sum of two valid field elements. unsafe { blst::blst_fr_add(&mut result, &fr_a, &fr_b); } fr_to_bytes(&result) } -/// Multiply two BLS12-381 scalars (mod field order). Inputs/outputs are big-endian. pub fn bls_fr_mul(a: &[u8; SCALAR_SIZE], b: &[u8; SCALAR_SIZE]) -> [u8; SCALAR_SIZE] { let fr_a = fr_from_bytes(a); let fr_b = fr_from_bytes(b); let mut result = blst::blst_fr::default(); - // SAFETY: Both `blst_fr` values are initialized. `blst_fr_mul` computes - // the modular product of two valid field elements. unsafe { blst::blst_fr_mul(&mut result, &fr_a, &fr_b); } @@ -359,9 +306,6 @@ pub fn bls_fr_mul(a: &[u8; SCALAR_SIZE], b: &[u8; SCALAR_SIZE]) -> [u8; SCALAR_S } /// Compute P1 + scalar·P2 for two compressed BLS12-381 G1 points. -/// -/// Both `p1_bytes` and `p2_bytes` are 48-byte compressed G1 points. -/// `scalar_bytes` is a 32-byte big-endian scalar. pub fn g1_add_scaled( p1_bytes: &[u8; PROOF_SIZE], p2_bytes: &[u8; PROOF_SIZE], @@ -371,7 +315,6 @@ pub fn g1_add_scaled( use blst::{blst_p1, blst_p1_affine, blst_scalar}; let mut r_scalar = blst_scalar::default(); - // SAFETY: `scalar_bytes` is exactly 32 bytes. unsafe { blst::blst_scalar_from_bendian(&mut r_scalar, scalar_bytes.as_ptr()); } @@ -386,17 +329,12 @@ pub fn g1_add_scaled( let mut p2_proj = blst_p1::default(); let mut r_p2 = blst_p1::default(); - // SAFETY: All blst_p1 types are initialized via `default()`. 
`blst_p1_from_affine` - // converts a validated affine point to projective. `blst_p1_mult` multiplies a - // valid projective point by a 256-bit scalar. unsafe { blst::blst_p1_from_affine(&mut p2_proj, p2_affine); blst::blst_p1_mult(&mut r_p2, &p2_proj, r_scalar.b.as_ptr(), 256); } let mut result = blst_p1::default(); - // SAFETY: Both projective points are valid (from validated affine points - // and scalar multiplication). `blst_p1_add` adds two projective points. unsafe { let mut p1_proj = blst_p1::default(); blst::blst_p1_from_affine(&mut p1_proj, p1_affine); @@ -404,8 +342,6 @@ pub fn g1_add_scaled( } let mut compressed = [0_u8; PROOF_SIZE]; - // SAFETY: `result` is a valid projective G1 point. `compressed` is a 48-byte - // buffer matching compressed G1 point size. unsafe { blst::blst_p1_compress(compressed.as_mut_ptr(), &result); } @@ -413,57 +349,28 @@ pub fn g1_add_scaled( Ok(compressed) } -// --------------------------------------------------------------------------- -// KZG opening proof functions -// --------------------------------------------------------------------------- - /// Compute a KZG opening proof for a 256KB chunk at evaluation point `z`. /// -/// Splits the chunk into two 128KB halves (same scheme as `compute_chunk_commitment`), -/// computes per-half KZG proofs, then aggregates: -/// - `π = π1 + r·π2` (G1 point addition) -/// - `y = y1 + r·y2` (scalar field addition) -/// where `r = SHA256(C1 || C2)` and `C1, C2` are the per-half commitments. -/// -/// Returns `(proof_bytes, evaluation_bytes)` = (π, y). +/// Aggregates per-half proofs: `π = π1 + r·π2`, `y = y1 + r·y2` +/// where `r = SHA256(C1 || C2)`. 
pub fn compute_chunk_opening_proof( chunk_data: &[u8], z_bytes: &[u8; SCALAR_SIZE], settings: &KzgSettings, ) -> eyre::Result<([u8; PROOF_SIZE], [u8; SCALAR_SIZE])> { - if chunk_data.len() > CHUNK_SIZE_FOR_KZG { - return Err(eyre::eyre!( - "chunk data too large: {} bytes (max {})", - chunk_data.len(), - CHUNK_SIZE_FOR_KZG - )); - } + let (first_half, second_half) = pad_and_split_chunk(chunk_data)?; - let mut padded = [0_u8; CHUNK_SIZE_FOR_KZG]; - padded[..chunk_data.len()].copy_from_slice(chunk_data); - - let (first_half, second_half) = padded.split_at(BLOB_SIZE); - let first_half: &[u8; BLOB_SIZE] = first_half - .try_into() - .expect("split_at guarantees BLOB_SIZE"); - let second_half: &[u8; BLOB_SIZE] = second_half - .try_into() - .expect("split_at guarantees BLOB_SIZE"); - - let blob1 = Blob::new(*first_half); - let blob2 = Blob::new(*second_half); + let blob1 = Blob::new(first_half); + let blob2 = Blob::new(second_half); - // Per-half commitments needed for aggregation scalar r - let c1 = compute_blob_commitment(first_half, settings)?; - let c2 = compute_blob_commitment(second_half, settings)?; + let c1 = compute_blob_commitment(&first_half, settings)?; + let c2 = compute_blob_commitment(&second_half, settings)?; - // r = SHA256(C1 || C2) — same derivation as aggregate_commitments let mut hasher = sha::Sha256::new(); hasher.update(c1.as_ref()); hasher.update(c2.as_ref()); let r_bytes = hasher.finish(); - // Compute KZG opening proofs for each half let z = c_kzg::Bytes32::new(*z_bytes); let (proof1, y1) = settings .compute_kzg_proof(&blob1, &z) @@ -472,12 +379,10 @@ pub fn compute_chunk_opening_proof( .compute_kzg_proof(&blob2, &z) .map_err(|e| eyre::eyre!("KZG proof computation failed for second half: {e}"))?; - // Aggregate proof: π = π1 + r·π2 let proof1_bytes: [u8; PROOF_SIZE] = *proof1.to_bytes().as_ref(); let proof2_bytes: [u8; PROOF_SIZE] = *proof2.to_bytes().as_ref(); let aggregated_proof = g1_add_scaled(&proof1_bytes, &proof2_bytes, &r_bytes)?; - // 
Aggregate evaluation: y = y1 + r·y2 let y1_bytes: [u8; SCALAR_SIZE] = *y1.as_ref(); let y2_bytes: [u8; SCALAR_SIZE] = *y2.as_ref(); let r_y2 = bls_fr_mul(&y2_bytes, &r_bytes); @@ -507,23 +412,12 @@ pub fn verify_chunk_opening_proof( .map_err(|e| eyre::eyre!("KZG proof verification failed: {e}")) } -// --------------------------------------------------------------------------- -// Challenge point derivation -// --------------------------------------------------------------------------- - -/// Derive a BLS12-381 field element from a challenge seed and chunk offset. -/// -/// Used as the evaluation point `z` for custody opening proofs. -/// Result is `SHA256(challenge_seed || chunk_offset_le)` reduced modulo -/// the BLS12-381 scalar field order. +/// `z = SHA256(challenge_seed || chunk_offset_le) mod BLS12-381_r` pub fn derive_challenge_point(challenge_seed: &H256, chunk_offset: u32) -> [u8; SCALAR_SIZE] { let mut hasher = sha::Sha256::new(); hasher.update(&challenge_seed.0); hasher.update(&chunk_offset.to_le_bytes()); - let hash = hasher.finish(); - - // Reduce modulo BLS12-381 scalar field order via blst - fr_to_bytes(&fr_from_bytes(&hash)) + fr_to_bytes(&fr_from_bytes(&hasher.finish())) } #[cfg(test)] @@ -535,8 +429,6 @@ mod tests { default_kzg_settings() } - /// Helper to compare KzgCommitment values by their byte representation, - /// since the c-kzg type doesn't implement PartialEq. fn commitment_bytes(c: &KzgCommitment) -> &[u8] { c.as_ref() } @@ -556,8 +448,6 @@ mod tests { #[test] fn zero_padded_blob_matches_single_commitment() { - // A chunk that fits in a single blob (≤128KB) should still produce a - // valid aggregated commitment. The second half is all zeros. 
let small_data = vec![99_u8; BLOB_SIZE]; let commitment = compute_chunk_commitment(&small_data, kzg_settings()).unwrap(); @@ -673,42 +563,6 @@ mod tests { } } - // -- BLS scalar field arithmetic ------------------------------------------- - - #[test] - fn bls_fr_add_identity() { - let zero = [0_u8; SCALAR_SIZE]; - let a = [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 42, - ]; - assert_eq!(bls_fr_add(&a, &zero), a); - } - - #[test] - fn bls_fr_mul_identity() { - let one = { - let mut b = [0_u8; SCALAR_SIZE]; - b[SCALAR_SIZE - 1] = 1; - b - }; - let a = [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 42, - ]; - assert_eq!(bls_fr_mul(&a, &one), a); - } - - #[test] - fn bls_fr_mul_zero() { - let zero = [0_u8; SCALAR_SIZE]; - let a = [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 42, - ]; - assert_eq!(bls_fr_mul(&a, &zero), zero); - } - #[test] fn g1_add_scaled_valid_points() { let data1 = [1_u8; BLOB_SIZE]; @@ -723,12 +577,9 @@ mod tests { s }; let result = g1_add_scaled(&p1, &p2, &scalar).unwrap(); - // p1 + 1*p2 should be a valid G1 point blst::min_pk::PublicKey::from_bytes(&result).expect("result should be a valid G1 point"); } - // -- Opening proof tests --------------------------------------------------- - #[test] fn opening_proof_wrong_data_fails() { let data = vec![42_u8; CHUNK_SIZE_FOR_KZG]; @@ -741,11 +592,9 @@ mod tests { let z = derive_challenge_point(&H256::from([1_u8; 32]), 0); let (_proof, _y) = compute_chunk_opening_proof(&data, &z, settings).unwrap(); - // Compute proof for different data let bad_data = vec![7_u8; CHUNK_SIZE_FOR_KZG]; let (bad_proof, bad_y) = compute_chunk_opening_proof(&bad_data, &z, settings).unwrap(); - // Verify with original commitment but bad proof/y — should fail let ok = verify_chunk_opening_proof(&commitment_bytes_val, &z, &bad_y, &bad_proof, settings) .unwrap(); @@ -771,38 
+620,12 @@ mod tests { let z1 = derive_challenge_point(&H256::from([1_u8; 32]), 0); let (proof, y) = compute_chunk_opening_proof(&data, &z1, settings).unwrap(); - // Verify with a different z — should fail let z2 = derive_challenge_point(&H256::from([2_u8; 32]), 0); let ok = verify_chunk_opening_proof(&commitment_bytes_val, &z2, &y, &proof, settings).unwrap(); assert!(!ok); } - // -- Challenge point derivation tests -------------------------------------- - - #[test] - fn derive_challenge_point_deterministic() { - let seed = H256::from([42_u8; 32]); - let z1 = derive_challenge_point(&seed, 0); - let z2 = derive_challenge_point(&seed, 0); - assert_eq!(z1, z2); - } - - #[test] - fn derive_challenge_point_different_offsets() { - let seed = H256::from([42_u8; 32]); - let z0 = derive_challenge_point(&seed, 0); - let z1 = derive_challenge_point(&seed, 1); - assert_ne!(z0, z1); - } - - #[test] - fn derive_challenge_point_different_seeds() { - let z1 = derive_challenge_point(&H256::from([1_u8; 32]), 0); - let z2 = derive_challenge_point(&H256::from([2_u8; 32]), 0); - assert_ne!(z1, z2); - } - #[test] fn derive_challenge_point_valid_field_element() { // BLS12-381 scalar field order (big-endian) From 6c2fd81871d3788a0ffb747e746ed1c4a08f9894 Mon Sep 17 00:00:00 2001 From: jason Date: Fri, 27 Feb 2026 14:03:05 +0000 Subject: [PATCH 12/13] feat(custody): wire custody proof challenge-response loop and fix KZG stack overflow --- crates/actors/src/blob_extraction_service.rs | 18 +- crates/actors/src/block_discovery.rs | 1 + crates/actors/src/block_producer.rs | 19 +- crates/actors/src/block_tree_service.rs | 15 ++ crates/actors/src/block_validation.rs | 49 ++++- .../chunk_ingress_service/ingress_proofs.rs | 187 ++++++------------ crates/actors/src/custody_proof_service.rs | 126 +++++++++++- crates/actors/src/mempool_service.rs | 30 ++- .../block_validation_task.rs | 33 +++- crates/chain/src/chain.rs | 1 + crates/p2p/src/block_pool.rs | 142 +++++++++++++ crates/p2p/src/cache.rs | 7 
+ crates/p2p/src/gossip_client.rs | 1 + crates/p2p/src/gossip_data_handler.rs | 3 + crates/p2p/src/gossip_service.rs | 2 + crates/p2p/src/server.rs | 24 ++- crates/p2p/src/tests/util.rs | 7 + crates/types/src/block.rs | 5 + crates/types/src/config/consensus.rs | 23 +-- crates/types/src/ingress.rs | 64 +++--- crates/types/src/kzg.rs | 73 +++++-- 21 files changed, 627 insertions(+), 203 deletions(-) diff --git a/crates/actors/src/blob_extraction_service.rs b/crates/actors/src/blob_extraction_service.rs index 10ca0c54bc..574c303d62 100644 --- a/crates/actors/src/blob_extraction_service.rs +++ b/crates/actors/src/blob_extraction_service.rs @@ -93,11 +93,17 @@ impl BlobExtractionService { } }; - for (blob_idx, blob) in sidecar.blobs.iter().enumerate() { + eyre::ensure!( + sidecar.commitments.len() == sidecar.blobs.len(), + "sidecar commitment count ({}) != blob count ({})", + sidecar.commitments.len(), + sidecar.blobs.len(), + ); + for (blob, commitment) in sidecar.blobs.iter().zip(sidecar.commitments.iter()) { self.process_single_blob( &signer, blob.as_ref(), - sidecar.commitments[blob_idx].as_ref(), + commitment.as_ref(), chain_id, anchor, )?; @@ -126,6 +132,7 @@ impl BlobExtractionService { anchor: H256, ) -> eyre::Result<()> { use irys_types::ingress::generate_ingress_proof_v2_from_blob; + use irys_types::kzg::KzgCommitmentBytes; let proof = generate_ingress_proof_v2_from_blob( signer, @@ -137,6 +144,9 @@ impl BlobExtractionService { let data_root = proof.data_root(); + // Blob is a single chunk (index 0) — store its KZG commitment for custody verification + let per_chunk_commitments = vec![(0_u32, KzgCommitmentBytes::from(*commitment_bytes))]; + let chunk_size = u64::try_from(irys_types::kzg::CHUNK_SIZE_FOR_KZG) .map_err(|_| eyre::eyre!("chunk size overflow"))?; @@ -160,8 +170,7 @@ impl BlobExtractionService { }, ); - let mut chunk_data = vec![0_u8; irys_types::kzg::CHUNK_SIZE_FOR_KZG]; - chunk_data[..blob_data.len()].copy_from_slice(blob_data); + let chunk_data = 
irys_types::kzg::zero_pad_to_chunk_size(blob_data)?; if let Err(e) = self .mempool_sender @@ -169,6 +178,7 @@ impl BlobExtractionService { tx_header, ingress_proof: proof, chunk_data, + per_chunk_commitments, }) { warn!(data_root = %data_root, error = %e, "Failed to send blob-derived tx to mempool"); diff --git a/crates/actors/src/block_discovery.rs b/crates/actors/src/block_discovery.rs index 535a36ae43..8519d6c7cf 100644 --- a/crates/actors/src/block_discovery.rs +++ b/crates/actors/src/block_discovery.rs @@ -960,6 +960,7 @@ pub async fn build_block_body_for_processed_block_header( block_hash: block_header.block_hash, data_transactions: data_txs, commitment_transactions: commitment_txs, + custody_proofs: Vec::new(), }; Ok(block_body) diff --git a/crates/actors/src/block_producer.rs b/crates/actors/src/block_producer.rs index c8e51dda4f..e2926e91c0 100644 --- a/crates/actors/src/block_producer.rs +++ b/crates/actors/src/block_producer.rs @@ -1198,7 +1198,23 @@ pub trait BlockProdStrategy { let block_signer = self.inner().config.irys_signer(); block_signer.sign_block_header(&mut irys_block)?; - // Build BlockTransactions from the mempool bundle + let custody_proofs = if self.inner().config.consensus.enable_custody_proofs { + let (tx, rx) = oneshot::channel(); + if let Err(e) = self + .inner() + .service_senders + .custody_proof + .send(crate::custody_proof_service::CustodyProofMessage::TakePendingProofs(tx)) + { + warn!(error = %e, "Failed to request pending custody proofs"); + Vec::new() + } else { + rx.await.unwrap_or_default() + } + } else { + Vec::new() + }; + let mut all_data_txs = Vec::new(); all_data_txs.extend(mempool_bundle.submit_txs); all_data_txs.extend(mempool_bundle.publish_txs.txs); @@ -1207,6 +1223,7 @@ pub trait BlockProdStrategy { block_hash: irys_block.block_hash, commitment_transactions: mempool_bundle.commitment_txs, data_transactions: all_data_txs, + custody_proofs, }; let sealed_block = IrysSealedBlock::new(irys_block, block_body)?; diff 
--git a/crates/actors/src/block_tree_service.rs b/crates/actors/src/block_tree_service.rs index e70068ec7f..0f7a5aef74 100644 --- a/crates/actors/src/block_tree_service.rs +++ b/crates/actors/src/block_tree_service.rs @@ -795,6 +795,21 @@ impl BlockTreeServiceInner { ); } + if state == ChainState::Onchain && self.config.consensus.enable_custody_proofs { + let msg = crate::custody_proof_service::CustodyProofMessage::NewBlock { + vdf_output: arc_block.vdf_limiter_info.output, + block_height: height, + }; + if let Err(e) = self.service_senders.custody_proof.send(msg) { + tracing::warn!( + block.hash = ?block_hash, + block.height = height, + error = %e, + "Failed to send custody proof new block trigger", + ); + } + } + Ok(()) } diff --git a/crates/actors/src/block_validation.rs b/crates/actors/src/block_validation.rs index 6c90b50354..982bf38b74 100644 --- a/crates/actors/src/block_validation.rs +++ b/crates/actors/src/block_validation.rs @@ -3065,8 +3065,6 @@ fn get_submit_ledger_slot_addresses( /// Verify custody proofs included in a block. /// /// Returns `Ok(())` if all proofs are valid or custody proofs are disabled. -/// This function is not yet wired into `validate_block()` — that happens once -/// blocks carry custody proofs via gossip (Phase 3). pub fn validate_custody_proofs( custody_proofs: &[irys_types::custody::CustodyProof], consensus_config: &ConsensusConfig, @@ -3131,6 +3129,53 @@ pub fn validate_custody_proofs( Ok(()) } +/// Store per-chunk KZG commitments extracted from V2 EvmBlob ingress proofs. +/// +/// For blob-derived data (single chunk), the per-chunk commitment at index 0 +/// equals the blob's KZG commitment from the ingress proof. This ensures +/// custody proof verification can find the commitment for peer-received blocks. 
+pub fn store_blob_ingress_commitments( + block: &IrysBlockHeader, + db: &DatabaseProvider, +) -> eyre::Result<()> { + use irys_types::ingress::DataSourceType; + use irys_types::kzg::KzgCommitmentBytes; + + let mut to_store: Vec<(H256, KzgCommitmentBytes)> = Vec::new(); + + for ledger in &block.data_ledgers { + let proofs = match &ledger.proofs { + Some(p) => &p.0, + None => continue, + }; + for proof in proofs { + if let IngressProof::V2(v2) = proof { + if v2.source_type == DataSourceType::EvmBlob { + to_store.push((v2.data_root, v2.kzg_commitment)); + } + } + } + } + + if to_store.is_empty() { + return Ok(()); + } + + db.update(|rw_tx| { + for (data_root, commitment) in &to_store { + irys_database::store_per_chunk_kzg_commitments(rw_tx, *data_root, &[(0, *commitment)])?; + } + Ok::<(), eyre::Report>(()) + })??; + + tracing::debug!( + count = to_store.len(), + "Stored per-chunk KZG commitments from blob ingress proofs", + ); + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs index e8d032bfd8..ff7e15ea70 100644 --- a/crates/actors/src/chunk_ingress_service/ingress_proofs.rs +++ b/crates/actors/src/chunk_ingress_service/ingress_proofs.rs @@ -75,13 +75,11 @@ impl ChunkIngressServiceInner { ) .map_err(|msg| IngressProofError::RejectedVersion(msg.into()))?; - // Validate the proofs signature and basic details let data_root_val = ingress_proof.data_root(); let address = ingress_proof .pre_validate(&data_root_val) .map_err(|_| IngressProofError::InvalidSignature)?; - // Reject proofs from addresses not staked or pending stake (spam protection) let block_tree = self.block_tree_read_guard.read(); let epoch_snapshot = block_tree.canonical_epoch_snapshot(); let commitment_snapshot = block_tree.canonical_commitment_snapshot(); @@ -91,7 +89,6 @@ impl ChunkIngressServiceInner { return Err(IngressProofError::UnstakedAddress); } - // Validate the 
anchor
        self.validate_ingress_proof_anchor(&ingress_proof)?;
 
         // TODO: we should only overwrite a proof we already have if the new one has a newer anchor than the old one
@@ -336,6 +333,56 @@ impl ProofCheckResult {
     }
 }
 
+/// RAII guard that notifies the cache service when proof generation completes.
+/// Sends `NotifyProofGenerationStarted` on acquisition and `NotifyProofGenerationCompleted`
+/// on drop — guaranteeing the completion signal even on early-return error paths.
+struct ProofGenerationGuard<'a> {
+    data_root: DataRoot,
+    cache_sender: &'a CacheServiceSender,
+}
+
+impl<'a> ProofGenerationGuard<'a> {
+    fn acquire(data_root: DataRoot, cache_sender: &'a CacheServiceSender) -> eyre::Result<Self> {
+        let (response_sender, response_receiver) = std::sync::mpsc::channel();
+        cache_sender
+            .send(CacheServiceAction::RequestIngressProofGenerationState {
+                data_root,
+                response_sender,
+            })
+            .map_err(|err| {
+                eyre::eyre!("Failed to request ingress proof generation state: {err}")
+            })?;
+
+        let is_already_generating = response_receiver.recv().map_err(|err| {
+            eyre::eyre!("Failed to receive ingress proof generation state response: {err}")
+        })?;
+
+        if is_already_generating {
+            return Err(eyre::eyre!(
+                "Ingress proof generation already in progress for data_root {:?}",
+                data_root
+            ));
+        }
+
+        let _ = cache_sender.send(CacheServiceAction::NotifyProofGenerationStarted(data_root));
+
+        Ok(Self {
+            data_root,
+            cache_sender,
+        })
+    }
+}
+
+impl Drop for ProofGenerationGuard<'_> {
+    fn drop(&mut self) {
+        let _ = self
+            .cache_sender
+            .send(CacheServiceAction::NotifyProofGenerationCompleted(
+                self.data_root,
+            ));
+    }
+}
+
 /// Generates (and stores) an ingress proof for the provided `data_root` if all chunks are present.
 /// Validates the generated proof's anchor against the canonical chain and gossips it if valid.
 /// Returns the generated proof on success.
@@ -361,48 +408,17 @@ pub fn generate_and_store_ingress_proof( let data_size = calculate_and_validate_data_size(db, data_root, chunk_size)?; - // Pick anchor: hint or latest canonical block let latest_anchor = block_tree_guard .read() .get_latest_canonical_entry() .block_hash(); let anchor = anchor_hint.unwrap_or(latest_anchor); - let is_already_generating = { - let (response_sender, response_receiver) = std::sync::mpsc::channel(); - if let Err(err) = - cache_sender.send_traced(CacheServiceAction::RequestIngressProofGenerationState { - data_root, - response_sender, - }) - { - return Err(IngressProofGenerationError::CacheServiceError(format!( - "Failed to request ingress proof generation state: {err}" - ))); - } - - response_receiver.recv().map_err(|err| { - IngressProofGenerationError::CacheServiceError(format!( - "Failed to receive ingress proof generation state response: {err}" - )) - })? - }; - - if is_already_generating { - return Err(IngressProofGenerationError::AlreadyGenerating); - } + let _guard = ProofGenerationGuard::acquire(data_root, cache_sender) + .map_err(|e| IngressProofGenerationError::CacheServiceError(e.to_string()))?; - if let Err(e) = - cache_sender.send_traced(CacheServiceAction::NotifyProofGenerationStarted(data_root)) - { - warn!( - ?data_root, - "Failed to notify cache of proof generation start: {e}" - ); - } - - let proof_res = super::chunks::generate_ingress_proof( - db.clone(), + let proof = super::chunks::generate_ingress_proof( + db.clone(), // clone: Arc-wrapped DatabaseProvider — cheap ref-count bump data_root, data_size, chunk_size, @@ -411,33 +427,11 @@ pub fn generate_and_store_ingress_proof( anchor, config.consensus.enable_shadow_kzg_logging, config.consensus.use_kzg_ingress_proofs, - ); - - let proof = match proof_res { - Ok(p) => p, - Err(e) => { - if let Err(e) = cache_sender.send_traced( - CacheServiceAction::NotifyProofGenerationCompleted(data_root), - ) { - warn!( - ?data_root, - "Failed to notify cache of proof generation 
completion: {e}" - ); - } - return Err(IngressProofGenerationError::GenerationFailed(e.to_string())); - } - }; + ) + .map_err(|e| IngressProofGenerationError::GenerationFailed(e.to_string()))?; gossip_ingress_proof(gossip_sender, &proof, block_tree_guard, db, config); - if let Err(e) = cache_sender.send_traced(CacheServiceAction::NotifyProofGenerationCompleted( - data_root, - )) { - warn!( - ?data_root, - "Failed to notify cache of proof generation completion: {e}" - ); - } Ok(proof) } @@ -450,52 +444,16 @@ pub fn reanchor_and_store_ingress_proof( gossip_sender: &tokio::sync::mpsc::UnboundedSender>, cache_sender: &CacheServiceSender, ) -> Result { - // Only staked nodes should reanchor ingress proofs let epoch_snapshot = block_tree_guard.read().canonical_epoch_snapshot(); if !epoch_snapshot.is_staked(signer.address()) { return Err(IngressProofGenerationError::NodeNotStaked); } - let is_already_generating = { - let (response_sender, response_receiver) = std::sync::mpsc::channel(); - if let Err(err) = - cache_sender.send_traced(CacheServiceAction::RequestIngressProofGenerationState { - data_root: proof.data_root(), - response_sender, - }) - { - return Err(IngressProofGenerationError::CacheServiceError(format!( - "Failed to request ingress proof generation state: {err}" - ))); - } - - response_receiver.recv().map_err(|err| { - IngressProofGenerationError::CacheServiceError(format!( - "Failed to receive ingress proof generation state response: {err}" - )) - })? 
- }; - let data_root = proof.data_root(); + let _guard = ProofGenerationGuard::acquire(data_root, cache_sender) + .map_err(|e| IngressProofGenerationError::CacheServiceError(e.to_string()))?; - if is_already_generating { - return Err(IngressProofGenerationError::AlreadyGenerating); - } - - if let Err(e) = - cache_sender.send_traced(CacheServiceAction::NotifyProofGenerationStarted(data_root)) - { - warn!(data_root = ?data_root, "Failed to notify cache of proof generation start: {e}"); - } - - if let Err(e) = calculate_and_validate_data_size(db, data_root, config.consensus.chunk_size) { - if let Err(e) = cache_sender.send_traced( - CacheServiceAction::NotifyProofGenerationCompleted(data_root), - ) { - warn!(data_root = ?data_root, "Failed to notify cache of proof generation completion: {e}"); - } - return Err(e); - } + calculate_and_validate_data_size(db, data_root, config.consensus.chunk_size)?; let latest_anchor = block_tree_guard .read() @@ -504,31 +462,14 @@ pub fn reanchor_and_store_ingress_proof( let mut proof = proof.clone(); // clone: need owned value for set_anchor + sign mutation proof.set_anchor(latest_anchor); - if let Err(e) = signer.sign_ingress_proof(&mut proof) { - if let Err(e) = cache_sender.send_traced( - CacheServiceAction::NotifyProofGenerationCompleted(data_root), - ) { - warn!(data_root = ?data_root, "Failed to notify cache of proof generation completion: {e}"); - } - return Err(IngressProofGenerationError::GenerationFailed(e.to_string())); - } - - if let Err(e) = store_ingress_proof(db, &proof, signer) { - if let Err(e) = cache_sender.send_traced( - CacheServiceAction::NotifyProofGenerationCompleted(data_root), - ) { - warn!(data_root = ?data_root, "Failed to notify cache of proof generation completion: {e}"); - } - return Err(IngressProofGenerationError::GenerationFailed(e.to_string())); - } + signer + .sign_ingress_proof(&mut proof) + .map_err(|e| IngressProofGenerationError::GenerationFailed(e.to_string()))?; + store_ingress_proof(db, 
&proof, signer) + .map_err(|e| IngressProofGenerationError::GenerationFailed(e.to_string()))?; gossip_ingress_proof(gossip_sender, &proof, block_tree_guard, db, config); - if let Err(e) = cache_sender.send_traced(CacheServiceAction::NotifyProofGenerationCompleted( - data_root, - )) { - warn!(data_root = ?data_root, "Failed to notify cache of proof generation completion: {e}"); - } Ok(proof) } diff --git a/crates/actors/src/custody_proof_service.rs b/crates/actors/src/custody_proof_service.rs index f10a59003d..9ccde14dbd 100644 --- a/crates/actors/src/custody_proof_service.rs +++ b/crates/actors/src/custody_proof_service.rs @@ -1,23 +1,33 @@ +use irys_database::get_per_chunk_kzg_commitment; use irys_domain::StorageModulesReadGuard; use irys_types::custody::{ - select_challenged_offsets, CustodyChallenge, CustodyOpening, CustodyProof, + CustodyChallenge, CustodyOpening, CustodyProof, CustodyVerificationResult, + derive_challenge_seed, select_challenged_offsets, verify_custody_proof, }; use irys_types::kzg::{compute_chunk_opening_proof, default_kzg_settings, derive_challenge_point}; use irys_types::v2::{GossipBroadcastMessageV2, GossipDataV2}; -use irys_types::{Config, GossipCacheKey, PartitionChunkOffset}; +use irys_types::{ + Config, DatabaseProvider, GossipCacheKey, H256, IrysAddress, PartitionChunkOffset, +}; use reth::revm::primitives::FixedBytes; +use reth_db::Database as _; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; use tracing::{debug, warn}; #[derive(Debug)] pub enum CustodyProofMessage { Challenge(CustodyChallenge), + ReceivedProof(CustodyProof), + TakePendingProofs(tokio::sync::oneshot::Sender>), + NewBlock { vdf_output: H256, block_height: u64 }, } pub struct CustodyProofService { config: Config, storage_modules_guard: StorageModulesReadGuard, gossip_sender: UnboundedSender, + irys_db: DatabaseProvider, + pending_proofs: Vec, } impl CustodyProofService { @@ -25,6 +35,7 @@ impl CustodyProofService { config: Config, storage_modules_guard: 
StorageModulesReadGuard, gossip_sender: UnboundedSender, + irys_db: DatabaseProvider, rx: UnboundedReceiver, runtime_handle: tokio::runtime::Handle, ) { @@ -32,12 +43,14 @@ impl CustodyProofService { config, storage_modules_guard, gossip_sender, + irys_db, + pending_proofs: Vec::new(), }; runtime_handle.spawn(service.start(rx)); } - async fn start(self, mut rx: UnboundedReceiver) { + async fn start(mut self, mut rx: UnboundedReceiver) { debug!("Custody proof service started"); while let Some(msg) = rx.recv().await { match msg { @@ -50,11 +63,71 @@ impl CustodyProofService { ); } } + CustodyProofMessage::ReceivedProof(proof) => { + self.handle_received_proof(proof); + } + CustodyProofMessage::TakePendingProofs(sender) => { + let proofs = std::mem::take(&mut self.pending_proofs); + let _ = sender.send(proofs); + } + CustodyProofMessage::NewBlock { + vdf_output, + block_height, + } => { + self.handle_new_block(&vdf_output, block_height); + } } } debug!("Custody proof service stopped"); } + fn handle_received_proof(&mut self, proof: CustodyProof) { + if !self.config.consensus.enable_custody_proofs { + return; + } + + let kzg_settings = default_kzg_settings(); + let result = self + .irys_db + .view(|tx| { + verify_custody_proof( + &proof, + |data_root, chunk_index| { + get_per_chunk_kzg_commitment(tx, data_root, chunk_index) + }, + kzg_settings, + self.config.consensus.custody_challenge_count, + self.config.consensus.num_chunks_in_partition, + ) + }) + .map_err(eyre::Report::from) + .and_then(|inner| inner); + + match result { + Ok(CustodyVerificationResult::Valid) => { + debug!( + partition.hash = %proof.partition_hash, + "Received valid custody proof, storing as pending", + ); + self.pending_proofs.push(proof); + } + Ok(invalid) => { + warn!( + partition.hash = %proof.partition_hash, + result = ?invalid, + "Received invalid custody proof, discarding", + ); + } + Err(e) => { + warn!( + partition.hash = %proof.partition_hash, + error = %e, + "Failed to verify received 
custody proof", + ); + } + } + } + fn handle_challenge(&self, challenge: &CustodyChallenge) -> eyre::Result<()> { let storage_modules = self.storage_modules_guard.read(); let sm = storage_modules @@ -142,12 +215,44 @@ impl CustodyProofService { Ok(()) } + + fn handle_new_block(&self, vdf_output: &H256, block_height: u64) { + if !self.config.consensus.enable_custody_proofs { + return; + } + + let mining_address = IrysAddress::from_private_key(&self.config.node_config.mining_key); + let storage_modules = self.storage_modules_guard.read(); + + for sm in storage_modules.iter() { + let partition_hash = match sm.partition_hash() { + Some(h) => h, + None => continue, + }; + + let challenge_seed = derive_challenge_seed(&vdf_output.0, &partition_hash); + let challenge = CustodyChallenge { + challenged_miner: mining_address, + partition_hash, + challenge_seed, + challenge_block_height: block_height, + }; + + if let Err(e) = self.handle_challenge(&challenge) { + warn!( + partition.hash = %partition_hash, + error = %e, + "Failed to generate self-custody proof", + ); + } + } + } } #[cfg(test)] mod tests { use super::*; - use irys_types::{Config, IrysAddress, NodeConfig, H256}; + use irys_types::{Config, H256, IrysAddress, NodeConfig}; use std::sync::{Arc, RwLock}; use tokio::sync::mpsc::unbounded_channel; @@ -164,6 +269,17 @@ mod tests { irys_domain::StorageModulesReadGuard::new(Arc::new(RwLock::new(Vec::new()))) } + fn test_db() -> DatabaseProvider { + let path = tempfile::tempdir().unwrap(); + let db = irys_database::open_or_create_db( + path.path(), + irys_database::tables::IrysTables::ALL, + None, + ) + .unwrap(); + DatabaseProvider(Arc::new(db)) + } + #[test] fn handle_challenge_unknown_partition_returns_ok() { let config = test_config_with_custody(); @@ -172,6 +288,8 @@ mod tests { config, storage_modules_guard: empty_storage_guard(), gossip_sender: gossip_tx, + irys_db: test_db(), + pending_proofs: Vec::new(), }; let challenge = CustodyChallenge { diff --git 
a/crates/actors/src/mempool_service.rs b/crates/actors/src/mempool_service.rs index 9337c595ed..362a716d5e 100644 --- a/crates/actors/src/mempool_service.rs +++ b/crates/actors/src/mempool_service.rs @@ -47,6 +47,7 @@ use irys_types::{DataLedger, IngressProofsList, TokioServiceHandle, TxKnownStatu use lru::LruCache; use reth::rpc::types::BlockId; use reth::tasks::shutdown::Shutdown; +use reth_db::Database as _; use reth_db::cursor::*; use std::collections::BTreeMap; use std::fmt::Display; @@ -257,6 +258,7 @@ pub enum MempoolServiceMessage { tx_header: DataTransactionHeader, ingress_proof: IngressProof, chunk_data: Vec, + per_chunk_commitments: Vec<(u32, irys_types::kzg::KzgCommitmentBytes)>, }, } @@ -438,9 +440,15 @@ impl Inner { tx_header, ingress_proof, chunk_data, + per_chunk_commitments, } => { - self.handle_ingest_blob_derived_tx(tx_header, ingress_proof, chunk_data) - .await; + self.handle_ingest_blob_derived_tx( + tx_header, + ingress_proof, + chunk_data, + per_chunk_commitments, + ) + .await; } } Ok(()) @@ -451,6 +459,7 @@ impl Inner { tx_header: DataTransactionHeader, ingress_proof: IngressProof, chunk_data: Vec, + per_chunk_commitments: Vec<(u32, irys_types::kzg::KzgCommitmentBytes)>, ) { if let Err(reason) = ingress_proof.check_version_accepted( self.config.consensus.accept_kzg_ingress_proofs, @@ -499,6 +508,23 @@ impl Inner { if let Err(e) = self.handle_ingest_ingress_proof(ingress_proof) { warn!(data_root = %data_root, error = ?e, "Failed to store blob ingress proof"); } + + if !per_chunk_commitments.is_empty() { + if let Err(e) = self.irys_db.update(|rw_tx| { + irys_database::store_per_chunk_kzg_commitments( + rw_tx, + data_root, + &per_chunk_commitments, + ) + .map_err(|e| reth_db::DatabaseError::Other(e.to_string())) + }) { + warn!( + data_root = %data_root, + error = %e, + "Failed to store per-chunk KZG commitments for blob" + ); + } + } } #[tracing::instrument(level = "trace", skip_all)] diff --git 
a/crates/actors/src/validation_service/block_validation_task.rs b/crates/actors/src/validation_service/block_validation_task.rs index d2c7fd8a99..f1183381a5 100644 --- a/crates/actors/src/validation_service/block_validation_task.rs +++ b/crates/actors/src/validation_service/block_validation_task.rs @@ -520,6 +520,22 @@ impl BlockValidationTask { }) }; + let transactions_for_custody = Arc::clone(&transactions); + let custody_config = self.service_inner.config.clone(); // clone: Config is Arc-wrapped + let custody_db = self.service_inner.db.clone(); // clone: DatabaseProvider is Arc-wrapped + let custody_proofs_task = async move { + crate::block_validation::validate_custody_proofs( + &transactions_for_custody.custody_proofs, + &custody_config.consensus, + &custody_db, + ) + .map(|()| ValidationResult::Valid) + .unwrap_or_else(|err| { + tracing::error!(custom.error = ?err, "custody proofs validation failed"); + ValidationResult::Invalid(ValidationError::Other(err.to_string())) + }) + }; + // Wait for all validation tasks to complete let ( recall_result, @@ -528,13 +544,15 @@ impl BlockValidationTask { seeds_validation_result, commitment_ordering_result, data_txs_result, + custody_proofs_result, ) = tokio::join!( recall_task, poa_task, shadow_tx_task, seeds_validation_task, commitment_ordering_task, - data_txs_validation_task + data_txs_validation_task, + custody_proofs_task, ); // Check shadow_tx_result first to extract ExecutionData @@ -554,6 +572,7 @@ impl BlockValidationTask { &seeds_validation_result, &commitment_ordering_result, &data_txs_result, + &custody_proofs_result, ) { ( ValidationResult::Valid, @@ -561,6 +580,7 @@ impl BlockValidationTask { ValidationResult::Valid, ValidationResult::Valid, ValidationResult::Valid, + ValidationResult::Valid, ) => { tracing::debug!("All consensus validations successful, submitting to reth"); @@ -580,6 +600,16 @@ impl BlockValidationTask { match reth_result { Ok(()) => { tracing::debug!("Reth execution layer validation 
successful"); + + // Store per-chunk KZG commitments from blob ingress proofs + // so custody verification can find them for peer-received blocks. + if let Err(e) = crate::block_validation::store_blob_ingress_commitments( + &self.block, + &self.service_inner.db, + ) { + tracing::warn!(error = %e, "Failed to store blob ingress commitments"); + } + ValidationResult::Valid } Err(err) => { @@ -599,6 +629,7 @@ impl BlockValidationTask { &seeds_validation_result, &commitment_ordering_result, &data_txs_result, + &custody_proofs_result, ] .into_iter() .find_map(|r| match r { diff --git a/crates/chain/src/chain.rs b/crates/chain/src/chain.rs index 76e4ebb31e..87288013cb 100644 --- a/crates/chain/src/chain.rs +++ b/crates/chain/src/chain.rs @@ -1615,6 +1615,7 @@ impl IrysNode { config.clone(), // clone: Config is Arc-wrapped internally storage_modules_guard.clone(), // clone: Arc-based read guard service_senders.gossip_broadcast.clone(), // clone: UnboundedSender is cheaply cloneable + irys_db.clone(), // clone: DatabaseProvider is Arc-wrapped receivers.custody_proof, runtime_handle.clone(), ); diff --git a/crates/p2p/src/block_pool.rs b/crates/p2p/src/block_pool.rs index fb7e59f84c..3d4c7ed0a4 100644 --- a/crates/p2p/src/block_pool.rs +++ b/crates/p2p/src/block_pool.rs @@ -1234,6 +1234,148 @@ where } } +/// Order pre-fetched transactions into BlockTransactions structure. +/// +/// Caller is responsible for providing ALL required transactions. +/// This function only handles ordering them correctly per ledger. +/// Transactions are returned in the exact order specified in the block header, +/// which is critical for commitment transaction validation (e.g., stake must come before pledge). 
+pub(crate) fn order_transactions_for_block( + block_header: &IrysBlockHeader, + data_txs: Vec, + commitment_txs: Vec, +) -> Result { + use std::collections::HashMap; + + // Extract required IDs from block header (preserving order) + // Use ledger_id-based lookup to avoid relying on vector ordering + let submit_ids: Vec = block_header + .data_ledgers + .iter() + .find(|l| l.ledger_id == DataLedger::Submit as u32) + .map(|l| l.tx_ids.0.clone()) + .unwrap_or_default(); + + let publish_ids: Vec = block_header + .data_ledgers + .iter() + .find(|l| l.ledger_id == DataLedger::Publish as u32) + .map(|l| l.tx_ids.0.clone()) + .unwrap_or_default(); + + let commitment_ids: Vec = block_header + .system_ledgers + .iter() + .find(|l| l.ledger_id == SystemLedger::Commitment as u32) + .map(|l| l.tx_ids.0.clone()) + .unwrap_or_default(); + + // Create sets for quick lookup + let submit_ids_set: HashSet = submit_ids.iter().copied().collect(); + let publish_ids_set: HashSet = publish_ids.iter().copied().collect(); + + // Collect transactions into maps by ID + let mut submit_txs_map: HashMap = HashMap::new(); + let mut publish_txs_map: HashMap = HashMap::new(); + let mut commitment_txs_map: HashMap = HashMap::new(); + + for data_tx in data_txs { + // Note: A tx can be in both submit and publish ledgers (published after submission) + // so we check both independently and clone if needed for both + let in_submit = submit_ids_set.contains(&data_tx.id); + let in_publish = publish_ids_set.contains(&data_tx.id); + + if in_submit && in_publish { + submit_txs_map.insert(data_tx.id, data_tx.clone()); + publish_txs_map.insert(data_tx.id, data_tx); + } else if in_submit { + submit_txs_map.insert(data_tx.id, data_tx); + } else if in_publish { + publish_txs_map.insert(data_tx.id, data_tx); + } + } + + for commitment_tx in commitment_txs { + commitment_txs_map.insert(commitment_tx.id(), commitment_tx); + } + + // Build final vectors in the exact order specified by block header + let submit_txs: 
Vec<_> = submit_ids + .iter() + .filter_map(|id| submit_txs_map.remove(id)) + .collect(); + + let publish_txs: Vec<_> = publish_ids + .iter() + .filter_map(|id| publish_txs_map.remove(id)) + .collect(); + + let commitment_txs: Vec<_> = commitment_ids + .iter() + .filter_map(|id| commitment_txs_map.remove(id)) + .collect(); + + // Validate header/body consistency: check that resolved counts match expected counts + if submit_txs.len() != submit_ids.len() { + let missing_ids: Vec = submit_ids + .iter() + .filter(|id| !submit_txs.iter().any(|tx| &tx.id == *id)) + .copied() + .collect(); + return Err(BlockPoolError::Critical( + CriticalBlockPoolError::HeaderBodyMismatch { + block_hash: block_header.block_hash, + ledger: "submit".to_string(), + expected: submit_ids.len(), + found: submit_txs.len(), + missing_ids, + }, + )); + } + + if publish_txs.len() != publish_ids.len() { + let missing_ids: Vec = publish_ids + .iter() + .filter(|id| !publish_txs.iter().any(|tx| &tx.id == *id)) + .copied() + .collect(); + return Err(BlockPoolError::Critical( + CriticalBlockPoolError::HeaderBodyMismatch { + block_hash: block_header.block_hash, + ledger: "publish".to_string(), + expected: publish_ids.len(), + found: publish_txs.len(), + missing_ids, + }, + )); + } + + if commitment_txs.len() != commitment_ids.len() { + let missing_ids: Vec = commitment_ids + .iter() + .filter(|id| !commitment_txs.iter().any(|tx| &tx.id() == *id)) + .copied() + .collect(); + return Err(BlockPoolError::Critical( + CriticalBlockPoolError::HeaderBodyMismatch { + block_hash: block_header.block_hash, + ledger: "commitment".to_string(), + expected: commitment_ids.len(), + found: commitment_txs.len(), + missing_ids, + }, + )); + } + + Ok(BlockTransactions { + commitment_txs, + data_txs: HashMap::from([ + (DataLedger::Submit, submit_txs), + (DataLedger::Publish, publish_txs), + ]), + custody_proofs: Vec::new(), + }) +} fn check_block_status( block_status_provider: &BlockStatusProvider, block_hash: BlockHash, diff 
--git a/crates/p2p/src/cache.rs b/crates/p2p/src/cache.rs index 68ea783fdc..04ada30f10 100644 --- a/crates/p2p/src/cache.rs +++ b/crates/p2p/src/cache.rs @@ -70,6 +70,13 @@ impl GossipCache { Ok(self.ingress_proofs.contains_key(ingress_proof_hash)) } + pub(crate) fn seen_custody_proof_from_any_peer( + &self, + partition_hash: &H256, + ) -> GossipResult { + Ok(self.custody_proofs.contains_key(partition_hash)) + } + /// Record that a peer has seen some data /// /// # Errors diff --git a/crates/p2p/src/gossip_client.rs b/crates/p2p/src/gossip_client.rs index 0eb53cae36..30d59eca7d 100644 --- a/crates/p2p/src/gossip_client.rs +++ b/crates/p2p/src/gossip_client.rs @@ -310,6 +310,7 @@ impl GossipClient { block_hash: header.block_hash, data_transactions, commitment_transactions, + custody_proofs: Vec::new(), }; Ok(GossipResponse::Accepted(Some(GossipDataV2::BlockBody( diff --git a/crates/p2p/src/gossip_data_handler.rs b/crates/p2p/src/gossip_data_handler.rs index 6227c7fb9c..a9279e05a0 100644 --- a/crates/p2p/src/gossip_data_handler.rs +++ b/crates/p2p/src/gossip_data_handler.rs @@ -84,6 +84,8 @@ where /// Precomputed hash of the consensus config to avoid recomputing on every handshake pub consensus_config_hash: H256, pub runtime_handle: tokio::runtime::Handle, + pub custody_proof_sender: + tokio::sync::mpsc::UnboundedSender, } impl Clone for GossipDataHandler @@ -108,6 +110,7 @@ where started_at: self.started_at, consensus_config_hash: self.consensus_config_hash, runtime_handle: self.runtime_handle.clone(), + custody_proof_sender: self.custody_proof_sender.clone(), } } } diff --git a/crates/p2p/src/gossip_service.rs b/crates/p2p/src/gossip_service.rs index 857d54e323..ec8762f503 100644 --- a/crates/p2p/src/gossip_service.rs +++ b/crates/p2p/src/gossip_service.rs @@ -194,6 +194,7 @@ impl P2PService { &service_senders, ); + let custody_proof_sender = service_senders.custody_proof.clone(); // clone: extract before move into BlockPool let block_pool = BlockPool::new( db, 
block_discovery, @@ -227,6 +228,7 @@ impl P2PService { started_at, consensus_config_hash, runtime_handle: self.runtime_handle.clone(), + custody_proof_sender, }); let server = GossipServer::new( Arc::clone(&gossip_data_handler), diff --git a/crates/p2p/src/server.rs b/crates/p2p/src/server.rs index e4a18dc093..d977a7ecbd 100644 --- a/crates/p2p/src/server.rs +++ b/crates/p2p/src/server.rs @@ -953,6 +953,19 @@ where server.peer_list.set_is_online(&source_miner_address, true); let cache_key = irys_types::GossipCacheKey::CustodyProof(v2_request.data.partition_hash); + let already_seen = server + .data_handler + .cache + .seen_custody_proof_from_any_peer(&v2_request.data.partition_hash); + + if matches!(already_seen, Ok(true)) { + debug!( + partition.hash = %v2_request.data.partition_hash, + "Custody proof already seen, skipping", + ); + return HttpResponse::Ok().json(GossipResponse::Accepted(())); + } + if let Err(e) = server .data_handler .cache @@ -963,9 +976,18 @@ where debug!( partition.hash = %v2_request.data.partition_hash, - "Received custody proof via gossip", + "Received custody proof via gossip, forwarding to custody service", ); + use irys_actors::custody_proof_service::CustodyProofMessage; + if let Err(e) = server + .data_handler + .custody_proof_sender + .send(CustodyProofMessage::ReceivedProof(v2_request.data)) + { + warn!(error = %e, "Failed to forward custody proof to service"); + } + HttpResponse::Ok().json(GossipResponse::Accepted(())) } diff --git a/crates/p2p/src/tests/util.rs b/crates/p2p/src/tests/util.rs index c93b1340ca..52b858aee4 100644 --- a/crates/p2p/src/tests/util.rs +++ b/crates/p2p/src/tests/util.rs @@ -1000,6 +1000,7 @@ pub(crate) fn data_handler_stub( irys_actors::chunk_ingress_service::facade::ChunkIngressFacadeImpl::from(&service_senders); // Keep the chunk_ingress receiver alive so the channel remains open. 
spawn_test_chunk_ingress_consumer(service_receivers.chunk_ingress, None); + let custody_proof_sender = service_senders.custody_proof.clone(); // clone: extract before move into BlockPool let block_pool_stub = Arc::new(BlockPool::new( db, block_discovery_stub, @@ -1043,6 +1044,7 @@ pub(crate) fn data_handler_stub( started_at: std::time::Instant::now(), consensus_config_hash, runtime_handle: tokio::runtime::Handle::current(), + custody_proof_sender, }) } @@ -1076,6 +1078,10 @@ pub(crate) fn data_handler_with_stubbed_pool( // Keep the chunk_ingress receiver alive so the channel remains open. spawn_test_chunk_ingress_consumer(service_receivers.chunk_ingress, None); let consensus_config_hash = config.consensus.keccak256_hash(); + let custody_proof_sender = { + let (tx, _rx) = tokio::sync::mpsc::unbounded_channel(); + tx + }; Arc::new(GossipDataHandler { mempool: mempool_stub, chunk_ingress, @@ -1098,6 +1104,7 @@ pub(crate) fn data_handler_with_stubbed_pool( started_at: std::time::Instant::now(), consensus_config_hash, runtime_handle: tokio::runtime::Handle::current(), + custody_proof_sender, }) } diff --git a/crates/types/src/block.rs b/crates/types/src/block.rs index df24e877fd..b7bfdda90c 100644 --- a/crates/types/src/block.rs +++ b/crates/types/src/block.rs @@ -1098,6 +1098,9 @@ pub struct BlockTransactions { pub system_txs: HashMap>, /// Data transactions organized by ledger type pub data_txs: HashMap>, + /// Custody proofs included in this block + #[serde(default)] + pub custody_proofs: Vec, } impl BlockTransactions { @@ -1133,6 +1136,8 @@ pub struct BlockBody { pub block_hash: BlockHash, pub data_transactions: Vec, pub commitment_transactions: Vec, + #[serde(default)] + pub custody_proofs: Vec, } impl BlockBody { diff --git a/crates/types/src/config/consensus.rs b/crates/types/src/config/consensus.rs index 06b94aac97..e7d91476ff 100644 --- a/crates/types/src/config/consensus.rs +++ b/crates/types/src/config/consensus.rs @@ -495,19 +495,16 @@ impl ConsensusConfig { 
/// Enforce logical implications between KZG/blob config flags. /// Call before wrapping in `Arc` to fix contradictions early. pub fn normalize(&mut self) { - for flag_name in [ - "enable_blobs", - "require_kzg_ingress_proofs", - "use_kzg_ingress_proofs", - "enable_custody_proofs", - ] { - let flag_set = match flag_name { - "enable_blobs" => self.enable_blobs, - "require_kzg_ingress_proofs" => self.require_kzg_ingress_proofs, - "use_kzg_ingress_proofs" => self.use_kzg_ingress_proofs, - "enable_custody_proofs" => self.enable_custody_proofs, - _ => unreachable!(), - }; + let dependents: &[(&str, bool)] = &[ + ("enable_blobs", self.enable_blobs), + ( + "require_kzg_ingress_proofs", + self.require_kzg_ingress_proofs, + ), + ("use_kzg_ingress_proofs", self.use_kzg_ingress_proofs), + ("enable_custody_proofs", self.enable_custody_proofs), + ]; + for &(flag_name, flag_set) in dependents { if flag_set && !self.accept_kzg_ingress_proofs { tracing::warn!( "{flag_name}=true requires accept_kzg_ingress_proofs=true, auto-enabling" diff --git a/crates/types/src/ingress.rs b/crates/types/src/ingress.rs index 38d0ad697e..08230ec0ff 100644 --- a/crates/types/src/ingress.rs +++ b/crates/types/src/ingress.rs @@ -200,8 +200,16 @@ pub enum DataSourceType { EvmBlob = 1, } -impl DataSourceType { - pub fn from_u8(val: u8) -> eyre::Result { +impl From for u8 { + fn from(val: DataSourceType) -> Self { + val as Self // safe: #[repr(u8)] + } +} + +impl TryFrom for DataSourceType { + type Error = eyre::Report; + + fn try_from(val: u8) -> eyre::Result { match val { 0 => Ok(Self::NativeData), 1 => Ok(Self::EvmBlob), @@ -212,20 +220,20 @@ impl DataSourceType { impl<'a> Arbitrary<'a> for DataSourceType { fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - Self::from_u8(u.int_in_range(0..=1)?).map_err(|_| arbitrary::Error::IncorrectFormat) + Self::try_from(u.int_in_range(0..=1)?).map_err(|_| arbitrary::Error::IncorrectFormat) } } impl Compact for DataSourceType { fn 
to_compact>(&self, buf: &mut B) -> usize { - buf.put_u8(*self as u8); + buf.put_u8(u8::from(*self)); 1 } fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { // Compact deserialization: default to NativeData for forward compatibility // with unknown discriminants in stored data - let source = Self::from_u8(buf[0]).unwrap_or_default(); + let source = Self::try_from(buf[0]).unwrap_or_default(); (source, &buf[1..]) } } @@ -305,7 +313,7 @@ impl alloy_rlp::Encodable for IngressProofV2 { + self.composite_commitment.length() + self.chain_id.length() + self.anchor.length() - + (self.source_type as u8).length(), + + u8::from(self.source_type).length(), }; header.encode(out); self.data_root.encode(out); @@ -313,7 +321,7 @@ impl alloy_rlp::Encodable for IngressProofV2 { self.composite_commitment.encode(out); self.chain_id.encode(out); self.anchor.encode(out); - (self.source_type as u8).encode(out); + u8::from(self.source_type).encode(out); } } @@ -336,7 +344,7 @@ impl alloy_rlp::Decodable for IngressProofV2 { composite_commitment, chain_id, anchor, - source_type: DataSourceType::from_u8(source_type_u8) + source_type: DataSourceType::try_from(source_type_u8) .map_err(|_| alloy_rlp::Error::Custom("unknown DataSourceType discriminant"))?, }) } @@ -413,19 +421,14 @@ pub fn generate_ingress_proof_v2( let per_chunk_bytes: Vec = chunk_commitments .iter() .map(|c| { - let bytes: [u8; 48] = c - .as_ref() - .try_into() - .map_err(|_| eyre::eyre!("KZG commitment is not 48 bytes"))?; - Ok(KzgCommitmentBytes::from(bytes)) + Ok(KzgCommitmentBytes::from(crate::kzg::commitment_to_bytes( + c, + )?)) }) .collect::>>()?; let aggregated = aggregate_all_commitments(&chunk_commitments)?; - let kzg_bytes: [u8; 48] = aggregated - .as_ref() - .try_into() - .map_err(|_| eyre::eyre!("KZG commitment is not 48 bytes"))?; + let kzg_bytes = crate::kzg::commitment_to_bytes(&aggregated)?; let composite = compute_composite_commitment(&kzg_bytes, &signer.address()); @@ -456,16 +459,9 @@ pub fn 
generate_ingress_proof_v2_from_blob( chain_id: u64, anchor: H256, ) -> eyre::Result { - use crate::kzg::{compute_composite_commitment, KzgCommitmentBytes, CHUNK_SIZE_FOR_KZG}; - - eyre::ensure!( - blob_data.len() <= CHUNK_SIZE_FOR_KZG, - "blob data exceeds max chunk size ({})", - CHUNK_SIZE_FOR_KZG, - ); + use crate::kzg::{compute_composite_commitment, KzgCommitmentBytes}; - let mut padded = vec![0_u8; CHUNK_SIZE_FOR_KZG]; - padded[..blob_data.len()].copy_from_slice(blob_data); + let padded = crate::kzg::zero_pad_to_chunk_size(blob_data)?; // Use regular leaves (without signer) for data_root — consistent with native V2 path let (_, regular_leaves) = generate_ingress_leaves( @@ -546,10 +542,7 @@ pub fn verify_ingress_proof>( .collect::>>()?; let aggregated = crate::kzg::aggregate_all_commitments(&chunk_commitments)?; - let kzg_bytes: [u8; 48] = aggregated - .as_ref() - .try_into() - .map_err(|_| eyre::eyre!("KZG commitment is not 48 bytes"))?; + let kzg_bytes = crate::kzg::commitment_to_bytes(&aggregated)?; if kzg_bytes != v2.kzg_commitment.0 { return Ok(false); @@ -965,10 +958,7 @@ mod tests { _ => panic!("expected V2 proof"), } - // Verify with the zero-padded chunk (256KB) — the verifier recomputes - // the KZG commitment from the provided chunks - let mut padded = vec![0_u8; crate::kzg::CHUNK_SIZE_FOR_KZG]; - padded[..blob_data.len()].copy_from_slice(&blob_data); + let padded = crate::kzg::zero_pad_to_chunk_size(&blob_data).unwrap(); assert!(verify_ingress_proof(&proof, [padded.as_slice()], chain_id)?); @@ -996,8 +986,7 @@ mod tests { // Verify with different data (wrong fill value) — should fail let bad_blob = kzg_safe_data(131_072, 7); - let mut bad_padded = vec![0_u8; crate::kzg::CHUNK_SIZE_FOR_KZG]; - bad_padded[..bad_blob.len()].copy_from_slice(&bad_blob); + let bad_padded = crate::kzg::zero_pad_to_chunk_size(&bad_blob).unwrap(); assert!(!verify_ingress_proof( &proof, @@ -1026,8 +1015,7 @@ mod tests { H256::random(), )?; - let mut padded = vec![0_u8; 
crate::kzg::CHUNK_SIZE_FOR_KZG]; - padded[..blob_data.len()].copy_from_slice(&blob_data); + let padded = crate::kzg::zero_pad_to_chunk_size(&blob_data).unwrap(); // Verify with wrong chain_id — should fail assert!(!verify_ingress_proof(&proof, [padded.as_slice()], 2)?); diff --git a/crates/types/src/kzg.rs b/crates/types/src/kzg.rs index 7e2c50eb8f..ab5bcc0fab 100644 --- a/crates/types/src/kzg.rs +++ b/crates/types/src/kzg.rs @@ -142,8 +142,21 @@ impl arbitrary::Arbitrary<'_> for PerChunkCommitment { } } +/// Returns a reference to the lazily-initialized Ethereum KZG trusted setup. +/// +/// The trusted setup (~50MB) overflows the default 8MB thread stack, so +/// initialization is performed on a dedicated thread with a 64MB stack. pub fn default_kzg_settings() -> &'static KzgSettings { - EnvKzgSettings::Default.get() + static SETTINGS: std::sync::OnceLock<&'static KzgSettings> = std::sync::OnceLock::new(); + SETTINGS.get_or_init(|| { + std::thread::Builder::new() + .name("kzg-setup".into()) + .stack_size(64 * 1024 * 1024) + .spawn(|| EnvKzgSettings::Default.get()) + .expect("failed to spawn KZG setup thread") + .join() + .expect("KZG setup thread panicked") + }) } /// Compute a KZG commitment for a single 128KB blob (4096 field elements). @@ -183,8 +196,10 @@ pub fn aggregate_commitments( Ok(KzgCommitment::from(compressed)) } -/// Zero-pad chunk data and split into two `BLOB_SIZE` halves. -fn pad_and_split_chunk(chunk_data: &[u8]) -> eyre::Result<([u8; BLOB_SIZE], [u8; BLOB_SIZE])> { +/// Zero-pad chunk data and split into two heap-allocated `BLOB_SIZE` halves. 
+fn pad_and_split_chunk( + chunk_data: &[u8], +) -> eyre::Result<(Box<[u8; BLOB_SIZE]>, Box<[u8; BLOB_SIZE]>)> { if chunk_data.len() > CHUNK_SIZE_FOR_KZG { return Err(eyre::eyre!( "chunk data too large: {} bytes (max {})", @@ -193,18 +208,24 @@ fn pad_and_split_chunk(chunk_data: &[u8]) -> eyre::Result<([u8; BLOB_SIZE], [u8; )); } - let mut padded = [0_u8; CHUNK_SIZE_FOR_KZG]; - padded[..chunk_data.len()].copy_from_slice(chunk_data); + let mut first_vec = vec![0_u8; BLOB_SIZE]; + let mut second_vec = vec![0_u8; BLOB_SIZE]; + + let split = chunk_data.len().min(BLOB_SIZE); + first_vec[..split].copy_from_slice(&chunk_data[..split]); + if chunk_data.len() > BLOB_SIZE { + second_vec[..chunk_data.len() - BLOB_SIZE].copy_from_slice(&chunk_data[BLOB_SIZE..]); + } - // split_at at BLOB_SIZE on a CHUNK_SIZE_FOR_KZG (2*BLOB_SIZE) array - // always yields two BLOB_SIZE slices, so try_into is infallible here. - let (first, second) = padded.split_at(BLOB_SIZE); - let first: [u8; BLOB_SIZE] = first + let first: Box<[u8; BLOB_SIZE]> = first_vec + .into_boxed_slice() .try_into() .map_err(|_| eyre::eyre!("split invariant"))?; - let second: [u8; BLOB_SIZE] = second + let second: Box<[u8; BLOB_SIZE]> = second_vec + .into_boxed_slice() .try_into() .map_err(|_| eyre::eyre!("split invariant"))?; + Ok((first, second)) } @@ -260,6 +281,26 @@ pub fn compute_composite_commitment( H256(hasher.finish()) } +/// Convert a [`KzgCommitment`] to a fixed-size byte array. +pub fn commitment_to_bytes(c: &KzgCommitment) -> eyre::Result<[u8; COMMITMENT_SIZE]> { + c.as_ref() + .try_into() + .map_err(|_| eyre::eyre!("KZG commitment is not 48 bytes")) +} + +/// Zero-pad data to [`CHUNK_SIZE_FOR_KZG`] bytes. 
+pub fn zero_pad_to_chunk_size(data: &[u8]) -> eyre::Result> { + eyre::ensure!( + data.len() <= CHUNK_SIZE_FOR_KZG, + "data exceeds chunk size: {} > {}", + data.len(), + CHUNK_SIZE_FOR_KZG, + ); + let mut padded = vec![0_u8; CHUNK_SIZE_FOR_KZG]; + padded[..data.len()].copy_from_slice(data); + Ok(padded) +} + // SAFETY for all blst FFI calls in this module: All blst types are initialized via // `default()` or `from_bytes()`. Buffer sizes are guaranteed by Rust's type system // (fixed-size arrays). Affine points are validated by `PublicKey::from_bytes` before @@ -360,11 +401,15 @@ pub fn compute_chunk_opening_proof( ) -> eyre::Result<([u8; PROOF_SIZE], [u8; SCALAR_SIZE])> { let (first_half, second_half) = pad_and_split_chunk(chunk_data)?; - let blob1 = Blob::new(first_half); - let blob2 = Blob::new(second_half); + let blob1 = Blob::new(*first_half); + let blob2 = Blob::new(*second_half); - let c1 = compute_blob_commitment(&first_half, settings)?; - let c2 = compute_blob_commitment(&second_half, settings)?; + let c1 = settings + .blob_to_kzg_commitment(&blob1) + .map_err(|e| eyre::eyre!("KZG commitment failed for first half: {e}"))?; + let c2 = settings + .blob_to_kzg_commitment(&blob2) + .map_err(|e| eyre::eyre!("KZG commitment failed for second half: {e}"))?; let mut hasher = sha::Sha256::new(); hasher.update(c1.as_ref()); From a118d3f10c63950ae7e076f0a89481b22b2a55a8 Mon Sep 17 00:00:00 2001 From: jason Date: Fri, 27 Feb 2026 15:08:52 +0000 Subject: [PATCH 13/13] docs: add KZG proofs primer --- KZG_PROOFS.md | 665 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 665 insertions(+) create mode 100644 KZG_PROOFS.md diff --git a/KZG_PROOFS.md b/KZG_PROOFS.md new file mode 100644 index 0000000000..0a95f387cf --- /dev/null +++ b/KZG_PROOFS.md @@ -0,0 +1,665 @@ +# KZG Commitments, Ingress Proofs, Custody Proofs, and Blobs + +A technical reference for engineers working on the Irys protocol. + +--- + +## Table of Contents + +1. 
[Introduction and Motivation](#1-introduction-and-motivation) +2. [KZG Commitments — Conceptual Primer](#2-kzg-commitments--conceptual-primer) +3. [Irys Chunks and KZG Blobs](#3-irys-chunks-and-kzg-blobs) +4. [Ingress Proofs — V1 and V2](#4-ingress-proofs--v1-and-v2) +5. [Ingress Proof Lifecycle](#5-ingress-proof-lifecycle) +6. [EIP-4844 Blob Extraction](#6-eip-4844-blob-extraction) +7. [Custody Proofs](#7-custody-proofs) +8. [Configuration and Rollout](#8-configuration-and-rollout) +9. [Data Flow Diagrams](#9-data-flow-diagrams) +10. [Glossary](#10-glossary) + +--- + +## 1. Introduction and Motivation + +Irys is a decentralised data storage network. Miners store 256 KB data chunks, packed with entropy such that mining is data-dependent (Proof of Access). When a user submits a transaction, miners generate **ingress proofs** — cryptographic attestations confirming receipt and storage of the data. + +### Why move beyond SHA-256 Merkle proofs? + +The original ingress proof (V1) is a SHA-256 Merkle tree root. It demonstrates that all chunks were hashed together to produce that root, but it provides no mechanism to subsequently challenge a miner about *individual* chunks without re-downloading the entire transaction. + +KZG commitments address this limitation. A KZG commitment is a compact fingerprint of data that supports **point evaluation**: a verifier may select any position in the data and require the prover to reveal the value at that position, together with a short proof that the value is consistent with the original commitment. The prover cannot fabricate a valid response without breaking a hard cryptographic assumption. + +### Why KZG specifically? + +- **Established implementation with no custom trusted setup** — KZG commitments are already employed in Ethereum (EIP-4844) and are built into Reth. 
Irys reuses the same `c-kzg` library and the Ethereum trusted setup ceremony data, thereby avoiding the need to conduct a custom ceremony or maintain a separate cryptographic library. +- **Verification without the original data** — A KZG commitment can be verified against a claimed value at any evaluation point using only the commitment and the opening proof. The verifier never requires access to the original data. This property is what makes custody proofs feasible: a validator can confirm that a miner holds a specific chunk without downloading it. + +These properties enable two capabilities: + +- **Custody proofs** — Challenge a miner to demonstrate continued storage of specific chunks at random positions, without downloading any data. +- **EIP-4844 blob support** — Ethereum blobs arrive with KZG commitments already attached, permitting Irys to ingest them natively. + +### The three subsystems + +``` + ┌─────────────────┐ + User Transaction ──> │ Ingress Proofs │ ── V1 (Merkle) or V2 (KZG) + └────────┬────────┘ + │ V2 stores per-chunk commitments + v + ┌─────────────────┐ + VDF Challenge ────> │ Custody Proofs │ ── "prove you still hold chunk N" + └─────────────────┘ + + EIP-4844 Blob ────> ┌─────────────────┐ + (from Ethereum) │ Blob Extraction │ ── converts blobs to Irys transactions + └─────────────────┘ +``` + +--- + +## 2. KZG Commitments — Conceptual Primer + +> This section contains no code — only the underlying concepts. + +### Polynomials as data containers + +Consider a list of N numbers representing your data. One may construct a polynomial p(x) of degree N−1 that passes through all of them: p(0) = data[0], p(1) = data[1], …, p(N−1) = data[N−1]. This polynomial *is* the data, merely expressed in a different form. + +### Committing to a polynomial + +A **KZG commitment** is a single elliptic curve point that serves as a fingerprint of a polynomial. 
Given a polynomial p(x), the commitment is computed as: + +``` +C = p(s) · G +``` + +where `s` is a secret number from a one-time trusted setup ceremony and `G` is a generator point on the BLS12-381 elliptic curve. No party knows `s` — it was destroyed after the ceremony — but the ceremony produced precomputed "powers of s" that permit anyone to commit without knowledge of `s` itself. + +The commitment `C` is 48 bytes (a compressed BLS12-381 G1 point). + +### Opening proofs (point evaluation) + +Given commitment `C`, a verifier selects a challenge point `z` and requests: "What is p(z)?" + +The prover responds with: + +- `y = p(z)` — the evaluation value (32 bytes) +- `π` — an opening proof (48 bytes) + +The verifier checks the proof using a pairing equation on the elliptic curve. If the proof is valid, the verifier is satisfied that `p(z) = y` for the polynomial committed by `C`, without having seen the polynomial itself. + +This is the core property upon which custody proofs depend. + +### The BLS12-381 curve + +KZG employs the BLS12-381 elliptic curve. Its scalar field has a modulus beginning with `0x73eda753…`. When converting data bytes into field elements (the "numbers" through which the polynomial passes), each 32-byte element must be numerically less than the field modulus. Since the modulus begins with `0x73` (115), any element whose first byte is ≥ `0x74` (116) is guaranteed to exceed it. Elements beginning with `0x73` may or may not exceed it depending on subsequent bytes. For uniform-fill blobs in tests, the safe upper bound for the fill byte is 114 (`MAX_VALID_SEED` in `kzg.rs`). In practice, the `c-kzg` library handles this encoding internally for blob data. + +### Trusted setup + +The precomputed ceremony data is loaded once and cached as a static reference (`&'static KzgSettings`). It originates from Ethereum's KZG ceremony (the same one used for EIP-4844). 
In the codebase, it is accessed via `default_kzg_settings()`, which lazily initialises from `EnvKzgSettings::Default.get()`. + +--- + +## 3. Irys Chunks and KZG Blobs + +### The size disparity + +| Concept | Size | +|---------|------| +| Irys chunk | 256 KB (`CHUNK_SIZE_FOR_KZG = 262_144`) | +| EIP-4844 blob | 128 KB (`BLOB_SIZE = 131_072`) | +| KZG commitment | 48 bytes (`COMMITMENT_SIZE`) | +| KZG opening proof | 48 bytes (`PROOF_SIZE`) | +| Field element / scalar | 32 bytes (`SCALAR_SIZE`) | + +An Irys chunk is twice the size of a KZG blob. The solution is to **split each chunk into two blob-sized halves**. + +### How a chunk becomes a commitment + +``` + 256 KB chunk (zero-padded if shorter) + ┌────────────────────────────────────┐ + │ first 128 KB │ second 128 KB │ + └────────┬────────┴────────┬─────────┘ + │ │ + blob_to_kzg_commitment blob_to_kzg_commitment + │ │ + v v + C1 C2 + │ │ + └──────┬──────────┘ + │ + r = SHA256(C1 || C2) + C = C1 + r·C2 + │ + v + Single chunk commitment (48 bytes) +``` + +**Step by step:** + +1. **Pad** — If the chunk is shorter than 256 KB, zero-pad it to exactly 256 KB. +2. **Split** — Divide at the 128 KB boundary into two halves. +3. **Commit each half** — Each half is treated as an EIP-4844 blob and committed separately using `blob_to_kzg_commitment` from the `c-kzg` library. +4. **Aggregate** — Combine `C1` and `C2` into a single commitment using a random linear combination: compute `r = SHA256(C1 || C2)`, then `C = C1 + r·C2`. + +The aggregation derives `r` from the commitments themselves so that no party can craft two different pairs of halves that produce the same aggregated commitment (by the Schwartz–Zippel lemma, the collision probability is negligible over the BLS12-381 scalar field). + +### Multi-chunk transactions + +A transaction may comprise many chunks. 
Each chunk receives its own commitment as described above, then all chunk commitments are aggregated into a **single transaction-level commitment** via iterative pairwise aggregation: + +``` +C_tx = aggregate(aggregate(aggregate(C_chunk0, C_chunk1), C_chunk2), C_chunk3) +``` + +This is left-associative — **ordering matters**. + +### Code references + +| Function | File | Purpose | +|----------|------|---------| +| `pad_and_split_chunk` (private) | `crates/types/src/kzg.rs` | Zero-pad and split into two halves | +| `compute_blob_commitment` | `crates/types/src/kzg.rs` | KZG commitment for one 128 KB blob | +| `compute_chunk_commitment` | `crates/types/src/kzg.rs` | Full pipeline: pad → split → commit → aggregate | +| `aggregate_commitments` | `crates/types/src/kzg.rs` | C = C1 + r·C2 for two commitments | +| `aggregate_all_commitments` | `crates/types/src/kzg.rs` | Iterative pairwise aggregation of N commitments | +| `g1_add_scaled` | `crates/types/src/kzg.rs` | Low-level BLS12-381 G1 point arithmetic via blst FFI | + +--- + +## 4. Ingress Proofs — V1 and V2 + +An **ingress proof** is a signed attestation from a miner confirming receipt and storage of a transaction's data. It is included in blocks and gossiped across the network. + +### V1 — SHA-256 Merkle Proof (legacy) + +```rust +// crates/types/src/ingress.rs +pub struct IngressProofV1 { + pub signature: IrysSignature, // excluded from RLP (recomputed during verification) + pub data_root: H256, // Merkle root of signer-dependent ingress leaves + pub proof: H256, // Merkle tree node ID + pub chain_id: u64, // replay protection + pub anchor: H256, // block hash for expiry +} +``` + +The `data_root` is computed from **signer-dependent leaves** — the Merkle tree includes the signer's address in each leaf hash. This binds the proof to a specific miner but provides no mechanism to query individual chunks. 
+ +### V2 — KZG Commitment + +```rust +// crates/types/src/ingress.rs +pub struct IngressProofV2 { + pub signature: IrysSignature, + pub data_root: H256, // Merkle root of regular leaves (signer-independent) + pub kzg_commitment: KzgCommitmentBytes, // aggregated KZG commitment over all chunks (48 bytes) + pub composite_commitment: H256, // SHA256(DOMAIN || kzg || signer_address) + pub chain_id: u64, // replay protection + pub anchor: H256, // block hash for expiry + pub source_type: DataSourceType, // NativeData(0) or EvmBlob(1) +} +``` + +Key differences from V1: + +| Aspect | V1 | V2 | +|--------|----|----| +| Data fingerprint | SHA-256 Merkle root | KZG commitment (elliptic curve point) | +| Point evaluation | Not possible | Supported — enables custody proofs | +| Signer binding | Incorporated into data_root leaves | Separate composite_commitment field | +| Data source tracking | Not applicable | NativeData or EvmBlob | +| data_root computation | Signer-dependent leaves | Regular (signer-independent) leaves | + +### Composite commitment — rationale + +The KZG commitment `C` depends solely on the data, not on who computed it. Two miners storing the same transaction would produce identical KZG commitments. Without an additional binding step, one miner could replicate another's commitment. + +The **composite commitment** prevents this: + +``` +composite = SHA256("IRYS_KZG_INGRESS_V1" || kzg_commitment || signer_address) +``` + +The domain separator (`IRYS_KZG_INGRESS_V1`) prevents cross-protocol confusion. The signer address binds the commitment to a specific miner. Together, they ensure that each miner's proof is unique even for identical data. + +### Version gating + +The `IngressProof` enum employs the `IntegerTagged` macro for versioning — V1 carries discriminant 1, V2 carries discriminant 2. 
Acceptance is governed by configuration flags:

```
check_version_accepted(accept_kzg, require_kzg):
  - V2 proof + !accept_kzg → rejected
  - V1 proof + require_kzg → rejected
  - otherwise → accepted
```

---

## 5. Ingress Proof Lifecycle

### Generation

When a miner receives a new data transaction, it generates an ingress proof:

```
crates/actors/src/chunk_ingress_service/ingress_proofs.rs (orchestration)
    │
    └─> crates/actors/src/chunk_ingress_service/chunks.rs (core logic)
            │
            └─> crates/types/src/ingress.rs (proof construction)
                    │
                    └─> crates/types/src/kzg.rs (KZG primitives)
```

**Detailed flow (`generate_ingress_proof` in `chunks.rs`):**

1. **Collect chunks** — Read all chunks for the transaction from the cache database (`CachedChunksIndex` table), verifying uniqueness and ordering.
2. **Branch on configuration** — If `use_kzg_ingress_proofs` is true, generate V2; otherwise V1.
3. **V2 path**:
   - Compute per-chunk KZG commitments (each chunk → `compute_chunk_commitment`).
   - Aggregate all chunk commitments → single `kzg_commitment`.
   - Compute `composite_commitment` binding to the signer.
   - Construct `IngressProofV2` and sign it.
4. **Store** — Write the proof and per-chunk commitments to the database.

### Storage

Two database tables are involved:

| Table | Key | Value | Purpose |
|-------|-----|-------|---------|
| `IngressProofs` | (DataRoot, IrysAddress) | CompactCachedIngressProof | Store the complete proof |
| `PerChunkKzgCommitments` | (DataRoot, chunk_index) | CompactPerChunkCommitment (wraps `PerChunkCommitment { chunk_index, commitment: KzgCommitmentBytes }`) | Store individual chunk commitments for subsequent custody verification |

The per-chunk commitments are stored separately because custody proofs require the ability to look up the commitment for a specific chunk by index, not the aggregated commitment. 
+ +### Gossip + +Following generation, the proof is broadcast to the network: + +``` +gossip_ingress_proof() + │ + └─> GossipBroadcastMessageV2 { + key: GossipCacheKey::IngressProof(proof.proof_id()), + data: GossipDataV2::IngressProof(proof) + } +``` + +The `proof_id()` method is used for deduplication in the gossip cache: + +- V1: the Merkle proof hash +- V2: the composite commitment + +Peers receive proofs via the `/gossip/v2/ingress_proof` HTTP endpoint (`crates/p2p/src/server.rs`), which delegates to the gossip data handler for validation and forwarding to the mempool. + +### Validation (in blocks) + +When validating a block, each ingress proof is checked: + +1. **Version acceptance** — `check_version_accepted(accept_kzg, require_kzg)` +2. **Full data availability verification** (if enabled) — Reconstruct chunks from the database and invoke `verify_ingress_proof`, which recomputes the KZG commitments from scratch and verifies they match. +3. **Unique signer enforcement** — Each signer may only have one proof per transaction per block. + +--- + +## 6. EIP-4844 Blob Extraction + +### What are EIP-4844 blobs? + +EIP-4844 (Proto-Danksharding) introduces a new transaction type to Ethereum that carries large data payloads called **blobs**. Each blob is 128 KB and arrives with a KZG commitment in a **sidecar** — metadata attached to the transaction but not executed by the EVM. + +Irys employs its Reth-based execution layer to process Ethereum transactions. When blob transactions arrive, Irys can extract the blob data and ingest it as native Irys data. + +### BlobExtractionService + +``` +crates/actors/src/blob_extraction_service.rs +``` + +This actor receives `ExtractBlobs` messages containing block hashes and blob transaction hashes. 
For each blob: + +``` +EIP-4844 Blob (128 KB + KZG commitment from sidecar) + │ + ├─ Take the KZG commitment directly from the sidecar (no recomputation) + ├─ Zero-pad blob data from 128 KB → 256 KB (Irys chunk size) + ├─ Compute data_root from the padded data (regular leaves, not signer-dependent) + ├─ Compute composite_commitment binding KZG to the signer + ├─ Construct IngressProofV2 with source_type = EvmBlob + │ + └─> Create synthetic DataTransactionHeader + │ + └─> Send IngestBlobDerivedTx to the mempool +``` + +**Key observation:** Following extraction, blob-derived data is indistinguishable from native data within the mempool and storage systems. It possesses a regular transaction header, an ingress proof, and chunk data — the same structures used for native transactions. + +**Why not recompute the KZG commitment?** The blob sidecar already contains a KZG commitment computed using the same trusted setup (Ethereum's KZG ceremony). Recomputing it would be redundant and computationally expensive. + +### Configuration + +Blob extraction is gated by the `enable_blobs` flag. Enabling it automatically implies `accept_kzg_ingress_proofs` (since blob-derived proofs are always V2). + +--- + +## 7. Custody Proofs + +### Purpose + +Once data is stored, how does the network verify that miners are *still* storing it? Custody proofs answer this question through a challenge-response protocol. + +A verifier selects random chunk positions in a miner's partition and demands: "At position N, what does your data evaluate to?" The miner must produce a KZG opening proof for each challenged position. If the proofs match the stored per-chunk commitments, the miner is verified. If they do not, the miner is penalised. 
+ +### Challenge derivation + +Challenges are deterministic — any party can compute the same challenges given the same inputs: + +``` +challenge_seed = SHA256(vdf_output || partition_hash) +``` + +The VDF output provides unpredictable timing (miners cannot prepare in advance), and the partition hash identifies which partition is being challenged. + +From the seed, **k** chunk offsets are selected (default k = 20): + +``` +For j = 0..k: (j is u32) + hash = SHA256(challenge_seed || j.to_le_bytes()) // 4-byte LE + offset_j = u32(first_8_bytes_as_u64(hash) % num_chunks_in_partition) +``` + +For each offset, an evaluation point is derived: + +``` +z_j = SHA256(challenge_seed || offset_j.to_le_bytes()) mod BLS12-381_r // offset_j is u32, 4-byte LE +``` + +The `mod BLS12-381_r` step ensures the point is a valid scalar in the BLS12-381 field. + +### Proof generation + +The `CustodyProofService` (`crates/actors/src/custody_proof_service.rs`) is responsible for handling challenges. + +When a block reaches `ChainState::Onchain`, `block_tree_service.rs` sends `CustodyProofMessage::NewBlock { vdf_output: H256, block_height: u64 }` to the service (gated by `enable_custody_proofs`). The service's `handle_new_block` method iterates all local storage modules, derives a challenge seed for each partition, and invokes `handle_challenge` internally — thereby self-challenging on every confirmed block. + +``` +CustodyChallenge received (via NewBlock self-challenge or peer gossip) + │ + ├─ Locate storage module matching partition_hash + │ (if none found, this node does not own the partition — skip) + │ + └─ For each of k challenged offsets: + │ + ├─ Read packed chunk from storage module + ├─ Unpack it (reverse multi-iteration entropy packing → plaintext) + ├─ Derive evaluation point: z = derive_challenge_point(seed, offset) + ├─ Compute opening proof on the plaintext: + │ (proof_bytes, y_bytes) = compute_chunk_opening_proof(data, z, settings) + │ + │ Internally, this: + │ 1. 
Pads the chunk to 256 KB and splits into two 128 KB halves + │ 2. Computes a blob proof for each half: (π1, y1) and (π2, y2) + │ 3. Aggregates: π = π1 + r·π2, y = y1 + r·y2 + │ where r = SHA256(C1 || C2) + │ + └─ Construct CustodyOpening { + chunk_offset, + data_root, // identifies the transaction + tx_chunk_index, // position within the transaction + evaluation_point, // z (32 bytes) + evaluation_value, // y (32 bytes) + opening_proof // π (48 bytes) + } + │ + └─ Assemble CustodyProof with all openings + └─ Gossip to network via GossipDataV2::CustodyProof +``` + +The gossip sending and receiving paths are fully wired. On the receiving side, `handle_custody_proof_v2` in `server.rs` caches the proof for deduplication, then forwards it via `CustodyProofMessage::ReceivedProof` to the `CustodyProofService`. The service's `handle_received_proof` method verifies the proof against stored per-chunk commitments and, if valid, adds it to the pending proofs list. The block producer drains pending proofs via `TakePendingProofs` when assembling blocks. + +### Verification + +The verification function `validate_custody_proofs` in `block_validation.rs` is wired into `validate_block()` as a parallel `tokio::join!` task in `block_validation_task.rs`, alongside recall, PoA, shadow transaction, seeds, commitment ordering, and data transaction validation. + +```rust +// crates/types/src/custody.rs +pub fn verify_custody_proof( + proof: &CustodyProof, + get_commitment: impl Fn(H256, u32) -> eyre::Result>, + kzg_settings: &KzgSettings, + expected_challenge_count: u32, + num_chunks_in_partition: u64, +) -> eyre::Result +``` + +Verification proceeds as follows: + +1. **Check opening count** — There must be exactly `expected_challenge_count` openings. +2. **Recompute expected offsets** — Derived from the challenge seed (deterministic, publicly verifiable). +3. **For each opening:** + - Verify that `chunk_offset` matches the expected offset. 
+ - Look up the stored per-chunk KZG commitment via `get_commitment(data_root, tx_chunk_index)` — these were stored during ingress proof generation. + - Invoke `verify_chunk_opening_proof(commitment, z, y, π)` — the core KZG verification. +4. **Return a result** — one of: + +| Result | Meaning | +|--------|---------| +| `Valid` | All openings verified successfully | +| `InvalidOpeningCount` | Incorrect number of openings | +| `InvalidOffset` | Opening at an unexpected chunk position | +| `MissingCommitment` | No stored commitment for this chunk (data was never ingested) | +| `InvalidProof` | KZG verification failed — the miner does not hold the correct data | + +### Penalties + +If verification fails, a **CustodyPenalty** shadow transaction is generated. Shadow transactions are protocol-level actions encoded as EVM transactions: + +``` +CustodyPenaltyPacket { + amount: U256, // tokens to deduct + target: Address, // penalised miner + partition_hash: FixedBytes<32>, // which partition failed (alloy_primitives, not irys H256) +} +``` + +The penalty is Borsh-encoded, prefixed with the `IRYS_SHADOW_EXEC` marker (`b"irys-shadow-exec"`), and sent to `SHADOW_TX_DESTINATION_ADDR`. The Irys EVM extension detects this prefix in the transaction input and executes the encoded action (deducting funds from the miner's account). + +### Code references + +| Component | File | +|-----------|------| +| Challenge types | `crates/types/src/custody.rs` | +| Proof generation service | `crates/actors/src/custody_proof_service.rs` | +| KZG opening primitives | `crates/types/src/kzg.rs` | +| Verification | `crates/types/src/custody.rs` — `verify_custody_proof` | +| Penalty shadow transaction | `crates/irys-reth/src/shadow_tx.rs` | + +--- + +## 8. 
Configuration and Rollout
+
+### Configuration flags
+
+All flags reside in `ConsensusConfig` (`crates/types/src/config/consensus.rs`):
+
+| Flag | Default | Description |
+|------|---------|-------------|
+| `enable_shadow_kzg_logging` | false | Compute V2 commitments alongside V1 proofs and log for comparison. Non-consensus — purely for testing. |
+| `use_kzg_ingress_proofs` | false | Generate V2 (KZG) proofs for new transactions instead of V1. |
+| `accept_kzg_ingress_proofs` | false | Accept V2 proofs from peers during validation. |
+| `require_kzg_ingress_proofs` | false | Reject V1 proofs entirely. Implies `accept_kzg_ingress_proofs`. |
+| `enable_blobs` | false | Extract EIP-4844 blobs and ingest as Irys transactions. Implies `accept_kzg_ingress_proofs`. |
+| `enable_custody_proofs` | false | Run the custody proof challenge-response protocol. Requires `accept_kzg_ingress_proofs`. |
+| `custody_challenge_count` | 20 | Number of random chunk positions challenged per custody proof. |
+| `custody_response_window` | 10 | Number of blocks a miner has to respond to a custody challenge. |
+
+### Flag dependencies
+
+The `normalize()` method enforces logical implications at startup:
+
+```
+enable_blobs=true               ──┐
+require_kzg_ingress_proofs=true ──┤
+use_kzg_ingress_proofs=true     ──┼──> accept_kzg_ingress_proofs = true
+enable_custody_proofs=true      ──┘
+```
+
+If any of these flags is true but `accept_kzg_ingress_proofs` is false, `normalize()` auto-enables it with a warning log.
+
+The `validate()` method (`crates/types/src/config/mod.rs`) performs the same checks but returns hard errors — catching contradictions that survive normalisation.
+ +### Rollout strategy + +The flags enable a phased rollout: + +``` +Phase 1: Shadow Mode + enable_shadow_kzg_logging = true + (V1 proofs remain in use; V2 computed and logged for comparison) + +Phase 2: Accept + accept_kzg_ingress_proofs = true + (V2 proofs accepted from peers; V1 still generated locally) + +Phase 3: Use + use_kzg_ingress_proofs = true + (This node generates V2 proofs; V1 still accepted from others) + +Phase 4: Require + require_kzg_ingress_proofs = true + (V1 proofs rejected — full network migration complete) + +Phase 5: Custody and Blobs + enable_custody_proofs = true + enable_blobs = true + (Full end-to-end wiring: challenge issuance on confirmed blocks, + proof generation, gossip broadcast, peer receipt and verification, + pending proof collection by the block producer, and block validation.) +``` + +--- + +## 9. Data Flow Diagrams + +### Native data transaction — end to end + +``` +User submits transaction with N chunks + │ + v +MempoolService receives tx + chunks + │ + ├─ Store chunks in CachedChunks DB table + │ + └─ generate_ingress_proof() + │ + ├─ For each chunk: + │ compute_chunk_commitment(chunk_data) + │ └─ pad → split → commit_half_1 → commit_half_2 → aggregate + │ + ├─ aggregate_all_commitments([C0, C1, ..., CN-1]) + │ └─ iterative pairwise: C = aggregate(C_prev, C_next) + │ + ├─ compute_composite_commitment(C_tx, signer_address) + │ └─ SHA256(DOMAIN || C_tx || address) + │ + ├─ Sign proof + │ + └─ store_proof_and_commitments() + ├─ IngressProofs table: (data_root, address) → proof + └─ PerChunkKzgCommitments table: (data_root, i) → C_i + │ + v +Gossip IngressProof to peers + │ + v +Block producer includes proof in block + │ + v +Block validators check proof + │ Subsequently... 
+ v │ +Data stored in partitions v + Block confirmed on-chain + (ChainState::Onchain) + │ + v + BlockTreeService sends NewBlock + (VDF output from block's vdf_limiter_info) + │ + v + CustodyProofService::handle_new_block + ├─ Derive challenge_seed per partition + ├─ Locate storage module + ├─ For each offset: + │ unpack chunk + │ compute_chunk_opening_proof + └─ Gossip CustodyProof + │ + v + Peers receive via gossip + ├─ Verify proof (handle_received_proof) + └─ Store as pending + │ + v + Block producer includes proofs + │ + v + validate_custody_proofs (in validate_block) + ├─ Recompute offsets + ├─ Look up per-chunk commitments + ├─ Verify each opening + └─ Valid → OK / Invalid → CustodyPenalty +``` + +### EIP-4844 blob path + +``` +Ethereum blob transaction (128 KB blob + KZG commitment in sidecar) + │ + v +BlobExtractionService::process_single_blob() + │ + ├─ Take KZG commitment from sidecar (no recomputation) + ├─ Zero-pad blob: 128 KB → 256 KB + ├─ Compute data_root from padded data + ├─ Compute composite_commitment + ├─ Create IngressProofV2 (source_type = EvmBlob) + │ + └─ Create synthetic DataTransactionHeader + │ + └─ IngestBlobDerivedTx → MempoolService + │ + └─ (same flow as native data from this point) +``` + +--- + +## 10. Glossary + +| Term | Definition | +|------|-----------| +| **BLS12-381** | The elliptic curve employed for KZG commitments. Provides approximately 128-bit security. | +| **Blob** | A 128 KB data payload (EIP-4844). Irys chunks are 256 KB, equivalent to two blobs. | +| **Chunk** | A 256 KB unit of data in Irys storage. | +| **Commitment (KZG)** | A 48-byte elliptic curve point that uniquely fingerprints a polynomial (and thus the data it represents). | +| **Composite commitment** | SHA256(DOMAIN \|\| KZG commitment \|\| signer address). Binds a KZG commitment to a specific miner. | +| **Custody challenge** | A request for a miner to prove continued storage of specific chunks in a partition. 
| +| **Custody proof** | A miner's response: KZG opening proofs at the challenged positions. | +| **Domain separator** | The bytes `IRYS_KZG_INGRESS_V1` prepended to hashes to prevent cross-protocol confusion. | +| **Evaluation point (z)** | A 32-byte scalar at which the polynomial is evaluated during a custody challenge. | +| **Field element** | A 32-byte number in the BLS12-381 scalar field. Must be less than the field modulus (which begins with `0x73`); a first byte ≥ `0x74` is always invalid. | +| **G1 point** | A point on the BLS12-381 G1 curve (48 bytes compressed). Commitments and proofs are G1 points. | +| **Ingress proof** | A signed attestation that a miner has received and stored a transaction's data. | +| **KZG** | Kate–Zaverucha–Goldberg — the authors of the polynomial commitment scheme. | +| **Opening proof (π)** | A 48-byte proof that a polynomial evaluates to a specific value at a specific point. | +| **Partition** | A logical storage unit that a miner manages. Contains many chunks. | +| **Per-chunk commitment** | The KZG commitment for a single chunk, stored in the database for custody verification. | +| **Shadow transaction** | A protocol-level action (such as custody penalties) encoded as an EVM transaction. | +| **Sidecar** | Metadata attached to an EIP-4844 blob transaction, including the KZG commitment. | +| **Trusted setup** | A one-time ceremony that produces the cryptographic parameters (`KzgSettings`) required for KZG. | +| **VDF** | Verifiable Delay Function. Provides unpredictable timing for custody challenges. |