Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -272,6 +272,7 @@ impl<E: EthSpec> PendingComponents<E> {
&self,
spec: &Arc<ChainSpec>,
num_expected_columns_opt: Option<usize>,
min_proofs_required_opt: Option<usize>,
recover: R,
) -> Result<Option<AvailableExecutedBlock<E>>, AvailabilityCheckError>
where
Expand Down Expand Up @@ -349,11 +350,8 @@ impl<E: EthSpec> PendingComponents<E> {
return Ok(None);
};

// Check if this node needs execution proofs to validate blocks.
let needs_execution_proofs = spec.zkvm_min_proofs_required().is_some();

if needs_execution_proofs {
let min_proofs = spec.zkvm_min_proofs_required().unwrap();
// Check if this block needs execution proofs.
if let Some(min_proofs) = min_proofs_required_opt {
let num_proofs = self.execution_proof_subnet_count();
if num_proofs < min_proofs {
// Not enough execution proofs yet
Expand Down Expand Up @@ -605,7 +603,13 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
);
});

self.check_availability_and_cache_components(block_root, pending_components, None)
let min_proofs_required_opt = self.get_min_proofs_required(epoch);
self.check_availability_and_cache_components(
block_root,
pending_components,
None,
min_proofs_required_opt,
Review comment (author), on the `None` column-count argument passed above:
"This means we could have execution proofs, pre-peerDAS -- we can make this stricter/remove this later on if we feel it doesn't help with testing"
)
}

#[allow(clippy::type_complexity)]
Expand Down Expand Up @@ -645,10 +649,12 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
);
});

let min_proofs_required_opt = self.get_min_proofs_required(epoch);
self.check_availability_and_cache_components(
block_root,
pending_components,
Some(num_expected_columns),
min_proofs_required_opt,
)
}

Expand Down Expand Up @@ -682,6 +688,7 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
})?;

let num_expected_columns_opt = self.get_num_expected_columns(epoch);
let min_proofs_required_opt = self.get_min_proofs_required(epoch);

pending_components.span.in_scope(|| {
debug!(
Expand All @@ -696,6 +703,7 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
block_root,
pending_components,
num_expected_columns_opt,
min_proofs_required_opt,
)
}

Expand All @@ -704,10 +712,12 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
block_root: Hash256,
pending_components: MappedRwLockReadGuard<'_, PendingComponents<T::EthSpec>>,
num_expected_columns_opt: Option<usize>,
min_proofs_required_opt: Option<usize>,
) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
if let Some(available_block) = pending_components.make_available(
&self.spec,
num_expected_columns_opt,
min_proofs_required_opt,
|block, span| self.state_cache.recover_pending_executed_block(block, span),
)? {
// Explicitly drop read lock before acquiring write lock
Expand Down Expand Up @@ -876,6 +886,7 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
})?;

let num_expected_columns_opt = self.get_num_expected_columns(epoch);
let min_proofs_required_opt = self.get_min_proofs_required(epoch);

pending_components.span.in_scope(|| {
debug!(
Expand All @@ -889,6 +900,7 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
block_root,
pending_components,
num_expected_columns_opt,
min_proofs_required_opt,
)
}

Expand All @@ -903,6 +915,16 @@ impl<T: BeaconChainTypes> DataAvailabilityCheckerInner<T> {
}
}

/// Returns the minimum number of execution proofs required for a block at the given epoch.
/// Returns `None` if proofs are not required (zkVM not enabled for this epoch).
Review comment (author), on `get_min_proofs_required`:
"Does not check proof boundary epoch/retention window since that will be done on the sync layer"
fn get_min_proofs_required(&self, epoch: Epoch) -> Option<usize> {
    // Proofs are only required while the zkVM feature is active for this
    // epoch; outside that window availability never waits on proofs.
    // Note: `zkvm_min_proofs_required()` itself returns an `Option`, hence
    // the `flatten` after the epoch gate.
    self.spec
        .is_zkvm_enabled_for_epoch(epoch)
        .then(|| self.spec.zkvm_min_proofs_required())
        .flatten()
}

/// maintain the cache
pub fn do_maintenance(&self, cutoff_epoch: Epoch) -> Result<(), AvailabilityCheckError> {
// clean up any lingering states in the state cache
Expand Down
Loading