beacon_node/network/src/network_beacon_processor/rpc_methods.rs (12 additions, 0 deletions)
@@ -880,6 +880,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
"Received BlobsByRange Request"
);

// Check Deneb is enabled. Blobs are available since then.
let request_start_slot = Slot::from(req.start_slot);

let data_availability_boundary_slot = match self.chain.data_availability_boundary() {
@@ -917,6 +918,17 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
};
}

// Check Fulu/PeerDAS is enabled. Blobs are not stored since then.
// Only need to check if the last slot is a Fulu slot or above.
let request_end_slot = Slot::from(req.start_slot + req.count);
let request_end_epoch = request_end_slot.epoch(T::EthSpec::slots_per_epoch());
if self.chain.spec.is_peer_das_enabled_for_epoch(request_end_epoch) {
return Err((
RpcErrorResponse::InvalidRequest,
"Req including Fulu slots",
))
}
@dapplion (Collaborator) commented on Apr 23, 2025:

Hmm, rethinking this, maybe it is too punishing? Why not allow the request to creep into Fulu and just check that the start slot is in Deneb? It's fine to return empty for slots in Fulu.

Lighthouse only does by_range requests for a single epoch, but other clients may have different logic.
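For illustration, here is a minimal standalone sketch of that alternative (not the PR's code): plain u64 slot arithmetic, with deneb_start_slot and first_fulu_slot as hypothetical stand-ins for values the real handler would derive from data_availability_boundary() and the fork schedule.

// Sketch only: reject requests that start before blobs exist, but trim (rather than
// reject) anything that creeps past the Fulu boundary so those slots come back empty.
fn blobs_by_range_window(
    start_slot: u64,
    count: u64,
    deneb_start_slot: u64,        // first slot at which blobs are available
    first_fulu_slot: Option<u64>, // first slot at which blobs are no longer served
) -> Result<std::ops::Range<u64>, &'static str> {
    if start_slot < deneb_start_slot {
        return Err("request starts before Deneb");
    }
    let requested_end = start_slot + count; // exclusive end of the requested range
    let end = first_fulu_slot.map_or(requested_end, |fulu| requested_end.min(fulu));
    // A request entirely inside Fulu yields an empty range, i.e. an empty response.
    Ok(start_slot..end.max(start_slot))
}

fn main() {
    // Straddling a hypothetical Fulu boundary at slot 96: serve 80..96, nothing beyond.
    assert_eq!(blobs_by_range_window(80, 32, 32, Some(96)), Ok(80..96));
    // Entirely inside Fulu: empty range, empty response, no penalty.
    assert_eq!(blobs_by_range_window(100, 8, 32, Some(96)), Ok(100..100));
}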

@SunnysidedJ (Contributor, Author) replied:

Yes, I agree that it could be a bit too punishing. The issue was vague about how much of a penalty we should apply. Do you think it is okay to accept the request as long as it contains pre-Fulu slots?

Also, it seems there's no check in Lighthouse itself to avoid requesting Fulu slots, as noted in the comment on #7122 (comment). So we could potentially be penalized by our own requests.

@dapplion (Collaborator) replied:

Feels like something that should be clarified in the spec to align behaviour across all clients. Do you want to raise an issue in the specs repo?

@SunnysidedJ (Contributor, Author) commented on Apr 25, 2025:

Sure! Just raised a PR to the specs: ethereum/consensus-specs#4286

@SunnysidedJ (Contributor, Author) added:

Changed as per the spec PR to not penalize and to return empty for Fulu slots.


let block_roots =
self.get_block_roots_for_slot_range(req.start_slot, req.count, "BlobsByRange")?;

beacon_node/network/src/network_beacon_processor/tests.rs (52 additions, 4 deletions)
@@ -1,4 +1,4 @@
#![cfg(not(debug_assertions))] // Tests are too slow in debug.
//#![cfg(not(debug_assertions))] // Tests are too slow in debug.
#![cfg(test)]
A Collaborator commented:

Unwanted diff


use crate::{
@@ -429,13 +429,13 @@ impl TestRig {
}
}

pub fn enqueue_blobs_by_range_request(&self, count: u64) {
pub fn enqueue_blobs_by_range_request(&self, start_slot: u64, count: u64) {
self.network_beacon_processor
.send_blobs_by_range_request(
PeerId::random(),
InboundRequestId::new_unchecked(42, 24),
BlobsByRangeRequest {
start_slot: 0,
start_slot,
count,
},
)
@@ -1216,9 +1216,55 @@ async fn test_blobs_by_range() {
if test_spec::<E>().deneb_fork_epoch.is_none() {
return;
};
let rig_slot = 64;
let mut rig = TestRig::new(rig_slot).await;
let slot_count = 32;
// can I use slots_per_epoch from types::EthSpec?
rig.enqueue_blobs_by_range_request(0, slot_count);

let mut blob_count = 0;
for slot in 0..slot_count {
let root = rig
.chain
.block_root_at_slot(Slot::new(slot), WhenSlotSkipped::None)
.unwrap();
blob_count += root
.map(|root| {
rig.chain
.get_blobs(&root)
.map(|list| list.len())
.unwrap_or(0)
})
.unwrap_or(0);
}
let mut actual_count = 0;
while let Some(next) = rig._network_rx.recv().await {
if let NetworkMessage::SendResponse {
peer_id: _,
response: Response::BlobsByRange(blob),
inbound_request_id: _,
} = next
{
if blob.is_some() {
actual_count += 1;
} else {
break;
}
} else {
panic!("unexpected message {:?}", next);
}
}
assert_eq!(blob_count, actual_count);
}
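(A side note on the slots_per_epoch question left in the diff above: EthSpec does expose it as an associated function, and the rpc_methods.rs change already calls T::EthSpec::slots_per_epoch(), so the hard-coded 32 could presumably be written as follows.)

let slot_count = E::slots_per_epoch(); // 32 with the mainnet preset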

#[tokio::test]
async fn test_blobs_by_range_fulu() {
if !test_spec::<E>().is_peer_das_scheduled() {
return;
};
let mut rig = TestRig::new(64).await;
let slot_count = 32;
rig.enqueue_blobs_by_range_request(slot_count);
rig.enqueue_blobs_by_range_request(0, slot_count);

let mut blob_count = 0;
for slot in 0..slot_count {
@@ -1252,5 +1298,7 @@ async fn test_blobs_by_range() {
panic!("unexpected message {:?}", next);
}
}
assert_eq!(blob_count, 0);
assert_eq!(actual_count, 0);
assert_eq!(blob_count, actual_count);
}