
Commit 2bf0df7

add: column support to ENR, Metadata and Request Manager (#6741)
* add csc to enr and metadata
* add column filtering into RequestManager
* nits
* add comment
* resolved reviews 1
* added local custody column set into RequestManager as a field
* faster lookups with hashsets
* fix regressions, fix other reviews, fix response checking for columns
* simpler fix for hashsets
1 parent 7647d17 commit 2bf0df7

File tree: 8 files changed, +397 −17 lines


beacon_chain/consensus_object_pools/block_quarantine.nim

Lines changed: 60 additions & 0 deletions

@@ -24,6 +24,8 @@ const
     ## Enough for finalization in an alternative fork
   MaxBlobless = SLOTS_PER_EPOCH
     ## Arbitrary
+  MaxColumnless = SLOTS_PER_EPOCH
+    ## Arbitrary
   MaxUnviables = 16 * 1024
     ## About a day of blocks - most likely not needed but it's quite cheap..

@@ -58,6 +60,12 @@ type
       ## block as well. A blobless block inserted into this table must
       ## have a resolved parent (i.e., it is not an orphan).

+    columnless*: OrderedTable[Eth2Digest, ForkedSignedBeaconBlock]
+      ## Blocks that we don't have columns for. When we have received
+      ## all columns for this block, we can proceed to resolving the
+      ## block as well. A columnless block inserted into this table must
+      ## have a resolved parent (i.e., it is not an orphan).
+
     unviable*: OrderedTable[Eth2Digest, tuple[]]
       ## Unviable blocks are those that come from a history that does not
       ## include the finalized checkpoint we're currently following, and can

@@ -132,6 +140,10 @@ func removeBlobless*(
     quarantine: var Quarantine, signedBlock: ForkySignedBeaconBlock) =
   quarantine.blobless.del(signedBlock.root)

+func removeColumnless*(
+    quarantine: var Quarantine, signedBlock: ForkySignedBeaconBlock) =
+  quarantine.columnless.del(signedBlock.root)
+
 func isViable(
     finalizedSlot: Slot, slot: Slot): bool =
   # The orphan must be newer than the finalization point so that its parent

@@ -236,6 +248,18 @@ func cleanupBlobless(quarantine: var Quarantine, finalizedSlot: Slot) =
       quarantine.addUnviable k
       quarantine.blobless.del k

+func cleanupColumnless(quarantine: var Quarantine, finalizedSlot: Slot) =
+  var toDel: seq[Eth2Digest]
+
+  for k, v in quarantine.columnless:
+    withBlck(v):
+      if not isViable(finalizedSlot, forkyBlck.message.slot):
+        toDel.add k
+
+  for k in toDel:
+    quarantine.addUnviable k
+    quarantine.columnless.del k
+
 func clearAfterReorg*(quarantine: var Quarantine) =
   ## Clear missing and orphans to start with a fresh slate in case of a reorg
   ## Unviables remain unviable and are not cleared.

@@ -325,6 +349,29 @@ proc addBlobless*(
   quarantine.missing.del(signedBlock.root)
   true

+proc addColumnless*(
+    quarantine: var Quarantine, finalizedSlot: Slot,
+    signedBlock: fulu.SignedBeaconBlock): bool =
+
+  if not isViable(finalizedSlot, signedBlock.message.slot):
+    quarantine.addUnviable(signedBlock.root)
+    return false
+
+  quarantine.cleanupColumnless(finalizedSlot)
+
+  if quarantine.columnless.lenu64 >= MaxColumnless:
+    var oldest_columnless_key: Eth2Digest
+    for k in quarantine.columnless.keys:
+      oldest_columnless_key = k
+      break
+    quarantine.columnless.del oldest_columnless_key
+
+  debug "block quarantine: Adding columnless", blck = shortLog(signedBlock)
+  quarantine.columnless[signedBlock.root] =
+    ForkedSignedBeaconBlock.init(signedBlock)
+  quarantine.missing.del(signedBlock.root)
+  true
+
 func popBlobless*(
     quarantine: var Quarantine,
     root: Eth2Digest): Opt[ForkedSignedBeaconBlock] =

@@ -334,6 +381,19 @@ func popBlobless*(
   else:
     Opt.none(ForkedSignedBeaconBlock)

+func popColumnless*(
+    quarantine: var Quarantine,
+    root: Eth2Digest): Opt[ForkedSignedBeaconBlock] =
+  var blck: ForkedSignedBeaconBlock
+  if quarantine.columnless.pop(root, blck):
+    Opt.some(blck)
+  else:
+    Opt.none(ForkedSignedBeaconBlock)
+
 iterator peekBlobless*(quarantine: var Quarantine): ForkedSignedBeaconBlock =
   for k, v in quarantine.blobless.mpairs():
     yield v
+
+iterator peekColumnless*(quarantine: var Quarantine): ForkedSignedBeaconBlock =
+  for k, v in quarantine.columnless.mpairs():
+    yield v
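The eviction in `addColumnless` mirrors `addBlobless`: when the table hits `MaxColumnless`, the first key yielded by the `OrderedTable` — the oldest insertion — is dropped before the new block is added. Below is a minimal, self-contained Nim model of that bounded-table behaviour; the string keys and values are stand-ins for `Eth2Digest` roots and forked blocks, and `MaxColumnless` is shrunk so the eviction is visible.

import std/tables

const MaxColumnless = 3  # the real constant is SLOTS_PER_EPOCH

var columnless = initOrderedTable[string, string]()

proc addColumnless(root, blck: string) =
  # OrderedTable iterates keys in insertion order, so the first key
  # yielded is the oldest entry; evict it when the table is full.
  if columnless.len >= MaxColumnless:
    var oldest: string
    for k in columnless.keys:
      oldest = k
      break
    columnless.del oldest
  columnless[root] = blck

for i in 1 .. 5:
  addColumnless("root" & $i, "block" & $i)

echo columnless.len   # 3
for k in columnless.keys:
  echo k              # root3, root4, root5 -- oldest-first eviction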

beacon_chain/consensus_object_pools/data_column_quarantine.nim

Lines changed: 5 additions & 5 deletions

@@ -78,7 +78,7 @@ func hasDataColumn*(
   false

 func peekColumnIndices*(quarantine: DataColumnQuarantine,
-                        blck: electra.SignedBeaconBlock):
+                        blck: fulu.SignedBeaconBlock):
                         seq[ColumnIndex] =
   # Peeks into the currently received column indices
   # from quarantine, necessary data availability checks

@@ -110,7 +110,7 @@ func gatherDataColumns*(quarantine: DataColumnQuarantine,

 func popDataColumns*(
     quarantine: var DataColumnQuarantine, digest: Eth2Digest,
-    blck: electra.SignedBeaconBlock):
+    blck: fulu.SignedBeaconBlock):
     seq[ref DataColumnSidecar] =
   var r: DataColumnSidecars
   for idx in quarantine.custody_columns:

@@ -123,7 +123,7 @@ func popDataColumns*(
   r

 func hasMissingDataColumns*(quarantine: DataColumnQuarantine,
-                            blck: electra.SignedBeaconBlock): bool =
+                            blck: fulu.SignedBeaconBlock): bool =
   # `hasMissingDataColumns` consists of the data columns that
   # have been missed over gossip, also in case of a supernode,
   # the method would return missing columns when the supernode

@@ -149,7 +149,7 @@ func hasMissingDataColumns*(quarantine: DataColumnQuarantine,
     return true

 func hasEnoughDataColumns*(quarantine: DataColumnQuarantine,
-                           blck: electra.SignedBeaconBlock): bool =
+                           blck: fulu.SignedBeaconBlock): bool =
   # `hasEnoughDataColumns` dictates whether there are `enough`
   # data columns for a block to be enqueued, ideally for a supernode
   # if it receives at least 50%+ over gossip and RPC

@@ -175,7 +175,7 @@ func hasEnoughDataColumns*(quarantine: DataColumnQuarantine,
     return true

 func dataColumnFetchRecord*(quarantine: DataColumnQuarantine,
-                            blck: electra.SignedBeaconBlock):
+                            blck: fulu.SignedBeaconBlock):
                             DataColumnFetchRecord =
   var indices: seq[ColumnIndex]
   for i in quarantine.custody_columns:
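These signatures now take `fulu.SignedBeaconBlock`, and the surrounding comments describe the availability rule: a supernode can enqueue a block once at least 50% of all columns have arrived (the rest are reconstructable), while a regular node needs every column in its custody set. The following is a rough, self-contained model of that rule as I read it from the comments — the constant and the subset check are stand-ins, not the in-tree code, which operates on `DataColumnQuarantine` state.

import std/sets

const NumberOfColumns = 128  # stand-in for the spec's NUMBER_OF_COLUMNS

proc hasEnough(received, custody: HashSet[uint64], supernode: bool): bool =
  if supernode:
    # a supernode can reconstruct the rest once at least 50% of all
    # columns have been received
    received.len * 2 >= NumberOfColumns
  else:
    # a regular node needs its whole custody set; `<=` is the subset test
    custody <= received

let custody = toHashSet([3'u64, 17, 42])
echo hasEnough(toHashSet([3'u64, 17]), custody, supernode = false)      # false
echo hasEnough(toHashSet([3'u64, 17, 42]), custody, supernode = false)  # true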

beacon_chain/networking/eth2_network.nim

Lines changed: 50 additions & 0 deletions

@@ -1647,6 +1647,15 @@ proc runDiscoveryLoop(node: Eth2Node) {.async: (raises: [CancelledError]).} =
     # Also, give some time to dial the discovered nodes and update stats etc
     await sleepAsync(5.seconds)

+proc fetchNodeIdFromPeerId*(peer: Peer): NodeId =
+  # Convert the peer id to a node id by extracting the peer's public key
+  let nodeId =
+    block:
+      var key: PublicKey
+      discard peer.peerId.extractPublicKey(key)
+      keys.PublicKey.fromRaw(key.skkey.getBytes()).get().toNodeId()
+  nodeId
+
 proc resolvePeer(peer: Peer) =
   # Resolve task which performs searching of peer's public key and recovery of
   # ENR using discovery5. We only resolve ENR for peers we know about to avoid

@@ -2418,6 +2427,33 @@ func announcedENR*(node: Eth2Node): enr.Record =
   doAssert node.discovery != nil, "The Eth2Node must be initialized"
   node.discovery.localNode.record

+proc lookupCscFromPeer*(peer: Peer): uint64 =
+  # Fetches the custody column count from a remote peer.
+  # If the peer advertises their custody column count via the `csc` ENR field,
+  # that value is returned. Otherwise, the default value `CUSTODY_REQUIREMENT`
+  # is assumed.
+
+  let metadata = peer.metadata
+  if metadata.isOk:
+    return metadata.get.custody_subnet_count
+
+  # Try getting the custody count from the ENR if the metadata fetch fails.
+  debug "Could not get csc from metadata, trying from ENR",
+    peer_id = peer.peerId
+  let enrOpt = peer.enr
+  if not enrOpt.isNone:
+    let enr = enrOpt.get
+    let enrFieldOpt = enr.get(enrCustodySubnetCountField, seq[byte])
+    if enrFieldOpt.isOk:
+      try:
+        let csc = SSZ.decode(enrFieldOpt.get, uint8)
+        return csc.uint64
+      except SszError, SerializationError:
+        discard  # Ignore decoding errors and fall back to the default
+
+  # Return the default value if no valid custody subnet count is found.
+  return CUSTODY_REQUIREMENT.uint64
+
 func shortForm*(id: NetKeyPair): string =
   $PeerId.init(id.pubkey)

@@ -2579,6 +2615,20 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
   else:
     debug "Stability subnets changed; updated ENR attnets", attnets

+proc loadCscnetMetadataAndEnr*(node: Eth2Node, cscnets: CscCount) =
+  node.metadata.custody_subnet_count = cscnets.uint64
+  let res =
+    node.discovery.updateRecord({
+      enrCustodySubnetCountField: SSZ.encode(cscnets)
+    })
+
+  if res.isErr:
+    # This should not occur in this scenario as the private key would always
+    # be the correct one and the ENR will not increase in size
+    warn "Failed to update the ENR csc field", error = res.error
+  else:
+    debug "Updated ENR csc", cscnets
+
 proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) =
   # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/validator.md#sync-committee-subnet-stability
   if node.metadata.syncnets == syncnets:
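Together, `fetchNodeIdFromPeerId` and `lookupCscFromPeer` give the RequestManager what it needs to decide whether a peer can serve a given column request: the peer's node id plus its advertised custody count (from metadata, the `csc` ENR field, or the `CUSTODY_REQUIREMENT` default). A hedged sketch of how the pieces compose — `getCustodyColumnsSet` below is a fake contiguous-window derivation so the example runs standalone; the real mapping is `get_custody_columns_set` in eip7594_helpers.nim.

import std/sets

type
  NodeId = uint64       # stand-in; the real NodeId is a 256-bit integer
  ColumnIndex = uint64

const SubnetCount = 128'u64  # stand-in for DATA_COLUMN_SIDECAR_SUBNET_COUNT

# Stub derivation: the spec hashes the node id to pick custody subnets;
# a contiguous window is used here only so the flow is runnable.
proc getCustodyColumnsSet(id: NodeId, csc: uint64): HashSet[ColumnIndex] =
  for i in 0'u64 ..< csc:
    result.incl((id + i) mod SubnetCount)

# A peer is a valid target for a column request only if its advertised
# custody covers the columns we want from it (a manager could also split
# the request across peers that each cover part of the set).
proc peerCovers(peerId: NodeId, peerCsc: uint64,
                wanted: HashSet[ColumnIndex]): bool =
  wanted <= getCustodyColumnsSet(peerId, peerCsc)

let wanted = toHashSet([ColumnIndex(5), ColumnIndex(6)])
echo peerCovers(4, 4, wanted)  # custodies {4,5,6,7} -> true
echo peerCovers(9, 2, wanted)  # custodies {9,10} -> false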

beacon_chain/nimbus_beacon_node.nim

Lines changed: 21 additions & 5 deletions

@@ -422,6 +422,9 @@ proc initFullNode(
         DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64
       else:
         CUSTODY_REQUIREMENT.uint64
+    custody_columns_set =
+      node.network.nodeId.get_custody_columns_set(max(SAMPLES_PER_SLOT.uint64,
+        localCustodySubnets))
     consensusManager = ConsensusManager.new(
       dag, attestationPool, quarantine, node.elManager,
       ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets),

@@ -478,6 +481,13 @@ proc initFullNode(
         Opt.some blob_sidecar
       else:
         Opt.none(ref BlobSidecar)
+    rmanDataColumnLoader = proc(
+        columnId: DataColumnIdentifier): Opt[ref DataColumnSidecar] =
+      var data_column_sidecar = DataColumnSidecar.new()
+      if dag.db.getDataColumnSidecar(columnId.block_root, columnId.index, data_column_sidecar[]):
+        Opt.some data_column_sidecar
+      else:
+        Opt.none(ref DataColumnSidecar)

     processor = Eth2Processor.new(
       config.doppelgangerDetection,

@@ -525,10 +535,10 @@ proc initFullNode(
       processor: processor,
       network: node.network)
     requestManager = RequestManager.init(
-      node.network, dag.cfg.DENEB_FORK_EPOCH, getBeaconTime,
-      (proc(): bool = syncManager.inProgress),
-      quarantine, blobQuarantine, rmanBlockVerifier,
-      rmanBlockLoader, rmanBlobLoader)
+      node.network, supernode, custody_columns_set, dag.cfg.DENEB_FORK_EPOCH,
+      getBeaconTime, (proc(): bool = syncManager.inProgress),
+      quarantine, blobQuarantine, dataColumnQuarantine, rmanBlockVerifier,
+      rmanBlockLoader, rmanBlobLoader, rmanDataColumnLoader)

     # As per EIP 7594, the BN is now categorised into a
     # `Fullnode` and a `Supernode`; a fullnode custodies a

@@ -552,7 +562,13 @@ proc initFullNode(
   dataColumnQuarantine[].supernode = supernode
   dataColumnQuarantine[].custody_columns =
     node.network.nodeId.get_custody_columns(max(SAMPLES_PER_SLOT.uint64,
-      localCustodySubnets))
+        localCustodySubnets))
+
+  if node.config.subscribeAllSubnets:
+    node.network.loadCscnetMetadataAndEnr(DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint8)
+  else:
+    node.network.loadCscnetMetadataAndEnr(CUSTODY_REQUIREMENT.uint8)
+
   if node.config.lightClientDataServe:
     proc scheduleSendingLightClientUpdates(slot: Slot) =
       if node.lightClientPool[].broadcastGossipFut != nil:

beacon_chain/spec/datatypes/fulu.nim

Lines changed: 1 addition & 1 deletion

@@ -117,7 +117,7 @@ type
     seq_number*: uint64
     attnets*: AttnetBits
     syncnets*: SyncnetBits
-    custody_subnet_count*: CscCount
+    custody_subnet_count*: uint64

   # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#executionpayload
   ExecutionPayload* = object

beacon_chain/spec/eip7594_helpers.nim

Lines changed: 14 additions & 0 deletions

@@ -93,6 +93,20 @@ func get_custody_columns*(node_id: NodeId,

   sortedColumnIndices(ColumnIndex(columns_per_subnet), subnet_ids)

+func get_custody_columns_set*(node_id: NodeId,
+                              custody_subnet_count: uint64):
+                              HashSet[ColumnIndex] =
+  # This method returns a HashSet of column indices;
+  # it is specifically relevant for peer filtering.
+  let
+    subnet_ids =
+      get_custody_column_subnets(node_id, custody_subnet_count)
+  const
+    columns_per_subnet =
+      NUMBER_OF_COLUMNS div DATA_COLUMN_SIDECAR_SUBNET_COUNT
+
+  sortedColumnIndices(ColumnIndex(columns_per_subnet), subnet_ids).toHashSet()
+
 func get_custody_column_list*(node_id: NodeId,
                               custody_subnet_count: uint64):
                               List[ColumnIndex, NUMBER_OF_COLUMNS] =
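The commit message's "faster lookups with hashsets" is the point of `get_custody_columns_set`: peer filtering tests column membership repeatedly, and a `HashSet` makes each test a constant-time hash probe instead of a linear scan of a `seq`. A tiny self-contained comparison (the `ColumnIndex` alias here is a stand-in for the spec type):

import std/sets

type ColumnIndex = uint64

var columnsSeq: seq[ColumnIndex]
for i in 0'u64 ..< 64'u64:
  columnsSeq.add ColumnIndex(i * 2)

let columnsSet = columnsSeq.toHashSet()

echo ColumnIndex(42) in columnsSeq  # linear scan, O(n)
echo ColumnIndex(42) in columnsSet  # hash lookup, O(1)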

beacon_chain/spec/network.nim

Lines changed: 1 addition & 0 deletions

@@ -47,6 +47,7 @@ const

   enrAttestationSubnetsField* = "attnets"
   enrSyncSubnetsField* = "syncnets"
+  enrCustodySubnetCountField* = "csc"
   enrForkIdField* = "eth2"

 template eth2Prefix(forkDigest: ForkDigest): string =
