
Commit 544f78d

extended, refined, polished documentation (no algorithmic changes)
1 parent 4800c80 commit 544f78d

44 files changed: +305 −152 lines

engine/access/access_test.go

Lines changed: 1 addition & 2 deletions
@@ -560,8 +560,7 @@ func (suite *Suite) TestGetExecutionResultByBlockID() {
 			if err != nil {
 				return err
 			}
-			// requires LockIndexExecutionResult
-			return all.Results.BatchIndex(lctx, rw, blockID, er.ID())
+			return all.Results.BatchIndex(lctx, rw, blockID, er.ID()) // requires storage.LockIndexExecutionResult
 		})
 	}))

engine/access/ingestion2/finalized_block_processor.go

Lines changed: 1 addition & 2 deletions
@@ -49,8 +49,7 @@ type FinalizedBlockProcessor struct {
 	lockManager storage.LockManager
 	db          storage.DB

-	blocks storage.Blocks
-
+	blocks           storage.Blocks
 	executionResults storage.ExecutionResults

 	collectionSyncer *CollectionSyncer

engine/execution/state/bootstrap/bootstrap.go

Lines changed: 0 additions & 1 deletion
@@ -97,7 +97,6 @@ func (b *Bootstrapper) BootstrapExecutionDatabase(
 	db storage.DB,
 	rootSeal *flow.Seal,
 ) error {
-
 	commit := rootSeal.FinalState
 	return storage.WithLocks(manager, storage.LockGroupExecutionBootstrap, func(lctx lockctx.Context) error {
 		return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {

engine/execution/state/state.go

Lines changed: 11 additions & 9 deletions
@@ -417,8 +417,9 @@ func (s *state) saveExecutionResults(
 	// dedicated database. However, we have not yet persisted that this execution node is committing to the
 	// result represented by the chunk data packs. Populating the index from chunk ID to chunk data pack ID
 	// in the protocol database (signifying the node's slashable commitment to the respective result) is
-	// done by the functor returned by `Store`. The functor's is invoked as part of the atomic batch update
-	// of the protocol database below.
+	// done by the functor returned by `Store`.
+	// The functor's is invoked as part of the atomic batch update of the protocol database below
+	// and requires the lock [storage.LockIndexChunkDataPackByChunkID].
 	storeChunkDataPacksFunc, err := s.chunkDataPacks.Store(chunks)
 	if err != nil {
 		return fmt.Errorf("can not store chunk data packs for block ID: %v: %w", blockID, err)
@@ -445,25 +446,26 @@
 	// overwriting of the previously stored mapping.
 	err := s.db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error {
 		// store the ChunkID -> StoredChunkDataPack.ID() mapping
-		// in s.db (protocol database along with other execution data in a single batch)
+		// in protocol database along with other execution data in a single batch
+		// requires [storage.LockIndexChunkDataPackByChunkID]
 		err := storeChunkDataPacksFunc(lctx, batch)
 		if err != nil {
 			return fmt.Errorf("cannot store chunk data packs: %w", err)
 		}

-		// require LockInsertEvent
+		// requires [storage.LockInsertEvent]
 		err = s.events.BatchStore(lctx, blockID, []flow.EventsList{result.AllEvents()}, batch)
 		if err != nil {
 			return fmt.Errorf("cannot store events: %w", err)
 		}

-		// require LockInsertServiceEvent
+		// requires [storage.LockInsertServiceEvent]
 		err = s.serviceEvents.BatchStore(lctx, blockID, result.AllServiceEvents(), batch)
 		if err != nil {
 			return fmt.Errorf("cannot store service events: %w", err)
 		}

-		// require LockInsertAndIndexTxResult
+		// requires [storage.LockInsertAndIndexTxResult]
 		err = s.transactionResults.BatchStore(
 			lctx,
 			batch,
@@ -475,14 +477,14 @@
 		}

 		executionResult := &result.ExecutionReceipt.ExecutionResult
-		// require [storage.LockInsertMyReceipt] lock
 		// saving my receipts will also save the execution result
+		// requires [storage.LockInsertMyReceipt] lock
 		err = s.myReceipts.BatchStoreMyReceipt(lctx, result.ExecutionReceipt, batch)
 		if err != nil {
 			return fmt.Errorf("could not persist execution result: %w", err)
 		}

-		// require [storage.LockIndexExecutionResult] lock
+		// requires [storage.LockIndexExecutionResult] lock
 		err = s.results.BatchIndex(lctx, batch, blockID, executionResult.ID())
 		if err != nil {
 			return fmt.Errorf("cannot index execution result: %w", err)
@@ -491,7 +493,7 @@
 		// the state commitment is the last data item to be stored, so that
 		// IsBlockExecuted can be implemented by checking whether state commitment exists
 		// in the database
-		// require [storage.LockIndexStateCommitment] lock
+		// requires [storage.LockIndexStateCommitment] lock
 		err = s.commits.BatchStore(lctx, blockID, result.CurrentEndState(), batch)
 		if err != nil {
 			return fmt.Errorf("cannot store state commitment: %w", err)

module/builder/consensus/builder_test.go

Lines changed: 4 additions & 1 deletion
@@ -254,7 +254,10 @@ func (bs *BuilderSuite) SetupTest() {

 	lockManager := storage.NewTestingLockManager()

-	// insert finalized height and root height
+	// Insert finalized height and root height:
+	// Currently, we need locks [storage.LockInsertInstanceParams] and [storage.LockFinalizeBlock]. However, the lock policy only permits the locking
+	// order [storage.LockInsertInstanceParams] → [storage.LockIndexExecutionResult] → [storage.LockInsertBlock] → [storage.LockFinalizeBlock]. Hence,
+	// we acquire all 4 locks in that order.
 	db := bs.db
 	err := unittest.WithLocks(bs.T(), lockManager, []string{storage.LockInsertInstanceParams, storage.LockIndexExecutionResult, storage.LockInsertBlock, storage.LockFinalizeBlock}, func(lctx lockctx.Context) error {
 		return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
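The new comment captures a general rule: when a test needs a subset of locks whose direct pairing the lock policy does not allow, it must request the full permitted chain in order. A hedged sketch of that pattern follows; the helper name, parameter types, and import paths are assumptions, while `unittest.WithLocks`, the lock constants, and `WithReaderBatchWriter` come from the diff above.

package sketch

import (
	"testing"

	"github.com/jordanschalm/lockctx" // assumed import path of the lock-context module

	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/utils/unittest" // assumed location of the WithLocks test helper
)

// withBootstrapLocks is a hypothetical test helper: the writes only need
// LockInsertInstanceParams and LockFinalizeBlock, but the policy only permits the
// chain below, so all four locks are acquired in that order and held until the
// batch is committed.
func withBootstrapLocks(t *testing.T, lockManager storage.LockManager, db storage.DB, write func(lctx lockctx.Context, rw storage.ReaderBatchWriter) error) error {
	locks := []string{
		storage.LockInsertInstanceParams,
		storage.LockIndexExecutionResult,
		storage.LockInsertBlock,
		storage.LockFinalizeBlock,
	}
	return unittest.WithLocks(t, lockManager, locks, func(lctx lockctx.Context) error {
		return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
			return write(lctx, rw)
		})
	})
}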

module/executiondatasync/optimistic_sync/persisters/stores/events.go

Lines changed: 1 addition & 0 deletions
@@ -32,6 +32,7 @@ func NewEventsStore(
 }

 // Persist adds events to the batch.
+// The caller must acquire [storage.LockInsertEvent] and hold it until the write batch is committed.
 //
 // No error returns are expected during normal operations
 func (e *EventsStore) Persist(lctx lockctx.Proof, batch storage.ReaderBatchWriter) error {
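A hedged caller-side sketch of the contract added above: acquire [storage.LockInsertEvent] before opening the batch and keep it until the batch commit completes. The helper name, parameter types, and import paths are assumptions; only `Persist`, the lock constant, `unittest.WithLocks`, and `WithReaderBatchWriter` appear in this commit.

package sketch

import (
	"testing"

	"github.com/jordanschalm/lockctx" // assumed import path of the lock-context module

	"github.com/onflow/flow-go/module/executiondatasync/optimistic_sync/persisters/stores" // assumed package path, derived from the file path above
	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/utils/unittest" // assumed location of the WithLocks test helper
)

// persistEventsForTest is a hypothetical helper illustrating the documented contract
// of EventsStore.Persist: [storage.LockInsertEvent] is acquired first and released
// only after WithReaderBatchWriter has committed the batch.
func persistEventsForTest(t *testing.T, lockManager storage.LockManager, db storage.DB, eventsStore *stores.EventsStore) error {
	return unittest.WithLocks(t, lockManager, []string{storage.LockInsertEvent}, func(lctx lockctx.Context) error {
		return db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error {
			// events are only staged here; the write is committed when this closure returns without error
			return eventsStore.Persist(lctx, batch)
		})
	})
}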

state/cluster/badger/state.go

Lines changed: 3 additions & 5 deletions
@@ -61,32 +61,30 @@ func Bootstrap(db storage.DB, lockManager lockctx.Manager, stateRoot *StateRoot)
 		if err != nil {
 			return fmt.Errorf("could not insert genesis block: %w", err)
 		}
-		// insert block height -> ID mapping
+		// insert block height -> ID mapping for genesis block (finalized by protocol convention)
 		err = operation.IndexClusterBlockHeight(lctx, rw, chainID, genesis.Height, genesis.ID())
 		if err != nil {
 			return fmt.Errorf("failed to map genesis block height to block: %w", err)
 		}
-		// insert boundary
+		// insert latest finalized height
 		err = operation.BootstrapClusterFinalizedHeight(lctx, rw, chainID, genesis.Height)
 		if err != nil {
 			return fmt.Errorf("could not insert genesis boundary: %w", err)
 		}

+		// Initialize and persist safety and liveness data for cluster consensus
 		safetyData := &hotstuff.SafetyData{
 			LockedOneChainView:      genesis.View,
 			HighestAcknowledgedView: genesis.View,
 		}
-
 		livenessData := &hotstuff.LivenessData{
 			CurrentView: genesis.View + 1, // starting view for hotstuff
 			NewestQC:    rootQC,
 		}
-		// insert safety data
 		err = operation.UpsertSafetyData(lctx, rw, chainID, safetyData)
 		if err != nil {
 			return fmt.Errorf("could not insert safety data: %w", err)
 		}
-		// insert liveness data
 		err = operation.UpsertLivenessData(lctx, rw, chainID, livenessData)
 		if err != nil {
 			return fmt.Errorf("could not insert liveness data: %w", err)

state/protocol/badger/state.go

Lines changed: 29 additions & 11 deletions
@@ -88,7 +88,7 @@ func SkipNetworkAddressValidation(conf *BootstrapConfig) {
 	conf.SkipNetworkAddressValidation = true
 }

-// Bootstrap initializes a the protocol state from the provided root snapshot and persists it to the database.
+// Bootstrap initializes the protocol state from the provided root snapshot and persists it to the database.
 // No errors expected during normal operation.
 func Bootstrap(
 	metrics module.ComplianceMetrics,
@@ -143,11 +143,17 @@ func Bootstrap(

 	var state *State

-	// we acquire both [storage.LockInsertBlock] and [storage.LockFinalizeBlock] because
-	// the bootstrapping process inserts and finalizes blocks (all blocks within the
-	// trusted root snapshot are presumed to be finalized)
+	// Overview of ACQUIRED LOCKS:
+	// * In a nutshell, the instance parameters describe how far back the node's local history reaches in comparison
+	//   to the genesis / spork root block. These values are immutable throughout the lifetime of a node. Hence, the
+	//   lock [storage.LockInsertInstanceParams] should only ever be used here during bootstrapping.
+	// * We acquire both [storage.LockInsertBlock] and [storage.LockFinalizeBlock] because the bootstrapping process
+	//   inserts and finalizes blocks (all blocks within the trusted root snapshot are presumed to be finalized).
+	// * Liveness and safety data for Jolteon consensus need to be initialized, requiring [storage.LockInsertSafetyData]
+	//   and [storage.LockInsertLivenessData].
+	// * The lock [storage.LockIndexExecutionResult] is required for bootstrapping execution. When bootstrapping other
+	//   node roles, the lock is acquired (for algorithmic uniformity) but not used.
 	err = storage.WithLocks(lockManager, storage.LockGroupProtocolStateBootstrap, func(lctx lockctx.Context) error {
-
 		// bootstrap the sealing segment
 		// creating sealed root block with the rootResult
 		// creating finalized root block with lastFinalized
@@ -209,9 +215,12 @@
 // bootstrapProtocolStates bootstraps data structures needed for Dynamic Protocol State.
 // The sealing segment may contain blocks committing to different Protocol State entries,
 // in which case each of these protocol state entries are stored in the database during
-// bootstrapping.
-// For each distinct protocol state entry, we also store the associated EpochSetup and
-// EpochCommit service events.
+// bootstrapping. For each distinct protocol state entry, we also store the associated
+// EpochSetup and EpochCommit service events.
+//
+// Caller must hold [storage.LockInsertBlock] lock.
+//
+// No error returns expected during normal operation.
 func bootstrapProtocolState(
 	lctx lockctx.Proof,
 	rw storage.ReaderBatchWriter,
@@ -280,7 +289,7 @@
 // history is covered. The spork root block is persisted as a root proposal without proposer
 // signature (by convention).
 //
-// It requires [storage.LockIndexExecutionResult] lock
+// Required locks: [storage.LockIndexExecutionResult] and [storage.LockInsertBlock] and [storage.LockFinalizeBlock]
 func bootstrapSealingSegment(
 	lctx lockctx.Proof,
 	db storage.DB,
@@ -298,7 +307,7 @@
 		if err != nil {
 			return fmt.Errorf("could not insert execution result: %w", err)
 		}
-		err = operation.IndexTrustedExecutionResult(lctx, rw, result.BlockID, result.ID())
+		err = operation.IndexTrustedExecutionResult(lctx, rw, result.BlockID, result.ID()) // requires [storage.LockIndexExecutionResult]
 		if err != nil {
 			return fmt.Errorf("could not index execution result: %w", err)
 		}
@@ -504,13 +513,19 @@
 // bootstrapStatePointers instantiates central pointers used to by the protocol
 // state for keeping track of lifecycle variables:
 //   - Consensus Safety and Liveness Data (only used by consensus participants)
-//   - Root Block's Height (heighest block in sealing segment)
+//   - Root Block's Height (highest block in sealing segment)
 //   - Sealed Root Block Height (block height sealed as of the Root Block)
 //   - Latest Finalized Height (initialized to height of Root Block)
 //   - Latest Sealed Block Height (initialized to block height sealed as of the Root Block)
 //   - Spork root block ID (spork root block in sealing segment)
 //   - initial entry in map:
 //     Finalized Block ID -> ID of latest seal in fork with this block as head
+//
+// Caller must hold locks:
+// [storage.LockInsertSafetyData] and [storage.LockInsertLivenessData] and
+// [storage.LockInsertBlock] and [storage.LockFinalizeBlock]
+//
+// No error returns expected during normal operation.
 func bootstrapStatePointers(lctx lockctx.Proof, rw storage.ReaderBatchWriter, root protocol.Snapshot) error {
 	// sealing segment lists blocks in order of ascending height, so the tail
 	// is the oldest ancestor and head is the newest child in the segment
@@ -716,6 +731,9 @@
 // indexEpochHeights populates the epoch height index from the root snapshot.
 // We index the FirstHeight for every epoch where the transition occurs within the sealing segment of the root snapshot,
 // or for the first epoch of a spork if the snapshot is a spork root snapshot (1 block sealing segment).
+//
+// Caller must hold [storage.LockFinalizeBlock] lock.
+//
 // No errors are expected during normal operation.
 func indexEpochHeights(lctx lockctx.Proof, rw storage.ReaderBatchWriter, segment *flow.SealingSegment) error {
 	// CASE 1: For spork root snapshots, there is exactly one block B and one epoch E.
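The lock overview above, together with the `storage.WithLocks(..., storage.LockGroupProtocolStateBootstrap, ...)` call in the diff, follows one pattern: acquire a predefined lock group, then perform all bootstrap writes in a single batch while the locks remain held. A condensed sketch of that shape follows; the function name, the injected `writeBootstrapData` callback, and the lockctx import path are illustrative assumptions, not part of the commit.

package sketch

import (
	"fmt"

	"github.com/jordanschalm/lockctx" // assumed import path of the lock-context module

	"github.com/onflow/flow-go/storage"
)

// bootstrapWithLockGroup is a hypothetical condensation of the pattern used by Bootstrap:
// every lock in the group (insert block, finalize block, safety/liveness data, instance
// params, index execution result) stays held until the enclosing batch is committed.
func bootstrapWithLockGroup(lockManager lockctx.Manager, db storage.DB, writeBootstrapData func(lctx lockctx.Context, rw storage.ReaderBatchWriter) error) error {
	err := storage.WithLocks(lockManager, storage.LockGroupProtocolStateBootstrap, func(lctx lockctx.Context) error {
		return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
			return writeBootstrapData(lctx, rw)
		})
	})
	if err != nil {
		return fmt.Errorf("could not bootstrap protocol state: %w", err)
	}
	return nil
}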

storage/blocks.go

Lines changed: 12 additions & 8 deletions
@@ -16,10 +16,15 @@ import (
 // a certified block (including a QC for the block).
 type Blocks interface {

-	// BatchStore stores a valid block in a batch.
-	// Error returns:
-	// - storage.ErrAlreadyExists if the blockID already exists in the database.
-	// - generic error in case of unexpected failure from the database layer or encoding failure.
+	// BatchStore adds the provided block to the database write batch and populates all secondary storage indices
+	// (maps from the block ID to some block-related information).
+	//
+	// CAUTION: Under the hood, `BatchStore` performs some prior database reads, which must happen atomically with
+	// the subsequent database write in order to prevent accidental state corruption. Therefore, the caller must
+	// acquire [storage.LockInsertBlock] and hold it until the database write has been committed.
+	//
+	// Expected error returns during normal operations:
+	// - [storage.ErrAlreadyExists] if some block with the same ID has already been stored
 	BatchStore(lctx lockctx.Proof, rw ReaderBatchWriter, proposal *flow.Proposal) error

 	// ByID returns the block with the given hash. It is available for all incorporated blocks (validated blocks
@@ -88,16 +93,15 @@
 	ByCollectionID(collID flow.Identifier) (*flow.Block, error)

 	// BatchIndexBlockContainingCollectionGuarantees produces mappings from the IDs of [flow.CollectionGuarantee]s to the block ID containing these guarantees.
-	// The caller must acquire a storage.LockIndexBlockByPayloadGuarantees lock.
+	// The caller must acquire [storage.LockIndexBlockByPayloadGuarantees] and hold it until the database write has been committed.
 	//
 	// CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation
 	// assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY
 	// *and* only in the absence of byzantine collector clusters (which the mature protocol must tolerate).
 	// Hence, this function should be treated as a temporary solution, which requires generalization
 	// (one-to-many mapping) for soft finality and the mature protocol.
 	//
-	// Error returns:
-	// - storage.ErrAlreadyExists if any collection guarantee is already indexed
-	// - generic error in case of unexpected failure from the database layer or encoding failure.
+	// Expected error returns during normal operations:
+	// - [storage.ErrAlreadyExists] if any collection guarantee is already indexed
 	BatchIndexBlockContainingCollectionGuarantees(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, guaranteeIDs []flow.Identifier) error
 }
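A hedged caller-side sketch of the `BatchStore` contract documented above: acquire [storage.LockInsertBlock], keep it across the whole read-modify-write batch, and handle [storage.ErrAlreadyExists] explicitly. The helper name, the choice to treat the duplicate case as benign, and the import paths are assumptions; the method signatures and error sentinel come from the diff.

package sketch

import (
	"errors"
	"fmt"

	"github.com/jordanschalm/lockctx" // assumed import path of the lock-context module

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// storeProposal is a hypothetical helper: lctx must prove that [storage.LockInsertBlock]
// is held, and the caller must keep holding it until db.WithReaderBatchWriter returns,
// i.e. until the batch (including BatchStore's internal reads and writes) is committed.
func storeProposal(lctx lockctx.Proof, db storage.DB, blocks storage.Blocks, proposal *flow.Proposal) error {
	err := db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return blocks.BatchStore(lctx, rw, proposal)
	})
	if errors.Is(err, storage.ErrAlreadyExists) {
		return nil // a block with the same ID was already stored; expected during normal operations
	}
	if err != nil {
		return fmt.Errorf("could not store block proposal: %w", err)
	}
	return nil
}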

storage/chunk_data_packs_stored.go

Lines changed: 1 addition & 1 deletion
@@ -113,7 +113,7 @@ func (c StoredChunkDataPack) Equals(other StoredChunkDataPack) (equal bool, diff
 	return true, ""
 }

-// ID returns the identifier of the chunk data pack, which is derived from its contents.
+// ID returns the identifier of the chunk data pack, which is derived from its contents via a collision-resistant hash.
 // Note, StoredChunkDataPack.ID() is the same as ChunkDataPack.ID()
 func (c StoredChunkDataPack) ID() flow.Identifier {
 	return flow.NewChunkDataPackHeader(c.ChunkID, c.StartState, flow.MakeID(c.Proof), c.CollectionID, c.ExecutionDataRoot).ID()
