
Commit 87ea684

Merge branch 'master' into mpeter/fix-chain-rules-condition
2 parents: 16627db + 55a15f7

File tree: 8 files changed (+136, -4 lines)


cmd/execution_builder.go

Lines changed: 1 addition & 1 deletion
@@ -767,7 +767,7 @@ func (exeNode *ExecutionNode) LoadExecutionState(
 	storedChunkDataPacks := store.NewStoredChunkDataPacks(
 		node.Metrics.Cache, chunkDB, exeNode.exeConf.chunkDataPackCacheSize)
 	chunkDataPacks := store.NewChunkDataPacks(node.Metrics.Cache,
-		chunkDB, storedChunkDataPacks, exeNode.collections, exeNode.exeConf.chunkDataPackCacheSize)
+		node.ProtocolDB, storedChunkDataPacks, exeNode.collections, exeNode.exeConf.chunkDataPackCacheSize)
 
 	getLatestFinalized := func() (uint64, error) {
 		final, err := node.State.Final().Head()

cmd/util/cmd/rollback-executed-height/cmd/rollback_executed_height.go

Lines changed: 1 addition & 1 deletion
@@ -88,7 +88,7 @@ func runE(*cobra.Command, []string) error {
 	}
 	chunkDataPacksDB := pebbleimpl.ToDB(chunkDataPacksPebbleDB)
 	storedChunkDataPacks := store.NewStoredChunkDataPacks(metrics, chunkDataPacksDB, 1000)
-	chunkDataPacks := store.NewChunkDataPacks(metrics, chunkDataPacksDB, storedChunkDataPacks, collections, 1000)
+	chunkDataPacks := store.NewChunkDataPacks(metrics, db, storedChunkDataPacks, collections, 1000)
 	protocolDBBatch := db.NewBatch()
 	defer protocolDBBatch.Close()

Three binary image files (78.8 KB, 79.1 KB, 313 KB): the storage-layer diagrams added under docs/images/ and referenced by the new storage/README.md.

engine/verification/verifier/verifiers.go

Lines changed: 1 addition & 1 deletion
@@ -255,7 +255,7 @@ func initStorages(
 	}
 	storedChunkDataPacks := store.NewStoredChunkDataPacks(metrics.NewNoopCollector(), pebbleimpl.ToDB(chunkDataPackDB), 1000)
 	chunkDataPacks := store.NewChunkDataPacks(metrics.NewNoopCollector(),
-		pebbleimpl.ToDB(chunkDataPackDB), storedChunkDataPacks, storages.Collections, 1000)
+		db, storedChunkDataPacks, storages.Collections, 1000)
 
 	verifier := makeVerifier(log.Logger, chainID, storages.Headers, transactionFeesDisabled, scheduledCallbacksEnabled)
 	closer := func() error {

storage/README.md

Lines changed: 132 additions & 0 deletions
# Flow Storage

The storage package subtree (`./storage/**`) implements persistent data storage for Flow.

## Overview
The storage layer is divided into three layers:

![Storage Layer Diagram](../docs/images/Storage_Layer_Overview.png)

[Diagram source](https://drive.google.com/file/d/1nF5k4RT78vRB8n5C5Nwalc2PdX-k2uKP/view?usp=sharing)

### 1. Common Database Interface
`storage.DB` defines an interface for direct interaction with a database backend.
This layer operates on keys and values as `[]byte` and is unaware of which resource types are being stored.
The `pebbleimpl` and `badgerimpl` packages implement this interface for [Pebble](https://github.com/cockroachdb/pebble) and [Badger](https://github.com/hypermodeinc/badger) respectively.

Flow used Badger as the primary database backend until Mainnet26.
Starting with Mainnet27 (October 2025), Pebble is the primary database backend.
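The snippet below is a minimal sketch of how a backend is wrapped into the common interface, based on the `pebbleimpl.ToDB` call used elsewhere in this commit; the import paths are approximate.
```go
import (
	"github.com/cockroachdb/pebble"

	"github.com/onflow/flow-go/storage"
	"github.com/onflow/flow-go/storage/operation/pebbleimpl" // approximate import path
)

// openProtocolDB wraps a Pebble instance in the backend-agnostic storage.DB interface.
// Callers of the returned storage.DB work with []byte keys and values and do not
// need to know which backend is underneath.
func openProtocolDB(dir string) (storage.DB, error) {
	pdb, err := pebble.Open(dir, &pebble.Options{})
	if err != nil {
		return nil, err
	}
	return pebbleimpl.ToDB(pdb), nil
}
```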
### 2. Resource Operations
The `operation` package implements basic low-level database operations.
Each exported function in `operation` typically performs one read or write operation for a specific resource type.
Low-level storage operations that are always performed together can be combined into one exported "procedure" function (see the sketch below).
In this case, the low-level operations used in the procedure should be private.
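As an illustration of such a procedure, here is a hedged sketch; the resource type, prefix codes, and function names are hypothetical and only reuse the `UpsertByKey`/`MakePrefix` helpers shown in the examples further below.
```go
// insertGuarantee and indexGuaranteeByCollection are private low-level writes
// that are always performed together (hypothetical names for illustration).
func insertGuarantee(w storage.Writer, g *flow.CollectionGuarantee) error {
	return UpsertByKey(w, MakePrefix(codeGuarantee, g.ID()), g)
}

func indexGuaranteeByCollection(w storage.Writer, collID, guaranteeID flow.Identifier) error {
	return UpsertByKey(w, MakePrefix(codeGuaranteeByCollection, collID), guaranteeID)
}

// InsertAndIndexGuarantee is the exported "procedure" combining both writes.
func InsertAndIndexGuarantee(w storage.Writer, collID flow.Identifier, g *flow.CollectionGuarantee) error {
	if err := insertGuarantee(w, g); err != nil {
		return err
	}
	return indexGuaranteeByCollection(w, collID, g.ID())
}
```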
### 3. Resource Stores
The `store` package implements resource-level database operations.
Typically, one resource type (e.g. a `Block` or a `Collection`) has one corresponding resource store.
Caching, if applicable, is implemented at this layer.
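To show where caching sits, here is a sketch of a read path; the plain map cache, the `Reader()` accessor, and the `RetrieveCollection` helper are simplifications and assumptions, not the actual `store` implementation.
```go
// Collections sketches a resource store: a typed API over storage.DB that
// delegates to the low-level `operation` package and caches reads.
type Collections struct {
	db    storage.DB
	cache map[flow.Identifier]*flow.LightCollection // real stores use bounded, metrics-instrumented caches
}

func (c *Collections) ByID(id flow.Identifier) (*flow.LightCollection, error) {
	if col, ok := c.cache[id]; ok {
		return col, nil
	}
	var col flow.LightCollection
	if err := operation.RetrieveCollection(c.db.Reader(), id, &col); err != nil { // hypothetical helper
		return nil, err
	}
	c.cache[id] = &col
	return &col, nil
}
```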
## Best Practices

### Prefer content hash keys
We consider two types of keys:
1. Collision-resistant content hash of the value (e.g. `block.ID() -> block`)
2. Index keys (e.g. `finalizedHeight -> block.ID()`)

It is generally safe to upsert Type 1 keys without synchronization, because the key is derived from the value's content, so an upsert cannot change an existing value.
For this reason, **prefer Type 1 keys wherever possible**.

All Type 2 keys must be explicitly synchronized to protect against concurrent updates; the sketch below contrasts the two key types.
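A minimal sketch contrasting the two key types, reusing the `UpsertByKey`/`MakePrefix` helpers from the examples below; the prefix codes and the lock name are hypothetical.
```go
// Type 1: the key is the content hash of the value, so it is safe to upsert without a lock.
func UpsertBlock(w storage.Writer, block *flow.Block) error {
	return UpsertByKey(w, MakePrefix(codeBlock, block.ID()), block)
}

// Type 2: the key is an index (height -> block ID), so concurrent writers could race.
// Callers must hold the appropriate lock before updating it.
func IndexFinalizedBlockByHeight(lctx lockctx.Proof, w storage.Writer, height uint64, blockID flow.Identifier) error {
	if !lctx.HoldsLock(storage.LockFinalizeBlock) { // hypothetical lock name
		return fmt.Errorf("missing required lock: %s", storage.LockFinalizeBlock)
	}
	return UpsertByKey(w, MakePrefix(codeHeightToBlockID, height), blockID)
}
```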
### Use functors to front-load expensive operations
If an operation function does not require a lock, it should perform the storage operation immediately and return any resulting error.

If an operation function does require a lock, it should return a functor, so that lock acquisition can be deferred until the write is applied.
Expensive lock-independent work, such as encoding and hashing, should be performed immediately, outside the functor.

#### Example 1: Operation without lock
```go
func UpsertCollection(w storage.Writer, col *flow.LightCollection) error {
	return UpsertByKey(w, MakePrefix(codeCollection, col.ID()), col)
}
```

#### Example 2: Operation with lock
```go
func UpsertCollection(col *flow.LightCollection) func(lctx lockctx.Proof, rw storage.ReaderBatchWriter) error {
	id := col.ID()                        // compute the ID (hash) immediately, before acquiring the lock
	key := MakePrefix(codeCollection, id)
	deferredUpsert := Upserting(key, col) // Upserting is a helper that performs the encoding before the lock is acquired

	return func(lctx lockctx.Proof, rw storage.ReaderBatchWriter) error {
		// check the lock context (e.g. lctx.HoldsLock(...)) before writing
		return deferredUpsert(rw)
	}
}
```
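For context, a hedged sketch of how a higher-level store might consume such a functor: encoding happens up front, then the lock is acquired and the deferred write is applied inside a batch (the store fields and lock name are illustrative).
```go
func (c *Collections) Store(col *flow.LightCollection) error {
	// Encoding and hashing happen here, before any lock is held.
	upsert := operation.UpsertCollection(col)

	lctx := c.lockManager.NewContext()
	defer lctx.Release()
	lctx.AcquireLock(storage.LockInsertCollection) // hypothetical lock name

	return c.db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		return upsert(lctx, rw)
	})
}
```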
## Isolation
The common database interface (Layer 1) provides read-committed isolation and serializable atomic writes.

Write operations are grouped into write batches, which are committed atomically.

The commonly used `ReaderBatchWriter` provides both read and write methods.
CAUTION: Unlike Badger transactions, reads here observe the **latest committed state**:
- Reads DO NOT observe writes staged in the current write batch
- Reads DO observe writes committed concurrently by other threads
- Subsequent reads of the same key DO NOT always observe the same value

The sketch below illustrates these visibility rules.
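This is a minimal sketch of those rules inside a batch, reusing Example 1's `UpsertCollection`; the `Writer()`/`GlobalReader()` accessors and the `RetrieveCollection` helper are assumptions for illustration.
```go
func demoBatchVisibility(db storage.DB, col *flow.LightCollection) error {
	return db.WithReaderBatchWriter(func(rw storage.ReaderBatchWriter) error {
		// Stage a write in the batch. It is NOT visible to any read until the batch commits.
		if err := UpsertCollection(rw.Writer(), col); err != nil {
			return err
		}

		// Reads observe the latest committed state of the database:
		//  - they will NOT see the UpsertCollection staged above,
		//  - they WILL see writes committed concurrently by other goroutines,
		//  - repeating the same read later in this function may return a different value.
		var stored flow.LightCollection
		err := RetrieveCollection(rw.GlobalReader(), col.ID(), &stored) // hypothetical read helper
		_ = err // expect "not found" here unless an earlier batch already stored this collection

		return nil
	})
}
```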
### Badger Transaction (for reference only - no longer supported)
Badger transactions (**no longer supported**) read their own writes and observe a consistent prior snapshot for the duration of the transaction.
![Badger transaction](./../docs/images/Badger_SSI_Transaction.png)

### Pebble Write Batch
Pebble reads observe the latest committed state, which may change between subsequent reads.
![Pebble write batch](./../docs/images/Pebble_Read_Committed_WriteBatch.png)
## Synchronization with Lock Context Manager
The storage package exposes a `LockManager`, which must be a process-wide singleton.
All synchronized functions in the storage package should register their locks in [`storage/locks.go`](locks.go).
- High-level functions should acquire locks using a `lockctx.Context`
- Low-level functions should validate locks by accepting a `lockctx.Proof`

The `LockManager` enforces a lock-ordering policy and thereby guarantees deadlock-free operation.

For additional information, see [the `lockctx` package documentation](https://github.com/jordanschalm/lockctx).

#### Example: Contexts & Proofs
In this example, the high-level `storage.Blocks` store uses a `lockctx.Context` to acquire the necessary locks.
```go
func (blocks *Blocks) Insert(block *Block) error {
	lctx := blocks.LockManager.NewContext()
	defer lctx.Release()

	lctx.AcquireLock(storage.LockInsertHeader)
	lctx.AcquireLock(storage.LockInsertPayload)

	return blocks.db.WithReaderBatchWriter(func(batch storage.ReaderBatchWriter) error {
		if err := operation.InsertHeader(lctx, block.Header); err != nil {
			return err
		}
		return operation.InsertPayload(lctx, block.Payload)
	})
}
```
Then a `lockctx.Proof` is passed down to the lower-level functions, which validate that the required lock is held.
```go
func InsertHeader(lctx lockctx.Proof, header *Header) error {
	if !lctx.HoldsLock(storage.LockInsertHeader) {
		return fmt.Errorf("missing required lock: %s", storage.LockInsertHeader)
	}
	// insert header
	return nil
}

func InsertPayload(lctx lockctx.Proof, payload *Payload) error {
	if !lctx.HoldsLock(storage.LockInsertPayload) {
		return fmt.Errorf("missing required lock: %s", storage.LockInsertPayload)
	}
	// insert payload
	return nil
}
```

storage/store/protocol_kv_store_test.go

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ import (
 	"github.com/onflow/flow-go/utils/unittest"
 )
 
-// TesKeyValueStoreStorage tests if the KV store is stored, retrieved and indexed correctly
+// TestKeyValueStoreStorage tests if the KV store is stored, retrieved and indexed correctly
 func TestKeyValueStoreStorage(t *testing.T) {
 	dbtest.RunWithDB(t, func(t *testing.T, db storage.DB) {
 		lockManager := storage.NewTestingLockManager()
