Skip to content

Commit 9292db6

Browse files
committed
Merge branch 'main' into rkuris/simple-range-proof-verification
2 parents 64a21d9 + 0e0e229 commit 9292db6

File tree

14 files changed

+267
-250
lines changed

14 files changed

+267
-250
lines changed

.github/workflows/ci.yaml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,8 @@ jobs:
3535
os: macos-latest
3636
- profile-key: debug-ethhash-logger
3737
profile-args: "--features ethhash,logger"
38+
- profile-key: debug-all-features
39+
profile-args: "--all-features"
3840
- profile-key: maxperf-ethhash-logger
3941
profile-args: "--profile maxperf --features ethhash,logger"
4042
runs-on: ${{ matrix.os || 'ubuntu-latest' }}
@@ -89,6 +91,8 @@ jobs:
8991
os: macos-latest
9092
- profile-key: debug-ethhash-logger
9193
profile-args: "--features ethhash,logger"
94+
- profile-key: debug-all-features
95+
profile-args: "--all-features"
9296
runs-on: ${{ matrix.os || 'ubuntu-latest' }}
9397
steps:
9498
- uses: actions/checkout@v4
@@ -121,6 +125,8 @@ jobs:
121125
os: macos-latest
122126
- profile-key: debug-ethhash-logger
123127
profile-args: "--features ethhash,logger"
128+
- profile-key: debug-all-features
129+
profile-args: "--all-features"
124130
runs-on: ${{ matrix.os || 'ubuntu-latest' }}
125131
steps:
126132
- uses: actions/checkout@v4
@@ -150,6 +156,8 @@ jobs:
150156
os: macos-latest
151157
- profile-key: debug-ethhash-logger
152158
profile-args: "--features ethhash,logger"
159+
- profile-key: debug-all-features
160+
profile-args: "--all-features"
153161
- profile-key: maxperf-ethhash-logger
154162
profile-args: "--cargo-profile maxperf --features ethhash,logger"
155163
runs-on: ${{ matrix.os || 'ubuntu-latest' }}
@@ -178,6 +186,8 @@ jobs:
178186
os: macos-latest
179187
- profile-key: debug-ethhash-logger
180188
profile-args: "--features ethhash,logger"
189+
- profile-key: debug-all-features
190+
profile-args: "--all-features"
181191
- profile-key: maxperf-ethhash-logger
182192
profile-args: "--profile maxperf --features ethhash,logger"
183193
runs-on: ${{ matrix.os || 'ubuntu-latest' }}

clippy.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ doc-valid-idents = [
1111

1212
disallowed-methods = [
1313
{ path = "rand::rng", replacement = "firewood_storage::SeededRng::from_env_or_random", reason = "use a prng with a user-defined seed instead", allow-invalid = true },
14+
{ path = "std::os::unix::fs::FileExt::write_at", replacement = "write_all_at", reason = "use write_all_at instead of write_at to handle short writes ensuring the entire buffer is written", allow-invalid = true },
1415
]
1516

1617
disallowed-types = [

firewood/src/db.rs

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -224,7 +224,8 @@ impl Db {
224224
/// Check the database for consistency
225225
pub fn check(&self, opt: CheckOpt) -> CheckerReport {
226226
let latest_rev_nodestore = self.manager.current_revision();
227-
latest_rev_nodestore.check(opt)
227+
let header = self.manager.locked_header();
228+
latest_rev_nodestore.check(&header, opt)
228229
}
229230

230231
/// Create a proposal with a specified parent. A proposal is created in parallel if `use_parallel`
@@ -1284,7 +1285,7 @@ mod test {
12841285
assert_eq!(&version, b"firewood-v1\0\0\0\0\0");
12851286

12861287
// overwrite the magic string to simulate an older version
1287-
file.write_at(b"firewood 0.0.18\0", 0).unwrap();
1288+
file.write_all_at(b"firewood 0.0.18\0", 0).unwrap();
12881289
drop(file);
12891290

12901291
let testdb = testdb.reopen();

firewood/src/manager.rs

Lines changed: 44 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
reason = "Found 3 occurrences after enabling the lint."
1111
)]
1212

13-
use parking_lot::{Mutex, RwLock};
13+
use parking_lot::{Mutex, MutexGuard, RwLock};
1414
use std::collections::{HashMap, VecDeque};
1515
use std::io;
1616
use std::num::NonZero;
@@ -29,7 +29,7 @@ use firewood_metrics::{firewood_increment, firewood_set};
2929
pub use firewood_storage::CacheReadStrategy;
3030
use firewood_storage::{
3131
BranchNode, Committed, FileBacked, FileIoError, HashedNodeReader, ImmutableProposal,
32-
NodeHashAlgorithm, NodeStore, TrieHash,
32+
NodeHashAlgorithm, NodeStore, NodeStoreHeader, TrieHash,
3333
};
3434

3535
pub(crate) const DB_FILE_NAME: &str = "firewood.db";
@@ -89,6 +89,12 @@ pub(crate) struct RevisionManager {
8989
/// is preserved on disk for historical queries.
9090
max_revisions: usize,
9191

92+
/// Persisted metadata for the database.
93+
///
94+
/// Loaded from disk on startup and updated during commits when nodes
95+
/// are persisted or deleted nodes are added to the free lists.
96+
header: Mutex<NodeStoreHeader>,
97+
9298
/// FIFO queue of committed revisions kept in memory. The queue always
9399
/// contains at least one revision.
94100
in_memory_revisions: RwLock<VecDeque<CommittedRevision>>,
@@ -157,7 +163,15 @@ impl RevisionManager {
157163
fb.lock()?;
158164

159165
let storage = Arc::new(fb);
160-
let nodestore = Arc::new(NodeStore::open(storage.clone())?);
166+
let header = match NodeStoreHeader::read_from_storage(storage.as_ref()) {
167+
Ok(header) => header,
168+
Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => {
169+
// Empty file - create a new header for a fresh database
170+
NodeStoreHeader::new(config.node_hash_algorithm)
171+
}
172+
Err(err) => return Err(err.into()),
173+
};
174+
let nodestore = Arc::new(NodeStore::open(&header, storage.clone())?);
161175
let root_store = config
162176
.root_store
163177
.then(|| {
@@ -167,23 +181,26 @@ impl RevisionManager {
167181
})
168182
.transpose()?;
169183

184+
if config.truncate {
185+
header.flush_to(storage.as_ref())?;
186+
storage.set_len(NodeStoreHeader::SIZE)?;
187+
}
188+
189+
let mut by_hash = HashMap::new();
190+
if let Some(hash) = nodestore.root_hash().or_default_root_hash() {
191+
by_hash.insert(hash, nodestore.clone());
192+
}
193+
170194
let manager = Self {
171195
max_revisions: config.manager.max_revisions,
196+
header: Mutex::new(header),
172197
in_memory_revisions: RwLock::new(VecDeque::from([nodestore.clone()])),
173-
by_hash: RwLock::new(Default::default()),
198+
by_hash: RwLock::new(by_hash),
174199
proposals: Mutex::new(Default::default()),
175200
threadpool: OnceLock::new(),
176201
root_store,
177202
};
178203

179-
if let Some(hash) = nodestore.root_hash().or_default_root_hash() {
180-
manager.by_hash.write().insert(hash, nodestore.clone());
181-
}
182-
183-
if config.truncate {
184-
nodestore.flush_header_with_padding()?;
185-
}
186-
187204
// On startup, we always write the latest revision to RootStore
188205
if let Some(root_hash) = manager.current_revision().root_hash() {
189206
let root_address = manager.current_revision().root_address().ok_or(
@@ -231,7 +248,10 @@ impl RevisionManager {
231248
});
232249
}
233250

234-
let mut committed = proposal.as_committed(&current_revision);
251+
let committed = proposal.as_committed();
252+
253+
// Lock header for the duration of reaping and persistence
254+
let mut header = self.header.lock();
235255

236256
// 2. Revision reaping
237257
// When we exceed max_revisions, remove the oldest revision from memory.
@@ -257,7 +277,7 @@ impl RevisionManager {
257277
// This guarantee is there because we have a `&mut self` reference to the manager, so
258278
// the compiler guarantees we are the only one using this manager.
259279
match Arc::try_unwrap(oldest) {
260-
Ok(oldest) => oldest.reap_deleted(&mut committed)?,
280+
Ok(oldest) => oldest.reap_deleted(&mut header)?,
261281
Err(original) => {
262282
warn!("Oldest revision could not be reaped; still referenced");
263283
self.in_memory_revisions.write().push_front(original);
@@ -273,9 +293,7 @@ impl RevisionManager {
273293
}
274294

275295
// 3. Persist to disk.
276-
// TODO: We can probably do this in another thread, but it requires that
277-
// we move the header out of NodeStore, which is in a future PR.
278-
committed.persist()?;
296+
committed.persist(&mut header)?;
279297

280298
// 4. Persist revision to root store
281299
if let Some(store) = &self.root_store
@@ -298,6 +316,10 @@ impl RevisionManager {
298316
self.by_hash.write().insert(hash, committed.clone());
299317
}
300318

319+
// At this point, we can release the lock on the header as the header
320+
// and the last committed revision are up-to-date.
321+
drop(header);
322+
301323
// 6. Proposal Cleanup
302324
// Free proposal that is being committed as well as any proposals no longer
303325
// referenced by anyone else. Track how many were discarded (dropped without commit).
@@ -405,6 +427,11 @@ impl RevisionManager {
405427
.clone()
406428
}
407429

430+
/// Acquires a lock on the header and returns a guard.
431+
pub(crate) fn locked_header(&self) -> MutexGuard<'_, NodeStoreHeader> {
432+
self.header.lock()
433+
}
434+
408435
/// Gets or creates a threadpool associated with the revision manager.
409436
///
410437
/// # Panics

firewood/src/merkle/tests/ethhash.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ fn test_root_hash_random_deletions() {
149149
items_ordered.sort_unstable();
150150
items_ordered.shuffle(&mut &rng);
151151

152-
let mut committed_merkle = init_merkle(&items);
152+
let (mut committed_merkle, mut header) = init_merkle_with_header(&items);
153153

154154
for (k, v) in items_ordered {
155155
let mut merkle = committed_merkle.fork().unwrap();
@@ -166,7 +166,7 @@ fn test_root_hash_random_deletions() {
166166
assert_eq!(merkle.get_value(k).unwrap().as_deref(), Some(v.as_ref()));
167167
}
168168

169-
committed_merkle = into_committed(merkle.hash(), committed_merkle.nodestore());
169+
committed_merkle = into_committed(merkle.hash(), &mut header);
170170

171171
let h: TrieHash = KeccakHasher::trie_root(&items).to_fixed_bytes().into();
172172

firewood/src/merkle/tests/mod.rs

Lines changed: 20 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,8 @@ use std::fmt::Write;
1616

1717
use super::*;
1818
use firewood_storage::{
19-
Committed, MemStore, MutableProposal, NodeHashAlgorithm, NodeStore, RootReader, TrieHash,
19+
Committed, MemStore, MutableProposal, NodeHashAlgorithm, NodeStore, NodeStoreHeader,
20+
RootReader, TrieHash,
2021
};
2122

2223
// Returns n random key-value pairs.
@@ -37,14 +38,26 @@ fn generate_random_kvs(rng: &firewood_storage::SeededRng, n: usize) -> Vec<(Vec<
3738

3839
fn into_committed(
3940
merkle: Merkle<NodeStore<Arc<ImmutableProposal>, MemStore>>,
40-
parent: &NodeStore<Committed, MemStore>,
41+
header: &mut NodeStoreHeader,
4142
) -> Merkle<NodeStore<Committed, MemStore>> {
42-
let mut ns = merkle.into_inner().as_committed(parent);
43-
ns.persist().unwrap();
43+
let ns = merkle.into_inner().as_committed();
44+
ns.persist(header).unwrap();
4445
ns.into()
4546
}
4647

4748
pub(crate) fn init_merkle<I, K, V>(iter: I) -> Merkle<NodeStore<Committed, MemStore>>
49+
where
50+
I: Clone + IntoIterator<Item = (K, V)>,
51+
K: AsRef<[u8]>,
52+
V: AsRef<[u8]>,
53+
{
54+
let (merkle, _header) = init_merkle_with_header(iter);
55+
merkle
56+
}
57+
58+
pub(crate) fn init_merkle_with_header<I, K, V>(
59+
iter: I,
60+
) -> (Merkle<NodeStore<Committed, MemStore>>, NodeStoreHeader)
4861
where
4962
I: Clone + IntoIterator<Item = (K, V)>,
5063
K: AsRef<[u8]>,
@@ -54,6 +67,7 @@ where
5467
Vec::with_capacity(64 * 1024),
5568
NodeHashAlgorithm::compile_option(),
5669
));
70+
let mut header = NodeStoreHeader::new(NodeHashAlgorithm::compile_option());
5771
let base = Merkle::from(NodeStore::new_empty_committed(memstore.clone()));
5872
let mut merkle = base.fork().unwrap();
5973

@@ -65,7 +79,7 @@ where
6579
}
6680

6781
let merkle = merkle.hash();
68-
let merkle = into_committed(merkle, base.nodestore());
82+
let merkle = into_committed(merkle, &mut header);
6983

7084
// Single verification pass at the end to ensure correctness
7185
for (k, v) in iter {
@@ -79,7 +93,7 @@ where
7993
);
8094
}
8195

82-
merkle
96+
(merkle, header)
8397
}
8498

8599
// generate pseudorandom data, but prefix it with some known data

fwdctl/src/check.rs

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,9 @@ use std::sync::Arc;
88
use askama::Template;
99
use clap::Args;
1010
use firewood::v2::api;
11-
use firewood_storage::{CacheReadStrategy, CheckOpt, DBStats, FileBacked, NodeStore};
11+
use firewood_storage::{
12+
CacheReadStrategy, CheckOpt, DBStats, FileBacked, NodeStore, NodeStoreHeader,
13+
};
1214
use indicatif::{ProgressBar, ProgressFinish, ProgressStyle};
1315
use nonzero_ext::nonzero;
1416
use num_format::{Locale, ToFormattedString};
@@ -69,8 +71,9 @@ pub(super) fn run(opts: &Options) -> Result<(), api::Error> {
6971
progress_bar: Some(progress_bar),
7072
};
7173

72-
let nodestore = NodeStore::open(storage)?;
73-
let check_report = nodestore.check(check_ops);
74+
let mut header = NodeStoreHeader::read_from_storage(storage.as_ref())?;
75+
let nodestore = NodeStore::open(&header, storage)?;
76+
let check_report = nodestore.check(&header, check_ops);
7477

7578
println!("Errors ({}): ", check_report.errors.len());
7679
for error in &check_report.errors {
@@ -79,7 +82,7 @@ pub(super) fn run(opts: &Options) -> Result<(), api::Error> {
7982

8083
let mut db_stats = check_report.db_stats.clone();
8184
if opts.fix {
82-
match nodestore.fix(check_report) {
85+
match nodestore.fix(&mut header, check_report) {
8386
Ok(report) => {
8487
println!("Fixed Errors ({}):", report.fixed.len());
8588
for error in report.fixed {

0 commit comments

Comments (0)