Skip to content

Commit 1c6ade7

Browse files
authored
chore: created benches with criterion and moved one performance test to it (#277)
* created benches with criterion and moved one performance test to it
* deterministic inputs for reproducible benchmarks
1 parent 53a3305 commit 1c6ade7

File tree

3 files changed

+102
-43
lines changed

3 files changed

+102
-43
lines changed

dash-spv/Cargo.toml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,12 +59,17 @@ hickory-resolver = "0.25"
5959
log = "0.4"
6060

6161
[dev-dependencies]
62+
criterion = { version = "0.8.1", features = ["async_tokio"] }
6263
tempfile = "3.0"
6364
tokio-test = "0.4"
6465
env_logger = "0.10"
6566
hex = "0.4"
6667
test-case = "3.3"
6768

69+
[[bench]]
70+
name = "storage"
71+
harness = false
72+
6873
[[bin]]
6974
name = "dash-spv"
7075
path = "src/main.rs"

dash-spv/benches/storage.rs

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,96 @@
1+
use std::time::Duration;
2+
3+
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
4+
use dash_spv::{
5+
storage::{DiskStorageManager, StorageManager},
6+
Hash,
7+
};
8+
use dashcore::{block::Version, BlockHash, CompactTarget, Header};
9+
use rand::{rngs::StdRng, Rng, SeedableRng};
10+
use tempfile::TempDir;
11+
use tokio::runtime::Builder;
12+
13+
fn create_test_header(height: u32) -> Header {
14+
Header {
15+
version: Version::from_consensus(1),
16+
prev_blockhash: BlockHash::all_zeros(),
17+
merkle_root: dashcore_hashes::sha256d::Hash::all_zeros().into(),
18+
time: height,
19+
bits: CompactTarget::from_consensus(0x207fffff),
20+
nonce: height,
21+
}
22+
}
23+
24+
fn bench_disk_storage(c: &mut Criterion) {
25+
const CHUNK_SIZE: u32 = 13_000;
26+
const NUM_ELEMENTS: u32 = CHUNK_SIZE * 20;
27+
const SEED: u64 = 42;
28+
29+
let rt = Builder::new_multi_thread().worker_threads(4).enable_all().build().unwrap();
30+
31+
let headers = (0..NUM_ELEMENTS).map(create_test_header).collect::<Vec<Header>>();
32+
let mut rng = StdRng::seed_from_u64(SEED);
33+
34+
c.bench_function("storage/disk/store", |b| {
35+
b.to_async(&rt).iter_batched(
36+
|| async {
37+
DiskStorageManager::new(TempDir::new().unwrap().path().to_path_buf()).await.unwrap()
38+
},
39+
|a| async {
40+
let mut storage = a.await;
41+
42+
for chunk in headers.chunks(CHUNK_SIZE as usize) {
43+
storage.store_headers(chunk).await.unwrap();
44+
}
45+
},
46+
BatchSize::SmallInput,
47+
)
48+
});
49+
50+
let temp_dir = TempDir::new().unwrap();
51+
52+
let mut storage = rt.block_on(async {
53+
let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()).await.unwrap();
54+
55+
for chunk in headers.chunks(CHUNK_SIZE as usize) {
56+
storage.store_headers(chunk).await.unwrap();
57+
}
58+
59+
storage
60+
});
61+
62+
c.bench_function("storage/disk/get", |b| {
63+
b.to_async(&rt).iter_batched(
64+
|| rng.gen::<u32>() % NUM_ELEMENTS,
65+
async |height| {
66+
let _ = storage.get_header(height).await.unwrap();
67+
},
68+
BatchSize::SmallInput,
69+
)
70+
});
71+
72+
c.bench_function("storage/disk/reverse_index", |b| {
73+
b.to_async(&rt).iter_batched(
74+
|| {
75+
let height = rand::random::<u32>() % NUM_ELEMENTS;
76+
headers[height as usize].block_hash()
77+
},
78+
async |hash| {
79+
let _ = storage.get_header_height_by_hash(&hash).await.unwrap();
80+
},
81+
BatchSize::SmallInput,
82+
)
83+
});
84+
85+
rt.block_on(async {
86+
storage.shutdown().await;
87+
});
88+
}
89+
90+
criterion_group!(
91+
name = disk_storage;
92+
config = Criterion::default()
93+
.sample_size(10)
94+
.warm_up_time(Duration::from_secs(1));
95+
targets = bench_disk_storage);
96+
criterion_main!(disk_storage);

dash-spv/tests/segmented_storage_test.rs

Lines changed: 1 addition & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ use dashcore::pow::CompactTarget;
77
use dashcore::BlockHash;
88
use dashcore_hashes::Hash;
99
use std::sync::Arc;
10-
use std::time::{Duration, Instant};
10+
use std::time::Duration;
1111
use tempfile::TempDir;
1212
use tokio::time::sleep;
1313

@@ -378,45 +378,3 @@ async fn test_filter_header_persistence() {
378378
assert_eq!(loaded[3], create_test_filter_header(50_001));
379379
}
380380
}
381-
382-
#[tokio::test]
383-
async fn test_performance_improvement() {
384-
let temp_dir = TempDir::new().unwrap();
385-
let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()).await.unwrap();
386-
387-
// Store a large number of headers
388-
let headers: Vec<BlockHeader> = (0..200_000).map(create_test_header).collect();
389-
390-
let start = Instant::now();
391-
for chunk in headers.chunks(10_000) {
392-
storage.store_headers(chunk).await.unwrap();
393-
}
394-
let store_time = start.elapsed();
395-
396-
println!("Stored 200,000 headers in {:?}", store_time);
397-
398-
// Test random access performance
399-
let start = Instant::now();
400-
for _ in 0..1000 {
401-
let height = rand::random::<u32>() % 200_000;
402-
let _ = storage.get_header(height).await.unwrap();
403-
}
404-
let access_time = start.elapsed();
405-
406-
println!("1000 random accesses in {:?}", access_time);
407-
assert!(access_time < Duration::from_secs(1), "Random access should be fast");
408-
409-
// Test reverse index performance
410-
let start = Instant::now();
411-
for _ in 0..1000 {
412-
let height = rand::random::<u32>() % 200_000;
413-
let hash = headers[height as usize].block_hash();
414-
let _ = storage.get_header_height_by_hash(&hash).await.unwrap();
415-
}
416-
let lookup_time = start.elapsed();
417-
418-
println!("1000 hash lookups in {:?}", lookup_time);
419-
assert!(lookup_time < Duration::from_secs(1), "Hash lookups should be fast");
420-
421-
storage.shutdown().await;
422-
}

0 commit comments

Comments
 (0)