Skip to content

Commit 67ebf3a

Browse files
feat(spv): flush header index on shutdown and after header sync (#197)
* feat(spv): flush headers on shutdown * move fn lower in the impl * refactor: `MultiPeerNetworkManager` -> `PeerNetworkManager` (#184) * refactor: `MultiPeerNetworkManager` -> `PeerNetworkManager` * Fix formatting and apply review * feat: Update ffi headers (#183) * feat(spv): broadcast transaction (#180) * fix: Fix `PeerNetworkManager` cast in `broadcast_transaction` (#185) PR #180 used `MultiPeerNetworkManager`, this was renamed in #183 which was merged before #180 but after its CI run. * fix: Use non-blocking `TcpStream` in `dash-spv::network::TcpConnection` (#188) * refactor: Improve SPV shutdown handling with `CancellationToken` (#187) * refactor: `TcpConnection` -> `Peer` and `ConnectionPool` -> `PeerPool` (#190) * fix: Locking issue after #190 (#191) #190 removed the read timeouts of the `Peer::receive_message` which currently leads to a lockup of the peer because the write lock is held while waiting for the message. Needs some more refactoring but this works for now. * fix: More follow-up to #190 (#193) The sleep timeout branch introduced in #191 returns an `Err(NetworkError::Timeout)` which leads to a misbehavior update below in the `msg_result` match and eventually in a peer ban. This shouldn't happen because the `sleep` timing out only means that there is no data available right now. Instead, it now returns `Ok(None)` which will just keep things going. * flush after mn list sync too * move flush to after header sync instead of mnlist * fix --------- Co-authored-by: Kevin Rombach <35775977+xdustinface@users.noreply.github.com>
1 parent 312734a commit 67ebf3a

File tree

2 files changed

+54
-2
lines changed

2 files changed

+54
-2
lines changed

dash-spv/src/storage/disk/state.rs

Lines changed: 54 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -436,6 +436,13 @@ impl DiskStorageManager {
436436

437437
// Shutdown background worker
438438
if let Some(tx) = self.worker_tx.take() {
439+
// Save the header index before shutdown
440+
let index = self.header_hash_index.read().await.clone();
441+
let _ = tx
442+
.send(super::manager::WorkerCommand::SaveIndex {
443+
index,
444+
})
445+
.await;
439446
let _ = tx.send(super::manager::WorkerCommand::Shutdown).await;
440447
}
441448

@@ -701,6 +708,29 @@ mod tests {
701708
use dashcore::{block::Version, pow::CompactTarget};
702709
use tempfile::TempDir;
703710

711+
fn build_headers(count: usize) -> Vec<BlockHeader> {
712+
let mut headers = Vec::with_capacity(count);
713+
let mut prev_hash = BlockHash::from_byte_array([0u8; 32]);
714+
715+
for i in 0..count {
716+
let header = BlockHeader {
717+
version: Version::from_consensus(1),
718+
prev_blockhash: prev_hash,
719+
merkle_root: dashcore::hashes::sha256d::Hash::from_byte_array(
720+
[(i % 255) as u8; 32],
721+
)
722+
.into(),
723+
time: 1 + i as u32,
724+
bits: CompactTarget::from_consensus(0x1d00ffff),
725+
nonce: i as u32,
726+
};
727+
prev_hash = header.block_hash();
728+
headers.push(header);
729+
}
730+
731+
headers
732+
}
733+
704734
#[tokio::test]
705735
async fn test_sentinel_headers_not_returned() -> Result<(), Box<dyn std::error::Error>> {
706736
// Create a temporary directory for the test
@@ -859,4 +889,28 @@ mod tests {
859889

860890
Ok(())
861891
}
892+
893+
#[tokio::test]
894+
async fn test_shutdown_flushes_index() -> Result<(), Box<dyn std::error::Error>> {
895+
let temp_dir = TempDir::new()?;
896+
let base_path = temp_dir.path().to_path_buf();
897+
let headers = build_headers(11_000);
898+
let last_hash = headers.last().unwrap().block_hash();
899+
900+
{
901+
let mut storage = DiskStorageManager::new(base_path.clone()).await?;
902+
903+
storage.store_headers(&headers[..10_000]).await?;
904+
super::super::segments::save_dirty_segments(&storage).await?;
905+
906+
storage.store_headers(&headers[10_000..]).await?;
907+
storage.shutdown().await?;
908+
}
909+
910+
let storage = DiskStorageManager::new(base_path).await?;
911+
let height = storage.get_header_height_by_hash(&last_hash).await?;
912+
assert_eq!(height, Some(10_999));
913+
914+
Ok(())
915+
}
862916
}

dash-spv/src/sync/sequential/message_handlers.rs

Lines changed: 0 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -392,8 +392,6 @@ impl<
392392

393393
if should_transition {
394394
self.transition_to_next_phase(storage, network, "Headers sync complete").await?;
395-
396-
// Execute the next phase
397395
self.execute_current_phase(network, storage).await?;
398396
}
399397

0 commit comments

Comments (0)