@@ -1,11 +1,8 @@
 //! Header synchronization with fork detection and reorganization handling.
 
 use dashcore::{
-    block::{Header as BlockHeader, Version},
-    network::constants::NetworkExt,
-    network::message::NetworkMessage,
-    network::message_blockdata::GetHeadersMessage,
-    BlockHash, TxMerkleNode,
+    block::Header as BlockHeader, network::constants::NetworkExt, network::message::NetworkMessage,
+    network::message_blockdata::GetHeadersMessage, BlockHash,
 };
 use dashcore_hashes::Hash;
 
@@ -959,83 +956,6 @@ impl<S: StorageManager + Send + Sync + 'static, N: NetworkManager + Send + Sync
         None
     }
 
-    /// Check if we're past all checkpoints and can relax validation
-    pub fn is_past_checkpoints(&self) -> bool {
-        // Use total_headers_synced which tracks absolute blockchain height
-        self.checkpoint_manager.is_past_last_checkpoint(self.total_headers_synced)
-    }
-
-    /// Pre-populate headers from checkpoints for fast initial sync
-    /// Note: This requires having prev_blockhash data for checkpoints
-    pub async fn prepopulate_from_checkpoints(&mut self, storage: &S) -> SyncResult<u32> {
-        // Check if we already have headers
-        if let Some(tip_height) = storage
-            .get_tip_height()
-            .await
-            .map_err(|e| SyncError::Storage(format!("Failed to get tip height: {}", e)))?
-        {
-            if tip_height > 0 {
-                tracing::debug!("Headers already exist in storage (height {}), skipping checkpoint prepopulation", tip_height);
-                return Ok(0);
-            }
-        }
-
-        tracing::info!("Pre-populating headers from checkpoints for fast sync");
-
-        // Now that we have prev_blockhash data, we can implement this!
-        let checkpoints = self.checkpoint_manager.checkpoint_heights();
-        let mut headers_to_insert = Vec::new();
-
-        for &height in checkpoints {
-            if let Some(checkpoint) = self.checkpoint_manager.get_checkpoint(height) {
-                // Convert checkpoint to header
-                let header = BlockHeader {
-                    version: Version::from_consensus(1),
-                    prev_blockhash: checkpoint.prev_blockhash,
-                    merkle_root: checkpoint
-                        .merkle_root
-                        .map(|hash| TxMerkleNode::from_byte_array(*hash.as_byte_array()))
-                        .unwrap_or_else(|| TxMerkleNode::from_byte_array([0u8; 32])),
-                    time: checkpoint.timestamp,
-                    bits: checkpoint.target.to_compact_lossy(),
-                    nonce: checkpoint.nonce,
-                };
-
-                // Verify the header hash matches the checkpoint
-                let calculated_hash = header.block_hash();
-                if calculated_hash != checkpoint.block_hash {
-                    tracing::error!(
-                        "Checkpoint hash mismatch at height {}: expected {:?}, got {:?}",
-                        height,
-                        checkpoint.block_hash,
-                        calculated_hash
-                    );
-                    continue;
-                }
-
-                headers_to_insert.push((height, header));
-            }
-        }
-
-        if headers_to_insert.is_empty() {
-            tracing::warn!("No valid headers to prepopulate from checkpoints");
-            return Ok(0);
-        }
-
-        tracing::info!("Prepopulating {} checkpoint headers", headers_to_insert.len());
-
-        // TODO: Implement batch storage operation
-        // For now, we'll need to store them one by one
-        let mut count = 0;
-        for (height, _header) in headers_to_insert {
-            // Note: This would need proper storage implementation
-            tracing::debug!("Would store checkpoint header at height {}", height);
-            count += 1;
-        }
-
-        Ok(count)
-    }
-
     /// Check if header sync is currently in progress
     pub fn is_syncing(&self) -> bool {
         self.syncing_headers