From 2e85243b2c179feb61ee83d506dcd4581888c229 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 17:58:37 +0000 Subject: [PATCH 01/19] removed chain storage and related code --- dash-spv/src/chain/fork_detector.rs | 249 +--------- dash-spv/src/chain/fork_detector_test.rs | 397 ---------------- dash-spv/src/chain/mod.rs | 6 +- dash-spv/src/chain/reorg.rs | 552 ----------------------- dash-spv/src/chain/reorg_test.rs | 129 ------ dash-spv/src/storage/mod.rs | 32 -- dash-spv/src/storage/sync_storage.rs | 92 ---- 7 files changed, 3 insertions(+), 1454 deletions(-) delete mode 100644 dash-spv/src/chain/fork_detector_test.rs delete mode 100644 dash-spv/src/chain/reorg_test.rs delete mode 100644 dash-spv/src/storage/sync_storage.rs diff --git a/dash-spv/src/chain/fork_detector.rs b/dash-spv/src/chain/fork_detector.rs index 7d02633cf..6dfe92822 100644 --- a/dash-spv/src/chain/fork_detector.rs +++ b/dash-spv/src/chain/fork_detector.rs @@ -3,18 +3,14 @@ //! This module detects when incoming headers create a fork in the blockchain //! rather than extending the current chain tip. 
-use super::{ChainWork, Fork}; -use crate::storage::ChainStorage; -use crate::types::ChainState; -use dashcore::{BlockHash, Header as BlockHeader}; +use super::Fork; +use dashcore::BlockHash; use std::collections::HashMap; /// Detects and manages blockchain forks pub struct ForkDetector { /// Currently known forks indexed by their tip hash forks: HashMap, - /// Maximum number of forks to track - max_forks: usize, } impl ForkDetector { @@ -24,164 +20,9 @@ impl ForkDetector { } Ok(Self { forks: HashMap::new(), - max_forks, }) } - /// Check if a header creates or extends a fork - pub fn check_header( - &mut self, - header: &BlockHeader, - chain_state: &ChainState, - storage: &CS, - ) -> ForkDetectionResult { - let header_hash = header.block_hash(); - let prev_hash = header.prev_blockhash; - - // Check if this extends the main chain - if let Some(tip_header) = chain_state.get_tip_header() { - tracing::trace!( - "Checking main chain extension - prev_hash: {}, tip_hash: {}", - prev_hash, - tip_header.block_hash() - ); - if prev_hash == tip_header.block_hash() { - return ForkDetectionResult::ExtendsMainChain; - } - } else { - // Special case: chain state is empty (shouldn't happen with genesis initialized) - // But handle it just in case - if chain_state.headers.is_empty() { - // Check if this is connecting to genesis in storage - if let Ok(Some(height)) = storage.get_header_height(&prev_hash) { - if height == 0 { - // This is the first header after genesis - return ForkDetectionResult::ExtendsMainChain; - } - } - } - } - - // Special case: Check if header connects to genesis which might be at height 0 - // This handles the case where chain_state has genesis but we're syncing the first real block - if chain_state.tip_height() == 0 { - if let Some(genesis_header) = chain_state.header_at_height(0) { - tracing::debug!( - "Checking if header connects to genesis - prev_hash: {}, genesis_hash: {}", - prev_hash, - genesis_header.block_hash() - ); - if prev_hash == 
genesis_header.block_hash() { - tracing::info!( - "Header extends genesis block - treating as main chain extension" - ); - return ForkDetectionResult::ExtendsMainChain; - } - } - } - - // Check if this extends a known fork - // Need to find a fork whose tip matches our prev_hash - let matching_fork = self - .forks - .iter() - .find(|(_, fork)| fork.tip_hash == prev_hash) - .map(|(_, fork)| fork.clone()); - - if let Some(mut fork) = matching_fork { - // Remove the old entry (indexed by old tip) - self.forks.remove(&fork.tip_hash); - - // Update the fork - fork.headers.push(*header); - fork.tip_hash = header_hash; - fork.tip_height += 1; - fork.chain_work = fork.chain_work.add_header(header); - - // Re-insert with new tip hash - let result_fork = fork.clone(); - self.forks.insert(header_hash, fork); - - return ForkDetectionResult::ExtendsFork(result_fork); - } - - // Check if this connects to the main chain (creates new fork) - if let Ok(Some(height)) = storage.get_header_height(&prev_hash) { - // Check if this would create a fork from before our checkpoint - if chain_state.synced_from_checkpoint() && height < chain_state.sync_base_height { - tracing::warn!( - "Rejecting header that would create fork from height {} (before checkpoint base {}). 
\ - This likely indicates headers from genesis were received during checkpoint sync.", - height, chain_state.sync_base_height - ); - return ForkDetectionResult::Orphan; - } - - // Found connection point - this creates a new fork - let fork_height = height; - let fork = Fork { - fork_point: prev_hash, - fork_height, - tip_hash: header_hash, - tip_height: fork_height + 1, - headers: vec![*header], - chain_work: ChainWork::from_height_and_header(fork_height, header), - }; - - self.add_fork(fork.clone()); - return ForkDetectionResult::CreatesNewFork(fork); - } - - // Additional check: see if header connects to any header in chain_state - // This helps when storage might be out of sync with chain_state - for (height, state_header) in chain_state.headers.iter().enumerate() { - if prev_hash == state_header.block_hash() { - // Calculate the actual blockchain height for this index - let actual_height = chain_state.sync_base_height + (height as u32); - - // This connects to a header in chain state but not in storage - // Treat it as extending main chain if it's the tip - if height == chain_state.headers.len() - 1 { - return ForkDetectionResult::ExtendsMainChain; - } else { - // Creates a fork from an earlier point - let fork = Fork { - fork_point: prev_hash, - fork_height: actual_height, - tip_hash: header_hash, - tip_height: actual_height + 1, - headers: vec![*header], - chain_work: ChainWork::from_height_and_header(actual_height, header), - }; - - self.add_fork(fork.clone()); - return ForkDetectionResult::CreatesNewFork(fork); - } - } - } - - // This header doesn't connect to anything we know - ForkDetectionResult::Orphan - } - - /// Add a new fork to track - fn add_fork(&mut self, fork: Fork) { - self.forks.insert(fork.tip_hash, fork); - - // Limit the number of forks we track - if self.forks.len() > self.max_forks { - // Remove the fork with least work - if let Some(weakest) = self.find_weakest_fork() { - self.forks.remove(&weakest); - } - } - } - - /// Find the fork 
with the least cumulative work - fn find_weakest_fork(&self) -> Option { - self.forks.iter().min_by_key(|(_, fork)| &fork.chain_work).map(|(hash, _)| *hash) - } - /// Get all known forks pub fn get_forks(&self) -> Vec<&Fork> { self.forks.values().collect() @@ -229,92 +70,6 @@ pub enum ForkDetectionResult { #[cfg(test)] mod tests { use super::*; - use crate::storage::MemoryStorage; - use dashcore::blockdata::constants::genesis_block; - use dashcore::Network; - use dashcore_hashes::Hash; - - fn create_test_header(prev_hash: BlockHash, nonce: u32) -> BlockHeader { - let mut header = genesis_block(Network::Dash).header; - header.prev_blockhash = prev_hash; - header.nonce = nonce; - header - } - - #[test] - fn test_fork_detection() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Add genesis - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis header"); - chain_state.add_header(genesis); - - // Header that extends main chain - let header1 = create_test_header(genesis.block_hash(), 1); - let result = detector.check_header(&header1, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::ExtendsMainChain)); - - // Add header1 to chain - storage.store_header(&header1, 1).expect("Failed to store header1"); - chain_state.add_header(header1); - - // Header that creates a fork from genesis - let fork_header = create_test_header(genesis.block_hash(), 2); - let result = detector.check_header(&fork_header, &chain_state, &storage); - - match result { - ForkDetectionResult::CreatesNewFork(fork) => { - assert_eq!(fork.fork_point, genesis.block_hash()); - assert_eq!(fork.fork_height, 0); - assert_eq!(fork.tip_height, 1); - assert_eq!(fork.headers.len(), 1); - } - result => panic!("Expected CreatesNewFork, got {:?}", result), - } - - // Header that extends the fork - let 
fork_header2 = create_test_header(fork_header.block_hash(), 3); - let result = detector.check_header(&fork_header2, &chain_state, &storage); - - assert!(matches!(result, ForkDetectionResult::ExtendsFork(_))); - assert_eq!(detector.get_forks().len(), 1); - - // Orphan header - let orphan = create_test_header( - BlockHash::from_raw_hash(dashcore_hashes::hash_x11::Hash::all_zeros()), - 4, - ); - let result = detector.check_header(&orphan, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::Orphan)); - } - - #[test] - fn test_fork_limits() { - let mut detector = ForkDetector::new(2).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Add genesis - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis header"); - chain_state.add_header(genesis); - - // Add a header to extend the main chain past genesis - let header1 = create_test_header(genesis.block_hash(), 1); - storage.store_header(&header1, 1).expect("Failed to store header1"); - chain_state.add_header(header1); - - // Create 3 forks from genesis, should only keep 2 - for i in 0..3 { - let fork_header = create_test_header(genesis.block_hash(), i + 100); - detector.check_header(&fork_header, &chain_state, &storage); - } - - assert_eq!(detector.get_forks().len(), 2); - } #[test] fn test_fork_detector_zero_max_forks() { diff --git a/dash-spv/src/chain/fork_detector_test.rs b/dash-spv/src/chain/fork_detector_test.rs deleted file mode 100644 index f87a837a2..000000000 --- a/dash-spv/src/chain/fork_detector_test.rs +++ /dev/null @@ -1,397 +0,0 @@ -//! 
Comprehensive tests for fork detection functionality - -#[cfg(test)] -mod tests { - use super::super::*; - use crate::storage::{ChainStorage, MemoryStorage}; - use crate::types::ChainState; - use dashcore::blockdata::constants::genesis_block; - use dashcore::{BlockHash, Header as BlockHeader, Network}; - use dashcore_hashes::Hash; - use std::sync::{Arc, Mutex}; - use std::thread; - - fn create_test_header(prev_hash: BlockHash, nonce: u32) -> BlockHeader { - let mut header = genesis_block(Network::Dash).header; - header.prev_blockhash = prev_hash; - header.nonce = nonce; - header.time = 1390095618 + nonce * 600; // Increment time for each block - header - } - - fn create_test_header_with_time(prev_hash: BlockHash, nonce: u32, time: u32) -> BlockHeader { - let mut header = create_test_header(prev_hash, nonce); - header.time = time; - header - } - - #[test] - fn test_fork_detection_with_checkpoint_sync() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Simulate checkpoint sync from height 1000 - chain_state.sync_base_height = 1000; - - // Add a checkpoint header at height 1000 - let checkpoint_header = create_test_header(BlockHash::from([0u8; 32]), 1000); - storage.store_header(&checkpoint_header, 1000).expect("Failed to store checkpoint"); - chain_state.add_header(checkpoint_header); - - // Add more headers building on checkpoint - let mut prev_hash = checkpoint_header.block_hash(); - for i in 1..5 { - let header = create_test_header(prev_hash, 1000 + i); - storage.store_header(&header, 1000 + i).expect("Failed to store header"); - chain_state.add_header(header); - prev_hash = header.block_hash(); - } - - // Try to create a fork from before the checkpoint (should be rejected) - let pre_checkpoint_hash = - BlockHash::from_raw_hash(dashcore_hashes::hash_x11::Hash::hash(&[99u8])); - storage.store_header(&checkpoint_header, 500).expect("Failed to 
store at height 500"); - - let fork_header = create_test_header(pre_checkpoint_hash, 999); - let result = detector.check_header(&fork_header, &chain_state, &storage); - - // Should be orphan since it tries to fork before checkpoint - assert!(matches!(result, ForkDetectionResult::Orphan)); - } - - #[test] - fn test_multiple_concurrent_forks() { - let mut detector = ForkDetector::new(5).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Setup genesis and main chain - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.add_header(genesis); - - // Build main chain - let mut main_chain_tip = genesis.block_hash(); - for i in 1..10 { - let header = create_test_header(main_chain_tip, i); - storage.store_header(&header, i).expect("Failed to store header"); - chain_state.add_header(header); - main_chain_tip = header.block_hash(); - } - - // Create multiple forks at different heights - let fork_points = vec![2, 4, 6, 8]; - let mut fork_tips = Vec::new(); - - for &height in &fork_points { - // Get the header at this height from storage - let fork_point_header = chain_state.header_at_height(height).unwrap(); - let fork_header = create_test_header(fork_point_header.block_hash(), 100 + height); - - let result = detector.check_header(&fork_header, &chain_state, &storage); - - match result { - ForkDetectionResult::CreatesNewFork(fork) => { - assert_eq!(fork.fork_height, height); - fork_tips.push(fork_header.block_hash()); - } - _ => panic!("Expected new fork creation at height {}", height), - } - } - - // Verify we have all forks tracked - assert_eq!(detector.get_forks().len(), 4); - - // Extend each fork - for (i, tip) in fork_tips.iter().enumerate() { - let extension = create_test_header(*tip, 200 + i as u32); - let result = detector.check_header(&extension, &chain_state, &storage); - - assert!(matches!(result, 
ForkDetectionResult::ExtendsFork(_))); - } - } - - #[test] - fn test_fork_limit_enforcement() { - let mut detector = ForkDetector::new(3).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Setup genesis and build a main chain - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.add_header(genesis); - - // Build main chain past genesis - let header1 = create_test_header(genesis.block_hash(), 1); - storage.store_header(&header1, 1).expect("Failed to store header"); - chain_state.add_header(header1); - - // Create more forks than the limit from genesis (not tip) - let mut created_forks = Vec::new(); - for i in 0..5 { - let fork_header = create_test_header(genesis.block_hash(), 100 + i); - detector.check_header(&fork_header, &chain_state, &storage); - created_forks.push(fork_header); - } - - // Should only track the maximum allowed - assert_eq!(detector.get_forks().len(), 3); - - // Verify we have 3 different forks - let remaining_forks = detector.get_forks(); - let mut fork_nonces: Vec = - remaining_forks.iter().map(|f| f.headers[0].nonce).collect(); - fork_nonces.sort(); - - // Since all forks have equal work, eviction order is not guaranteed - // Just verify we have 3 unique forks - assert_eq!(fork_nonces.len(), 3); - assert!(fork_nonces.iter().all(|&n| (100..=104).contains(&n))); - } - - #[test] - fn test_fork_chain_work_comparison() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Setup genesis and build a main chain - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.add_header(genesis); - - // Build main chain past genesis - let header1 = create_test_header(genesis.block_hash(), 1); - 
storage.store_header(&header1, 1).expect("Failed to store header"); - chain_state.add_header(header1); - - // Create two forks from genesis (not tip) - let fork1_header = create_test_header(genesis.block_hash(), 100); - let fork2_header = create_test_header(genesis.block_hash(), 200); - - detector.check_header(&fork1_header, &chain_state, &storage); - detector.check_header(&fork2_header, &chain_state, &storage); - - // Extend fork1 with more headers - let mut fork1_tip = fork1_header.block_hash(); - for i in 0..5 { - let header = create_test_header(fork1_tip, 300 + i); - detector.check_header(&header, &chain_state, &storage); - fork1_tip = header.block_hash(); - } - - // Extend fork2 with fewer headers - let mut fork2_tip = fork2_header.block_hash(); - for i in 0..2 { - let header = create_test_header(fork2_tip, 400 + i); - detector.check_header(&header, &chain_state, &storage); - fork2_tip = header.block_hash(); - } - - // Get the strongest fork - let strongest = detector.get_strongest_fork().expect("Should have forks"); - assert_eq!(strongest.tip_hash, fork1_tip); - assert_eq!(strongest.headers.len(), 6); // Initial + 5 extensions - } - - #[test] - fn test_fork_detection_thread_safety() { - let detector = - Arc::new(Mutex::new(ForkDetector::new(50).expect("Failed to create fork detector"))); - let storage = Arc::new(MemoryStorage::new()); - let chain_state = Arc::new(Mutex::new(ChainState::new())); - - // Setup genesis - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.lock().unwrap().add_header(genesis); - - // Build a base chain - let mut prev_hash = genesis.block_hash(); - for i in 1..20 { - let header = create_test_header(prev_hash, i); - storage.store_header(&header, i).expect("Failed to store header"); - chain_state.lock().unwrap().add_header(header); - prev_hash = header.block_hash(); - } - - // Spawn multiple threads creating forks - let mut handles = vec![]; - - for 
thread_id in 0..5 { - let detector_clone = Arc::clone(&detector); - let storage_clone = Arc::clone(&storage); - let chain_state_clone = Arc::clone(&chain_state); - - let handle = thread::spawn(move || { - // Each thread creates forks at different heights - for i in 0..10 { - let fork_height = thread_id * 3 + i % 3; - let chain_state_lock = chain_state_clone.lock().unwrap(); - - if let Some(fork_point_header) = chain_state_lock.header_at_height(fork_height) - { - let fork_header = create_test_header( - fork_point_header.block_hash(), - 1000 + thread_id * 100 + i, - ); - - let mut detector_lock = detector_clone.lock().unwrap(); - detector_lock.check_header( - &fork_header, - &chain_state_lock, - storage_clone.as_ref(), - ); - } - } - }); - - handles.push(handle); - } - - // Wait for all threads to complete - for handle in handles { - handle.join().expect("Thread panicked"); - } - - // Verify the detector is in a consistent state - let detector_lock = detector.lock().unwrap(); - let forks = detector_lock.get_forks(); - - // Should have multiple forks but within the limit - assert!(!forks.is_empty()); - assert!(forks.len() <= 50); - - // All forks should have valid structure - for fork in forks { - assert!(!fork.headers.is_empty()); - assert_eq!(fork.tip_hash, fork.headers.last().unwrap().block_hash()); - assert_eq!(fork.tip_height, fork.fork_height + fork.headers.len() as u32); - } - } - - #[test] - fn test_orphan_detection_edge_cases() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Test 1: Empty chain state (no genesis) - let orphan = create_test_header(BlockHash::from([0u8; 32]), 1); - let result = detector.check_header(&orphan, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::Orphan)); - - // Add genesis - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store 
genesis"); - chain_state.add_header(genesis); - - // Test 2: Header connecting to non-existent block - let phantom_hash = BlockHash::from_raw_hash(dashcore_hashes::hash_x11::Hash::hash(&[42u8])); - let orphan2 = create_test_header(phantom_hash, 2); - let result = detector.check_header(&orphan2, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::Orphan)); - - // Test 3: Header with far future timestamp - let future_header = create_test_header_with_time(genesis.block_hash(), 3, u32::MAX); - let result = detector.check_header(&future_header, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::ExtendsMainChain)); - } - - #[test] - fn test_fork_removal_and_cleanup() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Setup genesis and build a main chain - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.add_header(genesis); - - // Build main chain past genesis - let header1 = create_test_header(genesis.block_hash(), 1); - storage.store_header(&header1, 1).expect("Failed to store header"); - chain_state.add_header(header1); - - // Create multiple forks from genesis (not tip) - let mut fork_tips = Vec::new(); - for i in 0..5 { - let fork_header = create_test_header(genesis.block_hash(), 100 + i); - detector.check_header(&fork_header, &chain_state, &storage); - fork_tips.push(fork_header.block_hash()); - } - - assert_eq!(detector.get_forks().len(), 5); - - // Remove specific forks - for tip in fork_tips.iter().take(3) { - let removed = detector.remove_fork(tip); - assert!(removed.is_some()); - } - - assert_eq!(detector.get_forks().len(), 2); - - // Verify removed forks can't be found - for tip in fork_tips.iter().take(3) { - assert!(detector.get_fork(tip).is_none()); - } - - // Clear all remaining forks - 
detector.clear_forks(); - assert_eq!(detector.get_forks().len(), 0); - assert!(!detector.has_forks()); - } - - #[test] - fn test_genesis_connection_special_case() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Add genesis to storage and chain state - let genesis = genesis_block(Network::Dash).header; - storage.store_header(&genesis, 0).expect("Failed to store genesis"); - chain_state.add_header(genesis); - - // Chain state tip is at genesis (height 0) - assert_eq!(chain_state.tip_height(), 0); - - // Header connecting to genesis should extend main chain - let header1 = create_test_header(genesis.block_hash(), 1); - let result = detector.check_header(&header1, &chain_state, &storage); - assert!(matches!(result, ForkDetectionResult::ExtendsMainChain)); - } - - #[test] - fn test_chain_state_storage_mismatch() { - let mut detector = ForkDetector::new(10).expect("Failed to create fork detector"); - let storage = MemoryStorage::new(); - let mut chain_state = ChainState::new(); - - // Add headers to chain state but not storage (simulating sync issue) - let genesis = genesis_block(Network::Dash).header; - chain_state.add_header(genesis); - - let header1 = create_test_header(genesis.block_hash(), 1); - chain_state.add_header(header1); - - let header2 = create_test_header(header1.block_hash(), 2); - chain_state.add_header(header2); - - // Try to extend from header1 (in chain state but not storage) - let header3 = create_test_header(header1.block_hash(), 3); - let result = detector.check_header(&header3, &chain_state, &storage); - - // Should create a fork since it connects to non-tip header in chain state - match result { - ForkDetectionResult::CreatesNewFork(fork) => { - assert_eq!(fork.fork_point, header1.block_hash()); - assert_eq!(fork.fork_height, 1); - } - _ => panic!("Expected fork creation"), - } - } -} diff --git a/dash-spv/src/chain/mod.rs 
b/dash-spv/src/chain/mod.rs index 5fcabf106..61be1f963 100644 --- a/dash-spv/src/chain/mod.rs +++ b/dash-spv/src/chain/mod.rs @@ -18,11 +18,7 @@ pub mod reorg; #[cfg(test)] mod checkpoint_test; #[cfg(test)] -mod fork_detector_test; -#[cfg(test)] mod orphan_pool_test; -#[cfg(test)] -mod reorg_test; pub use chain_tip::{ChainTip, ChainTipManager}; pub use chain_work::ChainWork; @@ -30,7 +26,7 @@ pub use chainlock_manager::{ChainLockEntry, ChainLockManager, ChainLockStats}; pub use checkpoints::{Checkpoint, CheckpointManager}; pub use fork_detector::{ForkDetectionResult, ForkDetector}; pub use orphan_pool::{OrphanBlock, OrphanPool, OrphanPoolStats}; -pub use reorg::{ReorgEvent, ReorgManager}; +pub use reorg::ReorgEvent; use dashcore::{BlockHash, Header as BlockHeader}; diff --git a/dash-spv/src/chain/reorg.rs b/dash-spv/src/chain/reorg.rs index 4ba7005f2..026f7ccd0 100644 --- a/dash-spv/src/chain/reorg.rs +++ b/dash-spv/src/chain/reorg.rs @@ -3,13 +3,7 @@ //! This module implements the core logic for handling blockchain reorganizations, //! including finding common ancestors, rolling back transactions, and switching chains. 
-use super::chainlock_manager::ChainLockManager; -use super::{ChainTip, Fork}; -use crate::storage::ChainStorage; -use crate::types::ChainState; use dashcore::{BlockHash, Header as BlockHeader, Transaction, Txid}; -use std::sync::Arc; -use tracing; /// Event emitted when a reorganization occurs #[derive(Debug, Clone)] @@ -44,549 +38,3 @@ pub(crate) struct ReorgData { /// Actual transactions that were affected (if available) affected_transactions: Vec, } - -/// Manages chain reorganizations -pub struct ReorgManager { - /// Maximum depth of reorganization to handle - max_reorg_depth: u32, - /// Whether to allow reorgs past chain-locked blocks - respect_chain_locks: bool, - /// Chain lock manager for checking locked blocks - chain_lock_manager: Option>, -} - -impl ReorgManager { - /// Create a new reorganization manager - pub fn new(max_reorg_depth: u32, respect_chain_locks: bool) -> Self { - Self { - max_reorg_depth, - respect_chain_locks, - chain_lock_manager: None, - } - } - - /// Create a new reorganization manager with chain lock support - pub fn new_with_chain_locks( - max_reorg_depth: u32, - chain_lock_manager: Arc, - ) -> Self { - Self { - max_reorg_depth, - respect_chain_locks: true, - chain_lock_manager: Some(chain_lock_manager), - } - } - - /// Check if a fork has more work than the current chain and should trigger a reorg - pub fn should_reorganize( - &self, - current_tip: &ChainTip, - fork: &Fork, - storage: &CS, - ) -> Result { - self.should_reorganize_with_chain_state(current_tip, fork, storage, None) - } - - /// Check if a fork has more work than the current chain and should trigger a reorg - /// This version is checkpoint-aware when chain_state is provided - pub fn should_reorganize_with_chain_state( - &self, - current_tip: &ChainTip, - fork: &Fork, - storage: &CS, - chain_state: Option<&ChainState>, - ) -> Result { - // Check if fork has more work - if fork.chain_work <= current_tip.chain_work { - return Ok(false); - } - - // Check reorg depth - 
account for checkpoint sync - let reorg_depth = if let Some(state) = chain_state { - if state.synced_from_checkpoint() { - // During checkpoint sync, both current_tip.height and fork.fork_height - // should be interpreted relative to sync_base_height - - // For checkpoint sync: - // - current_tip.height is absolute blockchain height - // - fork.fork_height might be from genesis-based headers - // We need to compare relative depths only - - // If the fork is from headers that started at genesis, - // we shouldn't compare against the full checkpoint height - if fork.fork_height < state.sync_base_height { - // This fork is from before our checkpoint - likely from genesis-based headers - // This scenario should be rejected at header validation level, not here - tracing::warn!( - "Fork detected from height {} which is before checkpoint base height {}. \ - This suggests headers from genesis were received during checkpoint sync.", - fork.fork_height, - state.sync_base_height - ); - - // For now, reject forks that would reorg past the checkpoint - return Err(format!( - "Cannot reorg past checkpoint: fork height {} < checkpoint base {}", - fork.fork_height, state.sync_base_height - )); - } else { - // Normal case: both heights are relative to checkpoint - current_tip.height.saturating_sub(fork.fork_height) - } - } else { - // Normal sync mode - current_tip.height.saturating_sub(fork.fork_height) - } - } else { - // Fallback to original logic when no chain state provided - current_tip.height.saturating_sub(fork.fork_height) - }; - - if reorg_depth > self.max_reorg_depth { - return Err(format!( - "Reorg depth {} exceeds maximum {}", - reorg_depth, self.max_reorg_depth - )); - } - - // Check for chain locks if enabled - if self.respect_chain_locks { - if let Some(ref chain_lock_mgr) = self.chain_lock_manager { - // Check if reorg would violate chain locks - if chain_lock_mgr.would_violate_chain_lock(fork.fork_height, current_tip.height) { - return Err(format!( - "Cannot reorg: 
would violate chain lock between heights {} and {}", - fork.fork_height, current_tip.height - )); - } - } else { - // Fall back to checking individual blocks - for height in (fork.fork_height + 1)..=current_tip.height { - if let Ok(Some(header)) = storage.get_header_by_height(height) { - if self.is_chain_locked(&header, storage)? { - return Err(format!( - "Cannot reorg past chain-locked block at height {}", - height - )); - } - } - } - } - } - - Ok(true) - } - - /// Check if a block is chain-locked - pub fn is_chain_locked( - &self, - header: &BlockHeader, - storage: &CS, - ) -> Result { - if let Some(ref chain_lock_mgr) = self.chain_lock_manager { - // Get the height of this header - if let Ok(Some(height)) = storage.get_header_height(&header.block_hash()) { - return Ok(chain_lock_mgr.is_block_chain_locked(&header.block_hash(), height)); - } - } - // If no chain lock manager or height not found, assume not locked - Ok(false) - } -} - -// WalletState removed - reorganization should be handled by external wallet -/* -impl ReorgManager { - /// Perform a chain reorganization using a phased approach - pub async fn reorganize( - &self, - chain_state: &mut ChainState, - wallet_state: &mut WalletState, - fork: &Fork, - storage_manager: &mut S, - ) -> Result { - // Phase 1: Collect all necessary data (read-only) - let reorg_data = self.collect_reorg_data(chain_state, fork, storage_manager).await?; - - // Phase 2: Apply the reorganization (write-only) - self.apply_reorg_with_data(chain_state, wallet_state, fork, reorg_data, storage_manager) - .await - } - - /// Collect all data needed for reorganization (read-only phase) - #[cfg(test)] - pub async fn collect_reorg_data( - &self, - chain_state: &ChainState, - fork: &Fork, - storage_manager: &S, - ) -> Result { - self.collect_reorg_data_internal(chain_state, fork, storage_manager).await - } - - #[cfg(not(test))] - async fn collect_reorg_data( - &self, - chain_state: &ChainState, - fork: &Fork, - storage_manager: &S, - ) -> 
Result { - self.collect_reorg_data_internal(chain_state, fork, storage_manager).await - } - - async fn collect_reorg_data_internal( - &self, - chain_state: &ChainState, - fork: &Fork, - storage: &S, - ) -> Result { - // Find the common ancestor - let (common_ancestor, common_height) = - self.find_common_ancestor_with_fork(fork, storage).await?; - - // Collect headers to disconnect - let current_height = chain_state.get_height(); - let mut disconnected_headers = Vec::new(); - let mut disconnected_blocks = Vec::new(); - - // Walk back from current tip to common ancestor - for height in ((common_height + 1)..=current_height).rev() { - if let Ok(Some(header)) = storage.get_header(height).await { - let block_hash = header.block_hash(); - disconnected_blocks.push((block_hash, height)); - disconnected_headers.push(header); - } else { - return Err(format!("Missing header at height {}", height)); - } - } - - // Collect affected transaction IDs - let affected_tx_ids = Vec::new(); // Will be populated when we have transaction storage - let affected_transactions = Vec::new(); // Will be populated when we have transaction storage - - Ok(ReorgData { - common_ancestor, - common_height, - disconnected_headers, - disconnected_blocks, - affected_tx_ids, - affected_transactions, - }) - } - - /// Apply reorganization using collected data (write-only phase) - async fn apply_reorg_with_data( - &self, - chain_state: &mut ChainState, - wallet_state: &mut WalletState, - fork: &Fork, - reorg_data: ReorgData, - storage_manager: &mut S, - ) -> Result { - // Create a checkpoint of the current chain state before making any changes - let chain_state_checkpoint = chain_state.clone(); - - // Track headers that were successfully stored for potential rollback - let mut stored_headers: Vec = Vec::new(); - - // Perform all operations in a single atomic-like block - let result = async { - // Step 1: Rollback wallet state if UTXO rollback is available - if wallet_state.rollback_manager().is_some() { - 
wallet_state - .rollback_to_height(reorg_data.common_height, storage_manager) - .await - .map_err(|e| format!("Failed to rollback wallet state: {:?}", e))?; - } - - // Step 2: Disconnect blocks from the old chain - for header in &reorg_data.disconnected_headers { - // Mark transactions as unconfirmed if rollback manager not available - if wallet_state.rollback_manager().is_none() { - for txid in &reorg_data.affected_tx_ids { - wallet_state.mark_transaction_unconfirmed(txid); - } - } - - // Remove header from chain state - chain_state.remove_tip(); - } - - // Step 3: Connect blocks from the new chain and store them - let mut current_height = reorg_data.common_height; - for header in &fork.headers { - current_height += 1; - - // Add header to chain state - chain_state.add_header(*header); - - // Store the header - if this fails, we need to rollback everything - storage_manager.store_headers(&[*header]).await.map_err(|e| { - format!("Failed to store header at height {}: {:?}", current_height, e) - })?; - - // Only record successfully stored headers - stored_headers.push(*header); - } - - Ok::(ReorgEvent { - common_ancestor: reorg_data.common_ancestor, - common_height: reorg_data.common_height, - disconnected_headers: reorg_data.disconnected_headers, - connected_headers: fork.headers.clone(), - affected_transactions: reorg_data.affected_transactions, - }) - } - .await; - - // If any operation failed, attempt to restore the chain state - match result { - Ok(event) => Ok(event), - Err(e) => { - // Restore the chain state to its original state - *chain_state = chain_state_checkpoint; - - // Log the rollback attempt - tracing::error!( - "Reorg failed, restored chain state. Error: {}. \ - Successfully stored {} headers before failure.", - e, - stored_headers.len() - ); - - // Note: We cannot easily rollback the wallet state or storage operations - // that have already been committed. This is a limitation of not having - // true database transactions. 
The error message will indicate this partial - // state to the caller. - Err(format!( - "Reorg failed after partial application. Chain state restored, \ - but wallet/storage may be in inconsistent state. Error: {}. \ - Consider resyncing from a checkpoint.", - e - )) - } - } - } - - /// Find the common ancestor between current chain and a fork - async fn find_common_ancestor_with_fork( - &self, - fork: &Fork, - storage: &dyn StorageManager, - ) -> Result<(BlockHash, u32), String> { - // First check if the fork point itself is in our chain - if let Ok(Some(height)) = storage.get_header_height_by_hash(&fork.fork_point).await { - // The fork point is already in our chain, so it's the common ancestor - return Ok((fork.fork_point, height)); - } - - // If we have fork headers, check their parent blocks - if !fork.headers.is_empty() { - // Start from the first header in the fork and walk backwards - let first_fork_header = &fork.headers[0]; - let mut current_hash = first_fork_header.prev_blockhash; - - // Check if the parent of the first fork header is in our chain - if let Ok(Some(height)) = storage.get_header_height_by_hash(¤t_hash).await { - return Ok((current_hash, height)); - } - } - - // As a fallback, the fork should specify where it diverged from - // In a properly constructed Fork, fork_height should indicate where the split occurred - if fork.fork_height > 0 { - // Get the header at fork_height - 1 which should be the common ancestor - if let Ok(Some(header)) = storage.get_header(fork.fork_height.saturating_sub(1)).await { - let hash = header.block_hash(); - return Ok((hash, fork.fork_height.saturating_sub(1))); - } - } - - Err("Cannot find common ancestor between fork and main chain".to_string()) - } - - /// Find the common ancestor between current chain and a fork point (sync version for ChainStorage) - fn find_common_ancestor( - &self, - _chain_state: &ChainState, - fork_point: &BlockHash, - storage: &dyn ChainStorage, - ) -> Result<(BlockHash, u32), String> 
{ - // Start from the fork point and walk back until we find a block in our chain - let mut current_hash = *fork_point; - let mut iterations = 0; - const MAX_ITERATIONS: u32 = 1_000_000; // Reasonable limit for chain traversal - - loop { - if let Ok(Some(height)) = storage.get_header_height(¤t_hash) { - // Found it in our chain - return Ok((current_hash, height)); - } - - // Get the previous block - if let Ok(Some(header)) = storage.get_header(¤t_hash) { - current_hash = header.prev_blockhash; - - // Safety check: don't go back too far - if current_hash == BlockHash::from([0u8; 32]) { - return Err("Reached genesis without finding common ancestor".to_string()); - } - - // Prevent infinite loops in case of corrupted chain - iterations += 1; - if iterations > MAX_ITERATIONS { - return Err(format!("Exceeded maximum iterations ({}) while searching for common ancestor - possible corrupted chain", MAX_ITERATIONS)); - } - } else { - return Err("Failed to find common ancestor".to_string()); - } - } - } - - /// Collect headers that need to be disconnected - fn collect_headers_to_disconnect( - &self, - chain_state: &ChainState, - common_height: u32, - storage: &dyn ChainStorage, - ) -> Result, String> { - let current_height = chain_state.get_height(); - let mut headers = Vec::new(); - - // Walk back from current tip to common ancestor - for height in ((common_height + 1)..=current_height).rev() { - if let Ok(Some(header)) = storage.get_header_by_height(height) { - headers.push(header); - } else { - return Err(format!("Missing header at height {}", height)); - } - } - - Ok(headers) - } - - /// Collect transactions affected by the reorganization - fn collect_affected_transactions( - &self, - disconnected_headers: &[BlockHeader], - _connected_headers: &[BlockHeader], - wallet_state: &WalletState, - storage: &dyn ChainStorage, - ) -> Result, String> { - let mut affected = Vec::new(); - - // Collect transactions from disconnected blocks - for header in disconnected_headers { - let 
block_hash = header.block_hash(); - if let Ok(Some(txids)) = storage.get_block_transactions(&block_hash) { - for txid in txids { - if wallet_state.is_wallet_transaction(&txid) { - if let Ok(Some(tx)) = storage.get_transaction(&txid) { - affected.push(tx); - } - } - } - } - } - - // Note: We don't have transactions from connected headers yet, - // they would need to be downloaded after the reorg - - Ok(affected) - } - - /// Check if a block is chain-locked - pub fn is_chain_locked( - &self, - header: &BlockHeader, - storage: &dyn ChainStorage, - ) -> Result { - if let Some(ref chain_lock_mgr) = self.chain_lock_manager { - // Get the height of this header - if let Ok(Some(height)) = storage.get_header_height(&header.block_hash()) { - return Ok(chain_lock_mgr.is_block_chain_locked(&header.block_hash(), height)); - } - } - // If no chain lock manager or height not found, assume not locked - Ok(false) - } - - /// Validate that a reorganization is safe to perform - pub fn validate_reorg(&self, current_tip: &ChainTip, fork: &Fork) -> Result<(), String> { - // Check maximum reorg depth - let reorg_depth = current_tip.height.saturating_sub(fork.fork_height); - if reorg_depth > self.max_reorg_depth { - return Err(format!( - "Reorg depth {} exceeds maximum allowed {}", - reorg_depth, self.max_reorg_depth - )); - } - - // Check that fork actually has more work - if fork.chain_work <= current_tip.chain_work { - return Err("Fork does not have more work than current chain".to_string()); - } - - // Additional validation could go here - - Ok(()) - } -} -*/ - -#[cfg(test)] -mod tests { - use super::*; - use crate::chain::ChainWork; - use crate::storage::MemoryStorage; - use dashcore::blockdata::constants::genesis_block; - use dashcore::Network; - use dashcore_hashes::Hash; - - #[test] - fn test_reorg_validation() { - let reorg_mgr = ReorgManager::new(100, false); - - let genesis = genesis_block(Network::Dash).header; - let tip = ChainTip::new(genesis, 0, 
ChainWork::from_header(&genesis)); - - // Create a fork with less work - should not reorg - let fork = Fork { - fork_point: BlockHash::from_byte_array([0; 32]), - fork_height: 0, - tip_hash: genesis.block_hash(), - tip_height: 1, - headers: vec![genesis], - chain_work: ChainWork::zero(), // Less work - }; - - let storage = MemoryStorage::new(); - let result = reorg_mgr.should_reorganize(&tip, &fork, &storage); - // Fork has less work, so should return Ok(false), not an error - assert!(result.is_ok()); - assert!(!result.unwrap()); - } - - #[test] - fn test_max_reorg_depth() { - let reorg_mgr = ReorgManager::new(10, false); - - let genesis = genesis_block(Network::Dash).header; - let tip = ChainTip::new(genesis, 100, ChainWork::from_header(&genesis)); - - // Create a fork that would require deep reorg - let fork = Fork { - fork_point: genesis.block_hash(), - fork_height: 0, // Fork from genesis - tip_hash: BlockHash::from_byte_array([0; 32]), - tip_height: 101, - headers: vec![], - chain_work: ChainWork::from_bytes([255u8; 32]), // Max work - }; - - let storage = MemoryStorage::new(); - let result = reorg_mgr.should_reorganize(&tip, &fork, &storage); - assert!(result.is_err()); - assert!(result.unwrap_err().contains("exceeds maximum")); - } -} diff --git a/dash-spv/src/chain/reorg_test.rs b/dash-spv/src/chain/reorg_test.rs deleted file mode 100644 index 36a6d7d54..000000000 --- a/dash-spv/src/chain/reorg_test.rs +++ /dev/null @@ -1,129 +0,0 @@ -//! 
Tests for chain reorganization functionality - -#[cfg(test)] -mod tests { - use super::super::*; - use crate::chain::ChainWork; - use crate::storage::MemoryStorage; - use crate::types::ChainState; - use dashcore::{blockdata::constants::genesis_block, Network}; - use dashcore_hashes::Hash; - - fn create_test_header(prev: &BlockHeader, nonce: u32) -> BlockHeader { - let mut header = *prev; - header.prev_blockhash = prev.block_hash(); - header.nonce = nonce; - header.time = prev.time + 600; // 10 minutes later - header - } - - #[test] - fn test_should_reorganize() { - // Create test components - let network = Network::Dash; - let genesis = genesis_block(network).header; - let chain_state = ChainState::new_for_network(network); - let storage = MemoryStorage::new(); - - // Build main chain: genesis -> block1 -> block2 - let block1 = create_test_header(&genesis, 1); - let block2 = create_test_header(&block1, 2); - - // Create chain tip for main chain - let main_tip = ChainTip::new(block2, 2, ChainWork::from_header(&block2)); - - // Build fork chain: genesis -> block1' -> block2' -> block3' - let block1_fork = create_test_header(&genesis, 100); // Different nonce - let block2_fork = create_test_header(&block1_fork, 101); - let block3_fork = create_test_header(&block2_fork, 102); - - // Create fork with more work - let fork = Fork { - fork_point: genesis.block_hash(), - fork_height: 0, - tip_hash: block3_fork.block_hash(), - tip_height: 3, - headers: vec![block1_fork, block2_fork, block3_fork], - chain_work: ChainWork::from_bytes([255u8; 32]), // Max work - }; - - // Create reorg manager - let reorg_mgr = ReorgManager::new(100, false); - - // Should reorganize because fork has more work - let should_reorg = reorg_mgr - .should_reorganize_with_chain_state(&main_tip, &fork, &storage, Some(&chain_state)) - .unwrap(); - assert!(should_reorg); - } - - #[test] - fn test_max_reorg_depth() { - let network = Network::Dash; - let genesis = genesis_block(network).header; - let 
chain_state = ChainState::new_for_network(network); - let storage = MemoryStorage::new(); - - // Create a deep main chain - let main_tip = ChainTip::new(genesis, 100, ChainWork::from_header(&genesis)); - - // Create fork from genesis (depth 100) - let fork = Fork { - fork_point: genesis.block_hash(), - fork_height: 0, - tip_hash: BlockHash::from_byte_array([0; 32]), - tip_height: 101, - headers: vec![], - chain_work: ChainWork::from_bytes([255u8; 32]), // Max work - }; - - // Create reorg manager with max depth of 10 - let reorg_mgr = ReorgManager::new(10, false); - - // Should not reorganize due to depth limit - let result = reorg_mgr.should_reorganize_with_chain_state( - &main_tip, - &fork, - &storage, - Some(&chain_state), - ); - assert!(result.is_err()); - assert!(result.unwrap_err().contains("exceeds maximum")); - } - - #[test] - fn test_checkpoint_sync_reorg_protection() { - let network = Network::Dash; - let genesis = genesis_block(network).header; - let mut chain_state = ChainState::new_for_network(network); - let storage = MemoryStorage::new(); - - // Simulate checkpoint sync from height 50000 - chain_state.sync_base_height = 50000; - - // Current tip at height 50100 - let main_tip = ChainTip::new(genesis, 50100, ChainWork::from_header(&genesis)); - - // Fork from before checkpoint (should be rejected) - let fork = Fork { - fork_point: genesis.block_hash(), - fork_height: 49999, // Before checkpoint - tip_hash: BlockHash::from_byte_array([0; 32]), - tip_height: 50101, - headers: vec![], - chain_work: ChainWork::from_bytes([255u8; 32]), // Max work - }; - - let reorg_mgr = ReorgManager::new(1000, false); - - // Should reject reorg past checkpoint - let result = reorg_mgr.should_reorganize_with_chain_state( - &main_tip, - &fork, - &storage, - Some(&chain_state), - ); - assert!(result.is_err()); - assert!(result.unwrap_err().contains("checkpoint")); - } -} diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index 3dc462a12..aa8e0387a 100644 
--- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -2,7 +2,6 @@ pub(crate) mod io; -pub mod sync_storage; pub mod types; mod headers; @@ -21,39 +20,8 @@ use crate::error::StorageResult; use crate::types::{ChainState, MempoolState, UnconfirmedTransaction}; pub use manager::DiskStorageManager; -pub use sync_storage::MemoryStorage; pub use types::*; -use crate::error::StorageError; -use dashcore::BlockHash; - -/// Synchronous storage trait for chain operations -pub trait ChainStorage: Send + Sync { - /// Get a header by its block hash - fn get_header(&self, hash: &BlockHash) -> Result, StorageError>; - - /// Get a header by its height - fn get_header_by_height(&self, height: u32) -> Result, StorageError>; - - /// Get the height of a block by its hash - fn get_header_height(&self, hash: &BlockHash) -> Result, StorageError>; - - /// Store a header at a specific height - fn store_header(&self, header: &BlockHeader, height: u32) -> Result<(), StorageError>; - - /// Get transaction IDs in a block - fn get_block_transactions( - &self, - block_hash: &BlockHash, - ) -> Result>, StorageError>; - - /// Get a transaction by its ID - fn get_transaction( - &self, - txid: &dashcore::Txid, - ) -> Result, StorageError>; -} - /// Storage manager trait for abstracting data persistence. /// /// # Thread Safety diff --git a/dash-spv/src/storage/sync_storage.rs b/dash-spv/src/storage/sync_storage.rs deleted file mode 100644 index 102114ca7..000000000 --- a/dash-spv/src/storage/sync_storage.rs +++ /dev/null @@ -1,92 +0,0 @@ -//! 
Synchronous storage wrapper for testing - -use super::ChainStorage; -use crate::error::StorageError; -use dashcore::{BlockHash, Header as BlockHeader, Transaction, Txid}; -use std::collections::HashMap; -use std::sync::RwLock; - -/// Simple in-memory storage for testing -pub struct MemoryStorage { - headers: RwLock>, - height_index: RwLock>, - transactions: RwLock>, - block_txs: RwLock>>, -} - -impl Default for MemoryStorage { - fn default() -> Self { - Self::new() - } -} - -impl MemoryStorage { - pub fn new() -> Self { - Self { - headers: RwLock::new(HashMap::new()), - height_index: RwLock::new(HashMap::new()), - transactions: RwLock::new(HashMap::new()), - block_txs: RwLock::new(HashMap::new()), - } - } -} - -impl ChainStorage for MemoryStorage { - fn get_header(&self, hash: &BlockHash) -> Result, StorageError> { - let headers = self.headers.read().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire read lock: {}", e)) - })?; - Ok(headers.get(hash).map(|(h, _)| *h)) - } - - fn get_header_by_height(&self, height: u32) -> Result, StorageError> { - let height_index = self.height_index.read().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire read lock: {}", e)) - })?; - if let Some(hash) = height_index.get(&height).cloned() { - drop(height_index); // Release lock before calling get_header - self.get_header(&hash) - } else { - Ok(None) - } - } - - fn get_header_height(&self, hash: &BlockHash) -> Result, StorageError> { - let headers = self.headers.read().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire read lock: {}", e)) - })?; - Ok(headers.get(hash).map(|(_, h)| *h)) - } - - fn store_header(&self, header: &BlockHeader, height: u32) -> Result<(), StorageError> { - let hash = header.block_hash(); - let mut headers = self.headers.write().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire write lock: {}", e)) - })?; - headers.insert(hash, (*header, height)); - drop(headers); // Release lock 
before acquiring the next one - - let mut height_index = self.height_index.write().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire write lock: {}", e)) - })?; - height_index.insert(height, hash); - Ok(()) - } - - fn get_block_transactions( - &self, - block_hash: &BlockHash, - ) -> Result>, StorageError> { - let block_txs = self.block_txs.read().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire read lock: {}", e)) - })?; - Ok(block_txs.get(block_hash).cloned()) - } - - fn get_transaction(&self, txid: &Txid) -> Result, StorageError> { - let transactions = self.transactions.read().map_err(|e| { - StorageError::LockPoisoned(format!("Failed to acquire read lock: {}", e)) - })?; - Ok(transactions.get(txid).cloned()) - } -} From 3ca55b5324059775b6332aa241d9e0618db6ef3f Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 20:43:22 +0000 Subject: [PATCH 02/19] removed headers from ChainState --- dash-spv/src/client/lifecycle.rs | 31 ++---- dash-spv/src/client/progress.rs | 2 +- dash-spv/src/client/status_display.rs | 2 +- dash-spv/src/client/sync_coordinator.rs | 6 +- dash-spv/src/storage/mod.rs | 6 +- dash-spv/src/storage/segments.rs | 26 ++++- dash-spv/src/storage/state.rs | 33 ++++-- dash-spv/src/sync/filters/headers.rs | 36 ++----- dash-spv/src/sync/filters/retry.rs | 10 +- dash-spv/src/sync/headers/manager.rs | 128 ++++++++++------------- dash-spv/src/sync/manager.rs | 53 ++-------- dash-spv/src/sync/masternodes/manager.rs | 36 ++----- dash-spv/src/sync/message_handlers.rs | 2 +- dash-spv/src/sync/phase_execution.rs | 9 +- dash-spv/src/sync/transitions.rs | 20 +--- dash-spv/src/types.rs | 10 +- 16 files changed, 158 insertions(+), 252 deletions(-) diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index 2711db224..ae1f9558a 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -169,30 +169,12 @@ impl< // This ensures the ChainState has headers 
loaded for both checkpoint and normal sync let tip_height = { let storage = self.storage.lock().await; - storage.get_tip_height().await.map_err(SpvError::Storage)?.unwrap_or(0) + storage.get_tip_height().await.unwrap_or(0) }; if tip_height > 0 { tracing::info!("Found {} headers in storage, loading into sync manager...", tip_height); - let loaded_count = { - let storage = self.storage.lock().await; - self.sync_manager.load_headers_from_storage(&storage).await - }; - - match loaded_count { - Ok(loaded_count) => { - tracing::info!("✅ Sync manager loaded {} headers from storage", loaded_count); - } - Err(e) => { - tracing::error!("Failed to load headers into sync manager: {}", e); - // For checkpoint sync, this is critical - let state = self.state.read().await; - if state.synced_from_checkpoint() { - return Err(SpvError::Sync(e)); - } - // For normal sync, we can continue as headers will be re-synced - tracing::warn!("Continuing without pre-loaded headers for normal sync"); - } - } + let storage = self.storage.lock().await; + self.sync_manager.load_headers_from_storage(&storage).await } // Connect to network @@ -271,7 +253,7 @@ impl< // Check if we already have any headers in storage let current_tip = { let storage = self.storage.lock().await; - storage.get_tip_height().await.map_err(SpvError::Storage)? 
+ storage.get_tip_height().await }; if current_tip.is_some() { @@ -344,7 +326,6 @@ impl< // Clone the chain state for storage let chain_state_for_storage = (*chain_state).clone(); - let headers_len = chain_state_for_storage.headers.len() as u32; drop(chain_state); // Update storage with chain state including sync_base_height @@ -366,7 +347,7 @@ impl< ); // Update the sync manager's cached flags from the checkpoint-initialized state - self.sync_manager.update_chain_state_cache(checkpoint.height, headers_len); + self.sync_manager.update_chain_state_cache(checkpoint.height); tracing::info!( "Updated sync manager with checkpoint-initialized chain state" ); @@ -414,7 +395,7 @@ impl< // Verify it was stored correctly let stored_height = { let storage = self.storage.lock().await; - storage.get_tip_height().await.map_err(SpvError::Storage)? + storage.get_tip_height().await }; tracing::info!( "✅ Genesis block initialized at height 0, storage reports tip height: {:?}", diff --git a/dash-spv/src/client/progress.rs b/dash-spv/src/client/progress.rs index 7998560a6..5bc2b8d4c 100644 --- a/dash-spv/src/client/progress.rs +++ b/dash-spv/src/client/progress.rs @@ -38,7 +38,7 @@ impl< // Get current heights from storage { let storage = self.storage.lock().await; - if let Ok(Some(header_height)) = storage.get_tip_height().await { + if let Some(header_height) = storage.get_tip_height().await { stats.header_height = header_height; } diff --git a/dash-spv/src/client/status_display.rs b/dash-spv/src/client/status_display.rs index 0324fe964..3b07fca9d 100644 --- a/dash-spv/src/client/status_display.rs +++ b/dash-spv/src/client/status_display.rs @@ -76,7 +76,7 @@ impl<'a, S: StorageManager + Send + Sync + 'static, W: WalletInterface + Send + // For genesis sync: sync_base_height = 0, so height = 0 + storage_count // For checkpoint sync: height = checkpoint_height + storage_count let storage = self.storage.lock().await; - if let Ok(Some(storage_tip)) = storage.get_tip_height().await { + if 
let Some(storage_tip) = storage.get_tip_height().await { let blockchain_height = storage_tip; if with_logging { tracing::debug!( diff --git a/dash-spv/src/client/sync_coordinator.rs b/dash-spv/src/client/sync_coordinator.rs index de06633ec..2af4716dc 100644 --- a/dash-spv/src/client/sync_coordinator.rs +++ b/dash-spv/src/client/sync_coordinator.rs @@ -42,7 +42,7 @@ impl< let result = SyncProgress { header_height: { let storage = self.storage.lock().await; - storage.get_tip_height().await.map_err(SpvError::Storage)?.unwrap_or(0) + storage.get_tip_height().await.unwrap_or(0) }, filter_header_height: { let storage = self.storage.lock().await; @@ -241,7 +241,7 @@ impl< // Storage tip now represents the absolute blockchain height. let current_tip_height = { let storage = self.storage.lock().await; - storage.get_tip_height().await.ok().flatten().unwrap_or(0) + storage.get_tip_height().await.unwrap_or(0) }; let current_height = current_tip_height; let peer_best = self @@ -315,7 +315,7 @@ impl< // Emit filter headers progress only when heights change let (abs_header_height, filter_header_height) = { let storage = self.storage.lock().await; - let storage_tip = storage.get_tip_height().await.ok().flatten().unwrap_or(0); + let storage_tip = storage.get_tip_height().await.unwrap_or(0); let filter_tip = storage.get_filter_tip_height().await.ok().flatten().unwrap_or(0); (storage_tip, filter_tip) diff --git a/dash-spv/src/storage/mod.rs b/dash-spv/src/storage/mod.rs index aa8e0387a..8232b914d 100644 --- a/dash-spv/src/storage/mod.rs +++ b/dash-spv/src/storage/mod.rs @@ -81,7 +81,11 @@ pub trait StorageManager: Send + Sync { async fn get_header(&self, height: u32) -> StorageResult>; /// Get the current tip blockchain height. - async fn get_tip_height(&self) -> StorageResult>; + async fn get_tip_height(&self) -> Option; + + async fn get_start_height(&self) -> Option; + + async fn get_stored_headers_len(&self) -> u32; /// Store filter headers. 
async fn store_filter_headers(&mut self, headers: &[FilterHeader]) -> StorageResult<()>; diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 64ff4adad..62247112b 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -107,6 +107,7 @@ impl Persistable for FilterHeader { pub struct SegmentCache { segments: HashMap>, tip_height: Option, + start_height: Option, base_path: PathBuf, } @@ -164,12 +165,14 @@ impl SegmentCache { let mut cache = Self { segments: HashMap::with_capacity(Self::MAX_ACTIVE_SEGMENTS), tip_height: None, + start_height: None, base_path, }; // Building the metadata if let Ok(entries) = fs::read_dir(&items_dir) { - let mut max_segment_id = None; + let mut max_seg_id = None; + let mut min_seg_id = None; for entry in entries.flatten() { if let Some(name) = entry.file_name().to_str() { @@ -180,19 +183,27 @@ impl SegmentCache { let segment_id_end = segment_id_start + 4; if let Ok(id) = name[segment_id_start..segment_id_end].parse::() { - max_segment_id = - Some(max_segment_id.map_or(id, |max: u32| max.max(id))); + max_seg_id = Some(max_seg_id.map_or(id, |max: u32| max.max(id))); + min_seg_id = Some(min_seg_id.map_or(id, |min: u32| min.min(id))); } } } } - if let Some(segment_id) = max_segment_id { + if let Some(segment_id) = max_seg_id { let segment = cache.get_segment(&segment_id).await?; cache.tip_height = segment .last_valid_offset() - .map(|offset| segment_id * Segment::::ITEMS_PER_SEGMENT + offset); + .map(|offset| Self::segment_id_to_start_height(segment_id) + offset); + } + + if let Some(segment_id) = min_seg_id { + let segment = cache.get_segment(&segment_id).await?; + + cache.start_height = segment + .first_valid_offset() + .map(|offset| Self::segment_id_to_start_height(segment_id) + offset); } } @@ -394,6 +405,11 @@ impl SegmentCache { self.tip_height } + #[inline] + pub fn start_height(&self) -> Option { + self.start_height + } + #[inline] pub fn next_height(&self) -> u32 { 
match self.tip_height() { diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs index 937ac3d2a..e508e2e19 100644 --- a/dash-spv/src/storage/state.rs +++ b/dash-spv/src/storage/state.rs @@ -18,10 +18,6 @@ use super::manager::DiskStorageManager; impl DiskStorageManager { /// Store chain state to disk. pub async fn store_chain_state(&mut self, state: &ChainState) -> StorageResult<()> { - // First store all headers - // For checkpoint sync, we need to store headers starting from the checkpoint height - self.store_headers_at_height(&state.headers, state.sync_base_height).await?; - // Store filter headers self.filter_headers .write() @@ -84,9 +80,6 @@ impl DiskStorageManager { }; let range_start = state.sync_base_height; - if let Some(tip_height) = self.get_tip_height().await? { - state.headers = self.load_headers(range_start..tip_height + 1).await?; - } if let Some(filter_tip_height) = self.get_filter_tip_height().await? { state.filter_headers = self.load_filter_headers(range_start..filter_tip_height + 1).await?; @@ -388,8 +381,29 @@ impl StorageManager for DiskStorageManager { Ok(self.block_headers.write().await.get_items(height..height + 1).await?.first().copied()) } - async fn get_tip_height(&self) -> StorageResult> { - Ok(self.block_headers.read().await.tip_height()) + async fn get_tip_height(&self) -> Option { + self.block_headers.read().await.tip_height() + } + + async fn get_start_height(&self) -> Option { + self.block_headers.read().await.start_height() + } + + async fn get_stored_headers_len(&self) -> u32 { + let headers_guard = self.block_headers.read().await; + let start_height = if let Some(start_height) = headers_guard.start_height() { + start_height + } else { + return 0; + }; + + let end_height = if let Some(end_height) = headers_guard.tip_height() { + end_height + } else { + return 0; + }; + + end_height - start_height + 1 } async fn store_filter_headers( @@ -621,6 +635,7 @@ mod tests { storage.store_chain_state(&base_state).await?; 
storage.store_headers_at_height(&headers, checkpoint_height).await?; + assert_eq!(storage.get_stored_headers_len().await, headers.len() as u32); // Verify headers are stored at correct blockchain heights let header_at_base = storage.get_header(checkpoint_height).await?; diff --git a/dash-spv/src/sync/filters/headers.rs b/dash-spv/src/sync/filters/headers.rs index 40ce1622f..f1f165949 100644 --- a/dash-spv/src/sync/filters/headers.rs +++ b/dash-spv/src/sync/filters/headers.rs @@ -82,13 +82,9 @@ impl SyncResult<(u32, u32, u32)> { - let header_tip_height = storage - .get_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get header tip height: {}", e)))? - .ok_or_else(|| { - SyncError::Storage("No headers available for filter sync".to_string()) - })?; + let header_tip_height = storage.get_tip_height().await.ok_or_else(|| { + SyncError::Storage("No headers available for filter sync".to_string()) + })?; let stop_height = self .find_height_for_block_hash(&cf_headers.stop_hash, storage, 0, header_tip_height) @@ -188,13 +184,9 @@ impl= header_tip_height { tracing::info!("Filter headers already synced to header tip"); @@ -773,11 +761,7 @@ impl header_tip) } diff --git a/dash-spv/src/sync/filters/retry.rs b/dash-spv/src/sync/filters/retry.rs index f998066d0..fe7103792 100644 --- a/dash-spv/src/sync/filters/retry.rs +++ b/dash-spv/src/sync/filters/retry.rs @@ -35,13 +35,9 @@ impl { chain_state: Arc>, // WalletState removed - wallet functionality is now handled externally headers2_state: Headers2StateManager, - total_headers_synced: u32, syncing_headers: bool, last_sync_progress: std::time::Instant, headers2_failed: bool, @@ -91,7 +90,6 @@ impl SyncResult { - let start_time = std::time::Instant::now(); - let mut loaded_count = 0; - let mut tip_height = 0; + pub async fn load_headers_from_storage(&mut self, storage: &S) { // First, try to load the persisted chain state which may contain sync_base_height if let Ok(Some(stored_chain_state)) = 
storage.load_chain_state().await { tracing::info!( @@ -114,26 +109,11 @@ impl {}, chain_state.headers.len()={}", - batch_size, - previous_total, - self.total_headers_synced, - self.chain_state.read().await.headers.len() + "Header sync progress: processed {} headers in batch, total_headers_synced: {}", + headers.len() as u32, + storage.get_stored_headers_len().await, ); // Update chain tip manager with the last header in the batch @@ -293,7 +266,7 @@ impl, + storage: &S, ) -> SyncResult<()> { let block_locator = match base_hash { Some(hash) => vec![hash], None => { // Check if we're syncing from a checkpoint - if self.is_synced_from_checkpoint() - && !self.chain_state.read().await.headers.is_empty() - { + if self.is_synced_from_checkpoint() && storage.get_stored_headers_len().await > 0 { + let first_height = storage + .get_start_height() + .await + .ok_or(SyncError::Storage(format!("Failed to get start height")))?; + let checkpoint_header = storage + .get_header(first_height) + .await + .map_err(|e| { + SyncError::Storage(format!("Failed to get first header: {}", e)) + })? + .ok_or(SyncError::Storage(format!("Storage didn't return first header")))?; + // Use the checkpoint hash from chain state - let checkpoint_hash = self.chain_state.read().await.headers[0].block_hash(); + let checkpoint_hash = checkpoint_header.block_hash(); tracing::info!( "📍 No base_hash provided but syncing from checkpoint at height {}. 
Using checkpoint hash: {}", self.get_sync_base_height(), @@ -498,14 +482,11 @@ impl { // No headers in storage - check if we're syncing from a checkpoint - if self.is_synced_from_checkpoint() - && !self.chain_state.read().await.headers.is_empty() - { - // We're syncing from a checkpoint and have the checkpoint header - let checkpoint_header = &self.chain_state.read().await.headers[0]; + if self.is_synced_from_checkpoint() && storage.get_stored_headers_len().await > 0 { let checkpoint_hash = checkpoint_header.block_hash(); tracing::info!( "No headers in storage but syncing from checkpoint at height {}. Using checkpoint hash: {}", @@ -595,12 +583,11 @@ impl 0 { let hash = checkpoint_header.block_hash(); tracing::info!("Using checkpoint hash for height {}: {}", height, hash); Some(hash) @@ -642,7 +629,7 @@ impl { // No headers in storage - check if we're syncing from a checkpoint if self.is_synced_from_checkpoint() { // Use the checkpoint hash from chain state - if !self.chain_state.read().await.headers.is_empty() { - let checkpoint_hash = - self.chain_state.read().await.headers[0].block_hash(); + if storage.get_stored_headers_len().await > 0 { + let checkpoint_hash = checkpoint_header.block_hash(); tracing::info!( "Using checkpoint hash for recovery: {} (chain state has {} headers, first header time: {})", checkpoint_hash, - self.chain_state.read().await.headers.len(), - self.chain_state.read().await.headers[0].time + storage.get_stored_headers_len().await, + checkpoint_header.time ); Some(checkpoint_hash) } else { @@ -723,7 +716,7 @@ impl u32 { - // Always use total_headers_synced which tracks the absolute blockchain height - self.total_headers_synced + pub async fn get_chain_height(&self, storage: &S) -> u32 { + storage.get_tip_height().await.unwrap_or(0) } /// Get the tip hash @@ -872,9 +860,7 @@ impl SyncResult { + pub async fn load_headers_from_storage(&mut self, storage: &S) { // Load headers into the header sync manager - let loaded_count = 
self.header_sync.load_headers_from_storage(storage).await?; - - if loaded_count > 0 { - tracing::info!("Sequential sync manager loaded {} headers from storage", loaded_count); - - // Update the current phase if we have headers - // This helps the sync manager understand where to resume from - if matches!(self.current_phase, SyncPhase::Idle) { - // We have headers but haven't started sync yet - // The phase will be properly set when start_sync is called - tracing::debug!("Headers loaded but sync not started yet"); - } - } - - Ok(loaded_count) + self.header_sync.load_headers_from_storage(storage).await; } /// Get the earliest wallet birth height hint for the configured network, if available. @@ -234,7 +220,7 @@ impl< let base_hash = self.get_base_hash_from_storage(storage).await?; // Request headers starting from our current tip - self.header_sync.request_headers(network, base_hash).await?; + self.header_sync.request_headers(network, base_hash, storage).await?; } else { // Otherwise start sync normally self.header_sync.start_sync(network, storage).await?; @@ -265,10 +251,7 @@ impl< &self, storage: &S, ) -> SyncResult> { - let current_tip_height = storage - .get_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get tip height: {}", e)))?; + let current_tip_height = storage.get_tip_height().await; let base_hash = match current_tip_height { None => None, @@ -284,11 +267,6 @@ impl< Ok(base_hash) } - /// Get the current chain height from the header sync manager - pub fn get_chain_height(&self) -> u32 { - self.header_sync.get_chain_height() - } - /// Get current sync progress template. /// /// **IMPORTANT**: This method returns a TEMPLATE ONLY. 
It does NOT query storage or network @@ -378,8 +356,8 @@ impl< } /// Update the chain state (used for checkpoint sync initialization) - pub fn update_chain_state_cache(&mut self, sync_base_height: u32, headers_len: u32) { - self.header_sync.update_cached_from_state_snapshot(sync_base_height, headers_len); + pub fn update_chain_state_cache(&mut self, sync_base_height: u32) { + self.header_sync.update_cached_from_state_snapshot(sync_base_height); } /// Get reference to the masternode engine if available. @@ -401,22 +379,7 @@ impl< } /// Get the actual blockchain height from storage height, accounting for checkpoints - pub(super) async fn get_blockchain_height_from_storage(&self, storage: &S) -> SyncResult { - let storage_height = storage - .get_tip_height() - .await - .map_err(|e| { - crate::error::SyncError::Storage(format!("Failed to get tip height: {}", e)) - })? - .unwrap_or(0); - - // Check if we're syncing from a checkpoint - if self.header_sync.is_synced_from_checkpoint() { - // For checkpoint sync, blockchain height = sync_base_height + storage_height - Ok(self.header_sync.get_sync_base_height() + storage_height) - } else { - // Normal sync: storage height IS the blockchain height - Ok(storage_height) - } + pub(super) async fn get_blockchain_height_from_storage(&self, storage: &S) -> u32 { + storage.get_tip_height().await.unwrap_or(0) } } diff --git a/dash-spv/src/sync/masternodes/manager.rs b/dash-spv/src/sync/masternodes/manager.rs index 065f26dbc..c5eebcbf0 100644 --- a/dash-spv/src/sync/masternodes/manager.rs +++ b/dash-spv/src/sync/masternodes/manager.rs @@ -391,11 +391,7 @@ impl { + Some(tip_height) => { let state = crate::storage::MasternodeState { last_height: tip_height, engine_state: Vec::new(), @@ -477,17 +473,11 @@ impl { + None => { tracing::warn!( "⚠️ Storage returned no tip height when persisting masternode state" ); } - Err(e) => { - tracing::warn!( - "⚠️ Failed to read tip height to persist masternode state: {}", - e - ); - } } } } @@ -518,13 
+508,7 @@ impl { + Some(tip_height) => { let state = crate::storage::MasternodeState { last_height: tip_height, engine_state: Vec::new(), @@ -688,17 +672,11 @@ impl { + None => { tracing::warn!( "⚠️ Storage returned no tip height when persisting masternode state" ); } - Err(e) => { - tracing::warn!( - "⚠️ Failed to read tip height to persist masternode state: {}", - e - ); - } } } else { tracing::info!( diff --git a/dash-spv/src/sync/message_handlers.rs b/dash-spv/src/sync/message_handlers.rs index 027317c5c..e4479ad24 100644 --- a/dash-spv/src/sync/message_handlers.rs +++ b/dash-spv/src/sync/message_handlers.rs @@ -345,7 +345,7 @@ impl< storage: &mut S, transition_reason: &str, ) -> SyncResult<()> { - let blockchain_height = self.get_blockchain_height_from_storage(storage).await.unwrap_or(0); + let blockchain_height = self.get_blockchain_height_from_storage(storage).await; let should_transition = if let SyncPhase::DownloadingHeaders { current_height, diff --git a/dash-spv/src/sync/phase_execution.rs b/dash-spv/src/sync/phase_execution.rs index 77758d833..2f7a64331 100644 --- a/dash-spv/src/sync/phase_execution.rs +++ b/dash-spv/src/sync/phase_execution.rs @@ -32,7 +32,7 @@ impl< // Already prepared, just send the initial request let base_hash = self.get_base_hash_from_storage(storage).await?; - self.header_sync.request_headers(network, base_hash).await?; + self.header_sync.request_headers(network, base_hash, storage).await?; } else { // Not prepared yet, start sync normally self.header_sync.start_sync(network, storage).await?; @@ -44,14 +44,11 @@ impl< } => { tracing::info!("📥 Starting masternode list download phase"); // Get the effective chain height from header sync which accounts for checkpoint base - let effective_height = self.header_sync.get_chain_height(); + let effective_height = self.header_sync.get_chain_height(storage).await; let sync_base_height = self.header_sync.get_sync_base_height(); // Also get the actual tip height to verify (blockchain height) 
- let storage_tip = storage - .get_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get storage tip: {}", e)))?; + let storage_tip = storage.get_tip_height().await; // Debug: Check chain state let chain_state = storage.load_chain_state().await.map_err(|e| { diff --git a/dash-spv/src/sync/transitions.rs b/dash-spv/src/sync/transitions.rs index 505e2a541..e8ce58e93 100644 --- a/dash-spv/src/sync/transitions.rs +++ b/dash-spv/src/sync/transitions.rs @@ -177,11 +177,7 @@ impl TransitionManager { match current_phase { SyncPhase::Idle => { // Always start with headers - let start_height = storage - .get_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get tip height: {}", e)))? - .unwrap_or(0); + let start_height = storage.get_tip_height().await.unwrap_or(0); Ok(Some(SyncPhase::DownloadingHeaders { start_time: Instant::now(), @@ -199,13 +195,7 @@ impl TransitionManager { .. } => { if self.config.enable_masternodes { - let header_tip = storage - .get_tip_height() - .await - .map_err(|e| { - SyncError::Storage(format!("Failed to get header tip: {}", e)) - })? - .unwrap_or(0); + let header_tip = storage.get_tip_height().await.unwrap_or(0); let mn_height = match storage.load_masternode_state().await { Ok(Some(state)) => state.last_height, @@ -417,11 +407,7 @@ impl TransitionManager { &self, storage: &S, ) -> SyncResult> { - let header_tip = storage - .get_tip_height() - .await - .map_err(|e| SyncError::Storage(format!("Failed to get header tip: {}", e)))? 
- .unwrap_or(0); + let header_tip = storage.get_tip_height().await.unwrap_or(0); let filter_tip = storage .get_filter_tip_height() diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 3b7e99958..340ad5533 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -254,9 +254,6 @@ impl DetailedSyncProgress { /// - At 2M blocks: ~160MB for headers, ~64MB for filter headers #[derive(Clone, Default)] pub struct ChainState { - /// Block headers indexed by height. - pub headers: Vec, - /// Filter headers indexed by height. pub filter_headers: Vec, @@ -308,8 +305,11 @@ impl ChainState { // Add genesis header to the chain state state.headers.push(genesis_header); - tracing::debug!("Initialized ChainState with genesis block - network: {:?}, hash: {}, headers_count: {}", - network, genesis_header.block_hash(), state.headers.len()); + tracing::debug!( + "Initialized ChainState with genesis block - network: {:?}, hash: {}", + network, + genesis_header.block_hash() + ); // Initialize masternode engine for the network let mut engine = MasternodeListEngine::default_for_network(network); From 8f3d065a2d021a165857b30ce7857b7ccb379186 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 20:52:52 +0000 Subject: [PATCH 03/19] tip_height method removed --- dash-spv/src/client/core.rs | 3 +-- dash-spv/src/sync/headers/manager.rs | 8 ++++---- dash-spv/src/types.rs | 12 ------------ 3 files changed, 5 insertions(+), 18 deletions(-) diff --git a/dash-spv/src/client/core.rs b/dash-spv/src/client/core.rs index e697ca462..6e59b0ccc 100644 --- a/dash-spv/src/client/core.rs +++ b/dash-spv/src/client/core.rs @@ -195,8 +195,7 @@ impl< /// Returns the current chain tip height (absolute), accounting for checkpoint base. pub async fn tip_height(&self) -> u32 { - let state = self.state.read().await; - state.tip_height() + self.storage.lock().await.get_tip_height().await.unwrap_or(0) } /// Get current chain state (read-only). 
diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index f269bcfd0..7e94818b8 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -130,7 +130,7 @@ impl 0 } - /// Get the current tip height. - pub fn tip_height(&self) -> u32 { - if self.headers.is_empty() { - // When headers is empty, sync_base_height represents our current position - // This happens when we're syncing from a checkpoint but haven't received headers yet - self.sync_base_height - } else { - // Normal case: base + number of headers - 1 - self.sync_base_height + self.headers.len() as u32 - 1 - } - } - /// Get the current tip hash. pub fn tip_hash(&self) -> Option { self.headers.last().map(|h| h.block_hash()) From 53be7f470d3c0ad3b4049d8661bb320e94cbc4ec Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:01:20 +0000 Subject: [PATCH 04/19] removed get_tip_hesh --- dash-spv/src/client/core.rs | 8 ++++++-- dash-spv/src/sync/headers/manager.rs | 5 ----- dash-spv/src/types.rs | 5 ----- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/dash-spv/src/client/core.rs b/dash-spv/src/client/core.rs index 6e59b0ccc..dd576633e 100644 --- a/dash-spv/src/client/core.rs +++ b/dash-spv/src/client/core.rs @@ -189,8 +189,12 @@ impl< /// Returns the current chain tip hash if available. pub async fn tip_hash(&self) -> Option { - let state = self.state.read().await; - state.tip_hash() + let storage = self.storage.lock().await; + + let tip_height = storage.get_tip_height().await?; + let header = storage.get_header(tip_height).await.ok()??; + + Some(header.block_hash()) } /// Returns the current chain tip height (absolute), accounting for checkpoint base. 
diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 7e94818b8..5e974b92e 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -844,11 +844,6 @@ impl Option { - self.chain_state.read().await.tip_hash() - } - /// Get the sync base height (used when syncing from checkpoint) pub fn get_sync_base_height(&self) -> u32 { self.cached_sync_base_height diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 52d862720..f2277f40f 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -329,11 +329,6 @@ impl ChainState { self.sync_base_height > 0 } - /// Get the current tip hash. - pub fn tip_hash(&self) -> Option { - self.headers.last().map(|h| h.block_hash()) - } - /// Get header at the given height. pub fn header_at_height(&self, height: u32) -> Option<&BlockHeader> { if height < self.sync_base_height { From eb32b7bcafc20ef07005a1d80c68d2a4a7657e31 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:11:54 +0000 Subject: [PATCH 05/19] replaced header_at_height --- dash-spv/src/chain/chainlock_manager.rs | 6 +++++- dash-spv/src/sync/headers/manager.rs | 20 ++++++++++++++------ dash-spv/src/types.rs | 9 --------- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/dash-spv/src/chain/chainlock_manager.rs b/dash-spv/src/chain/chainlock_manager.rs index 977ad95ab..a94020909 100644 --- a/dash-spv/src/chain/chainlock_manager.rs +++ b/dash-spv/src/chain/chainlock_manager.rs @@ -175,7 +175,11 @@ impl ChainLockManager { } // Verify the block exists in our chain - if let Some(header) = chain_state.header_at_height(chain_lock.block_height) { + if let Some(header) = storage + .get_header(chain_lock.block_height) + .await + .map_err(|e| ValidationError::StorageError(e))? 
+ { let header_hash = header.block_hash(); if header_hash != chain_lock.block_hash { return Err(ValidationError::InvalidChainLock(format!( diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 5e974b92e..3bee8123d 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -416,15 +416,19 @@ impl 0 } - /// Get header at the given height. - pub fn header_at_height(&self, height: u32) -> Option<&BlockHeader> { - if height < self.sync_base_height { - return None; // Height is before our sync base - } - let index = (height - self.sync_base_height) as usize; - self.headers.get(index) - } - /// Get filter header at the given height. pub fn filter_header_at_height(&self, height: u32) -> Option<&FilterHeader> { if height < self.sync_base_height { From 2bf3a91a62b17f7187b1c9348a84036272175d0e Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:14:51 +0000 Subject: [PATCH 06/19] removed unused methods --- dash-spv/src/types.rs | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index fd1d12464..cafab0da1 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -338,11 +338,6 @@ impl ChainState { self.filter_headers.get(index) } - /// Add headers to the chain. - pub fn add_headers(&mut self, headers: Vec) { - self.headers.extend(headers); - } - /// Add filter headers to the chain. 
pub fn add_filter_headers(&mut self, filter_headers: Vec) { if let Some(last) = filter_headers.last() { @@ -366,11 +361,6 @@ impl ChainState { self.headers.push(header); } - /// Remove the tip header (for reorgs) - pub fn remove_tip(&mut self) -> Option { - self.headers.pop() - } - /// Update chain lock status pub fn update_chain_lock(&mut self, height: u32, hash: BlockHash) { // Only update if this is a newer chain lock @@ -403,26 +393,6 @@ impl ChainState { Some(Vec::new()) } - /// Calculate the total chain work up to the tip - pub fn calculate_chain_work(&self) -> Option { - use crate::chain::chain_work::ChainWork; - - // If we have no headers, return None - if self.headers.is_empty() { - return None; - } - - // Start with zero work - let mut total_work = ChainWork::zero(); - - // Add work from each header - for header in &self.headers { - total_work = total_work.add_header(header); - } - - Some(total_work) - } - /// Initialize chain state from a checkpoint. pub fn init_from_checkpoint( &mut self, @@ -471,7 +441,6 @@ impl ChainState { impl std::fmt::Debug for ChainState { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ChainState") - .field("headers", &format!("{} headers", self.headers.len())) .field("filter_headers", &format!("{} filter headers", self.filter_headers.len())) .field("last_chainlock_height", &self.last_chainlock_height) .field("last_chainlock_hash", &self.last_chainlock_hash) From f758a7fc55d6ffa5aea08ec4280cf17d490e1904 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:24:39 +0000 Subject: [PATCH 07/19] init_from_checkpoint sync --- dash-spv/src/client/lifecycle.rs | 1 + dash-spv/src/types.rs | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index ae1f9558a..4dd6193be 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -331,6 +331,7 @@ impl< // Update storage with 
chain state including sync_base_height { let mut storage = self.storage.lock().await; + storage.store_headers(&[checkpoint_header]); storage .store_chain_state(&chain_state_for_storage) .await diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index cafab0da1..58a55a577 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -401,15 +401,11 @@ impl ChainState { network: Network, ) { // Clear any existing headers - self.headers.clear(); self.filter_headers.clear(); // Set sync base height to checkpoint self.sync_base_height = checkpoint_height; - // Add the checkpoint header as our first header - self.headers.push(checkpoint_header); - tracing::info!( "Initialized ChainState from checkpoint - height: {}, hash: {}, network: {:?}", checkpoint_height, From f995b8926ecdfe4e0e2423926616c34ca1cd4e0a Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:31:35 +0000 Subject: [PATCH 08/19] tip_header removed --- dash-spv/src/sync/headers/manager.rs | 16 ++++++++++++---- dash-spv/src/types.rs | 5 ----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 3bee8123d..f4fbe2bda 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -144,10 +144,18 @@ impl Option { - self.headers.last().copied() - } - /// Get the height pub fn get_height(&self) -> u32 { self.tip_height() From 7acccc0b1084348625e74091835d387958a31920 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:39:24 +0000 Subject: [PATCH 09/19] removed two methos that where invovled in the same process --- dash-spv/src/sync/headers/manager.rs | 27 +------------------- dash-spv/src/sync/phase_execution.rs | 38 ---------------------------- dash-spv/src/types.rs | 10 -------- 3 files changed, 1 insertion(+), 74 deletions(-) diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 
f4fbe2bda..e230bf831 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -219,31 +219,6 @@ impl { tracing::info!("📥 Starting masternode list download phase"); - // Get the effective chain height from header sync which accounts for checkpoint base - let effective_height = self.header_sync.get_chain_height(storage).await; - let sync_base_height = self.header_sync.get_sync_base_height(); - - // Also get the actual tip height to verify (blockchain height) - let storage_tip = storage.get_tip_height().await; - - // Debug: Check chain state - let chain_state = storage.load_chain_state().await.map_err(|e| { - SyncError::Storage(format!("Failed to load chain state: {}", e)) - })?; - let chain_state_height = chain_state.as_ref().map(|s| s.get_height()).unwrap_or(0); - - tracing::info!( - "Starting masternode sync: effective_height={}, sync_base={}, storage_tip={:?}, chain_state_height={}, expected_storage_index={}", - effective_height, - sync_base_height, - storage_tip, - chain_state_height, - if sync_base_height > 0 { effective_height.saturating_sub(sync_base_height) } else { effective_height } - ); - - // Use the minimum of effective height and what's actually in storage - let _safe_height = if let Some(tip) = storage_tip { - let storage_based_height = tip; - if storage_based_height < effective_height { - tracing::warn!( - "Chain state height {} exceeds storage height {}, using storage height", - effective_height, - storage_based_height - ); - storage_based_height - } else { - effective_height - } - } else { - effective_height - }; // Start masternode sync (unified processing) match self.masternode_sync.start_sync(network, storage).await { diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 6232fbd07..222415821 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -346,16 +346,6 @@ impl ChainState { self.filter_headers.extend(filter_headers); } - /// Get the height - pub fn get_height(&self) -> u32 { - 
self.tip_height() - } - - /// Add a single header - pub fn add_header(&mut self, header: BlockHeader) { - self.headers.push(header); - } - /// Update chain lock status pub fn update_chain_lock(&mut self, height: u32, hash: BlockHash) { // Only update if this is a newer chain lock From 90957cf70505f12dda440a8727c3b267ef6ced72 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 21:42:39 +0000 Subject: [PATCH 10/19] fixed ffi --- dash-spv-ffi/src/types.rs | 2 -- dash-spv-ffi/tests/unit/test_type_conversions.rs | 2 -- 2 files changed, 4 deletions(-) diff --git a/dash-spv-ffi/src/types.rs b/dash-spv-ffi/src/types.rs index c644c52da..0703bf557 100644 --- a/dash-spv-ffi/src/types.rs +++ b/dash-spv-ffi/src/types.rs @@ -181,7 +181,6 @@ impl From for FFIDetailedSyncProgress { #[repr(C)] pub struct FFIChainState { - pub header_height: u32, pub filter_header_height: u32, pub masternode_height: u32, pub last_chainlock_height: u32, @@ -192,7 +191,6 @@ pub struct FFIChainState { impl From for FFIChainState { fn from(state: ChainState) -> Self { FFIChainState { - header_height: state.headers.len() as u32, filter_header_height: state.filter_headers.len() as u32, masternode_height: state.last_masternode_diff_height.unwrap_or(0), last_chainlock_height: state.last_chainlock_height.unwrap_or(0), diff --git a/dash-spv-ffi/tests/unit/test_type_conversions.rs b/dash-spv-ffi/tests/unit/test_type_conversions.rs index 58e29ce5f..a0c760e5b 100644 --- a/dash-spv-ffi/tests/unit/test_type_conversions.rs +++ b/dash-spv-ffi/tests/unit/test_type_conversions.rs @@ -163,7 +163,6 @@ mod tests { #[test] fn test_chain_state_none_values() { let state = dash_spv::ChainState { - headers: vec![], filter_headers: vec![], last_chainlock_height: None, last_chainlock_hash: None, @@ -174,7 +173,6 @@ mod tests { }; let ffi_state = FFIChainState::from(state); - assert_eq!(ffi_state.header_height, 0); assert_eq!(ffi_state.filter_header_height, 0); assert_eq!(ffi_state.masternode_height, 0); 
assert_eq!(ffi_state.last_chainlock_height, 0); From c4437640e742f9194eb593fc390da5578f3fb444 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Thu, 18 Dec 2025 22:27:49 +0000 Subject: [PATCH 11/19] test updated to the changes --- dash-spv/src/client/lifecycle.rs | 2 +- dash-spv/src/types.rs | 2 +- dash-spv/tests/header_sync_test.rs | 65 ++++---------------- dash-spv/tests/integration_real_node_test.rs | 2 +- dash-spv/tests/segmented_storage_debug.rs | 2 +- dash-spv/tests/segmented_storage_test.rs | 14 ++--- dash-spv/tests/simple_header_test.rs | 2 +- dash-spv/tests/simple_segmented_test.rs | 4 +- dash-spv/tests/storage_consistency_test.rs | 30 ++++----- dash-spv/tests/storage_test.rs | 5 +- 10 files changed, 41 insertions(+), 87 deletions(-) diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index 4dd6193be..44bc4691b 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -331,7 +331,7 @@ impl< // Update storage with chain state including sync_base_height { let mut storage = self.storage.lock().await; - storage.store_headers(&[checkpoint_header]); + storage.store_headers(&[checkpoint_header]).await?; storage .store_chain_state(&chain_state_for_storage) .await diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 222415821..2433111e8 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -303,7 +303,7 @@ impl ChainState { }; // Add genesis header to the chain state - state.headers.push(genesis_header); + // TODO: Check if this is necessary -> state.headers.push(genesis_header); tracing::debug!( "Initialized ChainState with genesis block - network: {:?}, hash: {}", diff --git a/dash-spv/tests/header_sync_test.rs b/dash-spv/tests/header_sync_test.rs index 2da0fdde4..8acb726c1 100644 --- a/dash-spv/tests/header_sync_test.rs +++ b/dash-spv/tests/header_sync_test.rs @@ -30,7 +30,7 @@ async fn test_basic_header_sync_from_genesis() { .expect("Failed to create tmp storage"); // Verify 
empty initial state - assert_eq!(storage.get_tip_height().await.unwrap(), None); + assert_eq!(storage.get_tip_height().await, None); // Create test chain state for mainnet let chain_state = ChainState::new_for_network(Network::Dash); @@ -57,7 +57,7 @@ async fn test_header_sync_continuation() { storage.store_headers(&existing_headers).await.expect("Failed to store existing headers"); // Verify we have the expected tip - assert_eq!(storage.get_tip_height().await.unwrap(), Some(99)); + assert_eq!(storage.get_tip_height().await, Some(99)); // Simulate adding more headers (continuation) let continuation_headers = create_test_header_chain_from(100, 50); @@ -67,7 +67,7 @@ async fn test_header_sync_continuation() { .expect("Failed to store continuation headers"); // Verify the chain extended properly - assert_eq!(storage.get_tip_height().await.unwrap(), Some(149)); + assert_eq!(storage.get_tip_height().await, Some(149)); // Verify continuity by checking some headers for height in 95..105 { @@ -102,7 +102,7 @@ async fn test_header_batch_processing() { let expected_tip = batch_end - 1; assert_eq!( - storage.get_tip_height().await.unwrap(), + storage.get_tip_height().await, Some(expected_tip as u32), "Tip height should be {} after batch {}-{}", expected_tip, @@ -112,7 +112,7 @@ async fn test_header_batch_processing() { } // Verify total count - let final_tip = storage.get_tip_height().await.unwrap(); + let final_tip = storage.get_tip_height().await; assert_eq!(final_tip, Some((total_headers - 1) as u32)); // Verify we can retrieve headers from different parts of the chain @@ -140,17 +140,17 @@ async fn test_header_sync_edge_cases() { // Test 1: Empty header batch let empty_headers: Vec = vec![]; storage.store_headers(&empty_headers).await.expect("Should handle empty header batch"); - assert_eq!(storage.get_tip_height().await.unwrap(), None); + assert_eq!(storage.get_tip_height().await, None); // Test 2: Single header let single_header = create_test_header_chain(1); 
storage.store_headers(&single_header).await.expect("Should handle single header"); - assert_eq!(storage.get_tip_height().await.unwrap(), Some(0)); + assert_eq!(storage.get_tip_height().await, Some(0)); // Test 3: Large batch let large_batch = create_test_header_chain_from(1, 5000); storage.store_headers(&large_batch).await.expect("Should handle large header batch"); - assert_eq!(storage.get_tip_height().await.unwrap(), Some(5000)); + assert_eq!(storage.get_tip_height().await, Some(5000)); // Test 4: Out-of-order access let header_4500 = storage.get_header(4500).await.unwrap(); @@ -191,7 +191,7 @@ async fn test_header_chain_validation() { storage.store_headers(&chain).await.expect("Failed to store header chain"); // Verify the chain is stored correctly - assert_eq!(storage.get_tip_height().await.unwrap(), Some(9)); + assert_eq!(storage.get_tip_height().await, Some(9)); // Verify we can retrieve the entire chain let retrieved_chain = storage.load_headers(0..10).await.unwrap(); @@ -229,7 +229,7 @@ async fn test_header_sync_performance() { let sync_duration = start_time.elapsed(); // Verify sync completed correctly - assert_eq!(storage.get_tip_height().await.unwrap(), Some((total_headers - 1) as u32)); + assert_eq!(storage.get_tip_height().await, Some((total_headers - 1) as u32)); // Performance assertions (these are rough benchmarks) assert!( @@ -338,7 +338,7 @@ async fn test_header_storage_consistency() { storage.store_headers(&headers).await.expect("Failed to store headers"); // Test consistency: get tip and verify it matches the last stored header - let tip_height = storage.get_tip_height().await.unwrap().unwrap(); + let tip_height = storage.get_tip_height().await.unwrap(); let tip_header = storage.get_header(tip_height).await.unwrap().unwrap(); let expected_tip = &headers[headers.len() - 1]; @@ -358,48 +358,6 @@ async fn test_header_storage_consistency() { info!("Header storage consistency test completed"); } -#[test_case(0, 0 ; "genesis_0_blocks")] 
-#[test_case(0, 1 ; "genesis_1_block")] -#[test_case(0, 60000 ; "genesis_60000_blocks")] -#[test_case(100, 0 ; "checkpoint_0_blocks")] -#[test_case(170000, 1 ; "checkpoint_1_block")] -#[test_case(12345, 60000 ; "checkpoint_60000_blocks")] -#[tokio::test] -async fn test_load_headers_from_storage(sync_base_height: u32, header_count: usize) { - // Setup: Create storage with 100 headers - let temp_dir = TempDir::new().expect("Failed to create temp dir"); - let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()) - .await - .expect("Failed to create storage"); - - let test_headers = create_test_header_chain(header_count); - - // Store chain state - let mut chain_state = ChainState::new_for_network(Network::Dash); - chain_state.sync_base_height = sync_base_height; - chain_state.headers = test_headers.clone(); - storage.store_chain_state(&chain_state).await.expect("Failed to store chain state"); - - // Create HeaderSyncManager and load headers - let config = ClientConfig::new(Network::Dash); - let chain_state = Arc::new(RwLock::new(ChainState::new_for_network(Network::Dash))); - let mut header_sync = HeaderSyncManager::::new( - &config, - ReorgConfig::default(), - chain_state.clone(), - ) - .expect("Failed to create HeaderSyncManager"); - - // Load headers from storage - let loaded_count = - header_sync.load_headers_from_storage(&storage).await.expect("Failed to load headers"); - - let cs = chain_state.read().await; - - assert_eq!(loaded_count as usize, header_count, "Loaded count mismatch"); - assert_eq!(header_count, cs.headers.len(), "Chain state count mismatch"); -} - #[test_case(0, 1 ; "genesis_1_block")] #[test_case(0, 70000 ; "genesis_70000_blocks")] #[test_case(5000, 1 ; "checkpoint_1_block")] @@ -417,7 +375,6 @@ async fn test_prepare_sync(sync_base_height: u32, header_count: usize) { // Create and store chain state let mut chain_state = ChainState::new_for_network(Network::Dash); chain_state.sync_base_height = sync_base_height; - 
chain_state.headers = headers; storage.store_chain_state(&chain_state).await.expect("Failed to store chain state"); // Create HeaderSyncManager and load from storage diff --git a/dash-spv/tests/integration_real_node_test.rs b/dash-spv/tests/integration_real_node_test.rs index 8979da6f6..63fe2bcb8 100644 --- a/dash-spv/tests/integration_real_node_test.rs +++ b/dash-spv/tests/integration_real_node_test.rs @@ -206,7 +206,7 @@ async fn test_real_header_sync_up_to_10k() { .expect("Failed to create tmp storage"); // Verify starting from empty state - assert_eq!(storage.get_tip_height().await.unwrap(), None); + assert_eq!(storage.get_tip_height().await, None); let mut client = create_test_client(config.clone()).await.expect("Failed to create SPV client"); diff --git a/dash-spv/tests/segmented_storage_debug.rs b/dash-spv/tests/segmented_storage_debug.rs index 611a5eaa0..a26bec774 100644 --- a/dash-spv/tests/segmented_storage_debug.rs +++ b/dash-spv/tests/segmented_storage_debug.rs @@ -38,7 +38,7 @@ async fn test_basic_storage() { println!("Headers stored"); // Check tip height - let tip = storage.get_tip_height().await.unwrap(); + let tip = storage.get_tip_height().await; println!("Tip height: {:?}", tip); assert_eq!(tip, Some(9)); diff --git a/dash-spv/tests/segmented_storage_test.rs b/dash-spv/tests/segmented_storage_test.rs index 9b8995024..4bf7ac604 100644 --- a/dash-spv/tests/segmented_storage_test.rs +++ b/dash-spv/tests/segmented_storage_test.rs @@ -46,7 +46,7 @@ async fn test_segmented_storage_basic_operations() { } // Verify we can read them back - assert_eq!(storage.get_tip_height().await.unwrap(), Some(99_999)); + assert_eq!(storage.get_tip_height().await, Some(99_999)); // Check individual headers assert_eq!(storage.get_header(0).await.unwrap().unwrap().time, 0); @@ -76,7 +76,7 @@ async fn test_segmented_storage_persistence() { let mut storage = DiskStorageManager::new(path.clone()).await.unwrap(); // Verify storage starts empty - 
assert_eq!(storage.get_tip_height().await.unwrap(), None, "Storage should start empty"); + assert_eq!(storage.get_tip_height().await, None, "Storage should start empty"); let headers: Vec = (0..75_000).map(create_test_header).collect(); storage.store_headers(&headers).await.unwrap(); @@ -91,7 +91,7 @@ async fn test_segmented_storage_persistence() { { let storage = DiskStorageManager::new(path).await.unwrap(); - let actual_tip = storage.get_tip_height().await.unwrap(); + let actual_tip = storage.get_tip_height().await; if actual_tip != Some(74_999) { println!("Expected tip 74,999 but got {:?}", actual_tip); // Try to understand what's stored @@ -265,7 +265,7 @@ async fn test_background_save_timing() { // Verify data was saved { let storage = DiskStorageManager::new(path).await.unwrap(); - assert_eq!(storage.get_tip_height().await.unwrap(), Some(19_999)); + assert_eq!(storage.get_tip_height().await, Some(19_999)); assert_eq!(storage.get_header(15_000).await.unwrap().unwrap().time, 15_000); } } @@ -279,13 +279,13 @@ async fn test_clear_storage() { let headers: Vec = (0..10_000).map(create_test_header).collect(); storage.store_headers(&headers).await.unwrap(); - assert_eq!(storage.get_tip_height().await.unwrap(), Some(9_999)); + assert_eq!(storage.get_tip_height().await, Some(9_999)); // Clear storage storage.clear().await.unwrap(); // Verify everything is cleared - assert_eq!(storage.get_tip_height().await.unwrap(), None); + assert_eq!(storage.get_tip_height().await, None); assert_eq!(storage.get_header_height_by_hash(&headers[0].block_hash()).await.unwrap(), None); } @@ -311,7 +311,7 @@ async fn test_mixed_operations() { storage.store_metadata("test_key", b"test_value").await.unwrap(); // Verify everything - assert_eq!(storage.get_tip_height().await.unwrap(), Some(74_999)); + assert_eq!(storage.get_tip_height().await, Some(74_999)); assert_eq!(storage.get_filter_tip_height().await.unwrap(), Some(74_999)); let filters = storage.load_filters(1000..1001).await.unwrap(); 
diff --git a/dash-spv/tests/simple_header_test.rs b/dash-spv/tests/simple_header_test.rs index 40d0ce791..3fc2c6e71 100644 --- a/dash-spv/tests/simple_header_test.rs +++ b/dash-spv/tests/simple_header_test.rs @@ -57,7 +57,7 @@ async fn test_simple_header_sync() { .expect("Failed to create tmp storage"); // Verify starting from empty state - assert_eq!(storage.get_tip_height().await.unwrap(), None); + assert_eq!(storage.get_tip_height().await, None); // Create network manager let network_manager = diff --git a/dash-spv/tests/simple_segmented_test.rs b/dash-spv/tests/simple_segmented_test.rs index 422bb78ed..327c08779 100644 --- a/dash-spv/tests/simple_segmented_test.rs +++ b/dash-spv/tests/simple_segmented_test.rs @@ -28,7 +28,7 @@ async fn test_simple_storage() { let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()).await.unwrap(); println!("Testing get_tip_height before storing anything..."); - let initial_tip = storage.get_tip_height().await.unwrap(); + let initial_tip = storage.get_tip_height().await; println!("Initial tip: {:?}", initial_tip); assert_eq!(initial_tip, None); @@ -40,7 +40,7 @@ async fn test_simple_storage() { println!("Single header stored"); println!("Checking tip height..."); - let tip = storage.get_tip_height().await.unwrap(); + let tip = storage.get_tip_height().await; println!("Tip height after storing one header: {:?}", tip); assert_eq!(tip, Some(0)); diff --git a/dash-spv/tests/storage_consistency_test.rs b/dash-spv/tests/storage_consistency_test.rs index 8bdd682b7..a5640bf74 100644 --- a/dash-spv/tests/storage_consistency_test.rs +++ b/dash-spv/tests/storage_consistency_test.rs @@ -36,7 +36,7 @@ async fn test_tip_height_header_consistency_basic() { storage.store_headers(&headers).await.unwrap(); // Check consistency immediately - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; println!("Tip height: {:?}", tip_height); if let Some(height) = tip_height { @@ 
-72,7 +72,7 @@ async fn test_tip_height_header_consistency_after_save() { // Wait for background save to complete sleep(Duration::from_secs(1)).await; - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; println!("Phase 1 - Tip height: {:?}", tip_height); if let Some(height) = tip_height { @@ -87,7 +87,7 @@ async fn test_tip_height_header_consistency_after_save() { { let storage = DiskStorageManager::new(storage_path.clone()).await.unwrap(); - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; println!("Phase 2 - Tip height after reload: {:?}", tip_height); if let Some(height) = tip_height { @@ -129,7 +129,7 @@ async fn test_tip_height_header_consistency_large_dataset() { storage.store_headers(&headers).await.unwrap(); // Check consistency after each batch - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; if let Some(height) = tip_height { let header = storage.get_header(height).await.unwrap(); if header.is_none() { @@ -155,7 +155,7 @@ async fn test_tip_height_header_consistency_large_dataset() { } // Final consistency check - let final_tip = storage.get_tip_height().await.unwrap(); + let final_tip = storage.get_tip_height().await; println!("Final tip height: {:?}", final_tip); if let Some(height) = final_tip { @@ -206,7 +206,7 @@ async fn test_concurrent_tip_header_access() { let handle = tokio::spawn(async move { // Repeatedly check consistency for iteration in 0..100 { - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; if let Some(height) = tip_height { let header = storage.get_header(height).await.unwrap(); @@ -278,7 +278,7 @@ async fn test_reproduce_filter_sync_bug() { storage.store_headers(&tip_header).await.unwrap(); // Now check what get_tip_height() returns - let reported_tip = storage.get_tip_height().await.unwrap(); + let 
reported_tip = storage.get_tip_height().await; println!("Storage reports tip height: {:?}", reported_tip); if let Some(tip_height) = reported_tip { @@ -346,7 +346,7 @@ async fn test_reproduce_filter_sync_bug_small() { storage.store_headers(&tip_header).await.unwrap(); // Now check what get_tip_height() returns - let reported_tip = storage.get_tip_height().await.unwrap(); + let reported_tip = storage.get_tip_height().await; println!("Storage reports tip height: {:?}", reported_tip); if let Some(tip_height) = reported_tip { @@ -406,7 +406,7 @@ async fn test_segment_boundary_consistency() { segment_size + 1, // Second in second segment ]; - let tip_height = storage.get_tip_height().await.unwrap().unwrap(); + let tip_height = storage.get_tip_height().await.unwrap(); println!("Tip height: {}", tip_height); for height in boundary_heights { @@ -461,7 +461,7 @@ async fn test_reproduce_tip_height_segment_eviction_race() { storage.store_headers(&headers).await.unwrap(); // Immediately check for race condition - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; if let Some(height) = tip_height { // Try to access the tip header multiple times to catch race condition @@ -542,7 +542,7 @@ async fn test_concurrent_tip_height_access_with_eviction() { // Reduced from 50 to 20 iterations for iteration in 0..20 { // Get tip height - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; if let Some(height) = tip_height { // Immediately try to access the tip header @@ -606,7 +606,7 @@ async fn test_concurrent_tip_height_access_with_eviction_heavy() { let handle = tokio::spawn(async move { for iteration in 0..50 { // Get tip height - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; if let Some(height) = tip_height { // Immediately try to access the tip header @@ -659,7 +659,7 @@ async fn 
test_tip_height_segment_boundary_race() { storage.store_headers(&headers).await.unwrap(); // Verify tip is at segment boundary - let tip_height = storage.get_tip_height().await.unwrap(); + let tip_height = storage.get_tip_height().await; assert_eq!(tip_height, Some(segment_size - 1)); storage.shutdown().await; @@ -678,7 +678,7 @@ async fn test_tip_height_segment_boundary_race() { storage.store_headers(&headers).await.unwrap(); // After storing each segment, verify tip consistency - let reported_tip = storage.get_tip_height().await.unwrap(); + let reported_tip = storage.get_tip_height().await; if let Some(tip) = reported_tip { let header = storage.get_header(tip).await.unwrap(); if header.is_none() { @@ -698,7 +698,7 @@ async fn test_tip_height_segment_boundary_race() { } // But the current tip should always be accessible - let current_tip = storage.get_tip_height().await.unwrap(); + let current_tip = storage.get_tip_height().await; if let Some(tip) = current_tip { let header = storage.get_header(tip).await.unwrap(); assert!(header.is_some(), "Current tip header must always be accessible"); diff --git a/dash-spv/tests/storage_test.rs b/dash-spv/tests/storage_test.rs index 254a5162e..d078cc3f1 100644 --- a/dash-spv/tests/storage_test.rs +++ b/dash-spv/tests/storage_test.rs @@ -57,7 +57,7 @@ async fn test_disk_storage_reopen_after_clean_shutdown() { assert!(storage.is_ok(), "Should reopen after clean shutdown"); let storage = storage.unwrap(); - let tip = storage.get_tip_height().await.unwrap(); + let tip = storage.get_tip_height().await; assert_eq!(tip, Some(4), "Data should persist across reopen"); } @@ -80,9 +80,6 @@ async fn test_disk_storage_concurrent_access_blocked() { } other => panic!("Expected DirectoryLocked error, got: {:?}", other), } - - // First storage manager should still be usable - assert!(_storage1.get_tip_height().await.is_ok()); } #[tokio::test] From 4be4cefb0e4e15758a5ee121ab6b356e10bbdd0d Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: 
Thu, 18 Dec 2025 16:57:59 -0800 Subject: [PATCH 12/19] chore: removed fork detector and fork structs (#290) --- dash-spv/src/chain/fork_detector.rs | 80 ---------------------------- dash-spv/src/chain/mod.rs | 24 --------- dash-spv/src/chain/reorg.rs | 40 -------------- dash-spv/src/sync/headers/manager.rs | 9 +--- 4 files changed, 1 insertion(+), 152 deletions(-) delete mode 100644 dash-spv/src/chain/fork_detector.rs delete mode 100644 dash-spv/src/chain/reorg.rs diff --git a/dash-spv/src/chain/fork_detector.rs b/dash-spv/src/chain/fork_detector.rs deleted file mode 100644 index 6dfe92822..000000000 --- a/dash-spv/src/chain/fork_detector.rs +++ /dev/null @@ -1,80 +0,0 @@ -//! Fork detection logic for identifying blockchain forks -//! -//! This module detects when incoming headers create a fork in the blockchain -//! rather than extending the current chain tip. - -use super::Fork; -use dashcore::BlockHash; -use std::collections::HashMap; - -/// Detects and manages blockchain forks -pub struct ForkDetector { - /// Currently known forks indexed by their tip hash - forks: HashMap, -} - -impl ForkDetector { - pub fn new(max_forks: usize) -> Result { - if max_forks == 0 { - return Err("max_forks must be greater than 0"); - } - Ok(Self { - forks: HashMap::new(), - }) - } - - /// Get all known forks - pub fn get_forks(&self) -> Vec<&Fork> { - self.forks.values().collect() - } - - /// Get a specific fork by its tip hash - pub fn get_fork(&self, tip_hash: &BlockHash) -> Option<&Fork> { - self.forks.get(tip_hash) - } - - /// Remove a fork (e.g., after it's been processed) - pub fn remove_fork(&mut self, tip_hash: &BlockHash) -> Option { - self.forks.remove(tip_hash) - } - - /// Check if we have any forks - pub fn has_forks(&self) -> bool { - !self.forks.is_empty() - } - - /// Get the strongest fork (most cumulative work) - pub fn get_strongest_fork(&self) -> Option<&Fork> { - self.forks.values().max_by_key(|fork| &fork.chain_work) - } - - /// Clear all forks - pub fn 
clear_forks(&mut self) { - self.forks.clear(); - } -} - -/// Result of fork detection for a header -#[derive(Debug, Clone)] -pub enum ForkDetectionResult { - /// Header extends the current main chain tip - ExtendsMainChain, - /// Header extends an existing fork - ExtendsFork(Fork), - /// Header creates a new fork from the main chain - CreatesNewFork(Fork), - /// Header doesn't connect to any known chain - Orphan, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_fork_detector_zero_max_forks() { - let result = ForkDetector::new(0); - assert!(result.is_err()); - assert_eq!(result.err(), Some("max_forks must be greater than 0")); - } -} diff --git a/dash-spv/src/chain/mod.rs b/dash-spv/src/chain/mod.rs index 61be1f963..e533b7be3 100644 --- a/dash-spv/src/chain/mod.rs +++ b/dash-spv/src/chain/mod.rs @@ -1,7 +1,6 @@ //! Chain management module with reorganization support //! //! This module provides functionality for managing blockchain state including: -//! - Fork detection and handling //! - Chain reorganization //! - Multiple chain tip tracking //! 
- Chain work calculation @@ -11,9 +10,7 @@ pub mod chain_tip; pub mod chain_work; pub mod chainlock_manager; pub mod checkpoints; -pub mod fork_detector; pub mod orphan_pool; -pub mod reorg; #[cfg(test)] mod checkpoint_test; @@ -24,25 +21,4 @@ pub use chain_tip::{ChainTip, ChainTipManager}; pub use chain_work::ChainWork; pub use chainlock_manager::{ChainLockEntry, ChainLockManager, ChainLockStats}; pub use checkpoints::{Checkpoint, CheckpointManager}; -pub use fork_detector::{ForkDetectionResult, ForkDetector}; pub use orphan_pool::{OrphanBlock, OrphanPool, OrphanPoolStats}; -pub use reorg::ReorgEvent; - -use dashcore::{BlockHash, Header as BlockHeader}; - -/// Represents a potential chain fork -#[derive(Debug, Clone)] -pub struct Fork { - /// The block hash where the fork diverges from the main chain - pub fork_point: BlockHash, - /// The height of the fork point - pub fork_height: u32, - /// The tip of the forked chain - pub tip_hash: BlockHash, - /// The height of the fork tip - pub tip_height: u32, - /// Headers in the fork (from fork point to tip) - pub headers: Vec, - /// Cumulative chain work of this fork - pub chain_work: ChainWork, -} diff --git a/dash-spv/src/chain/reorg.rs b/dash-spv/src/chain/reorg.rs deleted file mode 100644 index 026f7ccd0..000000000 --- a/dash-spv/src/chain/reorg.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! Chain reorganization handling -//! -//! This module implements the core logic for handling blockchain reorganizations, -//! including finding common ancestors, rolling back transactions, and switching chains. 
- -use dashcore::{BlockHash, Header as BlockHeader, Transaction, Txid}; - -/// Event emitted when a reorganization occurs -#[derive(Debug, Clone)] -pub struct ReorgEvent { - /// The common ancestor where chains diverged - pub common_ancestor: BlockHash, - /// Height of the common ancestor - pub common_height: u32, - /// Headers that were removed from the main chain - pub disconnected_headers: Vec, - /// Headers that were added to the main chain - pub connected_headers: Vec, - /// Transactions that may have changed confirmation status - pub affected_transactions: Vec, -} - -/// Data collected during the read phase of reorganization -#[allow(dead_code)] -#[derive(Debug)] -#[cfg_attr(test, derive(Clone))] -pub(crate) struct ReorgData { - /// The common ancestor where chains diverged - pub(crate) common_ancestor: BlockHash, - /// Height of the common ancestor - pub(crate) common_height: u32, - /// Headers that need to be disconnected from the main chain - disconnected_headers: Vec, - /// Block hashes and heights for disconnected blocks - disconnected_blocks: Vec<(BlockHash, u32)>, - /// Transaction IDs from disconnected blocks that affect the wallet - affected_tx_ids: Vec, - /// Actual transactions that were affected (if available) - affected_transactions: Vec, -} diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 2db5fda4f..8e4a2f41b 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -7,7 +7,7 @@ use dashcore::{ use dashcore_hashes::Hash; use crate::chain::checkpoints::{mainnet_checkpoints, testnet_checkpoints, CheckpointManager}; -use crate::chain::{ChainTip, ChainTipManager, ChainWork, ForkDetector}; +use crate::chain::{ChainTip, ChainTipManager, ChainWork}; use crate::client::ClientConfig; use crate::error::{SyncError, SyncResult}; use crate::network::NetworkManager; @@ -47,7 +47,6 @@ pub struct HeaderSyncManager { _phantom_s: std::marker::PhantomData, _phantom_n: 
std::marker::PhantomData, config: ClientConfig, - fork_detector: ForkDetector, tip_manager: ChainTipManager, checkpoint_manager: CheckpointManager, reorg_config: ReorgConfig, @@ -83,8 +82,6 @@ impl Date: Fri, 19 Dec 2025 17:34:35 +0000 Subject: [PATCH 13/19] get_header now checks out of bound to return None instead of panics --- dash-spv/src/storage/state.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs index e508e2e19..a975e1e05 100644 --- a/dash-spv/src/storage/state.rs +++ b/dash-spv/src/storage/state.rs @@ -378,6 +378,22 @@ impl StorageManager for DiskStorageManager { } async fn get_header(&self, height: u32) -> StorageResult> { + if let Some(tip_height) = self.get_tip_height().await { + if height > tip_height { + return Ok(None); + } + } else { + return Ok(None); + } + + if let Some(start_height) = self.get_start_height().await { + if height < start_height { + return Ok(None); + } + } else { + return Ok(None); + } + Ok(self.block_headers.write().await.get_items(height..height + 1).await?.first().copied()) } From 06d35e9b6539485640e3d33b001edc0835d59062 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 19 Dec 2025 17:42:12 +0000 Subject: [PATCH 14/19] start height was not being updated properly --- dash-spv/src/storage/segments.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/dash-spv/src/storage/segments.rs b/dash-spv/src/storage/segments.rs index 62247112b..5fd52a210 100644 --- a/dash-spv/src/storage/segments.rs +++ b/dash-spv/src/storage/segments.rs @@ -379,6 +379,11 @@ impl SegmentCache { None => Some(height - 1), }; + self.start_height = match self.start_height { + Some(current) => Some(current.min(start_height)), + None => Some(start_height), + }; + // Persist dirty segments periodically (every 1000 filter items) if items.len() >= 1000 || start_height.is_multiple_of(1000) { self.persist_dirty(manager).await; From abb37f8a4444c0e6a4d0c29d24416cc8d7171540 
Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 19 Dec 2025 18:01:35 +0000 Subject: [PATCH 15/19] fixed other test by correctly storing the headers in the storage --- dash-spv/tests/header_sync_test.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/dash-spv/tests/header_sync_test.rs b/dash-spv/tests/header_sync_test.rs index 8acb726c1..da1939966 100644 --- a/dash-spv/tests/header_sync_test.rs +++ b/dash-spv/tests/header_sync_test.rs @@ -376,6 +376,7 @@ async fn test_prepare_sync(sync_base_height: u32, header_count: usize) { let mut chain_state = ChainState::new_for_network(Network::Dash); chain_state.sync_base_height = sync_base_height; storage.store_chain_state(&chain_state).await.expect("Failed to store chain state"); + storage.store_headers(&headers).await.expect("Failed to store headers"); // Create HeaderSyncManager and load from storage let config = ClientConfig::new(Network::Dash); From 4a26a7f74950599f425997796fe479ac34480f41 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 19 Dec 2025 18:39:39 +0000 Subject: [PATCH 16/19] removed genesis block creation in ChainState creation --- dash-spv/src/types.rs | 30 +----------------------------- 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/dash-spv/src/types.rs b/dash-spv/src/types.rs index 2433111e8..5c6dcba3b 100644 --- a/dash-spv/src/types.rs +++ b/dash-spv/src/types.rs @@ -245,13 +245,10 @@ impl DetailedSyncProgress { /// /// ## Checkpoint Sync /// When syncing from a checkpoint (not genesis), `sync_base_height` is non-zero. -/// The `headers` vector contains headers starting from the checkpoint, not from genesis. -/// Use `tip_height()` to get the absolute blockchain height. 
/// /// ## Memory Considerations -/// - headers: ~80 bytes per header /// - filter_headers: 32 bytes per filter header -/// - At 2M blocks: ~160MB for headers, ~64MB for filter headers +/// - At 2M blocks: ~64MB for filter headers #[derive(Clone, Default)] pub struct ChainState { /// Filter headers indexed by height. @@ -286,31 +283,6 @@ impl ChainState { pub fn new_for_network(network: Network) -> Self { let mut state = Self::default(); - // Initialize with genesis block - let genesis_header = match network { - Network::Dash => { - // Use known genesis for mainnet - dashcore::blockdata::constants::genesis_block(network).header - } - Network::Testnet => { - // Use known genesis for testnet - dashcore::blockdata::constants::genesis_block(network).header - } - _ => { - // For other networks, use the existing genesis block function - dashcore::blockdata::constants::genesis_block(network).header - } - }; - - // Add genesis header to the chain state - // TODO: Check if this is necessary -> state.headers.push(genesis_header); - - tracing::debug!( - "Initialized ChainState with genesis block - network: {:?}, hash: {}", - network, - genesis_header.block_hash() - ); - // Initialize masternode engine for the network let mut engine = MasternodeListEngine::default_for_network(network); if let Some(genesis_hash) = network.known_genesis_block_hash() { From bc544a552c6b088eccc3110127214b26e0801576 Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 19 Dec 2025 18:53:26 +0000 Subject: [PATCH 17/19] fixed clippy warnings --- dash-spv/src/chain/chainlock_manager.rs | 2 +- dash-spv/src/client/lifecycle.rs | 3 +-- dash-spv/src/sync/headers/manager.rs | 14 ++++++++------ dash-spv/tests/edge_case_filter_sync_test.rs | 2 +- dash-spv/tests/filter_header_verification_test.rs | 4 ++-- dash-spv/tests/rollback_test.rs | 4 ++-- 6 files changed, 15 insertions(+), 14 deletions(-) diff --git a/dash-spv/src/chain/chainlock_manager.rs b/dash-spv/src/chain/chainlock_manager.rs index 
a94020909..ddf2eea5e 100644 --- a/dash-spv/src/chain/chainlock_manager.rs +++ b/dash-spv/src/chain/chainlock_manager.rs @@ -178,7 +178,7 @@ impl ChainLockManager { if let Some(header) = storage .get_header(chain_lock.block_height) .await - .map_err(|e| ValidationError::StorageError(e))? + .map_err(ValidationError::StorageError)? { let header_hash = header.block_hash(); if header_hash != chain_lock.block_hash { diff --git a/dash-spv/src/client/lifecycle.rs b/dash-spv/src/client/lifecycle.rs index 44bc4691b..d4fcaf76e 100644 --- a/dash-spv/src/client/lifecycle.rs +++ b/dash-spv/src/client/lifecycle.rs @@ -191,8 +191,7 @@ impl< // Get initial header count from storage let (header_height, filter_height) = { let storage = self.storage.lock().await; - let h_height = - storage.get_tip_height().await.map_err(SpvError::Storage)?.unwrap_or(0); + let h_height = storage.get_tip_height().await.unwrap_or(0); let f_height = storage.get_filter_tip_height().await.map_err(SpvError::Storage)?.unwrap_or(0); (h_height, f_height) diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index e230bf831..cbe6a0409 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -271,14 +271,16 @@ impl { @@ -660,12 +662,12 @@ impl { diff --git a/dash-spv/tests/edge_case_filter_sync_test.rs b/dash-spv/tests/edge_case_filter_sync_test.rs index 370cf88d8..c5d4760b5 100644 --- a/dash-spv/tests/edge_case_filter_sync_test.rs +++ b/dash-spv/tests/edge_case_filter_sync_test.rs @@ -144,7 +144,7 @@ async fn test_filter_sync_at_tip_edge_case() { storage.store_filter_headers(&filter_headers).await.unwrap(); // Verify initial state - let tip_height = storage.get_tip_height().await.unwrap().unwrap(); + let tip_height = storage.get_tip_height().await.unwrap(); let filter_tip_height = storage.get_filter_tip_height().await.unwrap().unwrap(); assert_eq!(tip_height, height - 1); // 0-indexed assert_eq!(filter_tip_height, height - 1); // 0-indexed 
diff --git a/dash-spv/tests/filter_header_verification_test.rs b/dash-spv/tests/filter_header_verification_test.rs index 0cb6a5fa5..e8753411e 100644 --- a/dash-spv/tests/filter_header_verification_test.rs +++ b/dash-spv/tests/filter_header_verification_test.rs @@ -197,7 +197,7 @@ async fn test_filter_header_verification_failure_reproduction() { let initial_headers = create_test_headers_range(1000, 5000); // Headers 1000-4999 storage.store_headers(&initial_headers).await.expect("Failed to store initial headers"); - let tip_height = storage.get_tip_height().await.unwrap().unwrap(); + let tip_height = storage.get_tip_height().await.unwrap(); println!("Initial header chain stored: tip height = {}", tip_height); assert_eq!(tip_height, 4999); @@ -361,7 +361,7 @@ async fn test_overlapping_batches_from_different_peers() { let initial_headers = create_test_headers_range(1, 3000); // Headers 1-2999 storage.store_headers(&initial_headers).await.expect("Failed to store initial headers"); - let tip_height = storage.get_tip_height().await.unwrap().unwrap(); + let tip_height = storage.get_tip_height().await.unwrap(); println!("Header chain stored: tip height = {}", tip_height); assert_eq!(tip_height, 2999); diff --git a/dash-spv/tests/rollback_test.rs b/dash-spv/tests/rollback_test.rs index d2424f972..7634648c6 100644 --- a/dash-spv/tests/rollback_test.rs +++ b/dash-spv/tests/rollback_test.rs @@ -42,7 +42,7 @@ async fn test_disk_storage_rollback() -> Result<(), Box> storage.store_headers(&headers).await?; // Verify we have 10 headers - let tip_height = storage.get_tip_height().await?; + let tip_height = storage.get_tip_height().await; assert_eq!(tip_height, Some(9)); // Load all headers to verify @@ -54,7 +54,7 @@ async fn test_disk_storage_rollback() -> Result<(), Box> // TODO: Test assertions commented out because rollback_to_height is not implemented // Verify tip height is now 5 - let _ = storage.get_tip_height().await?; + let _ = storage.get_tip_height().await; // 
assert_eq!(tip_height_after_rollback, Some(5)); // Verify we can only load headers up to height 5 From 4be7e6e9e4c43ef46d3fbecae4cac701d82eb93e Mon Sep 17 00:00:00 2001 From: Borja Castellano Date: Fri, 19 Dec 2025 19:50:14 +0000 Subject: [PATCH 18/19] dropped unuseed code --- dash-spv/src/sync/headers/manager.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/dash-spv/src/sync/headers/manager.rs b/dash-spv/src/sync/headers/manager.rs index 3ee55fea5..115d7d7a3 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -49,7 +49,6 @@ pub struct HeaderSyncManager { config: ClientConfig, tip_manager: ChainTipManager, checkpoint_manager: CheckpointManager, - reorg_config: ReorgConfig, chain_state: Arc>, // WalletState removed - wallet functionality is now handled externally headers2_state: Headers2StateManager, @@ -83,7 +82,6 @@ impl Date: Fri, 2 Jan 2026 21:45:15 +0000 Subject: [PATCH 19/19] code review suggestions --- dash-spv/src/storage/state.rs | 12 ++---------- dash-spv/src/sync/headers/manager.rs | 18 ++++++++++++++++++ dash-spv/tests/storage_test.rs | 6 ++++-- 3 files changed, 24 insertions(+), 12 deletions(-) diff --git a/dash-spv/src/storage/state.rs b/dash-spv/src/storage/state.rs index 559787e63..31f5fdda8 100644 --- a/dash-spv/src/storage/state.rs +++ b/dash-spv/src/storage/state.rs @@ -330,19 +330,11 @@ impl StorageManager for DiskStorageManager { } async fn get_header(&self, height: u32) -> StorageResult> { - if let Some(tip_height) = self.get_tip_height().await { - if height > tip_height { - return Ok(None); - } - } else { + if self.get_tip_height().await.is_none_or(|tip_height| height > tip_height) { return Ok(None); } - if let Some(start_height) = self.get_start_height().await { - if height < start_height { - return Ok(None); - } - } else { + if self.get_start_height().await.is_none_or(|start_height| height < start_height) { return Ok(None); } diff --git a/dash-spv/src/sync/headers/manager.rs 
b/dash-spv/src/sync/headers/manager.rs index 115d7d7a3..81e54a662 100644 --- a/dash-spv/src/sync/headers/manager.rs +++ b/dash-spv/src/sync/headers/manager.rs @@ -49,6 +49,7 @@ pub struct HeaderSyncManager { config: ClientConfig, tip_manager: ChainTipManager, checkpoint_manager: CheckpointManager, + reorg_config: ReorgConfig, chain_state: Arc>, // WalletState removed - wallet functionality is now handled externally headers2_state: Headers2StateManager, @@ -82,6 +83,7 @@ impl