From 543aefc9dc0449b13e47ab51a4160ac493422203 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Wed, 18 Feb 2026 04:18:32 +0100 Subject: [PATCH 1/4] feat: add `dashd` integration tests for SPV sync Adds comprehensive integration tests that verify SPV sync against a real `dashd` instance with pre-generated regtest blockchain: [dashpay/regtest-blockchain](https://github.com/dashpay/regtest-blockchain) - CI infrastructure: `contrib/setup-dashd.py` for cross-platform dashd/test-data setup, GitHub Actions caching and log retention on test failure - Shared test utilities in `dash-spv/src/test_utils/`: `DashCoreNode` (`dashd` process management, RPC), `DashdTestContext` (common setup), filesystem helpers - FFI test utilities in `dash-spv-ffi/src/test_utils/`: `CallbackTracker` (callback verification), `FFITestContext` (FFI client wrapper lifecycle management) - SPV tests: basic sync, empty wallet, multi-wallet, restart consistency, restart with fresh wallet, multiple restarts, random restarts, peer disconnection (exclusive and non-exclusive mode), incremental transactions (single block, across blocks) - FFI tests: wallet sync, incremental sync, restart consistency, all-callbacks verification, post-sync transaction and disconnect callbacks --- .github/workflows/build-and-test.yml | 33 ++ .github/workflows/sanitizer.yml | 2 + CLAUDE.md | 27 ++ contrib/setup-dashd.py | 160 +++++++ dash-spv-ffi/Cargo.toml | 7 +- dash-spv-ffi/src/lib.rs | 3 + dash-spv-ffi/src/test_utils/callbacks.rs | 421 +++++++++++++++++ dash-spv-ffi/src/test_utils/context.rs | 445 ++++++++++++++++++ dash-spv-ffi/src/test_utils/mod.rs | 11 + .../tests/callback_integration_test.rs | 375 +++++++++++++++ dash-spv-ffi/tests/dashd_ffi_sync_test.rs | 426 +++++++++++++++++ dash-spv/Cargo.toml | 6 +- dash-spv/src/test_utils/context.rs | 90 ++++ dash-spv/src/test_utils/fs_helpers.rs | 50 ++ dash-spv/src/test_utils/mod.rs | 13 + dash-spv/src/test_utils/node.rs | 330 +++++++++++++ dash-spv/tests/dashd_sync.rs | 16 + 
dash-spv/tests/dashd_sync/helpers.rs | 207 ++++++++ dash-spv/tests/dashd_sync/setup.rs | 358 ++++++++++++++ dash-spv/tests/dashd_sync/tests_basic.rs | 128 +++++ dash-spv/tests/dashd_sync/tests_disconnect.rs | 46 ++ dash-spv/tests/dashd_sync/tests_restart.rs | 192 ++++++++ .../tests/dashd_sync/tests_transaction.rs | 231 +++++++++ 23 files changed, 3575 insertions(+), 2 deletions(-) create mode 100755 contrib/setup-dashd.py create mode 100644 dash-spv-ffi/src/test_utils/callbacks.rs create mode 100644 dash-spv-ffi/src/test_utils/context.rs create mode 100644 dash-spv-ffi/src/test_utils/mod.rs create mode 100644 dash-spv-ffi/tests/callback_integration_test.rs create mode 100644 dash-spv-ffi/tests/dashd_ffi_sync_test.rs create mode 100644 dash-spv/src/test_utils/context.rs create mode 100644 dash-spv/src/test_utils/fs_helpers.rs create mode 100644 dash-spv/src/test_utils/node.rs create mode 100644 dash-spv/tests/dashd_sync.rs create mode 100644 dash-spv/tests/dashd_sync/helpers.rs create mode 100644 dash-spv/tests/dashd_sync/setup.rs create mode 100644 dash-spv/tests/dashd_sync/tests_basic.rs create mode 100644 dash-spv/tests/dashd_sync/tests_disconnect.rs create mode 100644 dash-spv/tests/dashd_sync/tests_restart.rs create mode 100644 dash-spv/tests/dashd_sync/tests_transaction.rs diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index 314603ca7..65ee36f76 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -17,6 +17,12 @@ on: permissions: contents: read +# Keep these defaults in sync with contrib/setup-dashd.py +env: + DASHVERSION: "23.1.0" + TEST_DATA_REPO: "dashpay/regtest-blockchain" + TEST_DATA_VERSION: "v0.0.2" + jobs: test: name: ${{ matrix.group }} @@ -41,8 +47,26 @@ jobs: uses: taiki-e/install-action@cargo-llvm-cov - run: pip install pyyaml + + # Set up dashd and test data for groups that need it + - name: Cache dashd and test data + if: matrix.group == 'spv' || matrix.group == 
'ffi' + uses: actions/cache@v4 + with: + path: .rust-dashcore-test + key: rust-dashcore-test-${{ inputs.os }}-${{ env.DASHVERSION }}-${{ env.TEST_DATA_REPO }}-${{ env.TEST_DATA_VERSION }} + + - name: Setup dashd for integration tests + if: matrix.group == 'spv' || matrix.group == 'ffi' + env: + CACHE_DIR: ${{ github.workspace }}/.rust-dashcore-test + shell: bash + run: python contrib/setup-dashd.py >> "$GITHUB_ENV" + - name: Run tests id: tests + env: + DASHD_TEST_RETAIN_DIR: ${{ (matrix.group == 'spv' || matrix.group == 'ffi') && '/tmp/dashd-test-logs' || '' }} run: > python .github/scripts/ci_config.py run-group ${{ matrix.group }} --os ${{ inputs.os }} @@ -60,3 +84,12 @@ jobs: flags: ${{ steps.tests.outputs.crate_flags }} token: ${{ secrets.CODECOV_TOKEN }} fail_ci_if_error: true + + - name: Upload failed dashd test logs + if: failure() && (matrix.group == 'spv' || matrix.group == 'ffi') + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.group }}-test-logs-${{ inputs.os }} + path: /tmp/dashd-test-logs/ + retention-days: 7 + if-no-files-found: ignore diff --git a/.github/workflows/sanitizer.yml b/.github/workflows/sanitizer.yml index 81f9eb896..e500344ac 100644 --- a/.github/workflows/sanitizer.yml +++ b/.github/workflows/sanitizer.yml @@ -40,6 +40,7 @@ jobs: RUSTFLAGS: "-Zsanitizer=address -Cdebuginfo=2 -Cforce-frame-pointers=yes" ASAN_OPTIONS: "symbolize=1:allow_addr2line=1" LSAN_OPTIONS: "fast_unwind_on_malloc=0" + SKIP_DASHD_TESTS: 1 run: | # FFI crates (C interop) cargo +nightly test -Zbuild-std --target x86_64-unknown-linux-gnu \ @@ -63,6 +64,7 @@ jobs: RUST_BACKTRACE: 1 RUSTFLAGS: "-Zsanitizer=thread -Cdebuginfo=2" TSAN_OPTIONS: "second_deadlock_stack=1" + SKIP_DASHD_TESTS: 1 run: | # Async crate with concurrent code cargo +nightly test -Zbuild-std --target x86_64-unknown-linux-gnu \ diff --git a/CLAUDE.md b/CLAUDE.md index 2779b0cb6..a21637895 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -101,6 +101,33 @@ DO_LINT=true ./contrib/test.sh DO_FMT=true 
./contrib/test.sh ``` +### Integration Tests (dashd) + +The `dash-spv` and `dash-spv-ffi` crates include integration tests that run against a real `dashd` regtest node. These tests cover SPV sync, wallet operations, restarts, disconnections, and transactions. + +**Setup:** `contrib/setup-dashd.py` downloads the dashd binary and regtest blockchain test data, caching them in `~/.rust-dashcore-test/`. It outputs the required environment variables. + +```bash +eval $(python3 contrib/setup-dashd.py) +``` + +**Running:** +```bash +cargo test -p dash-spv dashd_sync +cargo test -p dash-spv-ffi dashd_ffi_sync_test +SKIP_DASHD_TESTS=1 cargo test # skip when dashd is unavailable +``` + +**Debugging:** +- `DASHD_TEST_LOG=1` — enable per-test console logging (use with `--nocapture`) +- `DASHD_TEST_RETAIN_DIR=` — retain test data directories on failure +- `DASHD_TEST_RETAIN_ALWAYS=1` — retain even on success + +**Key files:** +- `dash-spv/tests/dashd_sync/` — test modules (basic, restart, disconnect, transaction) +- `dash-spv/src/test_utils/` — shared infrastructure (`DashdTestContext`, `DashCoreNode`) +- `.github/ci-groups.yml` — CI test group definitions (`spv` and `ffi` groups run dashd tests) + ## Development Commands ### Linting and Formatting diff --git a/contrib/setup-dashd.py b/contrib/setup-dashd.py new file mode 100755 index 000000000..d00eb11c7 --- /dev/null +++ b/contrib/setup-dashd.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +"""Cross-platform setup script for dashd and test blockchain data. + +Downloads the Dash Core binary and regtest test data for integration tests. +Outputs DASHD_PATH and DASHD_DATADIR lines suitable for appending to GITHUB_ENV +or evaluating in a shell. 
+ +Environment variables: + DASHVERSION - Dash Core version (default: 23.1.0) + TEST_DATA_VERSION - Test data release version (default: v0.0.2) + TEST_DATA_REPO - GitHub repo for test data (default: dashpay/regtest-blockchain) + CACHE_DIR - Cache directory (default: ~/.rust-dashcore-test) +""" + +import os +import platform +import sys +import tarfile +import time +import urllib.request +import zipfile + +# Keep these defaults in sync with .github/workflows/build-and-test.yml +DASHVERSION = os.environ.get("DASHVERSION", "23.1.0") +TEST_DATA_VERSION = os.environ.get("TEST_DATA_VERSION", "v0.0.2") +TEST_DATA_REPO = os.environ.get("TEST_DATA_REPO", "dashpay/regtest-blockchain") + + +def get_cache_dir(): + if "CACHE_DIR" in os.environ: + return os.environ["CACHE_DIR"] + home = os.environ.get("HOME") or os.environ.get("USERPROFILE") + if not home: + sys.exit("Cannot determine home directory: neither HOME nor USERPROFILE is set") + return os.path.join(home, ".rust-dashcore-test") + + +def get_asset_info(): + """Return the asset filename for the current platform.""" + system = platform.system() + machine = platform.machine() + + if system == "Linux": + linux_archs = {"aarch64": "aarch64", "arm64": "aarch64", "x86_64": "x86_64", "amd64": "x86_64"} + arch = linux_archs.get(machine) + if not arch: + sys.exit(f"Unsupported Linux architecture: {machine}") + asset = f"dashcore-{DASHVERSION}-{arch}-linux-gnu.tar.gz" + elif system == "Darwin": + darwin_archs = {"arm64": "arm64", "x86_64": "x86_64"} + arch = darwin_archs.get(machine) + if not arch: + sys.exit(f"Unsupported macOS architecture: {machine}") + asset = f"dashcore-{DASHVERSION}-{arch}-apple-darwin.tar.gz" + elif system == "Windows": + asset = f"dashcore-{DASHVERSION}-win64.zip" + else: + sys.exit(f"Unsupported platform: {system}") + + return asset + + +def log(msg): + print(msg, file=sys.stderr) + + +def download(url, dest, timeout=300, retries=3): + for attempt in range(1, retries + 1): + try: + log(f"Downloading {url} 
(attempt {attempt}/{retries})...") + with urllib.request.urlopen(url, timeout=timeout) as response: + with open(dest, "wb") as f: + while chunk := response.read(8192): + f.write(chunk) + return + except Exception as e: + log(f"Download failed: {e}") + if attempt == retries: + sys.exit(f"Failed to download {url} after {retries} attempts") + time.sleep(5 * attempt) + + +def extract(archive_path, dest_dir): + if archive_path.endswith(".zip"): + with zipfile.ZipFile(archive_path, "r") as zf: + zf.extractall(dest_dir) + else: + with tarfile.open(archive_path, "r:gz") as tf: + tf.extractall(dest_dir, filter="data") + + +def setup_dashd(cache_dir): + """Download and extract dashd binary. Returns the path to the dashd binary.""" + asset = get_asset_info() + dashd_dir = os.path.join(cache_dir, f"dashcore-{DASHVERSION}") + + ext = ".exe" if platform.system() == "Windows" else "" + dashd_bin = os.path.join(dashd_dir, "bin", f"dashd{ext}") + + if os.path.isfile(dashd_bin): + log(f"dashd {DASHVERSION} already available") + return dashd_bin + + log(f"Downloading dashd {DASHVERSION}...") + archive_path = os.path.join(cache_dir, asset) + url = f"https://github.com/dashpay/dash/releases/download/v{DASHVERSION}/{asset}" + download(url, archive_path) + extract(archive_path, cache_dir) + os.remove(archive_path) + log(f"Downloaded dashd to {dashd_dir}") + + if not os.path.isfile(dashd_bin): + sys.exit(f"Expected binary not found after extraction: {dashd_bin}") + + return dashd_bin + + +def setup_test_data(cache_dir): + """Download and extract test blockchain data. 
Returns the datadir path.""" + test_data_dir = os.path.join( + cache_dir, f"regtest-blockchain-{TEST_DATA_VERSION}", "regtest-40000" + ) + blocks_dir = os.path.join(test_data_dir, "regtest", "blocks") + + if os.path.isdir(blocks_dir): + log(f"Test blockchain data {TEST_DATA_VERSION} already available") + return test_data_dir + + log(f"Downloading test blockchain data {TEST_DATA_VERSION}...") + parent_dir = os.path.join(cache_dir, f"regtest-blockchain-{TEST_DATA_VERSION}") + os.makedirs(parent_dir, exist_ok=True) + + archive_path = os.path.join(cache_dir, "regtest-40000.tar.gz") + url = f"https://github.com/{TEST_DATA_REPO}/releases/download/{TEST_DATA_VERSION}/regtest-40000.tar.gz" + download(url, archive_path) + extract(archive_path, parent_dir) + os.remove(archive_path) + + if not os.path.isdir(blocks_dir): + sys.exit(f"Expected blocks directory not found after extraction: {blocks_dir}") + + log(f"Downloaded test data to {test_data_dir}") + + return test_data_dir + + +def main(): + cache_dir = get_cache_dir() + os.makedirs(cache_dir, exist_ok=True) + + dashd_path = setup_dashd(cache_dir) + datadir = setup_test_data(cache_dir) + + # Output lines for GITHUB_ENV or shell eval + print(f"DASHD_PATH={dashd_path}") + print(f"DASHD_DATADIR={datadir}") + + +if __name__ == "__main__": + main() diff --git a/dash-spv-ffi/Cargo.toml b/dash-spv-ffi/Cargo.toml index 547c762f8..7cf150c27 100644 --- a/dash-spv-ffi/Cargo.toml +++ b/dash-spv-ffi/Cargo.toml @@ -31,9 +31,14 @@ key-wallet = { path = "../key-wallet" } key-wallet-manager = { path = "../key-wallet-manager" } rand = "0.8" clap = { version = "4.5", features = ["derive"] } +tempfile = { version = "3.8", optional = true } + +[features] +test-utils = ["dep:tempfile", "dash-spv/test-utils"] [dev-dependencies] -tempfile = "3.8" +dash-spv = { path = "../dash-spv", features = ["test-utils"] } +dash-spv-ffi = { path = ".", features = ["test-utils"] } serial_test = "3.0" env_logger = "0.10" diff --git a/dash-spv-ffi/src/lib.rs 
b/dash-spv-ffi/src/lib.rs index d53a16d56..95982b49e 100644 --- a/dash-spv-ffi/src/lib.rs +++ b/dash-spv-ffi/src/lib.rs @@ -14,6 +14,9 @@ pub use platform_integration::*; pub use types::*; pub use utils::*; +#[cfg(any(test, feature = "test-utils"))] +pub mod test_utils; + // FFINetwork is now defined in types.rs for cbindgen compatibility // It must match the definition in key_wallet_ffi diff --git a/dash-spv-ffi/src/test_utils/callbacks.rs b/dash-spv-ffi/src/test_utils/callbacks.rs new file mode 100644 index 000000000..4f6d7c454 --- /dev/null +++ b/dash-spv-ffi/src/test_utils/callbacks.rs @@ -0,0 +1,421 @@ +//! FFI callback implementations and tracker for integration tests. + +use std::ffi::CStr; +use std::os::raw::{c_char, c_void}; +use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; + +use crate::*; + +/// Tracks callback invocations for verification. +/// +/// Fields are updated atomically from FFI callbacks and read in test assertions. +#[derive(Default)] +pub struct CallbackTracker { + // Sync event tracking + pub sync_start_count: AtomicU32, + pub block_headers_stored_count: AtomicU32, + pub block_header_sync_complete_count: AtomicU32, + pub filter_headers_stored_count: AtomicU32, + pub filter_headers_sync_complete_count: AtomicU32, + pub filters_stored_count: AtomicU32, + pub filters_sync_complete_count: AtomicU32, + pub blocks_needed_count: AtomicU32, + pub block_processed_count: AtomicU32, + pub masternode_state_updated_count: AtomicU32, + pub chainlock_received_count: AtomicU32, + pub instantlock_received_count: AtomicU32, + pub manager_error_count: AtomicU32, + pub sync_complete_count: AtomicU32, + + // Network event tracking + pub peer_connected_count: AtomicU32, + pub peer_disconnected_count: AtomicU32, + pub peers_updated_count: AtomicU32, + + // Wallet event tracking + pub transaction_received_count: AtomicU32, + pub balance_updated_count: AtomicU32, + + // Data from callbacks + pub last_header_tip: AtomicU32, + 
pub last_filter_tip: AtomicU32, + pub last_connected_peer_count: AtomicU32, + pub last_best_height: AtomicU32, + pub connected_peers: Mutex>, + pub errors: Mutex>, + + // Transaction data from on_transaction_received + pub received_txids: Mutex>, + pub received_amounts: Mutex>, + + // Balance data from on_balance_updated + pub last_spendable: AtomicU64, + pub last_unconfirmed: AtomicU64, + + // Lifecycle ordering via global sequence counter + pub sequence_counter: AtomicU32, + pub sync_start_seq: AtomicU32, + pub header_complete_seq: AtomicU32, + pub filter_header_complete_seq: AtomicU32, + pub filters_sync_complete_seq: AtomicU32, + pub sync_complete_seq: AtomicU32, + + // Filter header range validation: (start, end, tip) + pub filter_header_ranges: Mutex>, + + // Block processed heights + pub processed_block_heights: Mutex>, + + // Completion tracking + pub last_sync_cycle: AtomicU32, + + // Baseline for `wait_for_sync`: captured before the client starts so that + // a SyncComplete firing between client start and `wait_for_sync` entry is + // not missed. + pub sync_count_baseline: AtomicU32, +} + +impl CallbackTracker { + /// Assert that no errors were recorded during sync. + pub fn assert_no_errors(&self) { + let errors = self.errors.lock().unwrap(); + assert!(errors.is_empty(), "Unexpected sync errors: {:?}", *errors); + } +} + +/// Extract the `CallbackTracker` reference from a `user_data` pointer. +/// Returns `None` if the pointer is null. +/// +/// # Safety +/// +/// The pointer must point to a valid, live `CallbackTracker` +/// (e.g. obtained via `Arc::as_ptr`). +unsafe fn tracker_from(user_data: *mut c_void) -> Option<&'static CallbackTracker> { + if user_data.is_null() { + None + } else { + Some(&*(user_data as *const CallbackTracker)) + } +} + +/// Convert a nullable C string pointer to an owned `String`. +/// Returns `"Unknown"` if the pointer is null. +/// +/// # Safety +/// +/// The pointer must point to a valid, null-terminated C string if non-null. 
+unsafe fn cstr_or_unknown(ptr: *const c_char) -> String { + if ptr.is_null() { + "Unknown".to_string() + } else { + CStr::from_ptr(ptr).to_string_lossy().into_owned() + } +} + +extern "C" fn on_sync_start(manager_id: FFIManagerId, user_data: *mut c_void) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.sync_start_count.fetch_add(1, Ordering::SeqCst); + let seq = tracker.sequence_counter.fetch_add(1, Ordering::SeqCst); + tracker.sync_start_seq.store(seq, Ordering::SeqCst); + tracing::debug!("on_sync_start: manager={:?}, seq={}", manager_id, seq); +} + +extern "C" fn on_block_headers_stored(tip_height: u32, user_data: *mut c_void) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.block_headers_stored_count.fetch_add(1, Ordering::SeqCst); + tracker.last_header_tip.store(tip_height, Ordering::SeqCst); + tracing::debug!("on_block_headers_stored: tip={}", tip_height); +} + +extern "C" fn on_block_header_sync_complete(tip_height: u32, user_data: *mut c_void) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.block_header_sync_complete_count.fetch_add(1, Ordering::SeqCst); + tracker.last_header_tip.store(tip_height, Ordering::SeqCst); + let seq = tracker.sequence_counter.fetch_add(1, Ordering::SeqCst); + tracker.header_complete_seq.store(seq, Ordering::SeqCst); + tracing::info!("on_block_header_sync_complete: tip={}, seq={}", tip_height, seq); +} + +extern "C" fn on_filter_headers_stored( + start_height: u32, + end_height: u32, + tip_height: u32, + user_data: *mut c_void, +) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.filter_headers_stored_count.fetch_add(1, Ordering::SeqCst); + tracker.last_filter_tip.store(tip_height, Ordering::SeqCst); + if let Ok(mut ranges) = tracker.filter_header_ranges.lock() { + ranges.push((start_height, end_height, tip_height)); + } + tracing::debug!( + 
"on_filter_headers_stored: start={}, end={}, tip={}", + start_height, + end_height, + tip_height + ); +} + +extern "C" fn on_filter_headers_sync_complete(tip_height: u32, user_data: *mut c_void) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.filter_headers_sync_complete_count.fetch_add(1, Ordering::SeqCst); + tracker.last_filter_tip.store(tip_height, Ordering::SeqCst); + let seq = tracker.sequence_counter.fetch_add(1, Ordering::SeqCst); + tracker.filter_header_complete_seq.store(seq, Ordering::SeqCst); + tracing::info!("on_filter_headers_sync_complete: tip={}, seq={}", tip_height, seq); +} + +extern "C" fn on_filters_stored(start_height: u32, end_height: u32, user_data: *mut c_void) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.filters_stored_count.fetch_add(1, Ordering::SeqCst); + tracing::debug!("on_filters_stored: {}-{}", start_height, end_height); +} + +extern "C" fn on_filters_sync_complete(tip_height: u32, user_data: *mut c_void) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.filters_sync_complete_count.fetch_add(1, Ordering::SeqCst); + tracker.last_filter_tip.store(tip_height, Ordering::SeqCst); + let seq = tracker.sequence_counter.fetch_add(1, Ordering::SeqCst); + tracker.filters_sync_complete_seq.store(seq, Ordering::SeqCst); + tracing::info!("on_filters_sync_complete: tip={}, seq={}", tip_height, seq); +} + +extern "C" fn on_blocks_needed( + _blocks: *const crate::FFIBlockNeeded, + count: u32, + user_data: *mut c_void, +) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.blocks_needed_count.fetch_add(1, Ordering::SeqCst); + tracing::debug!("on_blocks_needed: count={}", count); +} + +extern "C" fn on_block_processed( + height: u32, + _hash: *const [u8; 32], + new_address_count: u32, + user_data: *mut c_void, +) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { 
+ return; + }; + tracker.block_processed_count.fetch_add(1, Ordering::SeqCst); + if let Ok(mut heights) = tracker.processed_block_heights.lock() { + heights.push(height); + } + tracing::debug!("on_block_processed: height={}, new_addresses={}", height, new_address_count); +} + +extern "C" fn on_masternode_state_updated(height: u32, user_data: *mut c_void) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.masternode_state_updated_count.fetch_add(1, Ordering::SeqCst); + tracing::debug!("on_masternode_state_updated: height={}", height); +} + +extern "C" fn on_chainlock_received( + height: u32, + _hash: *const [u8; 32], + _signature: *const [u8; 96], + validated: bool, + user_data: *mut c_void, +) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.chainlock_received_count.fetch_add(1, Ordering::SeqCst); + tracing::info!("on_chainlock_received: height={}, validated={}", height, validated); +} + +extern "C" fn on_instantlock_received( + _txid: *const [u8; 32], + _instantlock_data: *const u8, + _instantlock_len: usize, + validated: bool, + user_data: *mut c_void, +) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.instantlock_received_count.fetch_add(1, Ordering::SeqCst); + tracing::debug!("on_instantlock_received: validated={}", validated); +} + +extern "C" fn on_manager_error( + manager_id: FFIManagerId, + error: *const c_char, + user_data: *mut c_void, +) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.manager_error_count.fetch_add(1, Ordering::SeqCst); + let error_str = unsafe { cstr_or_unknown(error) }; + tracing::error!("on_manager_error: manager={:?}, error={}", manager_id, error_str); + tracker.errors.lock().unwrap_or_else(|e| e.into_inner()).push(error_str); +} + +extern "C" fn on_sync_complete(header_tip: u32, cycle: u32, user_data: *mut c_void) { + let Some(tracker) = (unsafe { 
tracker_from(user_data) }) else { + return; + }; + tracker.sync_complete_count.fetch_add(1, Ordering::SeqCst); + tracker.last_header_tip.store(header_tip, Ordering::SeqCst); + tracker.last_sync_cycle.store(cycle, Ordering::SeqCst); + let seq = tracker.sequence_counter.fetch_add(1, Ordering::SeqCst); + tracker.sync_complete_seq.store(seq, Ordering::SeqCst); + tracing::info!("on_sync_complete: header_tip={}, cycle={}, seq={}", header_tip, cycle, seq); +} + +extern "C" fn on_peer_connected(address: *const c_char, user_data: *mut c_void) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.peer_connected_count.fetch_add(1, Ordering::SeqCst); + let addr_str = unsafe { cstr_or_unknown(address) }; + tracing::info!("on_peer_connected: {}", addr_str); + if let Ok(mut peers) = tracker.connected_peers.lock() { + peers.push(addr_str); + } +} + +extern "C" fn on_peer_disconnected(address: *const c_char, user_data: *mut c_void) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.peer_disconnected_count.fetch_add(1, Ordering::SeqCst); + let addr_str = unsafe { cstr_or_unknown(address) }; + tracing::info!("on_peer_disconnected: {}", addr_str); +} + +extern "C" fn on_peers_updated(connected_count: u32, best_height: u32, user_data: *mut c_void) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.peers_updated_count.fetch_add(1, Ordering::SeqCst); + tracker.last_connected_peer_count.store(connected_count, Ordering::SeqCst); + tracker.last_best_height.store(best_height, Ordering::SeqCst); + tracing::debug!("on_peers_updated: connected={}, best_height={}", connected_count, best_height); +} + +extern "C" fn on_transaction_received( + wallet_id: *const c_char, + account_index: u32, + txid: *const [u8; 32], + amount: i64, + _addresses: *const c_char, + user_data: *mut c_void, +) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + 
tracker.transaction_received_count.fetch_add(1, Ordering::SeqCst); + if !txid.is_null() { + let txid_bytes = unsafe { *txid }; + if let Ok(mut txids) = tracker.received_txids.lock() { + txids.push(txid_bytes); + } + } + if let Ok(mut amounts) = tracker.received_amounts.lock() { + amounts.push(amount); + } + let wallet_str = unsafe { cstr_or_unknown(wallet_id) }; + tracing::info!( + "on_transaction_received: wallet={}, account={}, amount={}", + wallet_str, + account_index, + amount + ); +} + +extern "C" fn on_balance_updated( + wallet_id: *const c_char, + spendable: u64, + unconfirmed: u64, + immature: u64, + locked: u64, + user_data: *mut c_void, +) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + tracker.balance_updated_count.fetch_add(1, Ordering::SeqCst); + tracker.last_spendable.store(spendable, Ordering::SeqCst); + tracker.last_unconfirmed.store(unconfirmed, Ordering::SeqCst); + let wallet_str = unsafe { cstr_or_unknown(wallet_id) }; + tracing::info!( + "on_balance_updated: wallet={}, spendable={}, unconfirmed={}, immature={}, locked={}", + wallet_str, + spendable, + unconfirmed, + immature, + locked, + ); +} + +/// Create sync callbacks with all event handlers wired to the tracker. +/// +/// The `user_data` pointer borrows the tracker Arc. The caller must ensure the +/// Arc outlives all callback invocations (i.e. stop the client before dropping it). 
+pub fn create_sync_callbacks(tracker: &Arc) -> FFISyncEventCallbacks { + FFISyncEventCallbacks { + on_sync_start: Some(on_sync_start), + on_block_headers_stored: Some(on_block_headers_stored), + on_block_header_sync_complete: Some(on_block_header_sync_complete), + on_filter_headers_stored: Some(on_filter_headers_stored), + on_filter_headers_sync_complete: Some(on_filter_headers_sync_complete), + on_filters_stored: Some(on_filters_stored), + on_filters_sync_complete: Some(on_filters_sync_complete), + on_blocks_needed: Some(on_blocks_needed), + on_block_processed: Some(on_block_processed), + on_masternode_state_updated: Some(on_masternode_state_updated), + on_chainlock_received: Some(on_chainlock_received), + on_instantlock_received: Some(on_instantlock_received), + on_manager_error: Some(on_manager_error), + on_sync_complete: Some(on_sync_complete), + user_data: Arc::as_ptr(tracker) as *mut c_void, + } +} + +/// Create network event callbacks wired to the tracker. +/// +/// The `user_data` pointer borrows the tracker Arc. The caller must ensure the +/// Arc outlives all callback invocations. +pub fn create_network_callbacks(tracker: &Arc) -> FFINetworkEventCallbacks { + FFINetworkEventCallbacks { + on_peer_connected: Some(on_peer_connected), + on_peer_disconnected: Some(on_peer_disconnected), + on_peers_updated: Some(on_peers_updated), + user_data: Arc::as_ptr(tracker) as *mut c_void, + } +} + +/// Create wallet event callbacks wired to the tracker. +/// +/// The `user_data` pointer borrows the tracker Arc. The caller must ensure the +/// Arc outlives all callback invocations. 
+pub fn create_wallet_callbacks(tracker: &Arc) -> FFIWalletEventCallbacks { + FFIWalletEventCallbacks { + on_transaction_received: Some(on_transaction_received), + on_balance_updated: Some(on_balance_updated), + user_data: Arc::as_ptr(tracker) as *mut c_void, + } +} diff --git a/dash-spv-ffi/src/test_utils/context.rs b/dash-spv-ffi/src/test_utils/context.rs new file mode 100644 index 000000000..5b451a81f --- /dev/null +++ b/dash-spv-ffi/src/test_utils/context.rs @@ -0,0 +1,445 @@ +//! FFI test context for integration tests. + +use std::collections::HashSet; +use std::ffi::{CStr, CString}; +use std::path::PathBuf; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::time::Duration; + +use crate::client::{ + dash_spv_ffi_client_destroy, dash_spv_ffi_client_get_wallet_manager, dash_spv_ffi_client_new, + dash_spv_ffi_client_run, dash_spv_ffi_client_set_network_event_callbacks, + dash_spv_ffi_client_set_sync_event_callbacks, dash_spv_ffi_client_set_wallet_event_callbacks, + dash_spv_ffi_client_stop, dash_spv_ffi_wallet_manager_free, FFIDashSpvClient, +}; +use crate::config::{ + dash_spv_ffi_config_add_peer, dash_spv_ffi_config_destroy, dash_spv_ffi_config_new, + dash_spv_ffi_config_set_data_dir, dash_spv_ffi_config_set_masternode_sync_enabled, + dash_spv_ffi_config_set_restrict_to_configured_peers, FFIClientConfig, +}; +use crate::types::FFIWalletManager as FFIWalletManagerOpaque; +use dash_spv::logging::{LogFileConfig, LoggingConfig, LoggingGuard}; +use dash_spv::test_utils::{retain_test_dir, SYNC_TIMEOUT}; +use dashcore::hashes::Hash; +use dashcore::{Address, Txid}; +use key_wallet_ffi::managed_account::{ + managed_core_account_free, managed_core_account_free_transactions, + managed_core_account_get_transaction_count, managed_core_account_get_transactions, + managed_wallet_get_account, FFIManagedCoreAccount, FFITransactionRecord, +}; +use key_wallet_ffi::managed_wallet::{ + managed_wallet_get_next_bip44_receive_address, managed_wallet_info_free, +}; +use 
key_wallet_ffi::types::FFIAccountType; +use key_wallet_ffi::wallet::wallet_free_const; +use key_wallet_ffi::wallet_manager::{ + wallet_manager_add_wallet_from_mnemonic, wallet_manager_get_managed_wallet_info, +}; +use key_wallet_ffi::{ + wallet_manager_free_string, wallet_manager_free_wallet_ids, wallet_manager_get_wallet, + wallet_manager_get_wallet_balance, wallet_manager_get_wallet_ids, FFIError, FFINetwork, + FFIWalletManager, +}; +use tempfile::TempDir; + +use super::callbacks::{ + create_network_callbacks, create_sync_callbacks, create_wallet_callbacks, CallbackTracker, +}; + +/// State that stays fixed across client restarts (temp dir, logging, config). +struct FixedState { + _temp_dir: TempDir, + _log_guard: LoggingGuard, + storage_dir: PathBuf, + config: *mut FFIClientConfig, +} + +impl Drop for FixedState { + fn drop(&mut self) { + retain_test_dir(&self.storage_dir, "spv"); + unsafe { + dash_spv_ffi_config_destroy(self.config); + } + } +} + +/// Per-session FFI state (client, wallet_manager, tracker). Recreated on restart. +struct SessionState { + client: *mut FFIDashSpvClient, + wallet_manager: *mut FFIWalletManagerOpaque, + tracker: Arc, +} + +impl Drop for SessionState { + fn drop(&mut self) { + unsafe { + dash_spv_ffi_client_stop(self.client); + dash_spv_ffi_wallet_manager_free(self.wallet_manager); + dash_spv_ffi_client_destroy(self.client); + } + } +} + +/// Shared FFI test context. +/// +/// Split into `FixedState` (stays fixed across restarts) and `SessionState` +/// (recreated on restart). +pub struct FFITestContext { + fixed: FixedState, + session: SessionState, +} + +impl FFITestContext { + /// Create a new FFI test context connected to the given peer. + /// + /// # Safety + /// + /// Calls FFI functions that allocate and configure opaque pointers. 
+ pub unsafe fn new(peer_addr: std::net::SocketAddr) -> Self { + let temp_dir = TempDir::new().expect("Failed to create temp dir"); + let storage_dir = temp_dir.path().to_path_buf(); + let log_dir = storage_dir.join("logs"); + + let log_guard = dash_spv::init_logging(LoggingConfig { + level: Some(dash_spv::LevelFilter::DEBUG), + console: std::env::var("DASHD_TEST_LOG").is_ok(), + file: Some(LogFileConfig { + log_dir, + max_files: 1, + }), + thread_local: true, + }) + .expect("Failed to initialize test logging"); + + let config = dash_spv_ffi_config_new(FFINetwork::Regtest); + assert!(!config.is_null(), "Failed to create FFI config"); + + let path = CString::new(temp_dir.path().to_str().unwrap()).unwrap(); + let result = dash_spv_ffi_config_set_data_dir(config, path.as_ptr()); + assert_eq!(result, 0, "Failed to set data dir"); + + let result = dash_spv_ffi_config_set_masternode_sync_enabled(config, false); + assert_eq!(result, 0, "Failed to disable masternode sync"); + + let peer_str = CString::new(peer_addr.to_string()).unwrap(); + let result = dash_spv_ffi_config_add_peer(config, peer_str.as_ptr()); + assert_eq!(result, 0, "Failed to add peer"); + + let result = dash_spv_ffi_config_set_restrict_to_configured_peers(config, true); + assert_eq!(result, 0, "Failed to restrict peers"); + + let client = dash_spv_ffi_client_new(config); + assert!(!client.is_null(), "Failed to create FFI client"); + + let wallet_manager = dash_spv_ffi_client_get_wallet_manager(client); + assert!(!wallet_manager.is_null(), "Failed to get wallet manager"); + + FFITestContext { + fixed: FixedState { + _temp_dir: temp_dir, + _log_guard: log_guard, + storage_dir, + config, + }, + session: SessionState { + client, + wallet_manager, + tracker: Arc::new(CallbackTracker::default()), + }, + } + } + + /// The FFI client pointer. + pub fn client(&self) -> *mut FFIDashSpvClient { + self.session.client + } + + /// The callback tracker. 
+ pub fn tracker(&self) -> &Arc { + &self.session.tracker + } + + /// Add a wallet from mnemonic via FFI. + /// + /// # Safety + /// + /// Calls FFI wallet functions through raw pointers held by the context. + pub unsafe fn add_wallet(&self, mnemonic: &str) -> Vec { + let mnemonic_c = CString::new(mnemonic).unwrap(); + let passphrase = CString::new("").unwrap(); + let mut error = FFIError::success(); + let wm = self.session.wallet_manager as *mut FFIWalletManager; + + let success = wallet_manager_add_wallet_from_mnemonic( + wm, + mnemonic_c.as_ptr(), + passphrase.as_ptr(), + &mut error, + ); + if !success { + let error_msg = if !error.message.is_null() { + CStr::from_ptr(error.message).to_str().unwrap_or("Unknown error") + } else { + "No error message" + }; + panic!("Failed to add wallet from mnemonic: code={:?}, msg={}", error.code, error_msg); + } + + let mut wallet_ids_ptr: *mut u8 = std::ptr::null_mut(); + let mut wallet_count: usize = 0; + let success = + wallet_manager_get_wallet_ids(wm, &mut wallet_ids_ptr, &mut wallet_count, &mut error); + assert!(success && wallet_count > 0, "Failed to get wallet IDs"); + + let wallet_id = std::slice::from_raw_parts(wallet_ids_ptr, 32).to_vec(); + wallet_manager_free_wallet_ids(wallet_ids_ptr, wallet_count); + wallet_id + } + + /// Get wallet balance via FFI. Returns (confirmed, unconfirmed). + /// + /// # Safety + /// + /// Calls FFI wallet functions through raw pointers held by the context. + pub unsafe fn get_wallet_balance(&self, wallet_id: &[u8]) -> (u64, u64) { + let mut confirmed: u64 = 0; + let mut unconfirmed: u64 = 0; + let mut error = FFIError::success(); + let wm = self.session.wallet_manager as *mut FFIWalletManager; + + let success = wallet_manager_get_wallet_balance( + wm, + wallet_id.as_ptr(), + &mut confirmed, + &mut unconfirmed, + &mut error, + ); + assert!(success, "Failed to get wallet balance"); + (confirmed, unconfirmed) + } + + /// Set up sync event callbacks and run the client. 
+ /// + /// # Safety + /// + /// Calls FFI client functions through raw pointers held by the context. + pub unsafe fn run_with_sync_callbacks(&self) { + let sync_callbacks = create_sync_callbacks(&self.session.tracker); + let result = + dash_spv_ffi_client_set_sync_event_callbacks(self.session.client, sync_callbacks); + assert_eq!(result, 0, "Failed to set sync event callbacks"); + + self.snapshot_sync_baseline(); + let result = dash_spv_ffi_client_run(self.session.client); + assert_eq!(result, 0, "Failed to run FFI client"); + } + + /// Set up sync, network, and wallet event callbacks, then run the client. + /// + /// # Safety + /// + /// Calls FFI client functions through raw pointers held by the context. + pub unsafe fn run_with_all_callbacks(&self) { + let sync_cbs = create_sync_callbacks(&self.session.tracker); + let network_cbs = create_network_callbacks(&self.session.tracker); + let wallet_cbs = create_wallet_callbacks(&self.session.tracker); + + let result = dash_spv_ffi_client_set_sync_event_callbacks(self.session.client, sync_cbs); + assert_eq!(result, 0, "Failed to set sync event callbacks"); + let result = + dash_spv_ffi_client_set_network_event_callbacks(self.session.client, network_cbs); + assert_eq!(result, 0, "Failed to set network event callbacks"); + let result = + dash_spv_ffi_client_set_wallet_event_callbacks(self.session.client, wallet_cbs); + assert_eq!(result, 0, "Failed to set wallet event callbacks"); + + self.snapshot_sync_baseline(); + let result = dash_spv_ffi_client_run(self.session.client); + assert_eq!(result, 0, "Failed to run FFI client"); + } + + /// Captures the current `sync_complete_count` as the baseline for the next + /// `wait_for_sync` call. Called automatically by the `run_*` methods before + /// starting the client, and by `wait_for_sync` after each successful wait. 
+ fn snapshot_sync_baseline(&self) { + let current = self.session.tracker.sync_complete_count.load(Ordering::SeqCst); + self.session.tracker.sync_count_baseline.store(current, Ordering::SeqCst); + } + + /// Polls until a new `SyncComplete` event fires with both header and filter + /// tips at or above `expected_height`. + pub fn wait_for_sync(&self, expected_height: u32) { + let baseline = self.session.tracker.sync_count_baseline.load(Ordering::SeqCst); + let start = std::time::Instant::now(); + + loop { + let sync_fired = + self.session.tracker.sync_complete_count.load(Ordering::SeqCst) > baseline; + let current_header = self.session.tracker.last_header_tip.load(Ordering::SeqCst); + let current_filter = self.session.tracker.last_filter_tip.load(Ordering::SeqCst); + + if sync_fired && current_header >= expected_height && current_filter >= expected_height + { + self.snapshot_sync_baseline(); + break; + } + + assert!( + start.elapsed() < SYNC_TIMEOUT, + "Sync did not complete within {:?} (headers={}/{}, filters={}/{})", + SYNC_TIMEOUT, + current_header, + expected_height, + current_filter, + expected_height, + ); + + std::thread::sleep(Duration::from_millis(50)); + } + } + + /// Get a receive address for the given wallet via FFI. + /// + /// # Safety + /// + /// Calls FFI wallet functions through raw pointers held by the context. 
+ pub unsafe fn get_receive_address(&self, wallet_id: &[u8]) -> Address { + let mut error = FFIError::success(); + let wm = self.session.wallet_manager as *mut FFIWalletManager; + + let ffi_wallet = wallet_manager_get_wallet(wm, wallet_id.as_ptr(), &mut error); + assert!(!ffi_wallet.is_null(), "Failed to get FFI wallet"); + + let ffi_info = wallet_manager_get_managed_wallet_info(wm, wallet_id.as_ptr(), &mut error); + assert!(!ffi_info.is_null(), "Failed to get FFI managed wallet info"); + + let addr_ptr = + managed_wallet_get_next_bip44_receive_address(ffi_info, ffi_wallet, 0, &mut error); + assert!(!addr_ptr.is_null(), "Failed to get receive address"); + + let addr_str = CStr::from_ptr(addr_ptr).to_str().unwrap(); + let address = addr_str.parse::>().unwrap().assume_checked(); + wallet_manager_free_string(addr_ptr); + + managed_wallet_info_free(ffi_info); + wallet_free_const(ffi_wallet); + + address + } + + /// Get the BIP44 account 0 for a wallet, call the provided closure, then free the account. + /// + /// # Safety + /// + /// Calls FFI managed account functions through raw pointers. + unsafe fn with_bip44_account( + &self, + wallet_id: &[u8], + f: impl FnOnce(*const FFIManagedCoreAccount) -> T, + ) -> T { + let wm = self.session.wallet_manager as *const FFIWalletManager; + let result = + managed_wallet_get_account(wm, wallet_id.as_ptr(), 0, FFIAccountType::StandardBIP44); + assert!( + result.error_code == 0 && !result.account.is_null(), + "Failed to get BIP44 account 0" + ); + let value = f(result.account); + managed_core_account_free(result.account); + value + } + + /// Get the number of transactions in the BIP44 account 0 for a wallet. + /// + /// # Safety + /// + /// Calls FFI managed account functions through raw pointers. 
+ pub unsafe fn transaction_count(&self, wallet_id: &[u8]) -> usize { + self.with_bip44_account(wallet_id, |account| { + managed_core_account_get_transaction_count(account) as usize + }) + } + + /// Check whether the BIP44 account 0 contains a specific transaction. + /// + /// # Safety + /// + /// Calls FFI managed account functions through raw pointers. + pub unsafe fn has_transaction(&self, wallet_id: &[u8], txid: &Txid) -> bool { + self.with_bip44_account(wallet_id, |account| { + let mut txs_ptr: *mut FFITransactionRecord = std::ptr::null_mut(); + let mut count: usize = 0; + let ok = managed_core_account_get_transactions(account, &mut txs_ptr, &mut count); + assert!(ok, "Failed to get transactions"); + + let found = if count > 0 && !txs_ptr.is_null() { + let txs = std::slice::from_raw_parts(txs_ptr, count); + let target = txid.to_byte_array(); + txs.iter().any(|t| t.txid == target) + } else { + false + }; + + managed_core_account_free_transactions(txs_ptr, count); + found + }) + } + + /// Collect all transaction IDs from BIP44 account 0 as hex strings (display order). + /// + /// # Safety + /// + /// Calls FFI managed account functions through raw pointers. + pub unsafe fn wallet_txids(&self, wallet_id: &[u8]) -> HashSet { + self.with_bip44_account(wallet_id, |account| { + let mut txs_ptr: *mut FFITransactionRecord = std::ptr::null_mut(); + let mut count: usize = 0; + let ok = managed_core_account_get_transactions(account, &mut txs_ptr, &mut count); + assert!(ok, "Failed to get transactions"); + + let mut txids = HashSet::new(); + if count > 0 && !txs_ptr.is_null() { + let txs = std::slice::from_raw_parts(txs_ptr, count); + for t in txs { + // Reverse bytes for display order (internal is little-endian) + let txid = Txid::from_byte_array(t.txid); + txids.insert(txid.to_string()); + } + } + + managed_core_account_free_transactions(txs_ptr, count); + txids + }) + } + + /// Stop the client and recreate it with the same config and storage. 
+ /// + /// Resets the tracker and returns the new context. The wallet must be + /// re-added after calling this. + /// + /// # Safety + /// + /// Calls FFI client functions through raw pointers held by the context. + pub unsafe fn restart(self) -> Self { + let fixed = self.fixed; + // Drop the session (stops client, frees wallet manager, destroys client) + drop(self.session); + + // Recreate client from same config (same storage dir and peers) + let client = dash_spv_ffi_client_new(fixed.config); + assert!(!client.is_null(), "Failed to recreate FFI client"); + + let wallet_manager = dash_spv_ffi_client_get_wallet_manager(client); + assert!(!wallet_manager.is_null(), "Failed to get wallet manager after restart"); + + FFITestContext { + fixed, + session: SessionState { + client, + wallet_manager, + tracker: Arc::new(CallbackTracker::default()), + }, + } + } +} diff --git a/dash-spv-ffi/src/test_utils/mod.rs b/dash-spv-ffi/src/test_utils/mod.rs new file mode 100644 index 000000000..04b22b4ea --- /dev/null +++ b/dash-spv-ffi/src/test_utils/mod.rs @@ -0,0 +1,11 @@ +//! Shared test infrastructure for FFI integration tests. +//! +//! Provides reusable context, callbacks, and helpers that FFI integration tests share. +//! Gated behind the `test-utils` feature so integration tests can import via +//! `dash_spv_ffi::test_utils`. + +pub mod callbacks; +pub mod context; + +pub use callbacks::*; +pub use context::*; diff --git a/dash-spv-ffi/tests/callback_integration_test.rs b/dash-spv-ffi/tests/callback_integration_test.rs new file mode 100644 index 000000000..86100c0b6 --- /dev/null +++ b/dash-spv-ffi/tests/callback_integration_test.rs @@ -0,0 +1,375 @@ +//! Integration test for FFI event callbacks. +//! +//! This test verifies all three callback structs work correctly in a real sync scenario: +//! - FFISyncEventCallbacks +//! - FFINetworkEventCallbacks +//! 
//! - FFIWalletEventCallbacks

use dash_spv::test_utils::DashdTestContext;
use dash_spv_ffi::test_utils::FFITestContext;
use dashcore::hashes::Hash;
use dashcore::Amount;
use std::sync::atomic::Ordering;
use std::time::Duration;

/// End-to-end check that all three FFI callback structs (sync, network, wallet)
/// fire with consistent counts, ordering, and payload data during a full initial
/// sync against a real dashd. Silently skips when no dashd context is available.
#[test]
fn test_all_callbacks_during_sync() {
    let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap();
    // DashdTestContext::new returns None when dashd/test data is unavailable.
    let Some(dashd) = rt.block_on(DashdTestContext::new()) else {
        return;
    };

    unsafe {
        let ctx = FFITestContext::new(dashd.addr);
        let tracker = ctx.tracker().clone();

        ctx.add_wallet(&dashd.wallet.mnemonic);
        ctx.run_with_all_callbacks();
        tracing::info!("FFI client running with all callback types");

        ctx.wait_for_sync(dashd.initial_height);

        // Validate sync event callbacks
        let sync_start = tracker.sync_start_count.load(Ordering::SeqCst);
        let headers_stored = tracker.block_headers_stored_count.load(Ordering::SeqCst);
        let header_complete = tracker.block_header_sync_complete_count.load(Ordering::SeqCst);
        let filter_headers_stored = tracker.filter_headers_stored_count.load(Ordering::SeqCst);
        let filter_header_complete =
            tracker.filter_headers_sync_complete_count.load(Ordering::SeqCst);
        let filters_stored = tracker.filters_stored_count.load(Ordering::SeqCst);
        let filters_sync_complete = tracker.filters_sync_complete_count.load(Ordering::SeqCst);
        let blocks_needed = tracker.blocks_needed_count.load(Ordering::SeqCst);
        let block_processed = tracker.block_processed_count.load(Ordering::SeqCst);
        let sync_complete = tracker.sync_complete_count.load(Ordering::SeqCst);

        tracing::info!("Callback Summary");
        tracing::info!(
            "Sync: start={}, headers_stored={}, header_complete={}, filter_headers={}, \
             filter_complete={}, filters_stored={}, filters_sync={}, blocks_needed={}, \
             block_processed={}, sync_complete={}",
            sync_start,
            headers_stored,
            header_complete,
            filter_headers_stored,
            filter_header_complete,
            filters_stored,
            filters_sync_complete,
            blocks_needed,
            block_processed,
            sync_complete
        );

        // Repeatable events must fire at least once; one-shot phase-completion
        // events must fire exactly once during a single initial sync.
        assert!(sync_start > 0, "on_sync_start should have been called");
        assert!(headers_stored > 0, "on_block_headers_stored should have been called");
        assert_eq!(header_complete, 1, "on_block_header_sync_complete should be called once");
        assert!(filter_headers_stored > 0, "on_filter_headers_stored should have been called");
        assert_eq!(
            filter_header_complete, 1,
            "on_filter_headers_sync_complete should be called once"
        );
        assert!(filters_stored > 0, "on_filters_stored should have been called");
        assert!(filters_sync_complete > 0, "on_filters_sync_complete should have been called");
        assert!(blocks_needed > 0, "on_blocks_needed should have been called");
        assert!(block_processed > 0, "on_block_processed should have been called");
        assert_eq!(sync_complete, 1, "on_sync_complete should be called once");

        // Validate network event callbacks
        let peer_connected = tracker.peer_connected_count.load(Ordering::SeqCst);
        let peers_updated = tracker.peers_updated_count.load(Ordering::SeqCst);
        let last_peer_count = tracker.last_connected_peer_count.load(Ordering::SeqCst);
        let last_best_height = tracker.last_best_height.load(Ordering::SeqCst);

        tracing::info!(
            "Network: peer_connected={}, peers_updated={}, last_peer_count={}, best_height={}",
            peer_connected,
            peers_updated,
            last_peer_count,
            last_best_height
        );

        assert!(peer_connected > 0, "on_peer_connected should have been called");
        assert!(peers_updated > 0, "on_peers_updated should have been called");
        assert!(last_peer_count > 0, "at least one peer should be tracked");
        assert!(last_best_height > 0, "best height from peers should be positive");

        // The configured dashd must appear among the connected peer strings.
        let connected_peers = tracker.connected_peers.lock().unwrap();
        assert!(!connected_peers.is_empty(), "connected_peers should contain at least one entry");
        let dashd_addr = dashd.addr.to_string();
        assert!(
            connected_peers.iter().any(|p| p.contains(&dashd_addr)),
            "connected_peers should contain the dashd address {}: {:?}",
            dashd_addr,
            *connected_peers
        );
        drop(connected_peers);

        // Validate wallet event callbacks (test wallet has transactions)
        let tx_received = tracker.transaction_received_count.load(Ordering::SeqCst);
        let balance_updated = tracker.balance_updated_count.load(Ordering::SeqCst);

        tracing::info!("Wallet: tx_received={}, balance_updated={}", tx_received, balance_updated);

        assert!(
            tx_received > 0,
            "on_transaction_received should fire for wallet with transactions"
        );
        assert!(balance_updated > 0, "on_balance_updated should fire for wallet with transactions");

        // Validate sync cycle (initial sync is cycle 0)
        let last_sync_cycle = tracker.last_sync_cycle.load(Ordering::SeqCst);
        assert_eq!(last_sync_cycle, 0, "Initial sync should be cycle 0");

        // Validate callback lifecycle ordering: the tracker stamps each event
        // with a monotonically increasing sequence number, so phase completion
        // order can be asserted strictly.
        let sync_start_seq = tracker.sync_start_seq.load(Ordering::SeqCst);
        let header_complete_seq = tracker.header_complete_seq.load(Ordering::SeqCst);
        let filter_header_complete_seq = tracker.filter_header_complete_seq.load(Ordering::SeqCst);
        let filters_sync_complete_seq = tracker.filters_sync_complete_seq.load(Ordering::SeqCst);
        let sync_complete_seq = tracker.sync_complete_seq.load(Ordering::SeqCst);

        tracing::info!(
            "Sequence ordering: sync_start={}, header_complete={}, filter_header_complete={}, \
             filters_sync_complete={}, sync_complete={}",
            sync_start_seq,
            header_complete_seq,
            filter_header_complete_seq,
            filters_sync_complete_seq,
            sync_complete_seq
        );

        assert!(
            sync_start_seq < header_complete_seq,
            "sync_start ({}) should precede header_complete ({})",
            sync_start_seq,
            header_complete_seq
        );
        assert!(
            header_complete_seq < filter_header_complete_seq,
            "header_complete ({}) should precede filter_header_complete ({})",
            header_complete_seq,
            filter_header_complete_seq
        );
        assert!(
            filter_header_complete_seq < filters_sync_complete_seq,
            "filter_header_complete ({}) should precede filters_sync_complete ({})",
            filter_header_complete_seq,
            filters_sync_complete_seq
        );
        assert!(
            filters_sync_complete_seq < sync_complete_seq,
            "filters_sync_complete ({}) should precede sync_complete ({})",
            filters_sync_complete_seq,
            sync_complete_seq
        );

        // Validate filter header ranges: each recorded (start, end, tip) triple
        // must be internally consistent.
        let filter_ranges = tracker.filter_header_ranges.lock().unwrap();
        assert!(!filter_ranges.is_empty(), "filter header ranges should be recorded");
        for &(start, end, tip) in filter_ranges.iter() {
            assert!(
                start <= end,
                "filter header range start ({}) should be <= end ({})",
                start,
                end
            );
            assert!(end <= tip, "filter header range end ({}) should be <= tip ({})", end, tip);
        }
        drop(filter_ranges);

        // Validate block processed heights fall inside the pre-generated chain.
        let block_heights = tracker.processed_block_heights.lock().unwrap();
        assert!(!block_heights.is_empty(), "block processed heights should be recorded");
        for &h in block_heights.iter() {
            assert!(
                h >= 1 && h <= dashd.initial_height,
                "block processed height {} should be within [1, {}]",
                h,
                dashd.initial_height
            );
        }
        drop(block_heights);

        // Validate final state
        let final_header = tracker.last_header_tip.load(Ordering::SeqCst);
        let final_filter = tracker.last_filter_tip.load(Ordering::SeqCst);
        assert_eq!(final_header, dashd.initial_height, "Final header tip mismatch");
        assert_eq!(final_filter, dashd.initial_height, "Final filter tip mismatch");

        // Validate best height matches initial height
        assert_eq!(
            last_best_height, dashd.initial_height,
            "best height from peers should match initial height"
        );

        // Validate transaction data from initial sync
        let received_txids = tracker.received_txids.lock().unwrap();
        assert!(!received_txids.is_empty(), "should have received transaction txids during sync");
        drop(received_txids);

        let received_amounts = tracker.received_amounts.lock().unwrap();
        assert!(
            !received_amounts.is_empty(),
            "should have received transaction amounts during sync"
        );
        assert!(
            received_amounts.iter().any(|&a| a != 0),
            "at least one received transaction amount should be non-zero"
        );
        drop(received_amounts);

        // Masternodes are disabled in test config, so these should not fire
        let masternode_updated = tracker.masternode_state_updated_count.load(Ordering::SeqCst);
        assert_eq!(
            masternode_updated, 0,
            "masternode callbacks should not fire with masternodes disabled"
        );

        tracker.assert_no_errors();
    }
}

/// Verify wallet and network callbacks fire correctly after initial sync completes.
///
/// After initial sync, sends DASH to the wallet and mines a block. Verifies that
/// on_transaction_received and on_balance_updated callbacks fire. Then disconnects
/// dashd peers and verifies on_peer_disconnected fires, followed by on_peer_connected
/// after automatic reconnection.
#[test]
fn test_callbacks_post_sync_transactions_and_disconnect() {
    let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap();
    let Some(dashd) = rt.block_on(DashdTestContext::new()) else {
        return;
    };
    // Mining new blocks is required for the post-sync phase of this test.
    if !dashd.supports_mining {
        eprintln!("Skipping test (dashd RPC miner not available)");
        return;
    }

    unsafe {
        let ctx = FFITestContext::new(dashd.addr);
        let tracker = ctx.tracker().clone();

        let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic);
        ctx.run_with_all_callbacks();

        // Wait for initial sync
        ctx.wait_for_sync(dashd.initial_height);
        tracing::info!("Initial sync complete");

        // Record callback counts before post-sync operations
        let tx_received_before = tracker.transaction_received_count.load(Ordering::SeqCst);
        let balance_updated_before = tracker.balance_updated_count.load(Ordering::SeqCst);

        // Send DASH to the wallet and mine a block
        let receive_address = ctx.get_receive_address(&wallet_id);
        let send_amount = Amount::from_sat(100_000_000);
        let txid = dashd.node.send_to_address(&receive_address, send_amount);
        tracing::info!("Sent {} to wallet, txid: {}", send_amount, txid);

        let miner_address = dashd.node.get_new_address_from_wallet("default");
        dashd.node.generate_blocks(1, &miner_address);

        // Wait for incremental sync to complete
        ctx.wait_for_sync(dashd.initial_height + 1);

        // Verify on_transaction_received fired for the new transaction
        let tx_received_after = tracker.transaction_received_count.load(Ordering::SeqCst);
        assert!(
            tx_received_after > tx_received_before,
            "on_transaction_received should fire for post-sync transaction: {} -> {}",
            tx_received_before,
            tx_received_after
        );
        tracing::info!(
            "Transaction callback verified: {} -> {}",
            tx_received_before,
            tx_received_after
        );

        // Verify the sent txid appears in the callback data
        let sent_txid_bytes = *txid.as_byte_array();
        let received_txids = tracker.received_txids.lock().unwrap();
        assert!(
            received_txids.contains(&sent_txid_bytes),
            "sent txid should appear in received_txids callback data"
        );
        drop(received_txids);

        // Verify 1 DASH (100_000_000 satoshis) appears in received amounts
        let received_amounts = tracker.received_amounts.lock().unwrap();
        assert!(
            received_amounts.contains(&100_000_000),
            "1 DASH (100_000_000 sat) should appear in received_amounts: {:?}",
            *received_amounts
        );
        drop(received_amounts);

        // Verify on_balance_updated fired after the new transaction
        let balance_updated_after = tracker.balance_updated_count.load(Ordering::SeqCst);
        assert!(
            balance_updated_after > balance_updated_before,
            "on_balance_updated should fire for post-sync transaction: {} -> {}",
            balance_updated_before,
            balance_updated_after
        );
        tracing::info!(
            "Balance updated callback verified: {} -> {}",
            balance_updated_before,
            balance_updated_after
        );

        // Verify balance data from callback reflects a positive spendable balance
        let last_spendable = tracker.last_spendable.load(Ordering::SeqCst);
        assert!(
            last_spendable > 0,
            "last_spendable from on_balance_updated should be positive after receiving funds"
        );
        tracing::info!("Balance data verified: last_spendable={}", last_spendable);

        // Record connect count before disconnect
        let connect_before = tracker.peer_connected_count.load(Ordering::SeqCst);

        // Disconnect peers via dashd and verify on_peer_disconnected fires
        let disconnect_before = tracker.peer_disconnected_count.load(Ordering::SeqCst);
        dashd.node.disconnect_all_peers();

        // Wait for disconnect callback (poll up to 15s)
        let deadline = std::time::Instant::now() + Duration::from_secs(15);
        while tracker.peer_disconnected_count.load(Ordering::SeqCst) <= disconnect_before
            && std::time::Instant::now() < deadline
        {
            std::thread::sleep(Duration::from_millis(200));
        }

        let disconnect_after = tracker.peer_disconnected_count.load(Ordering::SeqCst);
        assert!(
            disconnect_after > disconnect_before,
            "on_peer_disconnected should fire after disconnect: {} -> {}",
            disconnect_before,
            disconnect_after
        );
        tracing::info!(
            "Disconnect callback verified: {} -> {}",
            disconnect_before,
            disconnect_after
        );

        // Wait for automatic reconnection (on_peer_connected should fire again)
        let deadline = std::time::Instant::now() + Duration::from_secs(30);
        while tracker.peer_connected_count.load(Ordering::SeqCst) <= connect_before
            && std::time::Instant::now() < deadline
        {
            std::thread::sleep(Duration::from_millis(200));
        }

        let connect_after = tracker.peer_connected_count.load(Ordering::SeqCst);
        assert!(
            connect_after > connect_before,
            "on_peer_connected should fire after reconnection: {} -> {}",
            connect_before,
            connect_after
        );
        tracing::info!("Reconnect callback verified: {} -> {}", connect_before, connect_after);

        tracker.assert_no_errors();
    }
}

// ==== file: dash-spv-ffi/tests/dashd_ffi_sync_test.rs ====
index 000000000..3dffd1b9e --- /dev/null +++ b/dash-spv-ffi/tests/dashd_ffi_sync_test.rs @@ -0,0 +1,426 @@ +//! FFI Sync tests using dashd. +//! +//! These tests mirror Rust SPV sync tests but use FFI bindings +//! with the event-based API (dash_spv_ffi_client_run + event callbacks). + +use std::collections::HashSet; +use std::sync::atomic::Ordering; + +use dash_spv::test_utils::DashdTestContext; +use dash_spv_ffi::test_utils::FFITestContext; +use dashcore::hashes::Hash; +use dashcore::Amount; + +#[test] +fn test_wallet_sync_via_ffi() { + let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap(); + let Some(dashd) = rt.block_on(DashdTestContext::new()) else { + eprintln!("Skipping test (dashd context unavailable)"); + return; + }; + + unsafe { + let ctx = FFITestContext::new(dashd.addr); + + let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); + tracing::info!("Added wallet, ID: {}", hex::encode(&wallet_id)); + + ctx.run_with_sync_callbacks(); + tracing::info!("FFI client running"); + + ctx.wait_for_sync(dashd.initial_height); + + ctx.tracker().assert_no_errors(); + + // Validate sync heights + let final_header = ctx.tracker().last_header_tip.load(Ordering::SeqCst); + let final_filter = ctx.tracker().last_filter_tip.load(Ordering::SeqCst); + + assert_eq!(final_header, dashd.initial_height, "Header height mismatch"); + assert_eq!(final_filter, dashd.initial_height, "Filter header height mismatch"); + assert_eq!( + ctx.tracker().last_sync_cycle.load(Ordering::SeqCst), + 0, + "Initial sync should be cycle 0" + ); + tracing::info!("Heights match: headers={}, filters={}", final_header, final_filter); + + // Validate wallet balance + let (confirmed, _unconfirmed) = ctx.get_wallet_balance(&wallet_id); + let expected_balance = (dashd.wallet.balance * 100_000_000.0).round() as u64; + tracing::info!( + "Balance: confirmed={} satoshis, expected={} satoshis", + confirmed, + expected_balance + ); + + assert_eq!(confirmed, expected_balance, "Balance 
mismatch"); + + // Validate transaction set against dashd baseline + let spv_txids = ctx.wallet_txids(&wallet_id); + let expected_txids: HashSet = dashd + .wallet + .transactions + .iter() + .filter_map(|tx| tx.get("txid").and_then(|v| v.as_str()).map(String::from)) + .collect(); + + let missing: Vec<_> = expected_txids.difference(&spv_txids).collect(); + let extra: Vec<_> = spv_txids.difference(&expected_txids).collect(); + + assert!( + missing.is_empty(), + "SPV wallet is missing {} transactions: {:?}", + missing.len(), + missing + ); + assert!( + extra.is_empty(), + "SPV wallet has {} unexpected transactions: {:?}", + extra.len(), + extra + ); + tracing::info!("Transaction set validated: {} transactions match", spv_txids.len()); + } +} + +/// Verify incremental sync works via FFI by generating blocks after initial sync. +/// +/// Generates a single block (with a wallet transaction) and a batch of blocks, +/// verifying deterministic cycle counting and wallet balance updates. +#[test] +fn test_ffi_sync_then_generate_blocks() { + let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap(); + let Some(dashd) = rt.block_on(DashdTestContext::new()) else { + eprintln!("Skipping test (dashd context unavailable)"); + return; + }; + if !dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + unsafe { + let ctx = FFITestContext::new(dashd.addr); + let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); + + ctx.run_with_all_callbacks(); + ctx.wait_for_sync(dashd.initial_height); + + assert_eq!( + ctx.tracker().last_sync_cycle.load(Ordering::SeqCst), + 0, + "Initial sync should be cycle 0" + ); + + let (initial_balance, _) = ctx.get_wallet_balance(&wallet_id); + let initial_tx_count = ctx.transaction_count(&wallet_id); + tracing::info!( + "Initial state: balance={} satoshis, tx_count={}", + initial_balance, + initial_tx_count + ); + + let miner_address = 
dashd.node.get_new_address_from_wallet("default"); + + // Generate a block containing a wallet transaction and wait for sync. + let cycle_before = ctx.tracker().last_sync_cycle.load(Ordering::SeqCst); + let receive_address = ctx.get_receive_address(&wallet_id); + let send_amount = Amount::from_sat(100_000_000); + let txid = dashd.node.send_to_address(&receive_address, send_amount); + tracing::info!("Sent {} to FFI wallet, txid: {}", send_amount, txid); + + dashd.node.generate_blocks(1, &miner_address); + let height_after_one = dashd.initial_height + 1; + ctx.wait_for_sync(height_after_one); + + let cycle_after_first = ctx.tracker().last_sync_cycle.load(Ordering::SeqCst); + assert_eq!( + cycle_after_first, + cycle_before + 1, + "Single block should produce exactly one sync cycle: before={}, after={}", + cycle_before, + cycle_after_first + ); + + // Verify the transaction was received via wallet callback + let received_txids = ctx.tracker().received_txids.lock().unwrap(); + let txid_bytes = *txid.as_byte_array(); + assert!( + received_txids.contains(&txid_bytes), + "Wallet callback should have received txid {}", + txid + ); + drop(received_txids); + + // Verify via wallet query as well + assert!( + ctx.has_transaction(&wallet_id, &txid), + "Wallet should contain transaction {}", + txid + ); + + // Verify balance changed from the transaction + let (balance_after_tx, _) = ctx.get_wallet_balance(&wallet_id); + assert!( + balance_after_tx < initial_balance, + "Balance should decrease by fees: initial={}, after_tx={}", + initial_balance, + balance_after_tx + ); + let fees = initial_balance - balance_after_tx; + assert!(fees < 1_000_000, "Fees ({}) should be reasonable", fees); + + // Generate multiple blocks at once and verify the cycle advances + let cycle_before_batch = ctx.tracker().last_sync_cycle.load(Ordering::SeqCst); + dashd.node.generate_blocks(5, &miner_address); + let expected_final_height = dashd.initial_height + 6; + ctx.wait_for_sync(expected_final_height); 
+ + let cycle_after_batch = ctx.tracker().last_sync_cycle.load(Ordering::SeqCst); + assert!( + cycle_after_batch > cycle_before_batch, + "Sync cycle should advance after batch: before={}, after={}", + cycle_before_batch, + cycle_after_batch + ); + + let final_tx_count = ctx.transaction_count(&wallet_id); + assert!( + final_tx_count > initial_tx_count, + "Transaction count should have increased: {} -> {}", + initial_tx_count, + final_tx_count + ); + + ctx.tracker().assert_no_errors(); + } +} + +/// Verify FFI client restart preserves consistent state across stop/recreate cycles. +#[test] +fn test_ffi_restart_consistency() { + let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap(); + let Some(dashd) = rt.block_on(DashdTestContext::new()) else { + eprintln!("Skipping test (dashd context unavailable)"); + return; + }; + + unsafe { + // First sync + tracing::info!("First FFI sync"); + let ctx = FFITestContext::new(dashd.addr); + let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); + + ctx.run_with_sync_callbacks(); + ctx.wait_for_sync(dashd.initial_height); + + let (first_balance, _) = ctx.get_wallet_balance(&wallet_id); + let first_header = ctx.tracker().last_header_tip.load(Ordering::SeqCst); + + ctx.tracker().assert_no_errors(); + assert_eq!( + ctx.tracker().last_sync_cycle.load(Ordering::SeqCst), + 0, + "First sync should be cycle 0" + ); + + tracing::info!("First sync: balance={}, header_tip={}", first_balance, first_header); + + // Restart with same storage + tracing::info!("Restarting FFI client"); + let ctx = ctx.restart(); + let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); + + ctx.run_with_sync_callbacks(); + ctx.wait_for_sync(dashd.initial_height); + + let (second_balance, _) = ctx.get_wallet_balance(&wallet_id); + let second_header = ctx.tracker().last_header_tip.load(Ordering::SeqCst); + + ctx.tracker().assert_no_errors(); + assert_eq!( + ctx.tracker().last_sync_cycle.load(Ordering::SeqCst), + 0, + "Restart sync should 
be cycle 0 (fresh client)" + ); + + tracing::info!("Second sync: balance={}, header_tip={}", second_balance, second_header); + + // Verify state is identical + assert_eq!(first_balance, second_balance, "Balance mismatch after restart"); + assert_eq!(first_header, second_header, "Header tip mismatch after restart"); + } +} + +/// Verify that multiple transactions sent in quick succession and mined in a single block +/// are all detected by the SPV client via FFI. +#[test] +fn test_ffi_multiple_transactions_in_single_block() { + let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap(); + let Some(dashd) = rt.block_on(DashdTestContext::new()) else { + eprintln!("Skipping test (dashd context unavailable)"); + return; + }; + if !dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + unsafe { + let ctx = FFITestContext::new(dashd.addr); + let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); + + ctx.run_with_all_callbacks(); + ctx.wait_for_sync(dashd.initial_height); + + let baseline_tx_count = ctx.transaction_count(&wallet_id); + let (baseline_balance, _) = ctx.get_wallet_balance(&wallet_id); + tracing::info!("Baseline: tx_count={}, balance={}", baseline_tx_count, baseline_balance); + + // Send 3 transactions of different amounts to the SPV wallet + let receive_address = ctx.get_receive_address(&wallet_id); + let amounts = [ + Amount::from_sat(50_000_000), + Amount::from_sat(75_000_000), + Amount::from_sat(120_000_000), + ]; + let mut txids = Vec::new(); + for amount in &amounts { + let txid = dashd.node.send_to_address(&receive_address, *amount); + tracing::info!("Sent {} to FFI wallet, txid: {}", amount, txid); + txids.push(txid); + } + + // Mine a single block to confirm all 3 + let miner_address = dashd.node.get_new_address_from_wallet("default"); + dashd.node.generate_blocks(1, &miner_address); + let expected_height = dashd.initial_height + 1; + ctx.wait_for_sync(expected_height); + + 
let final_tx_count = ctx.transaction_count(&wallet_id); + let (final_balance, _) = ctx.get_wallet_balance(&wallet_id); + + assert_eq!( + final_tx_count, + baseline_tx_count + 3, + "Expected 3 new transactions, got {}", + final_tx_count - baseline_tx_count + ); + + // Since dashd and SPV share the same wallet, sends are internal transfers. + // The only balance change is the transaction fees deducted by dashd. + let fees_paid = baseline_balance - final_balance; + assert!( + final_balance < baseline_balance, + "Balance should decrease by fees for internal transfers" + ); + assert!(fees_paid < 1_000_000, "Total fees ({}) should be reasonable", fees_paid); + + for txid in &txids { + assert!( + ctx.has_transaction(&wallet_id, txid), + "Wallet should contain transaction {}", + txid + ); + } + + ctx.tracker().assert_no_errors(); + tracing::info!( + "All 3 transactions found: tx_count {} -> {}, balance {} -> {} (fees={})", + baseline_tx_count, + final_tx_count, + baseline_balance, + final_balance, + fees_paid + ); + } +} + +/// Verify that transactions sent one per block over several blocks are each detected +/// incrementally by the SPV client via FFI. 
+#[test] +fn test_ffi_multiple_transactions_across_blocks() { + let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap(); + let Some(dashd) = rt.block_on(DashdTestContext::new()) else { + eprintln!("Skipping test (dashd context unavailable)"); + return; + }; + if !dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + unsafe { + let ctx = FFITestContext::new(dashd.addr); + let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); + + ctx.run_with_all_callbacks(); + ctx.wait_for_sync(dashd.initial_height); + + let baseline_tx_count = ctx.transaction_count(&wallet_id); + let (baseline_balance, _) = ctx.get_wallet_balance(&wallet_id); + tracing::info!("Baseline: tx_count={}, balance={}", baseline_tx_count, baseline_balance); + + // Send 1 tx per block, 3 iterations + let amounts = [ + Amount::from_sat(30_000_000), + Amount::from_sat(60_000_000), + Amount::from_sat(90_000_000), + ]; + let miner_address = dashd.node.get_new_address_from_wallet("default"); + let mut current_height = dashd.initial_height; + let mut txids = Vec::new(); + + for (i, amount) in amounts.iter().enumerate() { + let receive_address = ctx.get_receive_address(&wallet_id); + let txid = dashd.node.send_to_address(&receive_address, *amount); + tracing::info!("Iteration {}: sent {} to FFI wallet, txid: {}", i, amount, txid); + txids.push(txid); + + dashd.node.generate_blocks(1, &miner_address); + current_height += 1; + ctx.wait_for_sync(current_height); + + let tx_count = ctx.transaction_count(&wallet_id); + assert_eq!( + tx_count, + baseline_tx_count + i + 1, + "After iteration {}, expected {} transactions, got {}", + i, + baseline_tx_count + i + 1, + tx_count + ); + tracing::info!("Iteration {}: tx_count={}", i, tx_count); + } + + // Final verification + let (final_balance, _) = ctx.get_wallet_balance(&wallet_id); + + // Internal transfers: only fees are deducted + let fees_paid = baseline_balance - final_balance; + assert!( + 
final_balance < baseline_balance, + "Balance should decrease by fees for internal transfers" + ); + assert!(fees_paid < 1_000_000, "Total fees ({}) should be reasonable", fees_paid); + + for txid in &txids { + assert!( + ctx.has_transaction(&wallet_id, txid), + "Wallet should contain transaction {}", + txid + ); + } + + ctx.tracker().assert_no_errors(); + tracing::info!( + "All iterations complete: tx_count {} -> {}, balance {} -> {} (fees={})", + baseline_tx_count, + baseline_tx_count + amounts.len(), + baseline_balance, + final_balance, + fees_paid + ); + } +} diff --git a/dash-spv/Cargo.toml b/dash-spv/Cargo.toml index 20b400456..dd00740ea 100644 --- a/dash-spv/Cargo.toml +++ b/dash-spv/Cargo.toml @@ -51,6 +51,10 @@ indexmap = "2.0" # Parallelization rayon = "1.11" +# Test utilities (optional) +tempfile = { version = "3.0", optional = true } +dashcore-rpc = { path = "../rpc-client", optional = true } + # DNS (trust-dns-resolver was renamed to hickory_resolver) hickory-resolver = "0.25" @@ -81,4 +85,4 @@ name = "dash_spv" path = "src/lib.rs" [features] -test-utils = [] +test-utils = ["dashcore/test-utils", "dep:tempfile", "dep:dashcore-rpc"] diff --git a/dash-spv/src/test_utils/context.rs b/dash-spv/src/test_utils/context.rs new file mode 100644 index 000000000..3b66fe4d2 --- /dev/null +++ b/dash-spv/src/test_utils/context.rs @@ -0,0 +1,90 @@ +//! Shared dashd test context for integration tests. +//! +//! Provides `DashdTestContext` which encapsulates the common setup logic for +//! launching a dashd node with a pre-built blockchain and loading wallet data. +//! Used by both `dash-spv` and `dash-spv-ffi` integration tests. + +use std::net::SocketAddr; + +use tempfile::TempDir; +use tracing::info; + +use super::fs_helpers::{copy_dir, retain_test_dir}; +use super::{DashCoreConfig, DashCoreNode, WalletFile}; + +/// Shared test infrastructure for dashd integration tests. 
+///
+/// Manages a dashd node instance backed by a copied blockchain directory,
+/// along with the expected chain height and a pre-loaded wallet file.
+pub struct DashdTestContext {
+    /// The managed dashd process.
+    pub node: DashCoreNode,
+    /// P2P address of the running dashd node.
+    pub addr: SocketAddr,
+    /// Block height at startup (before any test-generated blocks).
+    pub initial_height: u32,
+    /// Pre-loaded wallet data from the test blockchain directory.
+    pub wallet: WalletFile,
+    /// Whether the dashd binary supports the `generatetoaddress` RPC.
+    pub supports_mining: bool,
+    /// Temporary directory containing the blockchain data.
+    datadir: TempDir,
+}
+
+impl DashdTestContext {
+    /// Create a new dashd test context.
+    ///
+    /// Returns `None` if `SKIP_DASHD_TESTS` is set. Panics if required env vars
+    /// are missing or if dashd fails to start.
+    pub async fn new() -> Option<Self> {
+        if std::env::var("SKIP_DASHD_TESTS").is_ok() {
+            eprintln!("Skipping dashd integration test (SKIP_DASHD_TESTS is set)");
+            return None;
+        }
+
+        let mut config = DashCoreConfig::from_env();
+        let datadir = TempDir::new().expect("failed to create temp dir");
+        copy_dir(&config.datadir, datadir.path()).expect("failed to copy datadir");
+        config.datadir = datadir.path().to_path_buf();
+        config.wallet = "wallet".to_string();
+
+        let wallet = WalletFile::from_json(datadir.path(), "wallet");
+        info!(
+            "Loaded '{}' wallet: {} transactions, {} UTXOs, balance: {:.8} DASH",
+            wallet.wallet_name, wallet.transaction_count, wallet.utxo_count, wallet.balance
+        );
+
+        let mut node = DashCoreNode::with_config(config);
+        let addr = node.start().await;
+        info!("DashCoreNode started at {}", addr);
+
+        // Load a separate wallet for mining so coinbase rewards don't pollute
+        // the test wallet's address space (the "wallet" wallet and SPV wallet
+        // share the same mnemonic).
+ node.ensure_wallet("default"); + info!("Mining wallet 'default' ready"); + + let initial_height = node.get_block_count(); + info!("Dashd has {} blocks", initial_height); + + let supports_mining = node.supports_mining(); + if !supports_mining { + info!("RPC miner not available (tests requiring block generation will be skipped)"); + } + + Some(DashdTestContext { + node, + addr, + initial_height, + wallet, + supports_mining, + datadir, + }) + } +} + +impl Drop for DashdTestContext { + fn drop(&mut self) { + retain_test_dir(self.datadir.path(), "dashd"); + } +} diff --git a/dash-spv/src/test_utils/fs_helpers.rs b/dash-spv/src/test_utils/fs_helpers.rs new file mode 100644 index 000000000..fcb31c511 --- /dev/null +++ b/dash-spv/src/test_utils/fs_helpers.rs @@ -0,0 +1,50 @@ +//! Filesystem helpers for test infrastructure. + +use std::fs; +use std::io; +use std::path::{Path, PathBuf}; + +/// Recursively copy a directory and all its contents. +pub fn copy_dir(src: &Path, dst: &Path) -> io::Result<()> { + fs::create_dir_all(dst)?; + for entry in fs::read_dir(src)? { + let entry = entry?; + let dst_path = dst.join(entry.file_name()); + if entry.file_type()?.is_dir() { + copy_dir(&entry.path(), &dst_path)?; + } else { + fs::copy(entry.path(), dst_path)?; + } + } + Ok(()) +} + +/// When `DASHD_TEST_RETAIN_DIR` is set, copy `src` to a test-named +/// subdirectory for post-mortem inspection. +/// +/// By default only retains on panic. Set `DASHD_TEST_RETAIN_ALWAYS=1` +/// to also retain directories from passing tests. 
+pub fn retain_test_dir(src: &Path, label: &str) { + let retain_always = std::env::var("DASHD_TEST_RETAIN_ALWAYS") + .map(|v| v == "1" || v.eq_ignore_ascii_case("true")) + .unwrap_or(false); + + if !retain_always && !std::thread::panicking() { + return; + } + + let Ok(retain_dir) = std::env::var("DASHD_TEST_RETAIN_DIR") else { + return; + }; + + let test_name = std::thread::current().name().unwrap_or("unknown").replace(":", "_"); + let dest = PathBuf::from(&retain_dir).join(&test_name).join(label); + if dest.exists() { + let _ = fs::remove_dir_all(&dest); + } + if let Err(e) = copy_dir(src, &dest) { + eprintln!("Failed to retain test data: {}", e); + } else { + eprintln!("Test data retained at: {}", dest.display()); + } +} diff --git a/dash-spv/src/test_utils/mod.rs b/dash-spv/src/test_utils/mod.rs index e7dccee58..7b27ee99f 100644 --- a/dash-spv/src/test_utils/mod.rs +++ b/dash-spv/src/test_utils/mod.rs @@ -1,8 +1,21 @@ mod chain_tip; mod chain_work; mod checkpoint; +mod context; mod filter; +mod fs_helpers; mod network; +mod node; mod types; +use std::time::Duration; + +/// Default timeout for sync operations in integration tests. +pub const SYNC_TIMEOUT: Duration = Duration::from_secs(180); + +pub use context::DashdTestContext; +pub use fs_helpers::retain_test_dir; pub use network::{test_socket_address, MockNetworkManager}; +pub use node::{DashCoreNode, WalletFile}; + +pub(crate) use node::DashCoreConfig; diff --git a/dash-spv/src/test_utils/node.rs b/dash-spv/src/test_utils/node.rs new file mode 100644 index 000000000..57a56d2fd --- /dev/null +++ b/dash-spv/src/test_utils/node.rs @@ -0,0 +1,330 @@ +//! Dash Core node test infrastructure for integration testing. +//! +//! This provides utilities for managing a dashd instance and loading test wallet data. 
+ +use dashcore::{Address, Amount, BlockHash, Txid}; +use dashcore_rpc::{Auth, Client, RpcApi}; +use serde::Deserialize; +use std::fs; +use std::net::SocketAddr; +use std::path::{Path, PathBuf}; +use std::sync::atomic::{AtomicU16, Ordering}; +use std::time::Duration; +use tokio::process::Child; +use tokio::time::{sleep, timeout}; + +/// Atomic counter for unique port allocation across parallel tests. +/// Starts below the standard Dash regtest ports (19898/19899) to avoid conflicts. +static NEXT_PORT: AtomicU16 = AtomicU16::new(19400); + +const MAX_PORT_ATTEMPTS: usize = 100; + +/// Allocate a unique, available TCP port for test use. +fn find_available_port() -> u16 { + for _ in 0..MAX_PORT_ATTEMPTS { + let port = NEXT_PORT.fetch_add(1, Ordering::Relaxed); + assert!(port >= 1024, "port counter overflowed"); + if std::net::TcpListener::bind(("127.0.0.1", port)).is_ok() { + return port; + } + } + panic!("failed to find an available port after {} attempts", MAX_PORT_ATTEMPTS); +} + +/// Configuration for Dash Core node. +pub struct DashCoreConfig { + /// Path to dashd binary + pub dashd_path: PathBuf, + /// Path to existing datadir with blockchain data + pub datadir: PathBuf, + /// Wallet name to load on startup + pub wallet: String, + /// P2P port for the node + pub p2p_port: u16, + /// RPC port for the node + pub rpc_port: u16, +} + +impl DashCoreConfig { + /// Create a config from environment variables with dynamically allocated ports. + /// + /// Reads `DASHD_PATH` and `DASHD_DATADIR`. Panics if either variable + /// is not set or if the dashd binary doesn't exist. + pub fn from_env() -> Self { + let error = "DASHD_PATH and DASHD_DATADIR environment variables are required. \ + Either run `eval $(python3 contrib/setup-dashd.py)` to set them up, \ + or set SKIP_DASHD_TESTS=1 to skip these tests. 
\
+             In CI, the setup-dashd step in build-and-test.yml handles this automatically.";
+        let dashd_path = std::env::var("DASHD_PATH").ok().map(PathBuf::from).expect(error);
+
+        assert!(
+            dashd_path.exists(),
+            "DASHD_PATH points to a file that does not exist: {}",
+            dashd_path.display()
+        );
+
+        let datadir = std::env::var("DASHD_DATADIR").ok().map(PathBuf::from).expect(error);
+
+        Self {
+            dashd_path,
+            datadir,
+            wallet: "default".to_string(),
+            p2p_port: find_available_port(),
+            rpc_port: find_available_port(),
+        }
+    }
+}
+
+/// Test infrastructure for managing a Dash Core node.
+pub struct DashCoreNode {
+    config: DashCoreConfig,
+    process: Option<Child>,
+}
+
+impl DashCoreNode {
+    /// Create a new Dash Core node with custom configuration
+    pub fn with_config(config: DashCoreConfig) -> Self {
+        Self {
+            config,
+            process: None,
+        }
+    }
+
+    /// Start the Dash Core node
+    pub async fn start(&mut self) -> SocketAddr {
+        tracing::info!("Starting dashd...");
+        tracing::info!("  Binary: {:?}", self.config.dashd_path);
+        tracing::info!("  Datadir: {:?}", self.config.datadir);
+        tracing::info!("  P2P port: {}", self.config.p2p_port);
+        tracing::info!("  RPC port: {}", self.config.rpc_port);
+
+        fs::create_dir_all(&self.config.datadir).expect("failed to create datadir");
+
+        let args_vec = vec![
+            "-regtest".to_string(),
+            format!("-datadir={}", self.config.datadir.display()),
+            format!("-port={}", self.config.p2p_port),
+            format!("-rpcport={}", self.config.rpc_port),
+            "-server=1".to_string(),
+            "-daemon=0".to_string(),
+            "-fallbackfee=0.00001".to_string(),
+            "-rpcbind=127.0.0.1".to_string(),
+            "-rpcallowip=127.0.0.1".to_string(),
+            "-bind=127.0.0.1".to_string(),
+            "-listen=1".to_string(),
+            "-txindex=0".to_string(),
+            "-addressindex=0".to_string(),
+            "-spentindex=0".to_string(),
+            "-timestampindex=0".to_string(),
+            "-blockfilterindex=1".to_string(),
+            "-peerblockfilters=1".to_string(),
+            "-debug=all".to_string(),
+            format!("-wallet={}", self.config.wallet),
+        ];
+
+        let mut cmd =
tokio::process::Command::new(&self.config.dashd_path); + cmd.args(&args_vec) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::inherit()); + + let child = cmd.spawn().expect("failed to spawn dashd process"); + + self.process = Some(child); + + tracing::info!("Waiting for dashd to be ready..."); + tokio::time::sleep(Duration::from_millis(500)).await; + if let Some(ref mut proc) = self.process { + if let Ok(Some(status)) = proc.try_wait() { + panic!("dashd exited immediately with status: {}", status); + } + } + + let ready = self.wait_for_ready().await; + if !ready { + if let Some(ref mut proc) = self.process { + if let Ok(Some(status)) = proc.try_wait() { + panic!("dashd exited with status: {}", status); + } + } + panic!("dashd failed to start within timeout"); + } + + let addr = SocketAddr::from(([127, 0, 0, 1], self.config.p2p_port)); + tracing::info!("dashd started and ready at {}", addr); + + addr + } + + async fn wait_for_ready(&self) -> bool { + let max_wait = Duration::from_secs(30); + let check_interval = Duration::from_millis(500); + + let result = timeout(max_wait, async { + // Wait for the P2P port to accept connections + loop { + let addr = SocketAddr::from(([127, 0, 0, 1], self.config.p2p_port)); + if tokio::net::TcpStream::connect(addr).await.is_ok() { + break; + } + sleep(check_interval).await; + } + + // Wait for RPC to be fully responsive (not just "warming up") + loop { + let url = format!("http://127.0.0.1:{}", self.config.rpc_port); + let cookie_path = self.config.datadir.join("regtest/.cookie"); + if cookie_path.exists() { + if let Ok(client) = Client::new(&url, Auth::CookieFile(cookie_path)) { + match client.get_blockchain_info() { + Ok(_) => return true, + Err(e) => { + tracing::debug!("RPC not ready yet: {}", e); + } + } + } + } + sleep(check_interval).await; + } + }) + .await; + + result.unwrap_or(false) + } + + /// Get block count via RPC. 
+ pub fn get_block_count(&self) -> u32 { + let client = self.rpc_client(); + client.get_block_count().expect("failed to get block count") + } + + /// Get an RPC client targeting the primary wallet. + fn rpc_client(&self) -> Client { + self.rpc_client_for_wallet(&self.config.wallet) + } + + /// Get an RPC client targeting a specific wallet. + fn rpc_client_for_wallet(&self, wallet_name: &str) -> Client { + let url = format!("http://127.0.0.1:{}/wallet/{}", self.config.rpc_port, wallet_name); + let cookie_path = self.config.datadir.join("regtest/.cookie"); + assert!( + cookie_path.exists(), + "RPC cookie file not found at {}. Is dashd running with this datadir?", + cookie_path.display() + ); + let auth = Auth::CookieFile(cookie_path); + Client::new(&url, auth).expect("failed to create rpc client") + } + + /// Load a wallet by name, creating it if it doesn't exist. + pub fn ensure_wallet(&self, wallet_name: &str) { + let client = self.rpc_client(); + match client.load_wallet(wallet_name) { + Ok(_) => tracing::info!("Loaded wallet: {}", wallet_name), + Err(_) => { + client + .create_wallet(wallet_name, None, None, None, None) + .unwrap_or_else(|e| panic!("failed to create wallet '{}': {}", wallet_name, e)); + tracing::info!("Created wallet: {}", wallet_name); + } + } + } + + /// Get a new address from a specific dashd wallet. + pub fn get_new_address_from_wallet(&self, wallet_name: &str) -> Address { + let client = self.rpc_client_for_wallet(wallet_name); + let address = client.get_new_address(None).expect("failed to get new address"); + address.assume_checked() + } + + /// Check if the connected dashd supports `generatetoaddress` (RPC miner). + /// + /// Some builds (e.g. Windows release binaries) ship without the RPC miner compiled in. 
+    pub fn supports_mining(&self) -> bool {
+        let client = self.rpc_client();
+        let addr = Address::dummy(dashcore::Network::Regtest, 0);
+        match client.generate_to_address(0, &addr) {
+            Ok(_) => true,
+            Err(dashcore_rpc::Error::JsonRpc(dashcore_rpc::jsonrpc::Error::Rpc(ref e)))
+                if e.message.contains("not available") =>
+            {
+                false
+            }
+            // Any other error (auth, network) still counts as "available" —
+            // a real generate call will surface the actual error.
+            Err(_) => true,
+        }
+    }
+
+    /// Generate blocks to the given address.
+    pub fn generate_blocks(&self, count: u64, address: &Address) -> Vec<BlockHash> {
+        let client = self.rpc_client();
+        let hashes = client.generate_to_address(count, address).expect("failed to generate blocks");
+        tracing::info!("Generated {} blocks to {}", count, address);
+        hashes
+    }
+
+    /// Send DASH to an address.
+    pub fn send_to_address(&self, address: &Address, amount: Amount) -> Txid {
+        let client = self.rpc_client();
+        let txid = client
+            .send_to_address(address, amount, None, None, None, None, None, None, None, None)
+            .expect("failed to send to address");
+        tracing::info!("Sent {} to {}, txid: {}", amount, address, txid);
+        txid
+    }
+
+    /// Disconnect all currently connected peers.
+    pub fn disconnect_all_peers(&self) {
+        let client = self.rpc_client();
+        let peers = client.get_peer_info().expect("failed to get peer info");
+        for peer in &peers {
+            let addr = peer.addr.to_string();
+            let _ = client.disconnect_node(&addr);
+            tracing::info!("Disconnected peer {}", addr);
+        }
+        tracing::info!("Disconnected {} peers", peers.len());
+    }
+}
+
+impl Drop for DashCoreNode {
+    fn drop(&mut self) {
+        if let Some(mut process) = self.process.take() {
+            tracing::info!("Stopping dashd process in Drop...");
+            if let Err(e) = process.start_kill() {
+                tracing::warn!("Failed to kill dashd process: {}", e);
+            }
+        }
+    }
+}
+
+/// Wallet file structure for test wallets.
+#[derive(Debug, Deserialize)]
+pub struct WalletFile {
+    /// Wallet name, e.g. 
"default" + pub wallet_name: String, + /// Wallet mnemonic, in BIP39 format + pub mnemonic: String, + /// Wallet balance, in duffs + pub balance: f64, + /// Number of transactions in the wallet + pub transaction_count: usize, + /// Number of UTXOs in the wallet + pub utxo_count: usize, + /// List of transaction hashes in the wallet + pub transactions: Vec, + /// List of UTXOs in the wallet, including their addresses and amounts + pub utxos: Vec, +} + +impl WalletFile { + /// Load a wallet file from the wallets directory in a datadir + pub fn from_json(datadir: &Path, wallet_name: &str) -> Self { + let wallet_path = datadir.join("wallets").join(format!("{}.json", wallet_name)); + if !wallet_path.exists() { + panic!("Wallet file not found: {:?}", wallet_path); + } + + let contents = fs::read_to_string(&wallet_path).expect("Failed to read wallet file"); + serde_json::from_str(&contents).expect("Failed to deserialize wallet file") + } +} diff --git a/dash-spv/tests/dashd_sync.rs b/dash-spv/tests/dashd_sync.rs new file mode 100644 index 000000000..3da446459 --- /dev/null +++ b/dash-spv/tests/dashd_sync.rs @@ -0,0 +1,16 @@ +//! SPV sync tests using dashd. +//! +//! These tests verify SPV sync scenarios against a dashd instance. 
+
+#[path = "dashd_sync/helpers.rs"]
+mod helpers;
+#[path = "dashd_sync/setup.rs"]
+mod setup;
+#[path = "dashd_sync/tests_basic.rs"]
+mod tests_basic;
+#[path = "dashd_sync/tests_disconnect.rs"]
+mod tests_disconnect;
+#[path = "dashd_sync/tests_restart.rs"]
+mod tests_restart;
+#[path = "dashd_sync/tests_transaction.rs"]
+mod tests_transaction;
diff --git a/dash-spv/tests/dashd_sync/helpers.rs b/dash-spv/tests/dashd_sync/helpers.rs
new file mode 100644
index 000000000..533984e77
--- /dev/null
+++ b/dash-spv/tests/dashd_sync/helpers.rs
@@ -0,0 +1,207 @@
+use dash_spv::network::NetworkEvent;
+use dash_spv::sync::{ProgressPercentage, SyncEvent, SyncProgress};
+use dash_spv::test_utils::DashCoreNode;
+use key_wallet::wallet::managed_wallet_info::wallet_info_interface::WalletInfoInterface;
+use key_wallet::wallet::managed_wallet_info::ManagedWalletInfo;
+use key_wallet_manager::wallet_manager::{WalletId, WalletManager};
+use std::collections::HashSet;
+use std::sync::Arc;
+use std::time::Duration;
+use tokio::sync::{broadcast, watch, RwLock};
+
+use dash_spv::test_utils::SYNC_TIMEOUT;
+
+use super::setup::{ClientHandle, TestContext};
+
+/// Wait for sync to reach target height.
+pub(super) async fn wait_for_sync(
+    progress_receiver: &mut watch::Receiver<SyncProgress>,
+    target_height: u32,
+) {
+    let timeout = tokio::time::sleep(SYNC_TIMEOUT);
+    tokio::pin!(timeout);
+
+    loop {
+        // Check current state before waiting for changes — the receiver may
+        // already hold a value that satisfies the condition.
+        {
+            let update = progress_receiver.borrow_and_update();
+            let header_height = update.headers().ok().map(|h| h.current_height()).unwrap_or(0);
+            let filters_height = update.filters().ok().map(|f| f.committed_height()).unwrap_or(0);
+            if update.is_synced()
+                && header_height >= target_height
+                && filters_height >= target_height
+            {
+                return;
+            }
+        }
+
+        tokio::select!
{
+            _ = &mut timeout => {
+                let update = progress_receiver.borrow();
+                panic!("Timeout waiting for sync to height {}. Current progress: {:?}",
+                    target_height, update
+                );
+            }
+            result = progress_receiver.changed() => {
+                if result.is_err() {
+                    panic!("Progress channel closed");
+                }
+            }
+        }
+    }
+}
+
+/// Count all unique transactions across wallet accounts.
+pub(super) async fn count_wallet_transactions(
+    wallet: &Arc<RwLock<WalletManager<ManagedWalletInfo>>>,
+    wallet_id: &WalletId,
+) -> usize {
+    let wallet_read = wallet.read().await;
+    let wallet_info = wallet_read.get_wallet_info(wallet_id).expect("Wallet info not found");
+    let txids: HashSet<_> =
+        wallet_info.accounts().all_accounts().iter().flat_map(|a| a.transactions.keys()).collect();
+    txids.len()
+}
+
+/// Get the spendable balance for a wallet.
+pub(super) async fn get_spendable_balance(
+    wallet: &Arc<RwLock<WalletManager<ManagedWalletInfo>>>,
+    wallet_id: &WalletId,
+) -> u64 {
+    let wallet_read = wallet.read().await;
+    wallet_read.get_wallet_balance(wallet_id).expect("Failed to get wallet balance").spendable()
+}
+
+/// Returns true for sync events that represent meaningful forward progress.
+///
+/// Used by restart and disconnection tests to decide when to interrupt.
+/// Only counts BlockProcessed events that generated new addresses, since
+/// re-processed blocks from storage with no new info are not real progress.
+pub(super) fn is_progress_event(event: &SyncEvent) -> bool {
+    match event {
+        SyncEvent::BlockHeadersStored {
+            ..
+        }
+        | SyncEvent::FilterHeadersStored {
+            ..
+        }
+        | SyncEvent::FiltersStored {
+            ..
+        }
+        | SyncEvent::BlocksNeeded {
+            ..
+        } => true,
+        SyncEvent::BlockProcessed {
+            new_addresses,
+            ..
+        } => !new_addresses.is_empty(),
+        _ => false,
+    }
+}
+
+/// Wait for a specific network event, returning true if seen within the timeout.
+pub(super) async fn wait_for_network_event(
+    receiver: &mut broadcast::Receiver<NetworkEvent>,
+    predicate: impl Fn(&NetworkEvent) -> bool,
+    max_wait: Duration,
+) -> bool {
+    let deadline = tokio::time::sleep(max_wait);
+    tokio::pin!(deadline);
+
+    loop {
+        tokio::select! {
+            _ = &mut deadline => return false,
+            result = receiver.recv() => {
+                match result {
+                    Ok(ref event) if predicate(event) => return true,
+                    Ok(_) => continue,
+                    Err(_) => return false,
+                }
+            }
+        }
+    }
+}
+
+/// Run a disconnect-and-reconnect loop during sync, then verify final state.
+///
+/// Waits for progress events, disconnects all peers after every 5th event,
+/// validates disconnect/reconnect network events, and asserts wallet state
+/// after sync completes.
+pub(super) async fn run_disconnect_loop(
+    mut client_handle: ClientHandle,
+    node: &DashCoreNode,
+    num_disconnects: usize,
+    ctx: &TestContext,
+) {
+    let mut disconnect_count = 0;
+    let mut events_since_disconnect = 0;
+
+    let timeout = tokio::time::sleep(SYNC_TIMEOUT * 2);
+    tokio::pin!(timeout);
+
+    loop {
+        tokio::select! {
+            _ = &mut timeout => {
+                let progress = client_handle.progress_receiver.borrow();
+                panic!(
+                    "Timeout after {} disconnections. Current progress: {:?}",
+                    disconnect_count, progress
+                );
+            }
+            result = client_handle.sync_event_receiver.recv() => {
+                match result {
+                    Ok(ref event) if is_progress_event(event) => {
+                        events_since_disconnect += 1;
+                        if disconnect_count < num_disconnects && events_since_disconnect >= 5 {
+                            tracing::info!(
+                                "Disconnection {}: disconnecting peers after: {}",
+                                disconnect_count + 1,
+                                event.description()
+                            );
+                            node.disconnect_all_peers();
+                            disconnect_count += 1;
+                            events_since_disconnect = 0;
+
+                            let saw_disconnect = wait_for_network_event(
+                                &mut client_handle.network_event_receiver,
+                                |e| matches!(e, NetworkEvent::PeerDisconnected { ..
}), + Duration::from_secs(10), + ).await; + assert!(saw_disconnect, "SPV should observe PeerDisconnected"); + tracing::info!("SPV observed PeerDisconnected"); + + let saw_reconnect = wait_for_network_event( + &mut client_handle.network_event_receiver, + |e| matches!(e, NetworkEvent::PeerConnected { .. }), + Duration::from_secs(30), + ).await; + assert!(saw_reconnect, "SPV should reconnect after disconnection"); + tracing::info!("SPV reconnected (PeerConnected)"); + } + } + Ok(SyncEvent::SyncComplete { .. }) => { + tracing::info!( + "Sync completed after {} peer disconnections", + disconnect_count + ); + break; + } + Ok(_) => continue, + Err(_) => { + panic!("Sync event channel error after {} disconnections", disconnect_count); + } + } + } + } + } + + assert_eq!( + disconnect_count, num_disconnects, + "Expected {} disconnections but only did {}", + num_disconnects, disconnect_count + ); + + client_handle.stop().await; + ctx.assert_synced(&client_handle.client.progress().await).await; +} diff --git a/dash-spv/tests/dashd_sync/setup.rs b/dash-spv/tests/dashd_sync/setup.rs new file mode 100644 index 000000000..1715bbc21 --- /dev/null +++ b/dash-spv/tests/dashd_sync/setup.rs @@ -0,0 +1,358 @@ +use dash_spv::network::NetworkEvent; +use dash_spv::storage::{PeerStorage, PersistentPeerStorage, PersistentStorage}; +use dash_spv::test_utils::{retain_test_dir, DashdTestContext}; +use dash_spv::{ + client::{ClientConfig, DashSpvClient}, + network::PeerNetworkManager, + storage::DiskStorageManager, + sync::{ProgressPercentage, SyncEvent, SyncProgress}, + LevelFilter, LoggingGuard, Network, +}; +use dashcore::network::address::AddrV2Message; +use dashcore::network::constants::ServiceFlags; +use key_wallet::managed_account::managed_account_type::ManagedAccountType; +use key_wallet::wallet::initialization::WalletAccountCreationOptions; +use key_wallet::wallet::managed_wallet_info::wallet_info_interface::WalletInfoInterface; +use 
key_wallet::wallet::managed_wallet_info::ManagedWalletInfo;
+use key_wallet_manager::wallet_manager::{WalletId, WalletManager};
+use std::collections::{BTreeSet, HashSet};
+use std::path::PathBuf;
+use std::sync::Arc;
+use tempfile::TempDir;
+use tokio::sync::{broadcast, watch, RwLock};
+use tokio_util::sync::CancellationToken;
+
+/// SPV-specific test context wrapping the shared dashd infrastructure.
+///
+/// Storage and blockchain directories are cleaned up on drop.
+/// Set `DASHD_TEST_RETAIN_DIR` to a directory path to retain logs and storage for failed tests.
+pub(super) struct TestContext {
+ /// Shared dashd test context.
+ pub(super) dashd: DashdTestContext,
+ /// Temporary directory containing the blockchain data.
+ pub(super) storage_dir: TempDir,
+ /// Test client configuration.
+ pub(super) client_config: ClientConfig,
+ /// Shared wallet manager.
+ pub(super) wallet: Arc<RwLock<WalletManager<ManagedWalletInfo>>>,
+ /// ID of the test wallet.
+ pub(super) wallet_id: WalletId,
+ /// Logging guard to ensure test logging is cleaned up on drop.
+ _log_guard: LoggingGuard,
+}
+
+impl TestContext {
+ /// Creates a new `TestContext` instance if the setup is successful.
+ ///
+ /// # Returns
+ /// This function returns an `Option`:
+ /// - `Some(TestContext)` if all initialization steps succeed.
+ /// - `None` if any part of the initialization fails, such as creating the `DashdTestContext`.
+ ///
+ /// # Example
+ /// ```rust
+ /// if let Some(context) = TestContext::new().await {
+ /// // Proceed with using the `context` for testing.
+ /// } else {
+ /// eprintln!("Failed to create the test context");
+ /// }
+ /// ```
+ pub(super) async fn new() -> Option<Self> {
+ // Create storage dir first so we can set up per-test file logging
+ let storage_dir = TempDir::new().expect("Failed to create temporary directory");
+ let log_dir = storage_dir.path().join("logs");
+ let _log_guard = dash_spv::init_logging(dash_spv::LoggingConfig {
+ level: Some(LevelFilter::DEBUG),
+ console: std::env::var("DASHD_TEST_LOG").is_ok(),
+ file: Some(dash_spv::LogFileConfig {
+ log_dir: log_dir.clone(),
+ max_files: 1,
+ }),
+ thread_local: true,
+ })
+ .expect("Failed to initialize test logging");
+
+ let dashd = DashdTestContext::new().await?;
+
+ let client_config = create_test_config(storage_dir.path().to_path_buf(), dashd.addr);
+
+ let (wallet, wallet_id) = create_test_wallet(&dashd.wallet.mnemonic, Network::Regtest);
+
+ eprintln!(
+ "TestContext: addr={}, blocks={}, data={}",
+ dashd.addr,
+ dashd.initial_height,
+ storage_dir.path().display(),
+ );
+
+ Some(TestContext {
+ dashd,
+ storage_dir,
+ client_config,
+ wallet,
+ wallet_id,
+ _log_guard,
+ })
+ }
+ /// Spawns and initializes a new client instance asynchronously.
+ pub(super) async fn spawn_new_client(&self) -> ClientHandle {
+ create_and_start_client(&self.client_config, Arc::clone(&self.wallet)).await
+ }
+ /// Retrieves the total count of transactions across all accounts in the wallet.
+ pub(super) async fn transaction_count(&self) -> usize {
+ let wallet_read = self.wallet.read().await;
+ let wallet_info =
+ wallet_read.get_wallet_info(&self.wallet_id).expect("Wallet info not found");
+ wallet_info.accounts().all_accounts().iter().map(|a| a.transactions.len()).sum()
+ }
+ /// Retrieves the spendable balance of the wallet.
+ pub(super) async fn spendable_balance(&self) -> u64 { + let wallet_read = self.wallet.read().await; + wallet_read + .get_wallet_balance(&self.wallet_id) + .expect("Failed to get wallet balance") + .spendable() + } + /// Retrieves an unused receiving address from the wallet. + pub(super) async fn receive_address(&self) -> dashcore::Address { + let wallet_read = self.wallet.read().await; + let wallet_info = + wallet_read.get_wallet_info(&self.wallet_id).expect("Wallet info not found"); + + let account = wallet_info + .accounts() + .standard_bip44_accounts + .get(&0) + .expect("BIP44 account 0 not found"); + + let ManagedAccountType::Standard { + external_addresses, + .. + } = &account.account_type + else { + panic!("Account 0 is not a Standard account type"); + }; + + external_addresses + .unused_addresses() + .into_iter() + .next() + .expect("No unused receive address available") + } + /// Checks if a transaction with the specified transaction ID (`txid`) exists in the wallet. + pub(super) async fn has_transaction(&self, txid: &dashcore::Txid) -> bool { + let wallet_read = self.wallet.read().await; + let wallet_info = + wallet_read.get_wallet_info(&self.wallet_id).expect("Wallet info not found"); + + wallet_info + .accounts() + .all_accounts() + .iter() + .any(|account| account.transactions.contains_key(txid)) + || wallet_info.immature_transactions().iter().any(|tx| &tx.txid() == txid) + } + + /// Validate that the context wallet matches the expected baseline from dashd. + pub(super) async fn assert_synced(&self, progress: &SyncProgress) { + self.assert_wallet_synced(progress, &self.wallet, &self.wallet_id).await; + } + + /// Validate that an arbitrary wallet matches the expected baseline from dashd. 
+ pub(super) async fn assert_wallet_synced(
+ &self,
+ progress: &SyncProgress,
+ wallet: &Arc<RwLock<WalletManager<ManagedWalletInfo>>>,
+ wallet_id: &WalletId,
+ ) {
+ let header_height = progress.headers().unwrap().current_height();
+ let filter_header_height = progress.filter_headers().unwrap().current_height();
+ assert_eq!(header_height, self.dashd.initial_height, "Header height mismatch");
+ assert_eq!(
+ filter_header_height, self.dashd.initial_height,
+ "Filter header height mismatch"
+ );
+
+ let wallet_read = wallet.read().await;
+ let wallet_info = wallet_read.get_wallet_info(wallet_id).expect("Wallet info not found");
+
+ let mut spv_txids = HashSet::new();
+ for managed_account in wallet_info.accounts().all_accounts() {
+ for txid in managed_account.transactions.keys() {
+ spv_txids.insert(txid.to_string());
+ }
+ }
+ for tx in wallet_info.immature_transactions() {
+ spv_txids.insert(tx.txid().to_string());
+ }
+
+ let expected_txids: HashSet<String> = self
+ .dashd
+ .wallet
+ .transactions
+ .iter()
+ .filter_map(|tx| tx.get("txid").and_then(|v| v.as_str()).map(String::from))
+ .collect();
+
+ let missing: Vec<_> = expected_txids.difference(&spv_txids).collect();
+ let extra: Vec<_> = spv_txids.difference(&expected_txids).collect();
+
+ assert!(
+ missing.is_empty(),
+ "SPV wallet is missing {} transactions: {:?}",
+ missing.len(),
+ missing
+ );
+ assert!(
+ extra.is_empty(),
+ "SPV wallet has {} unexpected transactions: {:?}",
+ extra.len(),
+ extra
+ );
+
+ drop(wallet_read);
+ let balance = {
+ let wr = wallet.read().await;
+ wr.get_wallet_balance(wallet_id).expect("Failed to get wallet balance").spendable()
+ };
+ let expected_balance: u64 = self
+ .dashd
+ .wallet
+ .utxos
+ .iter()
+ .filter_map(|u| u.get("amount").and_then(|v| v.as_f64()))
+ .map(|dash| (dash * 100_000_000.0).round() as u64)
+ .sum();
+
+ assert_eq!(balance, expected_balance, "Wallet balance mismatch");
+ tracing::info!(
+ "Wallet validation passed: {} transactions, balance={}",
+ spv_txids.len(),
+ balance
+ );
+ }
+}
+
+impl Drop for TestContext { + fn drop(&mut self) { + retain_test_dir(self.storage_dir.path(), "spv"); + } +} + +/// Type alias for the SPV client used in tests. +pub(super) type TestClient = + DashSpvClient, PeerNetworkManager, DiskStorageManager>; + +/// A `ClientHandle` is a utility structure that manages the state and handles for a `TestClient` +/// required to interact with the synchronization process, various event channels, and cancellation capabilities. +pub(super) struct ClientHandle { + /// The underlying SPV client instance. + pub(super) client: TestClient, + /// The handle to the client's run loop task. + pub(super) run_handle: Option>>, + /// A channel for receiving progress updates. + pub(super) progress_receiver: watch::Receiver, + /// A channel for receiving sync events. + pub(super) sync_event_receiver: broadcast::Receiver, + /// A channel for receiving network events. + pub(super) network_event_receiver: broadcast::Receiver, + /// A cancellation token for the client's run loop. + pub(super) cancel_token: CancellationToken, +} + +impl ClientHandle { + /// Stops the execution of the client run loop by canceling its associated token and awaiting the + /// termination of the background task. + pub(super) async fn stop(&mut self) { + tracing::info!("Cancelling client run loop..."); + self.cancel_token.cancel(); + if let Some(handle) = self.run_handle.take() { + handle.await.expect("Run task panicked").expect("Run task returned error"); + } + } +} + +/// Creates a new SPV client and starts it. 
+pub(super) async fn create_and_start_client(
+ config: &ClientConfig,
+ wallet: Arc<RwLock<WalletManager<ManagedWalletInfo>>>,
+) -> ClientHandle {
+ let network_manager =
+ PeerNetworkManager::new(config).await.expect("Failed to create network manager");
+ let storage_manager =
+ DiskStorageManager::new(config).await.expect("Failed to create storage manager");
+
+ let client = DashSpvClient::new(config.clone(), network_manager, storage_manager, wallet)
+ .await
+ .expect("Failed to create client");
+
+ let progress_receiver = client.subscribe_progress().await;
+ let sync_event_receiver = client.subscribe_sync_events().await;
+ let network_event_receiver = client.subscribe_network_events().await;
+ let cancel_token = CancellationToken::new();
+ let run_token = cancel_token.clone();
+
+ let run_client = client.clone();
+ let run_handle = tokio::task::spawn(async move { run_client.run(run_token).await });
+
+ ClientHandle {
+ client,
+ run_handle: Some(run_handle),
+ progress_receiver,
+ sync_event_receiver,
+ network_event_receiver,
+ cancel_token,
+ }
+}
+
+/// Account creation options for tests: just a standard BIP44 account 0.
+pub(super) fn test_account_options() -> WalletAccountCreationOptions {
+ WalletAccountCreationOptions::SpecificAccounts(
+ BTreeSet::from([0]),
+ BTreeSet::new(),
+ BTreeSet::new(),
+ BTreeSet::new(),
+ BTreeSet::new(),
+ None,
+ )
+}
+
+/// Create a test wallet from mnemonic.
+pub(super) fn create_test_wallet(
+ mnemonic: &str,
+ network: Network,
+) -> (Arc<RwLock<WalletManager<ManagedWalletInfo>>>, WalletId) {
+ let mut wallet_manager = WalletManager::<ManagedWalletInfo>::new(network);
+ let wallet_id = wallet_manager
+ .create_wallet_from_mnemonic(mnemonic, "", 0, test_account_options())
+ .expect("Failed to create wallet from mnemonic");
+ (Arc::new(RwLock::new(wallet_manager)), wallet_id)
+}
+
+/// Create test client config pointing to a specific peer (exclusive mode).
+fn create_test_config(storage_path: PathBuf, peer_addr: std::net::SocketAddr) -> ClientConfig { + let mut config = ClientConfig::regtest().with_storage_path(storage_path).without_masternodes(); + config.peers.clear(); + config.add_peer(peer_addr); + config +} + +/// Create test client config with no explicit peers (non-exclusive mode). +/// +/// The peer address is seeded into the peer store on disk so the client +/// discovers it through the normal peer discovery path. +pub(super) async fn create_non_exclusive_test_config( + storage_path: PathBuf, + peer_addr: std::net::SocketAddr, +) -> ClientConfig { + let mut config = ClientConfig::regtest().with_storage_path(storage_path).without_masternodes(); + // Clear default regtest peers so the manager enters non-exclusive mode + config.peers.clear(); + // Seed the peer store so the client can discover our dashd node + let peer_store = PersistentPeerStorage::open(config.storage_path.clone()) + .await + .expect("Failed to open peer storage"); + let msg = AddrV2Message::new(peer_addr, ServiceFlags::NETWORK); + peer_store.save_peers(&[msg]).await.expect("Failed to seed peer store"); + config +} diff --git a/dash-spv/tests/dashd_sync/tests_basic.rs b/dash-spv/tests/dashd_sync/tests_basic.rs new file mode 100644 index 000000000..f91745fec --- /dev/null +++ b/dash-spv/tests/dashd_sync/tests_basic.rs @@ -0,0 +1,128 @@ +use key_wallet::wallet::managed_wallet_info::ManagedWalletInfo; +use key_wallet_manager::wallet_manager::WalletManager; +use std::sync::Arc; +use tokio::sync::RwLock; + +use dash_spv::sync::ProgressPercentage; +use dash_spv::Network; + +use super::helpers::{count_wallet_transactions, get_spendable_balance, wait_for_sync}; +use super::setup::{ + create_and_start_client, create_test_wallet, test_account_options, TestContext, +}; + +#[tokio::test] +async fn test_wallet_sync() { + let Some(ctx) = TestContext::new().await else { + return; + }; + + let mut client_handle = ctx.spawn_new_client().await; + 
wait_for_sync(&mut client_handle.progress_receiver, ctx.dashd.initial_height).await; + + client_handle.stop().await; + ctx.assert_synced(&client_handle.client.progress().await).await; +} + +/// Verify that syncing with a wallet that has no on-chain activity results in zero +/// transactions and zero balance, while headers and filters sync fully. +#[tokio::test] +async fn test_sync_empty_wallet() { + let Some(ctx) = TestContext::new().await else { + return; + }; + + // Use a mnemonic with no regtest activity + let empty_mnemonic = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about"; + let (empty_wallet, empty_wallet_id) = create_test_wallet(empty_mnemonic, Network::Regtest); + + tracing::info!("Starting sync with empty wallet"); + let mut client_handle = + create_and_start_client(&ctx.client_config, Arc::clone(&empty_wallet)).await; + wait_for_sync(&mut client_handle.progress_receiver, ctx.dashd.initial_height).await; + + client_handle.stop().await; + + // Verify headers and filter headers synced fully + let final_progress = client_handle.client.progress().await; + let header_height = final_progress.headers().unwrap().current_height(); + let filter_header_height = final_progress.filter_headers().unwrap().current_height(); + + assert_eq!(header_height, ctx.dashd.initial_height, "Header height mismatch"); + assert_eq!(filter_header_height, ctx.dashd.initial_height, "Filter header height mismatch"); + + // Verify zero transactions and zero balance + let tx_count = count_wallet_transactions(&empty_wallet, &empty_wallet_id).await; + let balance = get_spendable_balance(&empty_wallet, &empty_wallet_id).await; + + assert_eq!(tx_count, 0, "Empty wallet should have 0 transactions, got {}", tx_count); + assert_eq!(balance, 0, "Empty wallet should have 0 balance, got {}", balance); + + tracing::info!( + "Empty wallet sync complete: headers={}, filters={}, txs={}, balance={}", + header_height, + filter_header_height, + tx_count, + 
balance
+ );
+}
+
+/// Verify two wallets in one WalletManager sync independently without cross-contamination.
+///
+/// Creates a manager with the test mnemonic wallet (has transactions) and the "abandon"
+/// wallet (no regtest activity). After sync, the test wallet should have all expected
+/// transactions while the abandon wallet remains empty.
+#[tokio::test]
+async fn test_sync_two_wallets_same_client() {
+ let Some(ctx) = TestContext::new().await else {
+ return;
+ };
+
+ let empty_mnemonic = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about";
+
+ // Create a WalletManager with two wallets
+ let mut wallet_manager = WalletManager::<ManagedWalletInfo>::new(Network::Regtest);
+ let test_wallet_id = wallet_manager
+ .create_wallet_from_mnemonic(&ctx.dashd.wallet.mnemonic, "", 0, test_account_options())
+ .expect("Failed to create test wallet");
+
+ let empty_wallet_id = wallet_manager
+ .create_wallet_from_mnemonic(empty_mnemonic, "", 0, test_account_options())
+ .expect("Failed to create empty wallet");
+
+ assert_eq!(wallet_manager.wallet_count(), 2, "Should have two wallets");
+ let multi_wallet = Arc::new(RwLock::new(wallet_manager));
+
+ // Sync
+ tracing::info!("Starting sync with two wallets");
+ let mut client_handle =
+ create_and_start_client(&ctx.client_config, Arc::clone(&multi_wallet)).await;
+ wait_for_sync(&mut client_handle.progress_receiver, ctx.dashd.initial_height).await;
+
+ client_handle.stop().await;
+
+ // Verify the test wallet has expected transactions and balance
+ ctx.assert_wallet_synced(
+ &client_handle.client.progress().await,
+ &multi_wallet,
+ &test_wallet_id,
+ )
+ .await;
+
+ // Verify the empty wallet has zero transactions and zero balance
+ let empty_tx_count = count_wallet_transactions(&multi_wallet, &empty_wallet_id).await;
+ let empty_balance = get_spendable_balance(&multi_wallet, &empty_wallet_id).await;
+
+ assert_eq!(
+ empty_tx_count, 0,
+ "Empty wallet should have 0 transactions, got {}",
+ 
empty_tx_count + ); + assert_eq!(empty_balance, 0, "Empty wallet should have 0 balance, got {}", empty_balance); + + tracing::info!( + "Multi-wallet sync passed: empty_wallet(txs={}, balance={})", + empty_tx_count, + empty_balance + ); +} diff --git a/dash-spv/tests/dashd_sync/tests_disconnect.rs b/dash-spv/tests/dashd_sync/tests_disconnect.rs new file mode 100644 index 000000000..eadb0dd4d --- /dev/null +++ b/dash-spv/tests/dashd_sync/tests_disconnect.rs @@ -0,0 +1,46 @@ +use std::sync::Arc; + +use super::helpers::run_disconnect_loop; +use super::setup::{create_and_start_client, create_non_exclusive_test_config, TestContext}; + +/// Verify sync completes successfully despite peer disconnections mid-sync. +/// +/// Waits for sync progress, then disconnects all peers via dashd RPC 3 times. +/// After each disconnection, validates that the SPV client observes a +/// `NetworkEvent::PeerDisconnected` followed by a `NetworkEvent::PeerConnected` +/// (automatic reconnection). After all disconnections, waits for full sync. +#[tokio::test] +async fn test_sync_with_peer_disconnection() { + let Some(ctx) = TestContext::new().await else { + return; + }; + + let num_disconnects = 3; + let client_handle = ctx.spawn_new_client().await; + + run_disconnect_loop(client_handle, &ctx.dashd.node, num_disconnects, &ctx).await; +} + +/// Verify sync completes in non-exclusive mode despite peer disconnections. +/// +/// Unlike `test_sync_with_peer_disconnection` which uses exclusive mode (explicit +/// peers), this test uses non-exclusive mode where the peer is discovered via the +/// seeded peer store. The reconnection path goes through the normal peer discovery +/// mechanism (known addresses + DNS fallback) instead of the exclusive peer list. 
+#[tokio::test] +async fn test_sync_with_peer_disconnection_non_exclusive() { + let Some(ctx) = TestContext::new().await else { + return; + }; + + // Create non-exclusive config: no explicit peers, dashd seeded in peer store + let non_exclusive_config = + create_non_exclusive_test_config(ctx.storage_dir.path().to_path_buf(), ctx.dashd.addr) + .await; + + let num_disconnects = 3; + let client_handle = + create_and_start_client(&non_exclusive_config, Arc::clone(&ctx.wallet)).await; + + run_disconnect_loop(client_handle, &ctx.dashd.node, num_disconnects, &ctx).await; +} diff --git a/dash-spv/tests/dashd_sync/tests_restart.rs b/dash-spv/tests/dashd_sync/tests_restart.rs new file mode 100644 index 000000000..723400e26 --- /dev/null +++ b/dash-spv/tests/dashd_sync/tests_restart.rs @@ -0,0 +1,192 @@ +use rand::rngs::StdRng; +use rand::{Rng, SeedableRng}; +use std::sync::Arc; +use std::time::Duration; + +use dash_spv::sync::SyncEvent; +use dash_spv::Network; + +use super::helpers::{get_spendable_balance, is_progress_event, wait_for_sync}; +use dash_spv::test_utils::SYNC_TIMEOUT; + +use super::setup::{create_and_start_client, create_test_wallet, TestContext}; + +/// Verify sync state is identical after stopping and restarting with same storage. 
+#[tokio::test] +async fn test_sync_restart_consistency() { + let Some(ctx) = TestContext::new().await else { + return; + }; + + // First sync + tracing::info!("Starting first sync"); + let mut client_handle = ctx.spawn_new_client().await; + wait_for_sync(&mut client_handle.progress_receiver, ctx.dashd.initial_height).await; + + client_handle.stop().await; + ctx.assert_synced(&client_handle.client.progress().await).await; + let first_balance = ctx.spendable_balance().await; + let first_tx_count = ctx.transaction_count().await; + + drop(client_handle); + tokio::time::sleep(Duration::from_millis(100)).await; + + // Restart with same storage and wallet + tracing::info!("Restarting with same storage"); + let mut client_handle = ctx.spawn_new_client().await; + tokio::time::sleep(Duration::from_secs(3)).await; + wait_for_sync(&mut client_handle.progress_receiver, ctx.dashd.initial_height).await; + + client_handle.stop().await; + ctx.assert_synced(&client_handle.client.progress().await).await; + let second_balance = ctx.spendable_balance().await; + let second_tx_count = ctx.transaction_count().await; + + // Validate state is identical across restarts + assert_eq!(first_balance, second_balance, "Balance mismatch after restart"); + assert_eq!(first_tx_count, second_tx_count, "Transaction count mismatch after restart"); + tracing::info!("State consistent after restart"); +} + +/// Verify correct rescan behavior when restarting with a fresh wallet but existing storage. 
+#[tokio::test] +async fn test_sync_restart_with_fresh_wallet() { + let Some(ctx) = TestContext::new().await else { + return; + }; + + // First sync + tracing::info!("Starting first sync"); + let mut client_handle = ctx.spawn_new_client().await; + wait_for_sync(&mut client_handle.progress_receiver, ctx.dashd.initial_height).await; + + client_handle.stop().await; + ctx.assert_synced(&client_handle.client.progress().await).await; + + drop(client_handle); + tokio::time::sleep(Duration::from_millis(100)).await; + + // Restart with fresh wallet (triggers rescan) + tracing::info!("Restarting with fresh wallet (triggers rescan)"); + let (fresh_wallet, fresh_wallet_id) = + create_test_wallet(&ctx.dashd.wallet.mnemonic, Network::Regtest); + + { + let balance = get_spendable_balance(&fresh_wallet, &fresh_wallet_id).await; + assert_eq!(balance, 0, "Fresh wallet should start with zero balance"); + } + + let mut client_handle = + create_and_start_client(&ctx.client_config, Arc::clone(&fresh_wallet)).await; + wait_for_sync(&mut client_handle.progress_receiver, ctx.dashd.initial_height).await; + + client_handle.stop().await; + ctx.assert_wallet_synced( + &client_handle.client.progress().await, + &fresh_wallet, + &fresh_wallet_id, + ) + .await; +} + +/// Verify sync completes successfully despite repeated interruptions. +/// +/// Listens for key sync events (BlockHeadersStored, FilterHeadersStored, FiltersStored, +/// BlocksNeeded, BlockProcessed) and restarts the client on every 2nd occurrence until +/// sync completes. This exercises restart/resume from unpredictable points across the +/// full sync lifecycle. 
+#[tokio::test] +async fn test_sync_with_multiple_restarts() { + let Some(ctx) = TestContext::new().await else { + return; + }; + + let mut restart_count = 0; + let final_progress = loop { + tracing::info!("Starting sync (restart count: {})", restart_count); + let mut client_handle = ctx.spawn_new_client().await; + + // Wait for either sync completion or the 2nd matching event + let mut events_seen = 0; + let mut should_restart = false; + let timeout = tokio::time::sleep(SYNC_TIMEOUT); + tokio::pin!(timeout); + + loop { + tokio::select! { + _ = &mut timeout => { + let progress = client_handle.progress_receiver.borrow(); + panic!( + "Timeout after {} restarts. Current progress: {:?}", + restart_count, progress + ); + } + result = client_handle.sync_event_receiver.recv() => { + match result { + Ok(ref event) if is_progress_event(event) => { + events_seen += 1; + if events_seen % 2 == 0 { + tracing::info!("Restarting on: {}", event.description()); + should_restart = true; + break; + } + tracing::info!("Skipped: {}", event.description()); + } + Ok(SyncEvent::SyncComplete { .. }) => break, + Ok(_) => continue, + Err(_) => { + panic!("Sync event channel error after {} restarts", restart_count); + } + } + } + } + } + + client_handle.stop().await; + let progress = client_handle.client.progress().await; + + if !should_restart { + tracing::info!("Sync completed after {} restarts", restart_count); + break progress; + } + + tokio::time::sleep(Duration::from_millis(100)).await; + restart_count += 1; + }; + + ctx.assert_synced(&final_progress).await; +} + +/// Verify sync completes successfully despite restarts at random points. +/// +/// Uses a seeded RNG to sleep a random duration (50-500ms) after starting, then restarts. 
+#[tokio::test] +async fn test_sync_with_random_restarts() { + let Some(ctx) = TestContext::new().await else { + return; + }; + + let num_restarts = 10; + let seed = 42; + let mut rng = StdRng::seed_from_u64(seed); + + for i in 0..num_restarts { + let delay_ms = rng.gen_range(50..500); + tracing::info!("Restart {}: sleeping {}ms before stopping", i + 1, delay_ms); + let mut client_handle = ctx.spawn_new_client().await; + + tokio::time::sleep(Duration::from_millis(delay_ms)).await; + + client_handle.stop().await; + tokio::time::sleep(Duration::from_millis(100)).await; + } + + // Final sync to completion + tracing::info!("Final sync to completion"); + let mut client_handle = ctx.spawn_new_client().await; + wait_for_sync(&mut client_handle.progress_receiver, ctx.dashd.initial_height).await; + + client_handle.stop().await; + ctx.assert_synced(&client_handle.client.progress().await).await; + tracing::info!("Sync completed after {} random restarts (seed={})", num_restarts, seed); +} diff --git a/dash-spv/tests/dashd_sync/tests_transaction.rs b/dash-spv/tests/dashd_sync/tests_transaction.rs new file mode 100644 index 000000000..516693d96 --- /dev/null +++ b/dash-spv/tests/dashd_sync/tests_transaction.rs @@ -0,0 +1,231 @@ +use dash_spv::sync::ProgressPercentage; +use dashcore::Amount; + +use super::helpers::wait_for_sync; +use super::setup::TestContext; + +/// Verify incremental sync works by generating blocks after initial sync. +/// +/// Generates a single block (with a wallet transaction) and then a batch of blocks, +/// verifying wallet balance updates and height progression at each step. 
+#[tokio::test] +async fn test_sync_then_generate_blocks() { + let Some(ctx) = TestContext::new().await else { + return; + }; + if !ctx.dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + tracing::info!("Starting initial sync"); + let mut client_handle = ctx.spawn_new_client().await; + wait_for_sync(&mut client_handle.progress_receiver, ctx.dashd.initial_height).await; + + let initial_balance = ctx.spendable_balance().await; + let initial_tx_count = ctx.transaction_count().await; + tracing::info!( + "Initial state: height={}, balance={}, tx_count={}", + ctx.dashd.initial_height, + initial_balance, + initial_tx_count + ); + + let miner_address = ctx.dashd.node.get_new_address_from_wallet("default"); + + // Generate a single block containing a wallet transaction + let receive_address = ctx.receive_address().await; + let send_amount = Amount::from_sat(100_000_000); + let txid = ctx.dashd.node.send_to_address(&receive_address, send_amount); + tracing::info!("Sent {} to SPV wallet, txid: {}", send_amount, txid); + + ctx.dashd.node.generate_blocks(1, &miner_address); + let height_after_one = ctx.dashd.initial_height + 1; + wait_for_sync(&mut client_handle.progress_receiver, height_after_one).await; + + // Verify the transaction was detected and balance reflects fees + assert!(ctx.has_transaction(&txid).await, "SPV wallet should contain transaction {}", txid); + let balance_after_tx = ctx.spendable_balance().await; + assert!( + balance_after_tx < initial_balance, + "Balance should decrease by fees: initial={}, after_tx={}", + initial_balance, + balance_after_tx + ); + let fees = initial_balance - balance_after_tx; + assert!(fees < 1_000_000, "Fees ({}) should be reasonable", fees); + + // Generate a batch of blocks and verify sync reaches the expected height + ctx.dashd.node.generate_blocks(5, &miner_address); + let expected_final_height = ctx.dashd.initial_height + 6; + wait_for_sync(&mut 
client_handle.progress_receiver, expected_final_height).await; + + client_handle.stop().await; + let final_height = client_handle.client.progress().await.headers().unwrap().current_height(); + let final_tx_count = ctx.transaction_count().await; + + assert_eq!(final_height, expected_final_height, "Header height mismatch"); + assert!( + final_tx_count > initial_tx_count, + "Transaction count should have increased: {} -> {}", + initial_tx_count, + final_tx_count + ); + tracing::info!( + "Incremental sync complete: height {} -> {}, tx_count {} -> {}", + ctx.dashd.initial_height, + final_height, + initial_tx_count, + final_tx_count + ); +} + +/// Verify that multiple transactions sent in quick succession and mined in a single block +/// are all detected by the SPV client. +#[tokio::test] +async fn test_multiple_transactions_in_single_block() { + let Some(ctx) = TestContext::new().await else { + return; + }; + if !ctx.dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + // Initial sync to chain tip + tracing::info!("Starting initial sync"); + let mut client_handle = ctx.spawn_new_client().await; + wait_for_sync(&mut client_handle.progress_receiver, ctx.dashd.initial_height).await; + + let baseline_tx_count = ctx.transaction_count().await; + let baseline_balance = ctx.spendable_balance().await; + tracing::info!("Baseline: tx_count={}, balance={}", baseline_tx_count, baseline_balance); + + // Send 3 transactions of different amounts to the SPV wallet + let receive_address = ctx.receive_address().await; + let amounts = + [Amount::from_sat(50_000_000), Amount::from_sat(75_000_000), Amount::from_sat(120_000_000)]; + let mut txids = Vec::new(); + for amount in &amounts { + let txid = ctx.dashd.node.send_to_address(&receive_address, *amount); + tracing::info!("Sent {} to SPV wallet, txid: {}", amount, txid); + txids.push(txid); + } + + // Mine a single block to confirm all 3 + let miner_address = 
ctx.dashd.node.get_new_address_from_wallet("default"); + ctx.dashd.node.generate_blocks(1, &miner_address); + let expected_height = ctx.dashd.initial_height + 1; + + // Wait for SPV to sync the new block + wait_for_sync(&mut client_handle.progress_receiver, expected_height).await; + + // Verify all 3 transactions are in the wallet + let final_tx_count = ctx.transaction_count().await; + let final_balance = ctx.spendable_balance().await; + + assert_eq!( + final_tx_count, + baseline_tx_count + 3, + "Expected 3 new transactions, got {}", + final_tx_count - baseline_tx_count + ); + + // Since dashd and SPV share the same wallet, sends are internal transfers. + // The only balance change is the transaction fees deducted by dashd. + assert!( + final_balance < baseline_balance, + "Balance should decrease by fees for internal transfers" + ); + let fees_paid = baseline_balance - final_balance; + assert!(fees_paid < 1_000_000, "Total fees ({}) should be reasonable", fees_paid); + + for txid in &txids { + assert!(ctx.has_transaction(txid).await, "Wallet should contain transaction {}", txid); + } + + tracing::info!( + "All 3 transactions found: tx_count {} -> {}, balance {} -> {} (fees={})", + baseline_tx_count, + final_tx_count, + baseline_balance, + final_balance, + fees_paid + ); +} + +/// Verify that transactions sent one per block over several blocks are each detected +/// incrementally by the SPV client. 
+#[tokio::test] +async fn test_multiple_transactions_across_blocks() { + let Some(ctx) = TestContext::new().await else { + return; + }; + if !ctx.dashd.supports_mining { + eprintln!("Skipping test (dashd RPC miner not available)"); + return; + } + + // Initial sync to chain tip + tracing::info!("Starting initial sync"); + let mut client_handle = ctx.spawn_new_client().await; + wait_for_sync(&mut client_handle.progress_receiver, ctx.dashd.initial_height).await; + + let baseline_tx_count = ctx.transaction_count().await; + let baseline_balance = ctx.spendable_balance().await; + tracing::info!("Baseline: tx_count={}, balance={}", baseline_tx_count, baseline_balance); + + // Send 1 tx per block, 3 iterations + let amounts = + [Amount::from_sat(30_000_000), Amount::from_sat(60_000_000), Amount::from_sat(90_000_000)]; + let miner_address = ctx.dashd.node.get_new_address_from_wallet("default"); + let mut current_height = ctx.dashd.initial_height; + let mut txids = Vec::new(); + + for (i, amount) in amounts.iter().enumerate() { + let receive_address = ctx.receive_address().await; + let txid = ctx.dashd.node.send_to_address(&receive_address, *amount); + tracing::info!("Iteration {}: sent {} to SPV wallet, txid: {}", i, amount, txid); + txids.push(txid); + + ctx.dashd.node.generate_blocks(1, &miner_address); + current_height += 1; + + wait_for_sync(&mut client_handle.progress_receiver, current_height).await; + + let tx_count = ctx.transaction_count().await; + assert_eq!( + tx_count, + baseline_tx_count + i + 1, + "After iteration {}, expected {} transactions, got {}", + i, + baseline_tx_count + i + 1, + tx_count + ); + tracing::info!("Iteration {}: tx_count={}", i, tx_count); + } + + // Final verification + let final_balance = ctx.spendable_balance().await; + + // Internal transfers: only fees are deducted + assert!( + final_balance < baseline_balance, + "Balance should decrease by fees for internal transfers" + ); + let fees_paid = baseline_balance - final_balance; + 
assert!(fees_paid < 1_000_000, "Total fees ({}) should be reasonable", fees_paid); + + for txid in &txids { + assert!(ctx.has_transaction(txid).await, "Wallet should contain transaction {}", txid); + } + + tracing::info!( + "All iterations complete: tx_count {} -> {}, balance {} -> {} (fees={})", + baseline_tx_count, + baseline_tx_count + amounts.len(), + baseline_balance, + final_balance, + fees_paid + ); +} From 03fd60e580314e08df70e08af665a5bf282b2d2f Mon Sep 17 00:00:00 2001 From: xdustinface Date: Thu, 5 Mar 2026 12:41:17 +0700 Subject: [PATCH 2/4] replace `dashd_sync.rs` with `dashd_sync/main.rs` --- dash-spv/tests/dashd_sync.rs | 16 ---------------- dash-spv/tests/dashd_sync/main.rs | 10 ++++++++++ 2 files changed, 10 insertions(+), 16 deletions(-) delete mode 100644 dash-spv/tests/dashd_sync.rs create mode 100644 dash-spv/tests/dashd_sync/main.rs diff --git a/dash-spv/tests/dashd_sync.rs b/dash-spv/tests/dashd_sync.rs deleted file mode 100644 index 3da446459..000000000 --- a/dash-spv/tests/dashd_sync.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! SPV sync tests using dashd. -//! -//! These tests verify SPV sync scenarios against a dashd instance. - -#[path = "dashd_sync/helpers.rs"] -mod helpers; -#[path = "dashd_sync/setup.rs"] -mod setup; -#[path = "dashd_sync/tests_basic.rs"] -mod tests_basic; -#[path = "dashd_sync/tests_disconnect.rs"] -mod tests_disconnect; -#[path = "dashd_sync/tests_restart.rs"] -mod tests_restart; -#[path = "dashd_sync/tests_transaction.rs"] -mod tests_transaction; diff --git a/dash-spv/tests/dashd_sync/main.rs b/dash-spv/tests/dashd_sync/main.rs new file mode 100644 index 000000000..aefe73ac7 --- /dev/null +++ b/dash-spv/tests/dashd_sync/main.rs @@ -0,0 +1,10 @@ +//! SPV sync tests using dashd. +//! +//! These tests verify SPV sync scenarios against a dashd instance. 
+ +mod helpers; +mod setup; +mod tests_basic; +mod tests_disconnect; +mod tests_restart; +mod tests_transaction; From 9e1855090818e382c7cf955c0b6af547bbb8af4c Mon Sep 17 00:00:00 2001 From: xdustinface Date: Thu, 5 Mar 2026 08:46:53 +0700 Subject: [PATCH 3/4] make `copy_dir` less public --- dash-spv/src/test_utils/fs_helpers.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dash-spv/src/test_utils/fs_helpers.rs b/dash-spv/src/test_utils/fs_helpers.rs index fcb31c511..258506cad 100644 --- a/dash-spv/src/test_utils/fs_helpers.rs +++ b/dash-spv/src/test_utils/fs_helpers.rs @@ -5,7 +5,7 @@ use std::io; use std::path::{Path, PathBuf}; /// Recursively copy a directory and all its contents. -pub fn copy_dir(src: &Path, dst: &Path) -> io::Result<()> { +pub(super) fn copy_dir(src: &Path, dst: &Path) -> io::Result<()> { fs::create_dir_all(dst)?; for entry in fs::read_dir(src)? { let entry = entry?; From a66bd506b50148ae51c7399910a1f985c5c2f033 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Thu, 5 Mar 2026 12:26:52 +0700 Subject: [PATCH 4/4] restructure FFI dashd integration tests into dashd_sync/ --- CLAUDE.md | 3 +- dash-spv-ffi/Cargo.toml | 6 +- dash-spv-ffi/src/lib.rs | 3 - dash-spv-ffi/src/test_utils/mod.rs | 11 -- .../dashd_sync}/callbacks.rs | 92 ++++++------ .../dashd_sync}/context.rs | 41 +++--- dash-spv-ffi/tests/dashd_sync/main.rs | 11 ++ dash-spv-ffi/tests/dashd_sync/tests_basic.rs | 79 ++++++++++ .../tests_callback.rs} | 13 +- .../tests/dashd_sync/tests_restart.rs | 61 ++++++++ .../tests_transaction.rs} | 136 +----------------- 11 files changed, 223 insertions(+), 233 deletions(-) delete mode 100644 dash-spv-ffi/src/test_utils/mod.rs rename dash-spv-ffi/{src/test_utils => tests/dashd_sync}/callbacks.rs (84%) rename dash-spv-ffi/{src/test_utils => tests/dashd_sync}/context.rs (94%) create mode 100644 dash-spv-ffi/tests/dashd_sync/main.rs create mode 100644 dash-spv-ffi/tests/dashd_sync/tests_basic.rs rename 
dash-spv-ffi/tests/{callback_integration_test.rs => dashd_sync/tests_callback.rs} (98%) create mode 100644 dash-spv-ffi/tests/dashd_sync/tests_restart.rs rename dash-spv-ffi/tests/{dashd_ffi_sync_test.rs => dashd_sync/tests_transaction.rs} (68%) diff --git a/CLAUDE.md b/CLAUDE.md index a21637895..13d7f16e6 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -114,7 +114,7 @@ eval $(python3 contrib/setup-dashd.py) **Running:** ```bash cargo test -p dash-spv dashd_sync -cargo test -p dash-spv-ffi dashd_ffi_sync_test +cargo test -p dash-spv-ffi --test dashd_sync SKIP_DASHD_TESTS=1 cargo test # skip when dashd is unavailable ``` @@ -125,6 +125,7 @@ SKIP_DASHD_TESTS=1 cargo test # skip when dashd is unavailable **Key files:** - `dash-spv/tests/dashd_sync/` — test modules (basic, restart, disconnect, transaction) +- `dash-spv-ffi/tests/dashd_sync/` — FFI test modules (basic, restart, transaction, callback) - `dash-spv/src/test_utils/` — shared infrastructure (`DashdTestContext`, `DashCoreNode`) - `.github/ci-groups.yml` — CI test group definitions (`spv` and `ffi` groups run dashd tests) diff --git a/dash-spv-ffi/Cargo.toml b/dash-spv-ffi/Cargo.toml index 7cf150c27..da11bea58 100644 --- a/dash-spv-ffi/Cargo.toml +++ b/dash-spv-ffi/Cargo.toml @@ -31,16 +31,12 @@ key-wallet = { path = "../key-wallet" } key-wallet-manager = { path = "../key-wallet-manager" } rand = "0.8" clap = { version = "4.5", features = ["derive"] } -tempfile = { version = "3.8", optional = true } - -[features] -test-utils = ["dep:tempfile", "dash-spv/test-utils"] [dev-dependencies] dash-spv = { path = "../dash-spv", features = ["test-utils"] } -dash-spv-ffi = { path = ".", features = ["test-utils"] } serial_test = "3.0" env_logger = "0.10" +tempfile = "3.8" [build-dependencies] cbindgen = "0.29" diff --git a/dash-spv-ffi/src/lib.rs b/dash-spv-ffi/src/lib.rs index 95982b49e..d53a16d56 100644 --- a/dash-spv-ffi/src/lib.rs +++ b/dash-spv-ffi/src/lib.rs @@ -14,9 +14,6 @@ pub use platform_integration::*; pub use 
types::*; pub use utils::*; -#[cfg(any(test, feature = "test-utils"))] -pub mod test_utils; - // FFINetwork is now defined in types.rs for cbindgen compatibility // It must match the definition in key_wallet_ffi diff --git a/dash-spv-ffi/src/test_utils/mod.rs b/dash-spv-ffi/src/test_utils/mod.rs deleted file mode 100644 index 04b22b4ea..000000000 --- a/dash-spv-ffi/src/test_utils/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! Shared test infrastructure for FFI integration tests. -//! -//! Provides reusable context, callbacks, and helpers that FFI integration tests share. -//! Gated behind the `test-utils` feature so integration tests can import via -//! `dash_spv_ffi::test_utils`. - -pub mod callbacks; -pub mod context; - -pub use callbacks::*; -pub use context::*; diff --git a/dash-spv-ffi/src/test_utils/callbacks.rs b/dash-spv-ffi/tests/dashd_sync/callbacks.rs similarity index 84% rename from dash-spv-ffi/src/test_utils/callbacks.rs rename to dash-spv-ffi/tests/dashd_sync/callbacks.rs index 4f6d7c454..f316b4dc1 100644 --- a/dash-spv-ffi/src/test_utils/callbacks.rs +++ b/dash-spv-ffi/tests/dashd_sync/callbacks.rs @@ -5,80 +5,80 @@ use std::os::raw::{c_char, c_void}; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; -use crate::*; +use dash_spv_ffi::*; /// Tracks callback invocations for verification. /// /// Fields are updated atomically from FFI callbacks and read in test assertions. 
#[derive(Default)] -pub struct CallbackTracker { +pub(super) struct CallbackTracker { // Sync event tracking - pub sync_start_count: AtomicU32, - pub block_headers_stored_count: AtomicU32, - pub block_header_sync_complete_count: AtomicU32, - pub filter_headers_stored_count: AtomicU32, - pub filter_headers_sync_complete_count: AtomicU32, - pub filters_stored_count: AtomicU32, - pub filters_sync_complete_count: AtomicU32, - pub blocks_needed_count: AtomicU32, - pub block_processed_count: AtomicU32, - pub masternode_state_updated_count: AtomicU32, - pub chainlock_received_count: AtomicU32, - pub instantlock_received_count: AtomicU32, - pub manager_error_count: AtomicU32, - pub sync_complete_count: AtomicU32, + pub(super) sync_start_count: AtomicU32, + pub(super) block_headers_stored_count: AtomicU32, + pub(super) block_header_sync_complete_count: AtomicU32, + pub(super) filter_headers_stored_count: AtomicU32, + pub(super) filter_headers_sync_complete_count: AtomicU32, + pub(super) filters_stored_count: AtomicU32, + pub(super) filters_sync_complete_count: AtomicU32, + pub(super) blocks_needed_count: AtomicU32, + pub(super) block_processed_count: AtomicU32, + pub(super) masternode_state_updated_count: AtomicU32, + pub(super) chainlock_received_count: AtomicU32, + pub(super) instantlock_received_count: AtomicU32, + pub(super) manager_error_count: AtomicU32, + pub(super) sync_complete_count: AtomicU32, // Network event tracking - pub peer_connected_count: AtomicU32, - pub peer_disconnected_count: AtomicU32, - pub peers_updated_count: AtomicU32, + pub(super) peer_connected_count: AtomicU32, + pub(super) peer_disconnected_count: AtomicU32, + pub(super) peers_updated_count: AtomicU32, // Wallet event tracking - pub transaction_received_count: AtomicU32, - pub balance_updated_count: AtomicU32, + pub(super) transaction_received_count: AtomicU32, + pub(super) balance_updated_count: AtomicU32, // Data from callbacks - pub last_header_tip: AtomicU32, - pub last_filter_tip: 
AtomicU32, - pub last_connected_peer_count: AtomicU32, - pub last_best_height: AtomicU32, - pub connected_peers: Mutex>, - pub errors: Mutex>, + pub(super) last_header_tip: AtomicU32, + pub(super) last_filter_tip: AtomicU32, + pub(super) last_connected_peer_count: AtomicU32, + pub(super) last_best_height: AtomicU32, + pub(super) connected_peers: Mutex>, + pub(super) errors: Mutex>, // Transaction data from on_transaction_received - pub received_txids: Mutex>, - pub received_amounts: Mutex>, + pub(super) received_txids: Mutex>, + pub(super) received_amounts: Mutex>, // Balance data from on_balance_updated - pub last_spendable: AtomicU64, - pub last_unconfirmed: AtomicU64, + pub(super) last_spendable: AtomicU64, + pub(super) last_unconfirmed: AtomicU64, // Lifecycle ordering via global sequence counter - pub sequence_counter: AtomicU32, - pub sync_start_seq: AtomicU32, - pub header_complete_seq: AtomicU32, - pub filter_header_complete_seq: AtomicU32, - pub filters_sync_complete_seq: AtomicU32, - pub sync_complete_seq: AtomicU32, + pub(super) sequence_counter: AtomicU32, + pub(super) sync_start_seq: AtomicU32, + pub(super) header_complete_seq: AtomicU32, + pub(super) filter_header_complete_seq: AtomicU32, + pub(super) filters_sync_complete_seq: AtomicU32, + pub(super) sync_complete_seq: AtomicU32, // Filter header range validation: (start, end, tip) - pub filter_header_ranges: Mutex>, + pub(super) filter_header_ranges: Mutex>, // Block processed heights - pub processed_block_heights: Mutex>, + pub(super) processed_block_heights: Mutex>, // Completion tracking - pub last_sync_cycle: AtomicU32, + pub(super) last_sync_cycle: AtomicU32, // Baseline for `wait_for_sync`: captured before the client starts so that // a SyncComplete firing between client start and `wait_for_sync` entry is // not missed. - pub sync_count_baseline: AtomicU32, + pub(super) sync_count_baseline: AtomicU32, } impl CallbackTracker { /// Assert that no errors were recorded during sync. 
- pub fn assert_no_errors(&self) { + pub(super) fn assert_no_errors(&self) { let errors = self.errors.lock().unwrap(); assert!(errors.is_empty(), "Unexpected sync errors: {:?}", *errors); } @@ -196,7 +196,7 @@ extern "C" fn on_filters_sync_complete(tip_height: u32, user_data: *mut c_void) } extern "C" fn on_blocks_needed( - _blocks: *const crate::FFIBlockNeeded, + _blocks: *const dash_spv_ffi::FFIBlockNeeded, count: u32, user_data: *mut c_void, ) { @@ -375,7 +375,7 @@ extern "C" fn on_balance_updated( /// /// The `user_data` pointer borrows the tracker Arc. The caller must ensure the /// Arc outlives all callback invocations (i.e. stop the client before dropping it). -pub fn create_sync_callbacks(tracker: &Arc) -> FFISyncEventCallbacks { +pub(super) fn create_sync_callbacks(tracker: &Arc) -> FFISyncEventCallbacks { FFISyncEventCallbacks { on_sync_start: Some(on_sync_start), on_block_headers_stored: Some(on_block_headers_stored), @@ -399,7 +399,7 @@ pub fn create_sync_callbacks(tracker: &Arc) -> FFISyncEventCall /// /// The `user_data` pointer borrows the tracker Arc. The caller must ensure the /// Arc outlives all callback invocations. -pub fn create_network_callbacks(tracker: &Arc) -> FFINetworkEventCallbacks { +pub(super) fn create_network_callbacks(tracker: &Arc) -> FFINetworkEventCallbacks { FFINetworkEventCallbacks { on_peer_connected: Some(on_peer_connected), on_peer_disconnected: Some(on_peer_disconnected), @@ -412,7 +412,7 @@ pub fn create_network_callbacks(tracker: &Arc) -> FFINetworkEve /// /// The `user_data` pointer borrows the tracker Arc. The caller must ensure the /// Arc outlives all callback invocations. 
-pub fn create_wallet_callbacks(tracker: &Arc) -> FFIWalletEventCallbacks { +pub(super) fn create_wallet_callbacks(tracker: &Arc) -> FFIWalletEventCallbacks { FFIWalletEventCallbacks { on_transaction_received: Some(on_transaction_received), on_balance_updated: Some(on_balance_updated), diff --git a/dash-spv-ffi/src/test_utils/context.rs b/dash-spv-ffi/tests/dashd_sync/context.rs similarity index 94% rename from dash-spv-ffi/src/test_utils/context.rs rename to dash-spv-ffi/tests/dashd_sync/context.rs index 5b451a81f..4e5ae7210 100644 --- a/dash-spv-ffi/src/test_utils/context.rs +++ b/dash-spv-ffi/tests/dashd_sync/context.rs @@ -7,20 +7,20 @@ use std::sync::atomic::Ordering; use std::sync::Arc; use std::time::Duration; -use crate::client::{ +use dash_spv::logging::{LogFileConfig, LoggingConfig, LoggingGuard}; +use dash_spv::test_utils::{retain_test_dir, SYNC_TIMEOUT}; +use dash_spv_ffi::client::{ dash_spv_ffi_client_destroy, dash_spv_ffi_client_get_wallet_manager, dash_spv_ffi_client_new, dash_spv_ffi_client_run, dash_spv_ffi_client_set_network_event_callbacks, dash_spv_ffi_client_set_sync_event_callbacks, dash_spv_ffi_client_set_wallet_event_callbacks, dash_spv_ffi_client_stop, dash_spv_ffi_wallet_manager_free, FFIDashSpvClient, }; -use crate::config::{ +use dash_spv_ffi::config::{ dash_spv_ffi_config_add_peer, dash_spv_ffi_config_destroy, dash_spv_ffi_config_new, dash_spv_ffi_config_set_data_dir, dash_spv_ffi_config_set_masternode_sync_enabled, dash_spv_ffi_config_set_restrict_to_configured_peers, FFIClientConfig, }; -use crate::types::FFIWalletManager as FFIWalletManagerOpaque; -use dash_spv::logging::{LogFileConfig, LoggingConfig, LoggingGuard}; -use dash_spv::test_utils::{retain_test_dir, SYNC_TIMEOUT}; +use dash_spv_ffi::types::FFIWalletManager as FFIWalletManagerOpaque; use dashcore::hashes::Hash; use dashcore::{Address, Txid}; use key_wallet_ffi::managed_account::{ @@ -85,7 +85,7 @@ impl Drop for SessionState { /// /// Split into `FixedState` (stays fixed 
across restarts) and `SessionState` /// (recreated on restart). -pub struct FFITestContext { +pub(super) struct FFITestContext { fixed: FixedState, session: SessionState, } @@ -96,7 +96,7 @@ impl FFITestContext { /// # Safety /// /// Calls FFI functions that allocate and configure opaque pointers. - pub unsafe fn new(peer_addr: std::net::SocketAddr) -> Self { + pub(super) unsafe fn new(peer_addr: std::net::SocketAddr) -> Self { let temp_dir = TempDir::new().expect("Failed to create temp dir"); let storage_dir = temp_dir.path().to_path_buf(); let log_dir = storage_dir.join("logs"); @@ -150,13 +150,8 @@ impl FFITestContext { } } - /// The FFI client pointer. - pub fn client(&self) -> *mut FFIDashSpvClient { - self.session.client - } - /// The callback tracker. - pub fn tracker(&self) -> &Arc { + pub(super) fn tracker(&self) -> &Arc { &self.session.tracker } @@ -165,7 +160,7 @@ impl FFITestContext { /// # Safety /// /// Calls FFI wallet functions through raw pointers held by the context. - pub unsafe fn add_wallet(&self, mnemonic: &str) -> Vec { + pub(super) unsafe fn add_wallet(&self, mnemonic: &str) -> Vec { let mnemonic_c = CString::new(mnemonic).unwrap(); let passphrase = CString::new("").unwrap(); let mut error = FFIError::success(); @@ -202,7 +197,7 @@ impl FFITestContext { /// # Safety /// /// Calls FFI wallet functions through raw pointers held by the context. - pub unsafe fn get_wallet_balance(&self, wallet_id: &[u8]) -> (u64, u64) { + pub(super) unsafe fn get_wallet_balance(&self, wallet_id: &[u8]) -> (u64, u64) { let mut confirmed: u64 = 0; let mut unconfirmed: u64 = 0; let mut error = FFIError::success(); @@ -224,7 +219,7 @@ impl FFITestContext { /// # Safety /// /// Calls FFI client functions through raw pointers held by the context. 
- pub unsafe fn run_with_sync_callbacks(&self) { + pub(super) unsafe fn run_with_sync_callbacks(&self) { let sync_callbacks = create_sync_callbacks(&self.session.tracker); let result = dash_spv_ffi_client_set_sync_event_callbacks(self.session.client, sync_callbacks); @@ -240,7 +235,7 @@ impl FFITestContext { /// # Safety /// /// Calls FFI client functions through raw pointers held by the context. - pub unsafe fn run_with_all_callbacks(&self) { + pub(super) unsafe fn run_with_all_callbacks(&self) { let sync_cbs = create_sync_callbacks(&self.session.tracker); let network_cbs = create_network_callbacks(&self.session.tracker); let wallet_cbs = create_wallet_callbacks(&self.session.tracker); @@ -269,7 +264,7 @@ impl FFITestContext { /// Polls until a new `SyncComplete` event fires with both header and filter /// tips at or above `expected_height`. - pub fn wait_for_sync(&self, expected_height: u32) { + pub(super) fn wait_for_sync(&self, expected_height: u32) { let baseline = self.session.tracker.sync_count_baseline.load(Ordering::SeqCst); let start = std::time::Instant::now(); @@ -304,7 +299,7 @@ impl FFITestContext { /// # Safety /// /// Calls FFI wallet functions through raw pointers held by the context. - pub unsafe fn get_receive_address(&self, wallet_id: &[u8]) -> Address { + pub(super) unsafe fn get_receive_address(&self, wallet_id: &[u8]) -> Address { let mut error = FFIError::success(); let wm = self.session.wallet_manager as *mut FFIWalletManager; @@ -355,7 +350,7 @@ impl FFITestContext { /// # Safety /// /// Calls FFI managed account functions through raw pointers. - pub unsafe fn transaction_count(&self, wallet_id: &[u8]) -> usize { + pub(super) unsafe fn transaction_count(&self, wallet_id: &[u8]) -> usize { self.with_bip44_account(wallet_id, |account| { managed_core_account_get_transaction_count(account) as usize }) @@ -366,7 +361,7 @@ impl FFITestContext { /// # Safety /// /// Calls FFI managed account functions through raw pointers. 
- pub unsafe fn has_transaction(&self, wallet_id: &[u8], txid: &Txid) -> bool { + pub(super) unsafe fn has_transaction(&self, wallet_id: &[u8], txid: &Txid) -> bool { self.with_bip44_account(wallet_id, |account| { let mut txs_ptr: *mut FFITransactionRecord = std::ptr::null_mut(); let mut count: usize = 0; @@ -391,7 +386,7 @@ impl FFITestContext { /// # Safety /// /// Calls FFI managed account functions through raw pointers. - pub unsafe fn wallet_txids(&self, wallet_id: &[u8]) -> HashSet { + pub(super) unsafe fn wallet_txids(&self, wallet_id: &[u8]) -> HashSet { self.with_bip44_account(wallet_id, |account| { let mut txs_ptr: *mut FFITransactionRecord = std::ptr::null_mut(); let mut count: usize = 0; @@ -421,7 +416,7 @@ impl FFITestContext { /// # Safety /// /// Calls FFI client functions through raw pointers held by the context. - pub unsafe fn restart(self) -> Self { + pub(super) unsafe fn restart(self) -> Self { let fixed = self.fixed; // Drop the session (stops client, frees wallet manager, destroys client) drop(self.session); diff --git a/dash-spv-ffi/tests/dashd_sync/main.rs b/dash-spv-ffi/tests/dashd_sync/main.rs new file mode 100644 index 000000000..db8424262 --- /dev/null +++ b/dash-spv-ffi/tests/dashd_sync/main.rs @@ -0,0 +1,11 @@ +//! FFI sync tests using dashd. +//! +//! These tests mirror Rust SPV sync tests but use FFI bindings +//! with the event-based API (dash_spv_ffi_client_run + event callbacks). 
+ +mod callbacks; +mod context; +mod tests_basic; +mod tests_callback; +mod tests_restart; +mod tests_transaction; diff --git a/dash-spv-ffi/tests/dashd_sync/tests_basic.rs b/dash-spv-ffi/tests/dashd_sync/tests_basic.rs new file mode 100644 index 000000000..8d1092f7a --- /dev/null +++ b/dash-spv-ffi/tests/dashd_sync/tests_basic.rs @@ -0,0 +1,79 @@ +use std::collections::HashSet; +use std::sync::atomic::Ordering; + +use dash_spv::test_utils::DashdTestContext; + +use super::context::FFITestContext; + +#[test] +fn test_wallet_sync_via_ffi() { + let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap(); + let Some(dashd) = rt.block_on(DashdTestContext::new()) else { + eprintln!("Skipping test (dashd context unavailable)"); + return; + }; + + unsafe { + let ctx = FFITestContext::new(dashd.addr); + + let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); + tracing::info!("Added wallet, ID: {}", hex::encode(&wallet_id)); + + ctx.run_with_sync_callbacks(); + tracing::info!("FFI client running"); + + ctx.wait_for_sync(dashd.initial_height); + + ctx.tracker().assert_no_errors(); + + // Validate sync heights + let final_header = ctx.tracker().last_header_tip.load(Ordering::SeqCst); + let final_filter = ctx.tracker().last_filter_tip.load(Ordering::SeqCst); + + assert_eq!(final_header, dashd.initial_height, "Header height mismatch"); + assert_eq!(final_filter, dashd.initial_height, "Filter header height mismatch"); + assert_eq!( + ctx.tracker().last_sync_cycle.load(Ordering::SeqCst), + 0, + "Initial sync should be cycle 0" + ); + tracing::info!("Heights match: headers={}, filters={}", final_header, final_filter); + + // Validate wallet balance + let (confirmed, _unconfirmed) = ctx.get_wallet_balance(&wallet_id); + let expected_balance = (dashd.wallet.balance * 100_000_000.0).round() as u64; + tracing::info!( + "Balance: confirmed={} satoshis, expected={} satoshis", + confirmed, + expected_balance + ); + + assert_eq!(confirmed, expected_balance, 
"Balance mismatch"); + + // Validate transaction set against dashd baseline + let spv_txids = ctx.wallet_txids(&wallet_id); + let expected_txids: HashSet = dashd + .wallet + .transactions + .iter() + .filter_map(|tx| tx.get("txid").and_then(|v| v.as_str()).map(String::from)) + .collect(); + + let missing: Vec<_> = expected_txids.difference(&spv_txids).collect(); + let extra: Vec<_> = spv_txids.difference(&expected_txids).collect(); + + assert!( + missing.is_empty(), + "SPV wallet is missing {} transactions: {:?}", + missing.len(), + missing + ); + assert!( + extra.is_empty(), + "SPV wallet has {} unexpected transactions: {:?}", + extra.len(), + extra + ); + tracing::info!("Transaction set validated: {} transactions match", spv_txids.len()); + } +} diff --git a/dash-spv-ffi/tests/callback_integration_test.rs b/dash-spv-ffi/tests/dashd_sync/tests_callback.rs similarity index 98% rename from dash-spv-ffi/tests/callback_integration_test.rs rename to dash-spv-ffi/tests/dashd_sync/tests_callback.rs index 86100c0b6..d562c2630 100644 --- a/dash-spv-ffi/tests/callback_integration_test.rs +++ b/dash-spv-ffi/tests/dashd_sync/tests_callback.rs @@ -1,16 +1,11 @@ -//! Integration test for FFI event callbacks. -//! -//! This test verifies all three callback structs work correctly in a real sync scenario: -//! - FFISyncEventCallbacks -//! - FFINetworkEventCallbacks -//! 
- FFIWalletEventCallbacks +use std::sync::atomic::Ordering; +use std::time::Duration; use dash_spv::test_utils::DashdTestContext; -use dash_spv_ffi::test_utils::FFITestContext; use dashcore::hashes::Hash; use dashcore::Amount; -use std::sync::atomic::Ordering; -use std::time::Duration; + +use super::context::FFITestContext; #[test] fn test_all_callbacks_during_sync() { diff --git a/dash-spv-ffi/tests/dashd_sync/tests_restart.rs b/dash-spv-ffi/tests/dashd_sync/tests_restart.rs new file mode 100644 index 000000000..04304b4fb --- /dev/null +++ b/dash-spv-ffi/tests/dashd_sync/tests_restart.rs @@ -0,0 +1,61 @@ +use std::sync::atomic::Ordering; + +use dash_spv::test_utils::DashdTestContext; + +use super::context::FFITestContext; + +/// Verify FFI client restart preserves consistent state across stop/recreate cycles. +#[test] +fn test_ffi_restart_consistency() { + let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap(); + let Some(dashd) = rt.block_on(DashdTestContext::new()) else { + eprintln!("Skipping test (dashd context unavailable)"); + return; + }; + + unsafe { + // First sync + tracing::info!("First FFI sync"); + let ctx = FFITestContext::new(dashd.addr); + let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); + + ctx.run_with_sync_callbacks(); + ctx.wait_for_sync(dashd.initial_height); + + let (first_balance, _) = ctx.get_wallet_balance(&wallet_id); + let first_header = ctx.tracker().last_header_tip.load(Ordering::SeqCst); + + ctx.tracker().assert_no_errors(); + assert_eq!( + ctx.tracker().last_sync_cycle.load(Ordering::SeqCst), + 0, + "First sync should be cycle 0" + ); + + tracing::info!("First sync: balance={}, header_tip={}", first_balance, first_header); + + // Restart with same storage + tracing::info!("Restarting FFI client"); + let ctx = ctx.restart(); + let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); + + ctx.run_with_sync_callbacks(); + ctx.wait_for_sync(dashd.initial_height); + + let (second_balance, _) = 
ctx.get_wallet_balance(&wallet_id); + let second_header = ctx.tracker().last_header_tip.load(Ordering::SeqCst); + + ctx.tracker().assert_no_errors(); + assert_eq!( + ctx.tracker().last_sync_cycle.load(Ordering::SeqCst), + 0, + "Restart sync should be cycle 0 (fresh client)" + ); + + tracing::info!("Second sync: balance={}, header_tip={}", second_balance, second_header); + + // Verify state is identical + assert_eq!(first_balance, second_balance, "Balance mismatch after restart"); + assert_eq!(first_header, second_header, "Header tip mismatch after restart"); + } +} diff --git a/dash-spv-ffi/tests/dashd_ffi_sync_test.rs b/dash-spv-ffi/tests/dashd_sync/tests_transaction.rs similarity index 68% rename from dash-spv-ffi/tests/dashd_ffi_sync_test.rs rename to dash-spv-ffi/tests/dashd_sync/tests_transaction.rs index 3dffd1b9e..b60266bd4 100644 --- a/dash-spv-ffi/tests/dashd_ffi_sync_test.rs +++ b/dash-spv-ffi/tests/dashd_sync/tests_transaction.rs @@ -1,88 +1,10 @@ -//! FFI Sync tests using dashd. -//! -//! These tests mirror Rust SPV sync tests but use FFI bindings -//! with the event-based API (dash_spv_ffi_client_run + event callbacks). 
- -use std::collections::HashSet; use std::sync::atomic::Ordering; use dash_spv::test_utils::DashdTestContext; -use dash_spv_ffi::test_utils::FFITestContext; use dashcore::hashes::Hash; use dashcore::Amount; -#[test] -fn test_wallet_sync_via_ffi() { - let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap(); - let Some(dashd) = rt.block_on(DashdTestContext::new()) else { - eprintln!("Skipping test (dashd context unavailable)"); - return; - }; - - unsafe { - let ctx = FFITestContext::new(dashd.addr); - - let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); - tracing::info!("Added wallet, ID: {}", hex::encode(&wallet_id)); - - ctx.run_with_sync_callbacks(); - tracing::info!("FFI client running"); - - ctx.wait_for_sync(dashd.initial_height); - - ctx.tracker().assert_no_errors(); - - // Validate sync heights - let final_header = ctx.tracker().last_header_tip.load(Ordering::SeqCst); - let final_filter = ctx.tracker().last_filter_tip.load(Ordering::SeqCst); - - assert_eq!(final_header, dashd.initial_height, "Header height mismatch"); - assert_eq!(final_filter, dashd.initial_height, "Filter header height mismatch"); - assert_eq!( - ctx.tracker().last_sync_cycle.load(Ordering::SeqCst), - 0, - "Initial sync should be cycle 0" - ); - tracing::info!("Heights match: headers={}, filters={}", final_header, final_filter); - - // Validate wallet balance - let (confirmed, _unconfirmed) = ctx.get_wallet_balance(&wallet_id); - let expected_balance = (dashd.wallet.balance * 100_000_000.0).round() as u64; - tracing::info!( - "Balance: confirmed={} satoshis, expected={} satoshis", - confirmed, - expected_balance - ); - - assert_eq!(confirmed, expected_balance, "Balance mismatch"); - - // Validate transaction set against dashd baseline - let spv_txids = ctx.wallet_txids(&wallet_id); - let expected_txids: HashSet = dashd - .wallet - .transactions - .iter() - .filter_map(|tx| tx.get("txid").and_then(|v| v.as_str()).map(String::from)) - .collect(); - - let 
missing: Vec<_> = expected_txids.difference(&spv_txids).collect(); - let extra: Vec<_> = spv_txids.difference(&expected_txids).collect(); - - assert!( - missing.is_empty(), - "SPV wallet is missing {} transactions: {:?}", - missing.len(), - missing - ); - assert!( - extra.is_empty(), - "SPV wallet has {} unexpected transactions: {:?}", - extra.len(), - extra - ); - tracing::info!("Transaction set validated: {} transactions match", spv_txids.len()); - } -} +use super::context::FFITestContext; /// Verify incremental sync works via FFI by generating blocks after initial sync. /// @@ -197,62 +119,6 @@ fn test_ffi_sync_then_generate_blocks() { } } -/// Verify FFI client restart preserves consistent state across stop/recreate cycles. -#[test] -fn test_ffi_restart_consistency() { - let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap(); - let Some(dashd) = rt.block_on(DashdTestContext::new()) else { - eprintln!("Skipping test (dashd context unavailable)"); - return; - }; - - unsafe { - // First sync - tracing::info!("First FFI sync"); - let ctx = FFITestContext::new(dashd.addr); - let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); - - ctx.run_with_sync_callbacks(); - ctx.wait_for_sync(dashd.initial_height); - - let (first_balance, _) = ctx.get_wallet_balance(&wallet_id); - let first_header = ctx.tracker().last_header_tip.load(Ordering::SeqCst); - - ctx.tracker().assert_no_errors(); - assert_eq!( - ctx.tracker().last_sync_cycle.load(Ordering::SeqCst), - 0, - "First sync should be cycle 0" - ); - - tracing::info!("First sync: balance={}, header_tip={}", first_balance, first_header); - - // Restart with same storage - tracing::info!("Restarting FFI client"); - let ctx = ctx.restart(); - let wallet_id = ctx.add_wallet(&dashd.wallet.mnemonic); - - ctx.run_with_sync_callbacks(); - ctx.wait_for_sync(dashd.initial_height); - - let (second_balance, _) = ctx.get_wallet_balance(&wallet_id); - let second_header = 
ctx.tracker().last_header_tip.load(Ordering::SeqCst); - - ctx.tracker().assert_no_errors(); - assert_eq!( - ctx.tracker().last_sync_cycle.load(Ordering::SeqCst), - 0, - "Restart sync should be cycle 0 (fresh client)" - ); - - tracing::info!("Second sync: balance={}, header_tip={}", second_balance, second_header); - - // Verify state is identical - assert_eq!(first_balance, second_balance, "Balance mismatch after restart"); - assert_eq!(first_header, second_header, "Header tip mismatch after restart"); - } -} - /// Verify that multiple transactions sent in quick succession and mined in a single block /// are all detected by the SPV client via FFI. #[test]