diff --git a/.github/workflows/spv-integration-tests.yml b/.github/workflows/spv-integration-tests.yml
new file mode 100644
index 000000000..d297fce23
--- /dev/null
+++ b/.github/workflows/spv-integration-tests.yml
@@ -0,0 +1,44 @@
+name: SPV Integration Tests
+
+on:
+  workflow_dispatch:
+  push:
+    branches: [master, 'v**-dev']
+  pull_request:
+
+env:
+  DASHVERSION: "23.0.2"
+  TEST_DATA_REPO: "xdustinface/regtest-blockchain"
+  TEST_DATA_VERSION: "v0.0.1"
+  CACHE_DIR: ${{ github.workspace }}/.rust-dashcore-test
+
+jobs:
+  spv-integration-test:
+    name: "SPV sync with dashd"
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: dtolnay/rust-toolchain@stable
+
+      - uses: Swatinem/rust-cache@v2
+        with:
+          shared-key: "spv-integration"
+
+      - name: Cache test dependencies
+        uses: actions/cache@v4
+        with:
+          path: .rust-dashcore-test
+          key: rust-dashcore-test-${{ env.DASHVERSION }}-${{ env.TEST_DATA_VERSION }}
+
+      - name: Run tests
+        env:
+          CACHE_DIR: ${{ github.workspace }}/.rust-dashcore-test
+          TEST_DATA_REPO: ${{ env.TEST_DATA_REPO }}
+          TEST_DATA_VERSION: ${{ env.TEST_DATA_VERSION }}
+          DASHVERSION: ${{ env.DASHVERSION }}
+        run: |
+          chmod +x ./contrib/setup-dashd.sh
+          source ./contrib/setup-dashd.sh
+          cargo test -p dash-spv --test dashd_sync -- --nocapture
diff --git a/contrib/setup-dashd.sh b/contrib/setup-dashd.sh
new file mode 100755
index 000000000..6cf91be4b
--- /dev/null
+++ b/contrib/setup-dashd.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+# Setup script that downloads dashd and the test blockchain data used by the integration tests.
+#
+# Usage:
+#   ./contrib/setup-dashd.sh
+#
+# Environment variables:
+#   DASHVERSION - Dash Core version (default: 23.0.2)
+#   TEST_DATA_VERSION - Test data release version (default: v0.0.1)
+#   TEST_DATA_REPO - GitHub repo for test data (default: xdustinface/regtest-blockchain)
+#   CACHE_DIR - Cache directory (default: ~/.rust-dashcore-test)
+
+set -euo pipefail
+
+DASHVERSION="${DASHVERSION:-23.0.2}"
+TEST_DATA_VERSION="${TEST_DATA_VERSION:-v0.0.1}"
+TEST_DATA_REPO="${TEST_DATA_REPO:-xdustinface/regtest-blockchain}"
+
+CACHE_DIR="${CACHE_DIR:-$HOME/.rust-dashcore-test}"
+
+# Detect platform and set asset name
+case "$(uname -s)" in
+    Linux*)
+        DASHD_ASSET="dashcore-${DASHVERSION}-x86_64-linux-gnu.tar.gz"
+        ;;
+    Darwin*)
+        case "$(uname -m)" in
+            arm64) DASHD_ASSET="dashcore-${DASHVERSION}-arm64-apple-darwin.tar.gz" ;;
+            *) DASHD_ASSET="dashcore-${DASHVERSION}-x86_64-apple-darwin.tar.gz" ;;
+        esac
+        ;;
+    *)
+        echo "Unsupported platform: $(uname -s)"
+        exit 1
+        ;;
+esac
+
+mkdir -p "$CACHE_DIR"
+
+# Download dashd if not cached
+DASHD_DIR="$CACHE_DIR/dashcore-${DASHVERSION}"
+DASHD_BIN="$DASHD_DIR/bin/dashd"
+if [ -x "$DASHD_BIN" ]; then
+    echo "dashd ${DASHVERSION} already available"
+else
+    echo "Downloading dashd ${DASHVERSION}..."
+    curl -L "https://github.com/dashpay/dash/releases/download/v${DASHVERSION}/${DASHD_ASSET}" \
+        -o "$CACHE_DIR/${DASHD_ASSET}"
+    tar -xzf "$CACHE_DIR/${DASHD_ASSET}" -C "$CACHE_DIR"
+    rm "$CACHE_DIR/${DASHD_ASSET}"
+    echo "Downloaded dashd to $DASHD_DIR"
+fi
+
+# Download test data if not cached
+TEST_DATA_DIR="$CACHE_DIR/regtest-blockchain-${TEST_DATA_VERSION}/regtest-1000"
+if [ -d "$TEST_DATA_DIR/regtest/blocks" ]; then
+    echo "Test blockchain data ${TEST_DATA_VERSION} already available"
+else
+    echo "Downloading test blockchain data ${TEST_DATA_VERSION}..."
+    mkdir -p "$CACHE_DIR/regtest-blockchain-${TEST_DATA_VERSION}"
+    curl -L "https://github.com/${TEST_DATA_REPO}/releases/download/${TEST_DATA_VERSION}/regtest-1000.tar.gz" \
+        -o "$CACHE_DIR/regtest-1000.tar.gz"
+    tar -xzf "$CACHE_DIR/regtest-1000.tar.gz" -C "$CACHE_DIR/regtest-blockchain-${TEST_DATA_VERSION}"
+    rm "$CACHE_DIR/regtest-1000.tar.gz"
+    echo "Downloaded test data to $TEST_DATA_DIR"
+fi
+
+# Set environment variables
+export DASHD_PATH="$DASHD_DIR/bin/dashd"
+export DASHD_DATADIR="$TEST_DATA_DIR"
+
+echo ""
+echo "Environment configured:"
+echo "  DASHD_PATH=$DASHD_PATH"
+echo "  DASHD_DATADIR=$DASHD_DATADIR"
+echo ""
+
+# Reset strict mode (important when sourcing)
+set +euo pipefail
diff --git a/dash-spv/tests/common/mod.rs b/dash-spv/tests/common/mod.rs
new file mode 100644
index 000000000..51c6a57c5
--- /dev/null
+++ b/dash-spv/tests/common/mod.rs
@@ -0,0 +1,4 @@
+//! Test utilities for dash-spv integration testing.
+pub mod node;
+
+pub use node::{is_dashd_available, DashCoreNode};
diff --git a/dash-spv/tests/common/node.rs b/dash-spv/tests/common/node.rs
new file mode 100644
index 000000000..110bf6581
--- /dev/null
+++ b/dash-spv/tests/common/node.rs
@@ -0,0 +1,259 @@
+//! Dash Core node harness for integration testing.
+//!
+//! This starts a dashd instance using existing regtest data, providing full protocol support.
+use std::net::{Ipv4Addr, SocketAddr};
+use std::path::PathBuf;
+use std::time::Duration;
+use tokio::io::{AsyncBufReadExt, BufReader};
+use tokio::process::{Child, Command};
+use tokio::time::{sleep, timeout};
+
+const REGTEST_P2P_PORT: u16 = 19999;
+const REGTEST_RPC_PORT: u16 = 19998;
+
+/// Configuration for Dash Core node
+pub struct DashCoreConfig {
+    /// Path to dashd binary
+    pub dashd_path: PathBuf,
+    /// Path to existing datadir with blockchain data
+    pub datadir: PathBuf,
+    /// Wallet name to load on startup
+    pub wallet: String,
+}
+
+impl Default for DashCoreConfig {
+    fn default() -> Self {
+        let dashd_path = std::env::var("DASHD_PATH")
+            .map(PathBuf::from)
+            .expect("DASHD_PATH not set. Run: source ./contrib/setup-dashd.sh");
+
+        let datadir = std::env::var("DASHD_DATADIR")
+            .map(PathBuf::from)
+            .or_else(|_| {
+                // Fallback to default cache location from setup-dashd.sh
+                std::env::var("HOME").map(|h| {
+                    PathBuf::from(h)
+                        .join(".rust-dashcore-test/regtest-blockchain-v0.0.1/regtest-1000")
+                })
+            })
+            .expect("Neither DASHD_DATADIR nor HOME is set");
+
+        Self {
+            dashd_path,
+            datadir,
+            wallet: "default".to_string(),
+        }
+    }
+}
+
+/// Harness for managing a Dash Core node
+pub struct DashCoreNode {
+    config: DashCoreConfig,
+    process: Option<Child>,
+}
+
+impl DashCoreNode {
+    /// Create a new Dash Core node with custom configuration
+    pub fn with_config(config: DashCoreConfig) -> Result<Self, Box<dyn std::error::Error>> {
+        if !config.dashd_path.exists() {
+            return Err(format!("dashd not found at {:?}", config.dashd_path).into());
+        }
+
+        Ok(Self {
+            config,
+            process: None,
+        })
+    }
+
+    /// Start the Dash Core node
+    pub async fn start(&mut self) -> Result<SocketAddr, Box<dyn std::error::Error>> {
+        tracing::info!("Starting dashd...");
+        tracing::info!("  Binary: {:?}", self.config.dashd_path);
+        tracing::info!("  Datadir: {:?}", self.config.datadir);
+        tracing::info!("  P2P port: {}", REGTEST_P2P_PORT);
+        tracing::info!("  RPC port: {}", REGTEST_RPC_PORT);
+
+        // Ensure datadir exists
+        std::fs::create_dir_all(&self.config.datadir)?;
+
+        // Build command arguments
+        let args_vec = vec![
+            "-regtest".to_string(),
+            format!("-datadir={}", self.config.datadir.display()),
+            format!("-port={}", REGTEST_P2P_PORT),
+            format!("-rpcport={}", REGTEST_RPC_PORT),
+            "-server=1".to_string(),
+            "-daemon=0".to_string(),
+            "-fallbackfee=0.00001".to_string(),
+            "-rpcbind=127.0.0.1".to_string(),
+            "-rpcallowip=127.0.0.1".to_string(),
+            "-listen=1".to_string(),
+            "-txindex=0".to_string(),
+            "-addressindex=0".to_string(),
+            "-spentindex=0".to_string(),
+            "-timestampindex=0".to_string(),
+            "-blockfilterindex=1".to_string(),
+            "-peerblockfilters=1".to_string(),
+            "-printtoconsole".to_string(),
+            format!("-wallet={}", self.config.wallet),
+        ];
+
+        // Try running through bash with explicit ulimit
+        // Use launchctl to set file descriptor limit if on macOS
+        let script = if cfg!(target_os = "macos") {
+            format!(
+                "launchctl limit maxfiles 10000 unlimited 2>/dev/null || true; ulimit -Sn 10000 2>/dev/null || ulimit -n 10000; exec {} {}",
+                self.config.dashd_path.display(),
+                args_vec.join(" ")
+            )
+        } else {
+            format!(
+                "ulimit -n 10000; exec {} {}",
+                self.config.dashd_path.display(),
+                args_vec.join(" ")
+            )
+        };
+
+        let mut child = Command::new("bash")
+            .arg("-c")
+            .arg(&script)
+            .stdout(std::process::Stdio::piped())
+            .stderr(std::process::Stdio::piped())
+            .spawn()?;
+
+        // Spawn task to read stderr for debugging
+        if let Some(stderr) = child.stderr.take() {
+            tokio::spawn(async move {
+                let mut reader = BufReader::new(stderr).lines();
+                while let Ok(Some(line)) = reader.next_line().await {
+                    tracing::debug!("dashd stderr: {}", line);
+                }
+            });
+        }
+
+        self.process = Some(child);
+
+        // Wait for node to be ready by checking if port is open
+        tracing::info!("Waiting for dashd to be ready...");
+
+        // First check if process died immediately (e.g., due to lock)
+        tokio::time::sleep(Duration::from_millis(500)).await;
+        if let Some(ref mut proc) = self.process {
+            if let Ok(Some(status)) = proc.try_wait() {
+                return Err(format!("dashd exited immediately with status: {}", status).into());
+            }
+        }
+
+        let ready = self.wait_for_ready().await?;
+        if !ready {
+            // Try to get exit status if process died
+            if let Some(ref mut proc) = self.process {
+                if let Ok(Some(status)) = proc.try_wait() {
+                    return Err(format!("dashd exited with status: {}", status).into());
+                }
+            }
+            return Err("dashd failed to start within timeout".into());
+        }
+
+        // Double-check process is still alive after port check
+        if let Some(ref mut proc) = self.process {
+            if let Ok(Some(status)) = proc.try_wait() {
+                return Err(
+                    format!("dashd died after port became ready, status: {}", status).into()
+                );
+            }
+        }
+
+        let addr = SocketAddr::from(([127, 0, 0, 1], REGTEST_P2P_PORT));
+        tracing::info!("✅ dashd started and ready at {}", addr);
+
+        Ok(addr)
+    }
+
+    /// Wait for dashd to be ready by checking if P2P port is accepting connections
+    async fn wait_for_ready(&self) -> Result<bool, Box<dyn std::error::Error>> {
+        let max_wait = Duration::from_secs(30);
+        let check_interval = Duration::from_millis(500);
+
+        let result = timeout(max_wait, async {
+            loop {
+                let addr = SocketAddr::from((Ipv4Addr::new(127, 0, 0, 1), REGTEST_P2P_PORT));
+                if tokio::net::TcpStream::connect(addr).await.is_ok() {
+                    tracing::debug!("P2P port is accepting connections");
+                    return true;
+                }

+                sleep(check_interval).await;
+            }
+        })
+        .await;
+
+        Ok(result.unwrap_or(false))
+    }
+
+    /// Stop the Dash Core node
+    pub async fn stop(&mut self) {
+        if let Some(mut process) = self.process.take() {
+            tracing::info!("Stopping dashd...");
+
+            // Try graceful shutdown via RPC if possible
+            // For now, just kill the process
+            let _ = process.kill().await;
+            let _ = process.wait().await;
+
+            tracing::info!("✅ dashd stopped");
+        }
+    }
+
+    /// Get block count via RPC
+    pub async fn get_block_count(&self) -> Result<u32, Box<dyn std::error::Error>> {
+        // This would use RPC to get block count
+        // For now, we'll use dash-cli
+        let dash_cli = self
+            .config
+            .dashd_path
+            .parent()
+            .map(|p| p.join("dash-cli"))
+            .ok_or("Could not find dash-cli")?;
+
+        let output = std::process::Command::new(dash_cli)
+            .arg("-regtest")
+            .arg(format!("-datadir={}", self.config.datadir.display()))
+            .arg(format!("-rpcport={}", REGTEST_RPC_PORT))
+            .arg("getblockcount")
+            .output()?;
+
+        if !output.status.success() {
+            return Err(
+                format!("dash-cli failed: {}", String::from_utf8_lossy(&output.stderr)).into()
+            );
+        }
+
+        let count_str = String::from_utf8(output.stdout)?;
+        let count_str = count_str.trim();
+        if count_str.is_empty() {
+            return Err("Empty response from getblockcount".into());
+        }
+        let count = count_str.parse::<u32>()?;
+        Ok(count)
+    }
+}
+
+impl Drop for DashCoreNode {
+    fn drop(&mut self) {
+        if let Some(mut process) = self.process.take() {
+            tracing::info!("Stopping dashd process in Drop...");
+
+            if let Err(e) = process.start_kill() {
+                tracing::warn!("Failed to kill dashd process: {}", e);
+            } else {
+                tracing::info!("✅ dashd process stopped");
+            }
+        }
+    }
+}
+
+/// Check if dashd is available (DASHD_PATH env var set and file exists)
+pub fn is_dashd_available() -> bool {
+    std::env::var("DASHD_PATH").map(|p| PathBuf::from(p).exists()).unwrap_or(false)
+}
diff --git a/dash-spv/tests/dashd_sync.rs b/dash-spv/tests/dashd_sync.rs
new file mode 100644
index 000000000..34a982fa3
--- /dev/null
+++ b/dash-spv/tests/dashd_sync.rs
@@ -0,0 +1,388 @@
+//! SPV sync tests using dashd.
+//!
+//! These tests demonstrate realistic SPV sync scenarios against a dashd instance.
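+//!
+//! To run locally, first `source ./contrib/setup-dashd.sh` (which exports `DASHD_PATH` and
+//! `DASHD_DATADIR`), then run `cargo test -p dash-spv --test dashd_sync -- --nocapture`.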
+mod common;
+
+use common::{is_dashd_available, DashCoreNode};
+use dash_spv::{
+    client::{config::MempoolStrategy, ClientConfig, DashSpvClient},
+    network::PeerNetworkManager,
+    storage::MemoryStorageManager,
+    types::ValidationMode,
+    LevelFilter, Network,
+};
+use key_wallet::wallet::initialization::WalletAccountCreationOptions;
+use key_wallet::wallet::managed_wallet_info::wallet_info_interface::WalletInfoInterface;
+use key_wallet::wallet::managed_wallet_info::ManagedWalletInfo;
+use key_wallet::Network as WalletNetwork;
+use key_wallet_manager::wallet_manager::WalletManager;
+use serde::Deserialize;
+use std::fs::{self, File};
+use std::io::Write;
+use std::process::Command;
+use std::sync::Arc;
+use std::time::Duration;
+use tokio::sync::RwLock;
+use tokio_util::sync::CancellationToken;
+use tracing::{info, warn};
+
+/// Wallet file structure (individual wallet JSON)
+#[derive(Debug, Deserialize)]
+struct WalletFile {
+    wallet_name: String,
+    mnemonic: String,
+    balance: f64,
+    transaction_count: usize,
+    utxo_count: usize,
+    transactions: Vec<serde_json::Value>,
+    utxos: Vec<serde_json::Value>,
+}
+
+/// Helper function to load light wallet from test data
+fn load_light_wallet(
+    test_data_dir: &std::path::Path,
+) -> Result<WalletFile, Box<dyn std::error::Error>> {
+    let wallet_path = test_data_dir.join("wallets/light.json");
+
+    let json_content = fs::read_to_string(&wallet_path)?;
+    let wallet_file: WalletFile = serde_json::from_str(&json_content)?;
+
+    Ok(wallet_file)
+}
+
+fn kill_all_dashd() {
+    // Kill any existing dashd processes
+    let _ = Command::new("pkill").arg("-9").arg("-x").arg("dashd").output();
+    // Wait a moment for processes to die
+    std::thread::sleep(Duration::from_millis(500));
+}
+
+#[tokio::test]
+async fn test_wallet_sync() {
+    let _guard =
+        dash_spv::init_console_logging(LevelFilter::DEBUG).expect("Failed to initialize logging");
+    kill_all_dashd();
+
+    // Skip if dashd is not available
+    if !is_dashd_available() {
+        warn!("dashd not available, skipping test");
+        return;
+    }
+
+    // Create config with light wallet
+    let config = common::node::DashCoreConfig {
+        wallet: "light".to_string(),
+        ..Default::default()
+    };
+    info!("Using datadir: {:?}", config.datadir);
+
+    // Load light wallet from test data
+    let light_wallet = load_light_wallet(&config.datadir).expect("Failed to load light wallet");
+    assert_eq!(light_wallet.wallet_name, "light", "Unexpected wallet name");
+    info!(
+        "Loaded '{}' wallet with {} transactions, {} UTXOs, balance: {:.8} DASH",
+        light_wallet.wallet_name,
+        light_wallet.transaction_count,
+        light_wallet.utxo_count,
+        light_wallet.balance
+    );
+
+    let mut node = DashCoreNode::with_config(config).expect(
+        "Failed to create DashCoreNode. Check that dashd binary exists at the configured path.",
+    );
+
+    let addr = node.start().await.expect(
+        "Failed to start dashd. This test requires dashd to run. \
+         On macOS, you may need to increase file descriptor limits: \
+         sudo launchctl limit maxfiles 65536 200000 && ulimit -n 10000",
+    );
+    info!("DashCoreNode started at {}", addr);
+
+    // Get expected block count from dashd
+    let expected_height =
+        node.get_block_count().await.expect("Failed to get block count from dashd");
+    info!("Dashd has {} blocks", expected_height);
+
+    // Create SPV client configuration
+    let mut config = ClientConfig::new(Network::Regtest)
+        .with_validation_mode(ValidationMode::Basic)
+        .with_connection_timeout(Duration::from_secs(30))
+        .with_mempool_tracking(MempoolStrategy::BloomFilter)
+        .without_masternodes(); // Regtest doesn't have masternodes/quorums
+
+    config.peers.clear();
+    config.peers.push(addr);
+
+    // Create network and storage managers
+    let network_manager =
+        PeerNetworkManager::new(&config).await.expect("Failed to create network manager");
+    let storage_manager =
+        MemoryStorageManager::new().await.expect("Failed to create storage manager");
+
+    // Create wallet from mnemonic
+    let wallet_network = WalletNetwork::Regtest;
+    let mut wallet_manager = WalletManager::<ManagedWalletInfo>::new();
+    let wallet_id = wallet_manager
+        .create_wallet_from_mnemonic(
+            &light_wallet.mnemonic,
+            "", // No passphrase
+            &[wallet_network],
+            None, // birth_height
+            WalletAccountCreationOptions::SpecificAccounts(
+                {
+                    let mut accounts = std::collections::BTreeSet::new();
+                    accounts.insert(0); // Create only BIP44 account 0
+                    accounts
+                },
+                std::collections::BTreeSet::new(), // No BIP32 accounts
+                std::collections::BTreeSet::new(), // No CoinJoin accounts
+                std::collections::BTreeSet::new(), // No identity top-up accounts
+                None, // No additional special accounts
+            ),
+        )
+        .expect("Failed to create wallet from mnemonic");
+    info!("Created wallet from mnemonic, ID: {:?}", wallet_id);
+
+    let wallet = Arc::new(RwLock::new(wallet_manager));
+
+    // Create SPV client
+    let mut client = DashSpvClient::new(config, network_manager, storage_manager, wallet.clone())
+        .await
+        .expect("Failed to create SPV client");
+
+    // Start syncing
+    info!("Starting SPV client sync...");
+    client.start().await.expect("Failed to start SPV client");
+
+    // Take the progress receiver
+    let mut progress_receiver =
+        client.take_progress_receiver().expect("Progress receiver should be available");
+
+    let token = CancellationToken::new();
+    let monitor_token = token.clone();
+    let (_command_sender, command_receiver) = tokio::sync::mpsc::unbounded_channel();
+    // Spawn monitor_network() in background
+    info!("Starting network monitoring task...");
+    let monitor_handle = tokio::task::spawn(async move {
+        if let Err(e) = client.monitor_network(command_receiver, monitor_token).await {
+            warn!("Monitor network error: {}", e);
+        }
+        client
+    });
+
+    // Wait for sync to complete
+    info!("Waiting for sync to complete (expected height: {})...", expected_height);
+    let timeout = tokio::time::sleep(Duration::from_secs(120));
+    tokio::pin!(timeout);
+    let mut last_height = None;
+
+    let final_progress = loop {
+        tokio::select! {
+            _ = &mut timeout => {
+                panic!(
+                    "SPV client sync timeout after 120 seconds at height {:?}",
+                    last_height
+                );
+            }
+            progress = progress_receiver.recv() => {
+                match progress {
+                    Some(progress) => {
+                        let height = progress.sync_progress.header_height;
+
+                        // Log progress when height changes
+                        if last_height != Some(height) {
+                            info!(
+                                "Sync progress: {}/{} headers ({:.1}%) - Stage: {:?}",
+                                height, expected_height, progress.percentage, progress.sync_stage
+                            );
+                            last_height = Some(height);
+                        }
+
+                        // Check if sync is complete
+                        if progress.sync_stage == dash_spv::types::SyncStage::Complete {
+                            info!(
+                                "Sync completed! Headers: {}, Filter headers: {}, Filters: {}",
+                                progress.sync_progress.header_height,
+                                progress.sync_progress.filter_header_height,
+                                progress.sync_progress.filters_downloaded
+                            );
+                            break progress.sync_progress;
+                        }
+
+                        // Check for failed state
+                        if let dash_spv::types::SyncStage::Failed(reason) = &progress.sync_stage {
+                            panic!("Sync failed: {}", reason);
+                        }
+                    }
+                    None => {
+                        panic!("Progress channel closed unexpectedly");
+                    }
+                }
+            }
+        }
+    };
+
+    // Abort the monitoring task
+    info!("Aborting network monitoring task...");
+    token.cancel();
+    let (result,) = tokio::join!(monitor_handle);
+    assert!(result.is_ok(), "Monitor network task failed");
+
+    // Validate sync results
+    info!("=== Validation ===");
+
+    assert_eq!(final_progress.header_height, expected_height, "Header height mismatch");
+    info!("Header height matches: {}", final_progress.header_height);
+
+    assert_eq!(
+        final_progress.filter_header_height, expected_height,
+        "Filter header height mismatch"
+    );
+    info!("Filter header height matches: {}", final_progress.filter_header_height);
+
+    assert!(final_progress.peer_count > 0, "No peers connected");
+    info!("Connected to {} peer(s)", final_progress.peer_count);
+
+    // Get the read lock of the wallet
+    let wallet_read = wallet.read().await;
+
+    // Validate wallet data
+    let wallet_info = wallet_read.get_wallet_info(&wallet_id).expect("Wallet info not found");
+
+    // Get SPV UTXOs and write to file for comparison
+    {
+        let utxos = wallet_info.get_utxos(wallet_network);
+
+        let mut spv_utxos: Vec<String> = utxos
+            .iter()
+            .map(|(outpoint, _utxo)| format!("{}:{}", outpoint.txid, outpoint.vout))
+            .collect();
+        spv_utxos.sort();
+
+        let mut file = File::create("/tmp/spv_utxos.txt").expect("Failed to create SPV UTXOs file");
+        for utxo in &spv_utxos {
+            writeln!(file, "{}", utxo).expect("Failed to write UTXO");
+        }
+        info!("Wrote {} SPV UTXOs to /tmp/spv_utxos.txt", spv_utxos.len());
+    }
+
+    // Get all SPV transaction IDs
+    let mut spv_txids = std::collections::HashSet::new();
+    if let Some(managed_collection) = wallet_info.accounts(wallet_network) {
+        for managed_account in managed_collection.all_accounts() {
+            for txid in managed_account.transactions.keys() {
+                spv_txids.insert(txid.to_string());
+            }
+        }
+    }
+    // Add all immature transactions
+    let immature = wallet_info.immature_transactions(wallet_network).unwrap().all();
+    for tx in immature {
+        spv_txids.insert(tx.txid.to_string());
+    }
+
+    // Get expected transaction IDs from JSON
+    let mut expected_txids = std::collections::HashSet::new();
+    for tx in &light_wallet.transactions {
+        if let Some(txid) = tx.get("txid").and_then(|v| v.as_str()) {
+            expected_txids.insert(txid.to_string());
+        }
+    }
+
+    info!("Transaction comparison:");
+    info!("  SPV found: {} transactions", spv_txids.len());
+    info!("  Expected: {} transactions", expected_txids.len());
+    info!("  JSON tx_count: {}", light_wallet.transaction_count);
+
+    // Export SPV txids to file
+    {
+        let mut file =
+            File::create("/tmp/spv_txids_actual.txt").expect("Failed to create SPV txids file");
+        let mut sorted_spv: Vec<_> = spv_txids.iter().map(|s| s.as_str()).collect();
+        sorted_spv.sort();
+        for txid in sorted_spv {
+            writeln!(file, "{}", txid).expect("Failed to write txid");
+        }
+        info!("Wrote {} SPV transaction IDs to /tmp/spv_txids_actual.txt", spv_txids.len());
+    }
+
+    // Find missing and extra transactions
+    let missing_txids: Vec<_> = expected_txids.difference(&spv_txids).collect();
+    let extra_txids: Vec<_> = spv_txids.difference(&expected_txids).collect();
+
+    if !missing_txids.is_empty() {
+        warn!("Missing {} transactions in SPV wallet:", missing_txids.len());
+        for txid in missing_txids.iter().take(10) {
+            warn!("  {}", txid);
+        }
+        if missing_txids.len() > 10 {
+            warn!("  ... and {} more", missing_txids.len() - 10);
+        }
+
+        // Export missing txids to file
+        let mut file =
+            File::create("/tmp/missing_txids.txt").expect("Failed to create missing txids file");
+        let mut sorted_missing: Vec<_> = missing_txids.iter().map(|s| s.as_str()).collect();
+        sorted_missing.sort();
+        for txid in sorted_missing {
+            writeln!(file, "{}", txid).expect("Failed to write txid");
+        }
+        info!("Wrote {} missing transaction IDs to /tmp/missing_txids.txt", missing_txids.len());
+    }
+
+    if !extra_txids.is_empty() {
+        warn!("Extra {} transactions in SPV wallet:", extra_txids.len());
+        for txid in extra_txids.iter().take(10) {
+            warn!("  {}", txid);
+        }
+        if extra_txids.len() > 10 {
+            warn!("  ... and {} more", extra_txids.len() - 10);
+        }
+    }
+
+    // Assert transaction count matches
+    assert_eq!(
+        spv_txids.len(),
+        expected_txids.len(),
+        "Transaction count mismatch: SPV has {}, expected {}",
+        spv_txids.len(),
+        expected_txids.len()
+    );
+
+    // Assert all expected transactions are present
+    assert!(missing_txids.is_empty(), "SPV wallet is missing {} transactions", missing_txids.len());
+
+    // Assert no unexpected transactions
+    assert!(extra_txids.is_empty(), "SPV wallet has {} unexpected transactions", extra_txids.len());
+
+    info!("All {} transactions match expected set", spv_txids.len());
+
+    // Check wallet balance
+    let balance = wallet_read.get_wallet_balance(&wallet_id).expect("Failed to get wallet balance");
+
+    info!(
+        "SPV Wallet balance: {} satoshis ({:.8} DASH)",
+        balance.total,
+        balance.total as f64 / 100_000_000.0
+    );
+
+    let expected = light_wallet
+        .utxos
+        .iter()
+        .filter_map(|u| u.get("amount").and_then(|v| v.as_f64()))
+        .map(|dash| (dash * 100_000_000.0) as u64)
+        .sum::<u64>();
+    info!("Expected balance: {} satoshis ({:.8} DASH)", expected, expected as f64 / 100_000_000.0);
+
+    assert_eq!(
+        balance.total, expected,
+        "Wallet balance mismatch: SPV has {}, expected {}",
+        balance.total, expected
+    );
+    info!("Balance matches expected value from JSON");
+
+    // Cleanup
+    node.stop().await;
+
+    info!("Full sync validation test completed successfully");
+}