44 changes: 44 additions & 0 deletions .github/workflows/spv-integration-tests.yml
@@ -0,0 +1,44 @@
name: SPV Integration Tests

on:
workflow_dispatch:
push:
branches: [master, 'v**-dev']
pull_request:

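# The pinned versions below are read by contrib/setup-dashd.sh and also feed the
# actions/cache key; bumping either value invalidates the cached downloads.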
env:
DASHVERSION: "23.0.2"
TEST_DATA_REPO: "xdustinface/regtest-blockchain"
TEST_DATA_VERSION: "v0.0.1"
CACHE_DIR: ${{ github.workspace }}/.rust-dashcore-test

jobs:
spv-integration-test:
name: "SPV sync with dashd"
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4

- uses: dtolnay/rust-toolchain@stable

- uses: Swatinem/rust-cache@v2
with:
shared-key: "spv-integration"

- name: Cache test dependencies
uses: actions/cache@v4
with:
path: .rust-dashcore-test
key: rust-dashcore-test-${{ env.DASHVERSION }}-${{ env.TEST_DATA_VERSION }}

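# setup-dashd.sh is sourced (not just executed) so the DASHD_PATH and
# DASHD_DATADIR variables it exports remain visible to the cargo test invocation.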
- name: Run tests
env:
CACHE_DIR: ${{ github.workspace }}/.rust-dashcore-test
TEST_DATA_REPO: ${{ env.TEST_DATA_REPO }}
TEST_DATA_VERSION: ${{ env.TEST_DATA_VERSION }}
DASHVERSION: ${{ env.DASHVERSION }}
run: |
chmod +x ./contrib/setup-dashd.sh
source ./contrib/setup-dashd.sh
cargo test -p dash-spv --test dashd_sync -- --nocapture
79 changes: 79 additions & 0 deletions contrib/setup-dashd.sh
@@ -0,0 +1,79 @@
#!/usr/bin/env bash
# Downloads dashd and regtest blockchain data for integration tests and exports
# the DASHD_PATH and DASHD_DATADIR environment variables the tests expect.
#
# Usage:
# ./contrib/setup-dashd.sh
#
# Environment variables:
# DASHVERSION - Dash Core version (default: 23.0.2)
# TEST_DATA_VERSION - Test data release version (default: v0.0.1)
# TEST_DATA_REPO - GitHub repo for test data (default: xdustinface/regtest-blockchain)
# CACHE_DIR - Cache directory (default: ~/.rust-dashcore-test)
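#
# Example (mirrors the CI workflow in .github/workflows/spv-integration-tests.yml):
#   source ./contrib/setup-dashd.sh
#   cargo test -p dash-spv --test dashd_sync -- --nocapture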

set -euo pipefail

DASHVERSION="${DASHVERSION:-23.0.2}"
TEST_DATA_VERSION="${TEST_DATA_VERSION:-v0.0.1}"
TEST_DATA_REPO="${TEST_DATA_REPO:-xdustinface/regtest-blockchain}"

CACHE_DIR="${CACHE_DIR:-$HOME/.rust-dashcore-test}"

# Detect platform and set asset name
case "$(uname -s)" in
Linux*)
DASHD_ASSET="dashcore-${DASHVERSION}-x86_64-linux-gnu.tar.gz"
;;
Darwin*)
case "$(uname -m)" in
arm64) DASHD_ASSET="dashcore-${DASHVERSION}-arm64-apple-darwin.tar.gz" ;;
*) DASHD_ASSET="dashcore-${DASHVERSION}-x86_64-apple-darwin.tar.gz" ;;
esac
;;
*)
echo "Unsupported platform: $(uname -s)"
exit 1
;;
esac

mkdir -p "$CACHE_DIR"

# Download dashd if not cached
DASHD_DIR="$CACHE_DIR/dashcore-${DASHVERSION}"
DASHD_BIN="$DASHD_DIR/bin/dashd"
if [ -x "$DASHD_BIN" ]; then
echo "dashd ${DASHVERSION} already available"
else
echo "Downloading dashd ${DASHVERSION}..."
curl -L "https://github.com/dashpay/dash/releases/download/v${DASHVERSION}/${DASHD_ASSET}" \
-o "$CACHE_DIR/${DASHD_ASSET}"
tar -xzf "$CACHE_DIR/${DASHD_ASSET}" -C "$CACHE_DIR"
rm "$CACHE_DIR/${DASHD_ASSET}"
echo "Downloaded dashd to $DASHD_DIR"
fi

# Download test data if not cached
TEST_DATA_DIR="$CACHE_DIR/regtest-blockchain-${TEST_DATA_VERSION}/regtest-1000"
if [ -d "$TEST_DATA_DIR/regtest/blocks" ]; then
echo "Test blockchain data ${TEST_DATA_VERSION} already available"
else
echo "Downloading test blockchain data ${TEST_DATA_VERSION}..."
mkdir -p "$CACHE_DIR/regtest-blockchain-${TEST_DATA_VERSION}"
curl -L "https://github.com/${TEST_DATA_REPO}/releases/download/${TEST_DATA_VERSION}/regtest-1000.tar.gz" \
-o "$CACHE_DIR/regtest-1000.tar.gz"
tar -xzf "$CACHE_DIR/regtest-1000.tar.gz" -C "$CACHE_DIR/regtest-blockchain-${TEST_DATA_VERSION}"
rm "$CACHE_DIR/regtest-1000.tar.gz"
echo "Downloaded test data to $TEST_DATA_DIR"
fi

# Set environment variables
export DASHD_PATH="$DASHD_DIR/bin/dashd"
export DASHD_DATADIR="$TEST_DATA_DIR"

echo ""
echo "Environment configured:"
echo " DASHD_PATH=$DASHD_PATH"
echo " DASHD_DATADIR=$DASHD_DATADIR"
echo ""

# Reset strict mode so sourcing this script does not leave errexit/nounset/pipefail
# enabled in the caller's shell
set +euo pipefail
4 changes: 4 additions & 0 deletions dash-spv/tests/common/mod.rs
@@ -0,0 +1,4 @@
//! Test utilities for dash-spv integration testing.
pub mod node;

pub use node::{is_dashd_available, DashCoreNode};
259 changes: 259 additions & 0 deletions dash-spv/tests/common/node.rs
@@ -0,0 +1,259 @@
//! Dash Core node harness for integration testing.
//!
//! Starts a dashd instance from pre-generated regtest blockchain data, giving the
//! SPV client under test a peer with full protocol support.
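//!
//! A minimal usage sketch (error handling simplified; assumes the environment was
//! prepared with `source ./contrib/setup-dashd.sh`):
//!
//! ```no_run
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
//! if !is_dashd_available() {
//!     eprintln!("Skipping: DASHD_PATH is not set");
//!     return Ok(());
//! }
//! // Start a node on the default regtest ports, query it, then shut it down.
//! let mut node = DashCoreNode::with_config(DashCoreConfig::default())?;
//! let p2p_addr = node.start().await?; // 127.0.0.1:19999
//! let height = node.get_block_count().await?;
//! println!("dashd ready at {} with {} blocks", p2p_addr, height);
//! node.stop().await;
//! # Ok(())
//! # }
//! ```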
use std::net::{Ipv4Addr, SocketAddr};
use std::path::PathBuf;
use std::time::Duration;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::{Child, Command};
use tokio::time::{sleep, timeout};

const REGTEST_P2P_PORT: u16 = 19999;
const REGTEST_RPC_PORT: u16 = 19998;

/// Configuration for Dash Core node
pub struct DashCoreConfig {
/// Path to dashd binary
pub dashd_path: PathBuf,
/// Path to existing datadir with blockchain data
pub datadir: PathBuf,
/// Wallet name to load on startup
pub wallet: String,
}

impl Default for DashCoreConfig {
fn default() -> Self {
let dashd_path = std::env::var("DASHD_PATH")
.map(PathBuf::from)
.expect("DASHD_PATH not set. Run: source ./contrib/setup-dashd.sh");

let datadir = std::env::var("DASHD_DATADIR")
.map(PathBuf::from)
.or_else(|_| {
// Fallback to default cache location from setup-dashd.sh
std::env::var("HOME").map(|h| {
PathBuf::from(h)
.join(".rust-dashcore-test/regtest-blockchain-v0.0.1/regtest-1000")
})
})
.expect("Neither DASHD_DATADIR nor HOME is set");

Self {
dashd_path,
datadir,
wallet: "default".to_string(),
}
}
}

/// Harness for managing a Dash Core node
pub struct DashCoreNode {
config: DashCoreConfig,
process: Option<Child>,
}

impl DashCoreNode {
/// Create a new Dash Core node with custom configuration
pub fn with_config(config: DashCoreConfig) -> Result<Self, Box<dyn std::error::Error>> {
if !config.dashd_path.exists() {
return Err(format!("dashd not found at {:?}", config.dashd_path).into());
}

Ok(Self {
config,
process: None,
})
}

/// Start the Dash Core node
pub async fn start(&mut self) -> Result<SocketAddr, Box<dyn std::error::Error>> {
tracing::info!("Starting dashd...");
tracing::info!(" Binary: {:?}", self.config.dashd_path);
tracing::info!(" Datadir: {:?}", self.config.datadir);
tracing::info!(" P2P port: {}", REGTEST_P2P_PORT);
tracing::info!(" RPC port: {}", REGTEST_RPC_PORT);

// Ensure datadir exists
std::fs::create_dir_all(&self.config.datadir)?;

// Build command arguments
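// Note: -blockfilterindex=1 builds BIP 158 compact block filters and
// -peerblockfilters=1 serves them over P2P (BIP 157), so the SPV client under
// test can request them during sync.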
let args_vec = vec![
"-regtest".to_string(),
format!("-datadir={}", self.config.datadir.display()),
format!("-port={}", REGTEST_P2P_PORT),
format!("-rpcport={}", REGTEST_RPC_PORT),
"-server=1".to_string(),
"-daemon=0".to_string(),
"-fallbackfee=0.00001".to_string(),
"-rpcbind=127.0.0.1".to_string(),
"-rpcallowip=127.0.0.1".to_string(),
"-listen=1".to_string(),
"-txindex=0".to_string(),
"-addressindex=0".to_string(),
"-spentindex=0".to_string(),
"-timestampindex=0".to_string(),
"-blockfilterindex=1".to_string(),
"-peerblockfilters=1".to_string(),
"-printtoconsole".to_string(),
format!("-wallet={}", self.config.wallet),
];

// Run dashd through bash so the file descriptor limit can be raised before exec;
// on macOS, additionally try launchctl to lift the maxfiles limit.
let script = if cfg!(target_os = "macos") {
format!(
"launchctl limit maxfiles 10000 unlimited 2>/dev/null || true; ulimit -Sn 10000 2>/dev/null || ulimit -n 10000; exec {} {}",
self.config.dashd_path.display(),
args_vec.join(" ")
)
} else {
format!(
"ulimit -n 10000; exec {} {}",
self.config.dashd_path.display(),
args_vec.join(" ")
)
};

let mut child = Command::new("bash")
.arg("-c")
.arg(&script)
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
.spawn()?;

// Spawn task to read stderr for debugging
if let Some(stderr) = child.stderr.take() {
tokio::spawn(async move {
let mut reader = BufReader::new(stderr).lines();
while let Ok(Some(line)) = reader.next_line().await {
tracing::debug!("dashd stderr: {}", line);
}
});
}

self.process = Some(child);

// Wait for node to be ready by checking if port is open
tracing::info!("Waiting for dashd to be ready...");

// First check if process died immediately (e.g., due to lock)
tokio::time::sleep(Duration::from_millis(500)).await;
if let Some(ref mut proc) = self.process {
if let Ok(Some(status)) = proc.try_wait() {
return Err(format!("dashd exited immediately with status: {}", status).into());
}
}

let ready = self.wait_for_ready().await?;
if !ready {
// Try to get exit status if process died
if let Some(ref mut proc) = self.process {
if let Ok(Some(status)) = proc.try_wait() {
return Err(format!("dashd exited with status: {}", status).into());
}
}
return Err("dashd failed to start within timeout".into());
}

// Double-check process is still alive after port check
if let Some(ref mut proc) = self.process {
if let Ok(Some(status)) = proc.try_wait() {
return Err(
format!("dashd died after port became ready, status: {}", status).into()
);
}
}

let addr = SocketAddr::from(([127, 0, 0, 1], REGTEST_P2P_PORT));
tracing::info!("✅ dashd started and ready at {}", addr);

Ok(addr)
}

/// Wait for dashd to be ready by checking if P2P port is accepting connections
async fn wait_for_ready(&self) -> Result<bool, Box<dyn std::error::Error>> {
let max_wait = Duration::from_secs(30);
let check_interval = Duration::from_millis(500);

let result = timeout(max_wait, async {
loop {
let addr = SocketAddr::from((Ipv4Addr::new(127, 0, 0, 1), REGTEST_P2P_PORT));
if tokio::net::TcpStream::connect(addr).await.is_ok() {
tracing::debug!("P2P port is accepting connections");
return true;
}

sleep(check_interval).await;
}
})
.await;

Ok(result.unwrap_or(false))
}

/// Stop the Dash Core node
pub async fn stop(&mut self) {
if let Some(mut process) = self.process.take() {
tracing::info!("Stopping dashd...");

// Try graceful shutdown via RPC if possible
// For now, just kill the process
let _ = process.kill().await;
let _ = process.wait().await;

tracing::info!("✅ dashd stopped");
}
}

/// Get block count via RPC
pub async fn get_block_count(&self) -> Result<u32, Box<dyn std::error::Error>> {
// Query the block count through dash-cli against the node's RPC port;
// a native RPC client could replace this later.
let dash_cli = self
.config
.dashd_path
.parent()
.map(|p| p.join("dash-cli"))
.ok_or("Could not find dash-cli")?;

let output = std::process::Command::new(dash_cli)
.arg("-regtest")
.arg(format!("-datadir={}", self.config.datadir.display()))
.arg(format!("-rpcport={}", REGTEST_RPC_PORT))
.arg("getblockcount")
.output()?;

if !output.status.success() {
return Err(
format!("dash-cli failed: {}", String::from_utf8_lossy(&output.stderr)).into()
);
}

let count_str = String::from_utf8(output.stdout)?;
let count_str = count_str.trim();
if count_str.is_empty() {
return Err("Empty response from getblockcount".into());
}
let count = count_str.parse::<u32>()?;
Ok(count)
}
}

impl Drop for DashCoreNode {
fn drop(&mut self) {
if let Some(mut process) = self.process.take() {
tracing::info!("Stopping dashd process in Drop...");

if let Err(e) = process.start_kill() {
tracing::warn!("Failed to kill dashd process: {}", e);
} else {
tracing::info!("✅ dashd process stopped");
}
}
}
}

/// Check if dashd is available (DASHD_PATH env var set and file exists)
pub fn is_dashd_available() -> bool {
std::env::var("DASHD_PATH").map(|p| PathBuf::from(p).exists()).unwrap_or(false)
}