Skip to content

Commit 80b33ab

Browse files
committed
created benches with criterion and moved one performance test to it
1 parent f9234fd commit 80b33ab

File tree

5 files changed

+101
-45
lines changed

5 files changed

+101
-45
lines changed

TEST_SUMMARY.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ I have successfully implemented comprehensive unit tests for several critical da
8080
- State management and persistence
8181
- Concurrent operations and thread safety
8282
- Edge cases and error scenarios
83-
- Performance considerations
83+
- Performance considerations
8484

8585
2. **Test Quality**: All tests follow best practices:
8686
- Clear test names describing what is being tested

UNIFIED_SDK.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ dash_sdk_init()
6767
1. **Size Reduction**: 79.4% smaller than separate libraries
6868
2. **No Symbol Conflicts**: Shared dependencies included only once
6969
3. **Simplified Distribution**: Single XCFramework to manage
70-
4. **Better Performance**: Reduced memory footprint and faster load times
70+
4. **Better Performance**: Reduced memory footprint and faster load times
7171
5. **Easier Maintenance**: One build process for all functionality
7272

7373
## Compatibility

dash-spv/Cargo.toml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,12 +59,17 @@ hickory-resolver = "0.25"
5959
log = "0.4"
6060

6161
[dev-dependencies]
62+
criterion = { version = "0.8.1", features = ["async_tokio"] }
6263
tempfile = "3.0"
6364
tokio-test = "0.4"
6465
env_logger = "0.10"
6566
hex = "0.4"
6667
test-case = "3.3"
6768

69+
[[bench]]
70+
name = "storage"
71+
harness = false
72+
6873
[[bin]]
6974
name = "dash-spv"
7075
path = "src/main.rs"

dash-spv/benches/storage.rs

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
1+
use std::time::Duration;
2+
3+
use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
4+
use dash_spv::{
5+
storage::{DiskStorageManager, StorageManager},
6+
Hash,
7+
};
8+
use dashcore::{block::Version, BlockHash, CompactTarget, Header};
9+
use tempfile::TempDir;
10+
use tokio::runtime::Builder;
11+
12+
fn create_test_header(height: u32) -> Header {
13+
Header {
14+
version: Version::from_consensus(1),
15+
prev_blockhash: BlockHash::all_zeros(),
16+
merkle_root: dashcore_hashes::sha256d::Hash::all_zeros().into(),
17+
time: height,
18+
bits: CompactTarget::from_consensus(0x207fffff),
19+
nonce: height,
20+
}
21+
}
22+
23+
fn bench_disk_storage(c: &mut Criterion) {
24+
const CHUNK_SIZE: u32 = 13_000;
25+
const NUM_ELEMENTS: u32 = CHUNK_SIZE * 20;
26+
27+
let rt = Builder::new_multi_thread().worker_threads(4).enable_all().build().unwrap();
28+
29+
let headers = (0..NUM_ELEMENTS).map(create_test_header).collect::<Vec<Header>>();
30+
31+
c.bench_function("storage/disk/store", |b| {
32+
b.to_async(&rt).iter_batched(
33+
|| async {
34+
DiskStorageManager::new(TempDir::new().unwrap().path().to_path_buf()).await.unwrap()
35+
},
36+
|a| async {
37+
let mut storage = a.await;
38+
39+
for chunk in headers.chunks(CHUNK_SIZE as usize) {
40+
storage.store_headers(chunk).await.unwrap();
41+
}
42+
},
43+
BatchSize::SmallInput,
44+
)
45+
});
46+
47+
let temp_dir = TempDir::new().unwrap();
48+
49+
let mut storage = rt.block_on(async {
50+
let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()).await.unwrap();
51+
52+
for chunk in headers.chunks(CHUNK_SIZE as usize) {
53+
storage.store_headers(chunk).await.unwrap();
54+
}
55+
56+
storage
57+
});
58+
59+
c.bench_function("storage/disk/get", |b| {
60+
b.to_async(&rt).iter_batched(
61+
|| rand::random::<u32>() % NUM_ELEMENTS,
62+
async |height| {
63+
let _ = storage.get_header(height).await.unwrap();
64+
},
65+
BatchSize::SmallInput,
66+
)
67+
});
68+
69+
c.bench_function("storage/disk/reverse_index", |b| {
70+
b.to_async(&rt).iter_batched(
71+
|| {
72+
let height = rand::random::<u32>() % NUM_ELEMENTS;
73+
headers[height as usize].block_hash()
74+
},
75+
async |hash| {
76+
let _ = storage.get_header_height_by_hash(&hash).await.unwrap();
77+
},
78+
BatchSize::SmallInput,
79+
)
80+
});
81+
82+
rt.block_on(async {
83+
storage.shutdown().await;
84+
});
85+
}
86+
87+
// Register the disk-storage benchmark group. Sample size is reduced to 10 and
// the warm-up shortened to 1 s because each "store" iteration persists
// 260,000 headers to disk — far more work than a typical micro-benchmark.
criterion_group!(
    name = disk_storage;
    config = Criterion::default()
        .sample_size(10)
        .warm_up_time(Duration::from_secs(1));
    targets = bench_disk_storage);
criterion_main!(disk_storage);

dash-spv/tests/segmented_storage_test.rs

Lines changed: 1 addition & 43 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ use dashcore::pow::CompactTarget;
77
use dashcore::BlockHash;
88
use dashcore_hashes::Hash;
99
use std::sync::Arc;
10-
use std::time::{Duration, Instant};
10+
use std::time::Duration;
1111
use tempfile::TempDir;
1212
use tokio::time::sleep;
1313

@@ -383,45 +383,3 @@ async fn test_filter_header_persistence() {
383383
assert_eq!(loaded[3], create_test_filter_header(50_001));
384384
}
385385
}
386-
387-
#[tokio::test]
388-
async fn test_performance_improvement() {
389-
let temp_dir = TempDir::new().unwrap();
390-
let mut storage = DiskStorageManager::new(temp_dir.path().to_path_buf()).await.unwrap();
391-
392-
// Store a large number of headers
393-
let headers: Vec<BlockHeader> = (0..200_000).map(create_test_header).collect();
394-
395-
let start = Instant::now();
396-
for chunk in headers.chunks(10_000) {
397-
storage.store_headers(chunk).await.unwrap();
398-
}
399-
let store_time = start.elapsed();
400-
401-
println!("Stored 200,000 headers in {:?}", store_time);
402-
403-
// Test random access performance
404-
let start = Instant::now();
405-
for _ in 0..1000 {
406-
let height = rand::random::<u32>() % 200_000;
407-
let _ = storage.get_header(height).await.unwrap();
408-
}
409-
let access_time = start.elapsed();
410-
411-
println!("1000 random accesses in {:?}", access_time);
412-
assert!(access_time < Duration::from_secs(1), "Random access should be fast");
413-
414-
// Test reverse index performance
415-
let start = Instant::now();
416-
for _ in 0..1000 {
417-
let height = rand::random::<u32>() % 200_000;
418-
let hash = headers[height as usize].block_hash();
419-
let _ = storage.get_header_height_by_hash(&hash).await.unwrap();
420-
}
421-
let lookup_time = start.elapsed();
422-
423-
println!("1000 hash lookups in {:?}", lookup_time);
424-
assert!(lookup_time < Duration::from_secs(1), "Hash lookups should be fast");
425-
426-
storage.shutdown().await;
427-
}

0 commit comments

Comments
 (0)