715 changes: 554 additions & 161 deletions Cargo.lock

Large diffs are not rendered by default.

19 changes: 12 additions & 7 deletions Cargo.toml
@@ -1,7 +1,7 @@
[package]
name = "sqd-worker"
license = "AGPL-3.0-or-later"
version = "2.6.1"
version = "2.7.0"
edition = "2021"

[dependencies]
@@ -15,6 +15,7 @@ bs58 = "0.5.1"
camino = "1.1.6"
clap = { version = "4.4.18", features = ["derive", "env"] }
criterion = { version = "0.5.1", features = ["async_tokio"] }
chrono = "0.4"
crypto_box = "0.9.1"
curve25519-dalek = "4.1.3"
dotenv = "0.15.0"
@@ -27,10 +28,12 @@ lazy_static = "1.4.0"
mimalloc = "0.1.43"
parking_lot = "0.12.1"
prometheus-client = "0.23"
prost = "0.12.3"
polars-plan = "0.43.1"
polars = { version = "0.43.1", features = ["json", "parquet"] }
prost = "0.13.5"
rand = "0.9.2"
regex = "1.10.2"
reqwest = { version = "0.12.4", features = ["json", "stream"] }
reqwest = { version = "0.12.8", features = ["json", "stream"] }
scopeguard = "1.2.0"
sentry = { version = "0.32.2", features = ["tracing"] }
sentry-tower = { version = "0.32.2", features = ["axum", "http"] }
@@ -40,6 +43,7 @@ serde_json = { version = "1.0.111", features = ["preserve_order"] }
serde_with = { version = "3.11.0", features = ["base64"] }
sha2 = "0.10.8"
sha3 = "0.10.8"
substrait = { version = "0.48.0", features = ["serde", "parse"] }
thiserror = "1.0.57"
tokio = { version = "1.35.1", features = ["full", "tracing", "test-util"] }
tokio-rusqlite = "0.5.1"
@@ -53,12 +57,13 @@ url = "2.5.2"
walkdir = "2.5.0"
zstd = "0.13"

sqd-assignments = { git = "https://github.com/subsquid/sqd-network.git", rev = "fd4c694", features = ["reader"] }
sqd-contract-client = { git = "https://github.com/subsquid/sqd-network.git", rev = "fd4c694", version = "1.2.1" }
sqd-messages = { git = "https://github.com/subsquid/sqd-network.git", rev = "fd4c694", version = "2.0.2", features = ["bitstring"] }
sqd-network-transport = { git = "https://github.com/subsquid/sqd-network.git", rev = "fd4c694", version = "3.0.0", features = ["worker", "metrics"] }
sqd-assignments = { git = "https://github.com/subsquid/sqd-network.git", rev = "2a7cb0e", features = ["reader"] }
sqd-contract-client = { git = "https://github.com/subsquid/sqd-network.git", rev = "2a7cb0e", version = "1.2.1" }
sqd-messages = { git = "https://github.com/subsquid/sqd-network.git", rev = "2a7cb0e", version = "2.0.2", features = ["bitstring"] }
sqd-network-transport = { git = "https://github.com/subsquid/sqd-network.git", rev = "2a7cb0e", version = "3.0.0", features = ["worker", "metrics"] }
sqd-query = { git = "https://github.com/subsquid/data.git", rev = "4c089d8", features = ["parquet"] }
sqd-polars = { git = "https://github.com/subsquid/data.git", rev = "4c089d8" }
sql_query_plan = {git = "https://github.com/subsquid/qplan.git", rev = "658f88f" }

[profile.release]
debug = true
2 changes: 2 additions & 0 deletions src/controller/mod.rs
@@ -1,3 +1,5 @@
pub mod assignments;
pub mod p2p;
pub mod polars_target;
pub mod sql_request;
pub mod worker;
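
Alongside the new sql_request and polars_target modules, the p2p.rs changes below thread a QueryType value into handle_query and process_query. QueryType is imported from controller::worker, whose changes are not part of this diff; a minimal sketch of what such an enum might look like, assuming it only distinguishes the two variants referenced here:

// Hypothetical sketch; the real definition lives in src/controller/worker.rs,
// which is not shown in this diff.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum QueryType {
    /// Regular data query, handled the same way as before this change.
    PlainQuery,
    /// SQL query received via the new sql_queries channel.
    SqlQuery,
}
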
54 changes: 52 additions & 2 deletions src/controller/p2p.rs
@@ -23,6 +23,7 @@ use crate::{
self,
allocations_checker::{self, AllocationsChecker},
},
controller::worker::QueryType,
logs_storage::LogsStorage,
metrics,
query::result::{QueryError, QueryResult},
@@ -60,6 +61,9 @@ pub struct P2PController<EventStream> {
queries_tx: mpsc::Sender<(PeerId, Query, ResponseChannel<sqd_messages::QueryResult>)>,
queries_rx:
UseOnce<mpsc::Receiver<(PeerId, Query, ResponseChannel<sqd_messages::QueryResult>)>>,
sql_queries_tx: mpsc::Sender<(PeerId, Query, ResponseChannel<sqd_messages::QueryResult>)>,
sql_queries_rx:
UseOnce<mpsc::Receiver<(PeerId, Query, ResponseChannel<sqd_messages::QueryResult>)>>,
log_requests_tx: mpsc::Sender<(LogsRequest, ResponseChannel<QueryLogs>)>,
log_requests_rx: UseOnce<mpsc::Receiver<(LogsRequest, ResponseChannel<QueryLogs>)>>,
}
@@ -91,6 +95,7 @@ pub async fn create_p2p_controller(
let (event_stream, transport_handle) = transport_builder.build_worker(config).await?;

let (queries_tx, queries_rx) = mpsc::channel(QUERIES_POOL_SIZE);
let (sql_queries_tx, sql_queries_rx) = mpsc::channel(QUERIES_POOL_SIZE);
let (log_requests_tx, log_requests_rx) = mpsc::channel(LOG_REQUESTS_QUEUE_SIZE);

Ok(P2PController {
@@ -108,6 +113,8 @@
assignment_url: args.assignment_url,
queries_tx,
queries_rx: UseOnce::new(queries_rx),
sql_queries_tx,
sql_queries_rx: UseOnce::new(sql_queries_rx),
log_requests_tx,
log_requests_rx: UseOnce::new(log_requests_rx),
})
@@ -123,6 +130,10 @@ impl<EventStream: Stream<Item = WorkerEvent> + Send + 'static> P2PController<Eve
let token = cancellation_token.child_token();
let queries_task = tokio::spawn(async move { this.run_queries_loop(token).await });

let this = self.clone();
let token = cancellation_token.child_token();
let sql_queries_task = tokio::spawn(async move { this.run_sql_queries_loop(token).await });

let this = self.clone();
let token = cancellation_token.child_token();
let assignments_task = tokio::spawn(async move {
@@ -163,6 +174,7 @@ impl<EventStream: Stream<Item = WorkerEvent> + Send + 'static> P2PController<Eve
cancellation_token,
event_task,
queries_task,
sql_queries_task,
assignments_task,
logs_task,
logs_cleanup_task,
@@ -179,14 +191,31 @@ impl<EventStream: Stream<Item = WorkerEvent> + Send + 'static> P2PController<Eve
.for_each_concurrent(CONCURRENT_QUERY_MESSAGES, |(peer_id, query, resp_chan)| {
let this = self.clone();
tokio::spawn(async move {
this.handle_query(peer_id, query, resp_chan).await;
this.handle_query(peer_id, query, resp_chan, QueryType::PlainQuery)
.await;
})
.map(|r| r.unwrap())
})
.await;
info!("Query processing task finished");
}

async fn run_sql_queries_loop(self: Arc<Self>, cancellation_token: CancellationToken) {
let sql_queries_rx = self.sql_queries_rx.take().unwrap();
ReceiverStream::new(sql_queries_rx)
.take_until(cancellation_token.cancelled_owned())
.for_each_concurrent(CONCURRENT_QUERY_MESSAGES, |(peer_id, query, resp_chan)| {
let this = self.clone();
tokio::spawn(async move {
this.handle_query(peer_id, query, resp_chan, QueryType::SqlQuery)
.await;
})
.map(|r| r.unwrap())
})
.await;
info!("SQL Query processing task finished");
}

async fn run_assignments_loop(
&self,
cancellation_token: CancellationToken,
@@ -298,6 +327,24 @@ impl<EventStream: Stream<Item = WorkerEvent> + Send + 'static> P2PController<Eve
}
}
}
WorkerEvent::SqlQuery {
peer_id,
query,
resp_chan,
} => {
if !self.validate_query(&query, peer_id) {
continue;
}
match self.sql_queries_tx.try_send((peer_id, query, resp_chan)) {
Ok(_) => {}
Err(mpsc::error::TrySendError::Full(_)) => {
warn!("SQL Queries queue is full. Dropping query from {peer_id}");
}
Err(mpsc::error::TrySendError::Closed(_)) => {
break;
}
}
}
WorkerEvent::LogsRequest { request, resp_chan } => {
match self.log_requests_tx.try_send((request, resp_chan)) {
Ok(_) => {}
@@ -351,11 +398,12 @@ impl<EventStream: Stream<Item = WorkerEvent> + Send + 'static> P2PController<Eve
peer_id: PeerId,
query: Query,
resp_chan: ResponseChannel<sqd_messages::QueryResult>,
query_type: QueryType,
) {
let query_id = query.query_id.clone();
let compression = query.compression();

let (result, retry_after) = self.process_query(peer_id, &query).await;
let (result, retry_after) = self.process_query(peer_id, &query, query_type).await;
if let Err(e) = &result {
warn!("Query {query_id} by {peer_id} execution failed: {e:?}");
}
@@ -394,6 +442,7 @@ impl<EventStream: Stream<Item = WorkerEvent> + Send + 'static> P2PController<Eve
&self,
peer_id: PeerId,
query: &Query,
query_type: QueryType,
) -> (QueryResult, Option<Duration>) {
match query.compression {
c if c == sqd_messages::Compression::Gzip as i32
@@ -428,6 +477,7 @@ impl<EventStream: Stream<Item = WorkerEvent> + Send + 'static> P2PController<Eve
block_range,
&query.chunk_id,
Some(peer_id),
query_type,
)
.await;
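
Taken together, the p2p.rs changes mirror the existing query pipeline for a second, SQL-specific path: a WorkerEvent::SqlQuery event is validated, pushed into a new bounded sql_queries channel, and drained by run_sql_queries_loop, which calls the same handle_query with QueryType::SqlQuery instead of QueryType::PlainQuery. The stand-alone sketch below illustrates that bounded-channel fan-out pattern using only tokio, tokio-stream, tokio-util and futures; the channel capacity, the Query type and the handler body are placeholders, not the worker's actual implementation.

use futures::StreamExt;
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tokio_util::sync::CancellationToken;

#[derive(Debug, Clone, Copy)]
#[allow(dead_code)]
enum QueryType {
    PlainQuery,
    SqlQuery,
}

struct Query {
    id: String,
}

// Stand-in for P2PController::handle_query: the worker executes the query and
// replies over a libp2p response channel; here we only print what we received.
async fn handle_query(query: Query, query_type: QueryType) {
    println!("handling {} as {:?}", query.id, query_type);
}

#[tokio::main]
async fn main() {
    // Bounded queue: when producers outrun the consumer, try_send returns
    // `Full` and the query is dropped with a warning, as in p2p.rs.
    let (sql_queries_tx, sql_queries_rx) = mpsc::channel::<Query>(16);
    let cancellation_token = CancellationToken::new();

    // Consumer, mirroring run_sql_queries_loop: drain the queue with bounded
    // concurrency until the token is cancelled or the channel closes.
    let token = cancellation_token.child_token();
    let consumer = tokio::spawn(async move {
        ReceiverStream::new(sql_queries_rx)
            .take_until(token.cancelled_owned())
            .for_each_concurrent(8, |query| async move {
                handle_query(query, QueryType::SqlQuery).await;
            })
            .await;
        println!("SQL query processing task finished");
    });

    // Producer, mirroring the WorkerEvent::SqlQuery match arm in the event loop.
    for i in 0..3 {
        match sql_queries_tx.try_send(Query { id: format!("q{i}") }) {
            Ok(()) => {}
            Err(mpsc::error::TrySendError::Full(_)) => {
                eprintln!("SQL queries queue is full, dropping query");
            }
            Err(mpsc::error::TrySendError::Closed(_)) => break,
        }
    }

    // Closing the channel ends the stream in this sketch; the worker instead
    // shuts the loop down through the cancellation token passed to take_until.
    drop(sql_queries_tx);
    consumer.await.unwrap();
}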
