diff --git a/Cargo.lock b/Cargo.lock index 3598a3ddb..4d5a3aab0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -848,6 +848,15 @@ version = "6.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d5dde061bd34119e902bbb2d9b90c5692635cf59fb91d582c2b68043f1b8293" +[[package]] +name = "array-util" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e509844de8f09b90a2c3444684a2b6695f4071360e13d2fda0af9f749cc2ed6" +dependencies = [ + "arrayvec 0.7.6", +] + [[package]] name = "arrayref" version = "0.3.9" @@ -1137,6 +1146,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "autocfg" version = "1.5.0" @@ -1175,6 +1195,7 @@ dependencies = [ "async-channel 1.9.0", "avail-base", "avail-core", + "avail-fri", "avail-observability", "base64", "clap 4.5.53", @@ -1229,7 +1250,7 @@ dependencies = [ [[package]] name = "avail-core" version = "0.6.2" -source = "git+https://github.com/availproject/avail-core.git?rev=b591f8d2a8868fd782b133fcd028a22ddd597293#b591f8d2a8868fd782b133fcd028a22ddd597293" +source = "git+https://github.com/availproject/avail-core.git?rev=93a52cd542b13a4d500c5e9febf93af6b1668498#93a52cd542b13a4d500c5e9febf93af6b1668498" dependencies = [ "binary-merkle-tree", "blake2b_simd", @@ -1260,6 +1281,24 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "avail-fri" +version = "0.1.0" +source = "git+https://github.com/availproject/avail-core.git?rev=93a52cd542b13a4d500c5e9febf93af6b1668498#93a52cd542b13a4d500c5e9febf93af6b1668498" +dependencies = [ + "avail-core", + "binius-field", + "binius-math", + "binius-prover", + "binius-transcript", + "binius-verifier", + "blake2b_simd", + "log", + "parity-scale-codec", + "rand_chacha 0.3.1", + "serde", +] + [[package]] name = "avail-node" version = "0.1.0" @@ -1529,6 +1568,120 @@ dependencies = [ "syn 2.0.111", ] +[[package]] +name = "binius-core" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "binius-utils", + "bytemuck", + "bytes", + "thiserror 2.0.17", +] + +[[package]] +name = "binius-field" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "binius-utils", + "bytemuck", + "cfg-if", + "derive_more 0.99.20", + "rand 0.9.2", + "seq-macro", + "thiserror 2.0.17", +] + +[[package]] +name = "binius-math" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "binius-field", + "binius-utils", + "bytemuck", + "getset", + "itertools 0.14.0", + "rand 0.9.2", + "thiserror 2.0.17", + "tracing", + "uninit", +] + +[[package]] +name = "binius-prover" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "binius-core", + "binius-field", + "binius-math", + "binius-transcript", + "binius-utils", + "binius-verifier", + "bytemuck", + "bytes", + "derive_more 0.99.20", + "digest 0.10.7", + "either", + "getset", + 
"itertools 0.14.0", + "rand 0.9.2", + "thiserror 2.0.17", + "tracing", +] + +[[package]] +name = "binius-transcript" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "auto_impl", + "binius-field", + "binius-utils", + "bytes", + "digest 0.10.7", + "thiserror 2.0.17", + "tracing", +] + +[[package]] +name = "binius-utils" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "array-util", + "bytemuck", + "bytes", + "cfg-if", + "generic-array 0.14.7", + "itertools 0.14.0", + "rayon", + "thiserror 2.0.17", + "trait-set", +] + +[[package]] +name = "binius-verifier" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "binius-core", + "binius-field", + "binius-math", + "binius-transcript", + "binius-utils", + "bytemuck", + "bytes", + "digest 0.10.7", + "getset", + "itertools 0.14.0", + "sha2 0.10.9", + "thiserror 2.0.17", + "tracing", +] + [[package]] name = "bip32" version = "0.5.3" @@ -2818,9 +2971,11 @@ name = "da-commitment" version = "0.1.0" dependencies = [ "anyhow", + "avail-fri", "divan", "kate", "log", + "primitive-types 0.13.1", "thiserror-no-std", ] @@ -2870,6 +3025,7 @@ dependencies = [ "binary-merkle-tree", "bounded-collections", "criterion", + "da-commitment", "da-control", "derive_more 0.99.20", "divan", @@ -4618,6 +4774,18 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "ghash" version = "0.5.1" @@ -6003,7 +6171,7 @@ dependencies = [ [[package]] name = "kate" version = "0.9.2" -source = "git+https://github.com/availproject/avail-core.git?rev=b591f8d2a8868fd782b133fcd028a22ddd597293#b591f8d2a8868fd782b133fcd028a22ddd597293" +source = "git+https://github.com/availproject/avail-core.git?rev=93a52cd542b13a4d500c5e9febf93af6b1668498#93a52cd542b13a4d500c5e9febf93af6b1668498" dependencies = [ "avail-core", "derive_more 0.99.20", @@ -6029,7 +6197,7 @@ dependencies = [ [[package]] name = "kate-recovery" version = "0.10.0" -source = "git+https://github.com/availproject/avail-core.git?rev=b591f8d2a8868fd782b133fcd028a22ddd597293#b591f8d2a8868fd782b133fcd028a22ddd597293" +source = "git+https://github.com/availproject/avail-core.git?rev=93a52cd542b13a4d500c5e9febf93af6b1668498#93a52cd542b13a4d500c5e9febf93af6b1668498" dependencies = [ "avail-core", "derive_more 0.99.20", @@ -11764,6 +11932,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" +[[package]] +name = "seq-macro" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc711410fbe7399f390ca1c3b60ad0f53f80e95c5eb935e52268a0e2cd49acc" + [[package]] name = "serde" version = "1.0.228" @@ -14603,6 +14777,17 @@ dependencies = [ "tracing-log", ] +[[package]] +name = "trait-set" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b79e2e9c9ab44c6d7c20d5976961b47e8f49ac199154daa514b77cd1ab536625" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "trie-db" version = "0.24.0" @@ -14813,6 +14998,12 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" +[[package]] +name = "uninit" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "359fdaaabedff944f100847f2e0ea88918d8012fe64baf5b54c191ad010168c9" + [[package]] name = "universal-hash" version = "0.5.1" @@ -16308,8 +16499,3 @@ dependencies = [ "cc", "pkg-config", ] - -[[patch.unused]] -name = "sp-crypto-ec-utils" -version = "0.18.0" -source = "git+https://github.com/availproject/polkadot-sdk.git?rev=ff3c47f6c3f52c0b1b919bc56cd3108c17cce822#ff3c47f6c3f52c0b1b919bc56cd3108c17cce822" diff --git a/Cargo.toml b/Cargo.toml index 7ac958a8f..0713c80d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,10 +32,12 @@ homepage = "https://www.availproject.org/" [workspace.dependencies] -avail-core = { git = "https://github.com/availproject/avail-core.git", rev = "b591f8d2a8868fd782b133fcd028a22ddd597293", default-features = false, features = [ "runtime"] } -kate = { git = "https://github.com/availproject/avail-core", rev = "b591f8d2a8868fd782b133fcd028a22ddd597293", default-features = false } -kate-recovery = { git = "https://github.com/availproject/avail-core", rev = "b591f8d2a8868fd782b133fcd028a22ddd597293", default-features = false } +avail-core = { git = "https://github.com/availproject/avail-core.git", rev = "93a52cd542b13a4d500c5e9febf93af6b1668498", default-features = false, features = [ "runtime"] } +avail-fri = { git = "https://github.com/availproject/avail-core.git", rev = "93a52cd542b13a4d500c5e9febf93af6b1668498", default-features = false } +kate = { git = "https://github.com/availproject/avail-core", rev = "93a52cd542b13a4d500c5e9febf93af6b1668498", default-features = false } +kate-recovery = { git = "https://github.com/availproject/avail-core", rev = "93a52cd542b13a4d500c5e9febf93af6b1668498", default-features = false } # avail-core = { path = "../avail-core/core", default-features = false, features = [ "runtime"] } +# avail-fri = { path = "../avail-core/fri", default-features = false } # kate = { path = "../avail-core/kate/", default-features = false } # kate-recovery = { path = "../avail-core/kate/recovery/", default-features = false} @@ -65,7 +67,7 @@ iai-callgrind = "0.7.3" divan = "0.1.11" # Logging and testing -log = "0.4.20" +log = { version = "0.4.20", default-features = false } test-case = "1.2.3" hex = "0.4" const-hex = { version = "1.14.1", default-features = false, features = ["alloc"] } @@ -260,8 +262,8 @@ frame-system = { path = "pallets/system" } frame-system-benchmarking = { path = "pallets/system/benchmarking" } frame-system-rpc-runtime-api = { path = "pallets/system/rpc/runtime-api" } -[patch."https://github.com/paritytech/polkadot-sdk"] -sp-crypto-ec-utils = { git = "https://github.com/availproject/polkadot-sdk.git", rev = "ff3c47f6c3f52c0b1b919bc56cd3108c17cce822" } +# [patch."https://github.com/paritytech/polkadot-sdk"] +# sp-crypto-ec-utils = { git = "https://github.com/availproject/polkadot-sdk.git", rev = "ff3c47f6c3f52c0b1b919bc56cd3108c17cce822" } [patch.crates-io] # Other stuff @@ -363,3 +365,4 @@ zeroize = { opt-level = 3 } [profile.release] # Substrate runtime requires unwinding. 
panic = "unwind" +lto = "thin" diff --git a/base/Cargo.toml b/base/Cargo.toml index 6017e7c70..d1b5d8de6 100644 --- a/base/Cargo.toml +++ b/base/Cargo.toml @@ -14,8 +14,6 @@ avail-core = { workspace = true, default-features = false } # Substrate related codec = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } -# parking_lot= { workspace = true } -parking_lot= { workspace = true, optional = true } scale-info = { workspace = true, default-features = false } sp-core = { workspace = true, default-features = false, features = ["serde"] } sp-std = { workspace = true, default-features = false } @@ -28,7 +26,8 @@ frame-support = { workspace = true, default-features = false } binary-merkle-tree = { workspace = true, default-features = false } # 3rd-party -log.workspace = true +log = { workspace = true, default-features = false } +parking_lot = { workspace = true, optional = true } derive_more.workspace = true itertools = { workspace = true, default-features = false } @@ -43,6 +42,7 @@ std = [ "binary-merkle-tree/std", "codec/std", "frame-support/std", + "log/std", "parking_lot", "sp-api/std", "sp-authority-discovery/std", diff --git a/base/src/header_extension/builder_data.rs b/base/src/header_extension/builder_data.rs index c06eed1f9..e214bd01e 100644 --- a/base/src/header_extension/builder_data.rs +++ b/base/src/header_extension/builder_data.rs @@ -9,6 +9,7 @@ use codec::{Decode, Encode}; use derive_more::Constructor; use sp_core::H256; use sp_runtime::OpaqueExtrinsic; +use sp_std::collections::btree_map::BTreeMap; use sp_std::{iter::repeat, vec::Vec}; #[derive(Constructor, Debug, Encode, Decode, Clone, PartialEq, Eq)] @@ -17,12 +18,16 @@ pub struct BridgedData { pub addr_msg: AddressedMessage, } -#[derive(Debug, Constructor, Encode, Decode, PartialEq, Eq, Clone)] +#[derive(Debug, Constructor, Encode, Decode, Default, PartialEq, Eq, Clone)] pub struct SubmittedData { pub id: AppId, pub tx_index: u32, pub hash: H256, + pub size_bytes: u64, pub commitments: Vec, + pub eval_point_seed: Option<[u8; 32]>, + pub eval_claim: Option<[u8; 16]>, + pub eval_proof: Option<Vec<u8>>, } impl GetAppId for SubmittedData { @@ -43,34 +48,36 @@ pub struct HeaderExtensionBuilderData { pub bridge_messages: Vec, } +#[derive(Clone, Debug, Default)] +pub struct PostInherentInfo { + pub eval_proofs: BTreeMap<u32, Vec<u8>>, + pub failed: Vec<u32>, +} + impl HeaderExtensionBuilderData { pub fn from_raw_extrinsics<F: HeaderExtensionDataFilter>( block: u32, extrinsics: &[Vec<u8>], - cols: u32, - rows: u32, ) -> Self { let opaques: Vec<OpaqueExtrinsic> = extrinsics .iter() .filter_map(|e| OpaqueExtrinsic::from_bytes(e).ok()) .collect(); - Self::from_opaque_extrinsics::<F>(block, &opaques, cols, rows) + Self::from_opaque_extrinsics::<F>(block, &opaques) } pub fn from_opaque_extrinsics<F: HeaderExtensionDataFilter>( block: u32, opaques: &[OpaqueExtrinsic], - cols: u32, - rows: u32, ) -> Self { - let failed_transactions = F::get_failed_transaction_ids(opaques); + let post_inherent_info = F::get_data_from_post_inherents(opaques); let extracted_tx_datas: Vec<ExtractedTxData> = opaques .into_iter() .enumerate() .filter_map(|(idx, opaque)| { - F::filter(&failed_transactions, opaque.clone(), block, idx, cols, rows) + F::filter(post_inherent_info.clone(), opaque.clone(), block, idx) }) .collect();
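For orientation, the new `PostInherentInfo` is what `from_opaque_extrinsics` now threads through every `F::filter` call, replacing the old failed-transaction list plus grid dimensions. A hedged sketch of how an implementer might populate it; the `(tx_index, succeeded, proof)` entry shape is a hypothetical decoded view of the DA post-inherent summary, not part of this diff:

```rust
use sp_std::collections::btree_map::BTreeMap;
use sp_std::vec::Vec;

// Hypothetical decoded view of the DA post-inherent summary:
// one entry per submission = (tx_index, succeeded, optional FRI eval proof bytes).
fn build_post_inherent_info(entries: Vec<(u32, bool, Option<Vec<u8>>)>) -> PostInherentInfo {
    let mut info = PostInherentInfo::default();
    for (tx_index, succeeded, eval_proof) in entries {
        if !succeeded {
            // Failed submissions are later filtered out of the header extension.
            info.failed.push(tx_index);
        } else if let Some(proof) = eval_proof {
            // Successful FRI submissions carry their eval proof, keyed by tx index.
            info.eval_proofs.insert(tx_index, proof);
        }
    }
    info
}
```

Note that `filter` receives the whole struct by value for each extrinsic, which is why the builder clones it on every iteration.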
@@ -259,8 +266,7 @@ mod tests { #[test] fn test_from_raw_extrinsics() { let extrinsics: Vec<Vec<u8>> = vec![vec![1, 2, 3], vec![4, 5, 6]]; - let builder_data = - HeaderExtensionBuilderData::from_raw_extrinsics::<()>(1, &extrinsics, 1024, 4096); + let builder_data = HeaderExtensionBuilderData::from_raw_extrinsics::<()>(1, &extrinsics); assert_eq!(builder_data.data_submissions.len(), 0); assert_eq!(builder_data.bridge_messages.len(), 0); } @@ -272,7 +278,9 @@ id: AppId::default(), tx_index: 0, commitments: vec![], + size_bytes: 0, hash: H256::from(keccak_256(&vec![1, 2, 3])), + ..Default::default() }], bridge_messages: vec![], }; @@ -289,7 +297,9 @@ id: AppId::default(), tx_index: 0, hash: H256::from(keccak_256(&vec![1, 2, 3])), + size_bytes: 0, commitments: vec![], + ..Default::default() }], bridge_messages: vec![], }; @@ -305,8 +315,10 @@ data_submissions: vec![SubmittedData { id: AppId::default(), tx_index: 0, + size_bytes: 0, hash: H256::from(keccak_256(&vec![1, 2, 3])), commitments: vec![], + ..Default::default() }], bridge_messages: vec![], }; @@ -319,26 +331,34 @@ SubmittedData { id: AppId(3), tx_index: 0, + size_bytes: 0, hash: H256::from(keccak_256(&vec![1, 2, 3])), commitments: vec![], + ..Default::default() }, SubmittedData { id: AppId(1), tx_index: 1, + size_bytes: 0, hash: H256::from(keccak_256(&vec![4, 5, 6])), commitments: vec![], + ..Default::default() }, SubmittedData { id: AppId(2), tx_index: 2, + size_bytes: 0, hash: H256::from(keccak_256(&vec![7, 8, 9])), commitments: vec![], + ..Default::default() }, SubmittedData { id: AppId(1), tx_index: 3, + size_bytes: 0, hash: H256::from(keccak_256(&vec![7, 8, 9])), commitments: vec![], + ..Default::default() }, ]; diff --git a/base/src/header_extension/mod.rs b/base/src/header_extension/mod.rs index f82adea21..5ffdad271 100644 --- a/base/src/header_extension/mod.rs +++ b/base/src/header_extension/mod.rs @@ -3,5 +3,7 @@ pub mod builder_data; pub mod traits; // Reexport -pub use builder_data::{BridgedData, ExtractedTxData, HeaderExtensionBuilderData, SubmittedData}; +pub use builder_data::{ + BridgedData, ExtractedTxData, HeaderExtensionBuilderData, PostInherentInfo, SubmittedData, +}; pub use traits::HeaderExtensionDataFilter; diff --git a/base/src/header_extension/traits.rs b/base/src/header_extension/traits.rs index 6eae154d9..687aa3a12 100644 --- a/base/src/header_extension/traits.rs +++ b/base/src/header_extension/traits.rs @@ -1,51 +1,44 @@ -use super::ExtractedTxData; +use super::{ExtractedTxData, PostInherentInfo}; use sp_runtime::OpaqueExtrinsic; -use sp_std::vec::Vec; pub trait HeaderExtensionDataFilter { fn filter( - failed_transactions: &[u32], + post_inherent_info: PostInherentInfo, opaque: OpaqueExtrinsic, block: u32, tx_idx: usize, - cols: u32, - rows: u32, ) -> Option<ExtractedTxData>; - fn get_failed_transaction_ids(opaques: &[OpaqueExtrinsic]) -> Vec<u32>; + fn get_data_from_post_inherents(opaques: &[OpaqueExtrinsic]) -> PostInherentInfo; } #[cfg(feature = "std")] impl HeaderExtensionDataFilter for () { fn filter( - _: &[u32], + _: PostInherentInfo, _: OpaqueExtrinsic, _: u32, _: usize, - _: u32, - _: u32, ) -> Option<ExtractedTxData> { None } - fn get_failed_transaction_ids(_: &[OpaqueExtrinsic]) -> Vec<u32> { - Vec::new() + fn get_data_from_post_inherents(_: &[OpaqueExtrinsic]) -> PostInherentInfo { + PostInherentInfo::default() } } #[cfg(not(feature = "std"))] impl HeaderExtensionDataFilter for () { fn filter( - _: &[u32], + _: PostInherentInfo, _: OpaqueExtrinsic, _: u32, _: usize, - _: u32, - _: u32, ) -> Option<ExtractedTxData> { None } - fn get_failed_transaction_ids(_: &[OpaqueExtrinsic]) -> Vec<u32> { - Vec::new() + fn get_data_from_post_inherents(_: &[OpaqueExtrinsic]) -> PostInherentInfo { + PostInherentInfo::default() } }
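To make the new trait shape concrete: a minimal sketch of a non-trivial implementation, assuming a hypothetical `decode_submitted_data` helper that recognizes DA submissions (the real filter lives in the runtime and is not shown in this diff):

```rust
use sp_runtime::OpaqueExtrinsic;

struct MyFilter; // hypothetical implementer

impl HeaderExtensionDataFilter for MyFilter {
    fn filter(
        post_inherent_info: PostInherentInfo,
        opaque: OpaqueExtrinsic,
        block: u32,
        tx_idx: usize,
    ) -> Option<ExtractedTxData> {
        let tx_idx = tx_idx as u32;
        // Failed submissions are dropped from the header extension entirely.
        if post_inherent_info.failed.contains(&tx_idx) {
            return None;
        }
        // Hypothetical: decode the opaque extrinsic into SubmittedData.
        let mut data = decode_submitted_data(&opaque, block, tx_idx)?;
        // Enrich the submission with the eval proof carried by the post-inherent.
        data.eval_proof = post_inherent_info.eval_proofs.get(&tx_idx).cloned();
        // Hypothetical: wrap into whatever ExtractedTxData actually looks like.
        Some(wrap_submitted_data(data))
    }

    fn get_data_from_post_inherents(opaques: &[OpaqueExtrinsic]) -> PostInherentInfo {
        // Would scan `opaques` for the DA post-inherent summary; see the
        // build_post_inherent_info sketch above. Stubbed here.
        PostInherentInfo::default()
    }
}
```

`decode_submitted_data` and `wrap_submitted_data` are placeholders; the actual decoding and the `ExtractedTxData` constructor are defined elsewhere in `builder_data.rs` and the runtime.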
diff --git a/base/src/post_inherents.rs b/base/src/post_inherents.rs index 2598b5c20..9d7de6102 100644 --- a/base/src/post_inherents.rs +++ b/base/src/post_inherents.rs @@ -68,6 +68,7 @@ decl_runtime_apis! { bool, Option, Vec<(sp_runtime::AccountId32, AuthorityId, String, Vec<u8>)>, + Option<Vec<u8>>, )>, total_blob_size: u64) -> Vec<::Extrinsic>; } } diff --git a/blob/Cargo.toml b/blob/Cargo.toml index 96783ebeb..99ff57e6c 100644 --- a/blob/Cargo.toml +++ b/blob/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" da-runtime = { workspace = true, default-features = false } da-control = { workspace = true, default-features = false } avail-core = { workspace = true, default-features = false } +avail-fri = { workspace = true, default-features = false, features = ["serde"]} pallet-staking = { workspace = true, default-features = false } sp-consensus-babe = { workspace = true, default-features = false } sc-transaction-pool = { workspace = true, default-features = false } @@ -72,6 +73,7 @@ default = [ "std" ] std = [ "avail-base/std", "avail-core/std", + "avail-fri/std", "codec/std", "da-commitment/std", "da-control/std", @@ -89,7 +91,6 @@ std = [ "sp-api/std", "sp-authority-discovery/std", "sp-core/std", - "sp-io/std", "sp-runtime/std", "sp-std/std", "sp-transaction-pool/std", diff --git a/blob/benches/submit_data.rs b/blob/benches/submit_data.rs index 88d08e0b3..77d55fcac 100644 --- a/blob/benches/submit_data.rs +++ b/blob/benches/submit_data.rs @@ -8,8 +8,8 @@ use avail_blob::traits::*; use avail_blob::types::CompressedBlob; use avail_blob::utils::CommitmentQueue; use avail_rust::prelude::*; -use da_commitment::build_da_commitments; -use da_commitment::build_da_commitments::build_da_commitments; +use da_commitment::build_kzg_commitments; +use da_commitment::build_kzg_commitments::build_da_commitments; use da_runtime::AccountId; use da_runtime::UncheckedExtrinsic; use da_runtime::AVAIL; @@ -108,6 +108,13 @@ impl RuntimeApiT for DummyRuntimeApi { fn get_blob_vouch_fee_reserve(&self, _block_hash: H256) -> Result { Ok(AVAIL) } + + fn commitment_scheme( + &self, + _block_hash: H256, + ) -> Result<avail_core::header::extension::CommitmentScheme> { + Ok(avail_core::header::extension::CommitmentScheme::Fri) + } } struct BuildTxOutput { @@ -215,7 +222,7 @@ mod validation { let queue: Arc = Arc::new(queue); // grid & Commitment - let grid = build_da_commitments::build_polynomial_grid( + let grid = build_kzg_commitments::build_polynomial_grid( &*tx.data, DEFAULT_ROWS, DEFAULT_COLS, @@ -228,7 +235,7 @@ .bench_local_refs(|params| { let runtime = tokio::runtime::Runtime::new().unwrap(); runtime.block_on(async { - avail_blob::validation::commitment_validation( + avail_blob::validation::validate_kzg_commitment( tx.data_hash, &params.0, params.1.clone(), @@ -246,7 +253,7 @@ let tx = build_transaction(data); bencher.with_inputs(|| &tx).bench_local_refs(|tx| { - build_da_commitments::build_polynomial_grid( + build_kzg_commitments::build_polynomial_grid( &*tx.data, DEFAULT_ROWS, DEFAULT_COLS, diff --git a/blob/src/lib.rs b/blob/src/lib.rs index 09476d908..435af951f 100644 --- a/blob/src/lib.rs +++ b/blob/src/lib.rs @@ -11,18 +11,21 @@ pub mod validation; use crate::{ p2p::BlobHandle, + traits::RuntimeClient, types::{ Blob, BlobHash, BlobMetadata, BlobNotification, BlobQueryRequest, BlobReceived, BlobReputationChange, BlobRequest, BlobRequestEnum, BlobResponse, BlobResponseEnum, BlobSignatureData, BlobStored, OwnershipEntry, BLOB_REQ_PROTO, }, utils::{ - build_signature_payload, generate_base_index, get_active_validators, get_my_validator_id, - get_validator_id_from_key, get_validator_per_blob, sign_blob_data, validators_for_blob, - 
verify_signed_blob_data, + build_signature_payload, designated_prover_index, generate_base_index, + get_active_validators, get_my_validator_id, get_validator_id_from_key, + get_validator_per_blob, sign_blob_data, validators_for_blob, verify_signed_blob_data, }, + validation::{validate_fri_commitment, validate_fri_proof}, }; use anyhow::{anyhow, Result}; +use avail_core::header::extension::CommitmentScheme; use codec::{Decode, Encode}; use da_control::BlobRuntimeParameters; use da_runtime::apis::BlobApi; @@ -191,27 +194,83 @@ async fn handle_blob_received_notification( }, }; - // Get the existing blob or create a new one - let mut blob_meta = maybe_metadata.unwrap_or_else(|| BlobMetadata { - hash: blob_received.hash, - size: blob_received.size, - commitment: blob_received.commitment.clone(), - is_notified: true, - expires_at: 0, - finalized_block_hash: Default::default(), - finalized_block_number: 0, - nb_validators_per_blob: 0, - nb_validators_per_blob_threshold: 0, - storing_validator_list: Default::default(), - }); + // If the eval_proof is received, validate it + if blob_received.fri_eval_proof.is_some() { + let eval_point_seed = match &blob_received.eval_point_seed { + Some(seed) => seed, + None => { + log::error!(target: LOG_TARGET, "Missing eval_point_seed for FRI blob"); + return; + }, + }; + let eval_claim = match &blob_received.eval_claim { + Some(claim) => claim, + None => { + log::error!(target: LOG_TARGET, "Missing eval_claim for FRI blob"); + return; + }, + }; - // If we already had the blob metadata but incomplete (is notified == false) we fill the missing data. - if !blob_meta.is_notified { - blob_meta.size = blob_received.size; - blob_meta.commitment = blob_received.commitment; - blob_meta.is_notified = true; + match validate_fri_proof( + blob_received.size as usize, + eval_point_seed, + eval_claim, + &blob_received + .fri_eval_proof + .as_ref() + .expect("checked above"), + ) { + Ok(_) => { + log::info!( + target: LOG_TARGET, + "Successfully validated eval proof for blob: {:?}", + blob_received.hash + ); + }, + Err(e) => { + log::error!(target: LOG_TARGET, "FRI proof validation failed: {}", e); + return; + }, + } } + // Get the existing blob or create a new one + let mut blob_meta = if let Some(existing) = maybe_metadata { + let mut merged = existing; + + // Fill missing notification data + if !merged.is_notified { + merged.size = blob_received.size; + merged.commitment = blob_received.commitment.clone(); + merged.is_notified = true; + } + + // allow enrichment with eval proof + if merged.fri_eval_proof.is_none() && blob_received.fri_eval_proof.is_some() { + merged.fri_eval_proof = blob_received.fri_eval_proof; + merged.fri_eval_prover_index = blob_received.fri_eval_prover_index; + } + + merged + } else { + BlobMetadata { + hash: blob_received.hash, + size: blob_received.size, + commitment: blob_received.commitment.clone(), + is_notified: true, + expires_at: 0, + finalized_block_hash: Default::default(), + finalized_block_number: 0, + nb_validators_per_blob: 0, + nb_validators_per_blob_threshold: 0, + storing_validator_list: Default::default(), + eval_point_seed: blob_received.eval_point_seed, + eval_claim: blob_received.eval_claim, + fri_eval_proof: blob_received.fri_eval_proof, + fri_eval_prover_index: blob_received.fri_eval_prover_index, + } + }; + // TODO Blob hack let h: [u8; 32] = match announced_finalized_hash.encode().try_into() { Ok(x) => x, @@ -266,12 +325,12 @@ async fn handle_blob_received_notification( let (my_validator_id, babe_key) = match 
get_my_validator_id( + &blob_handle.keystore, - &blob_handle.client, - &announced_finalized_hash.encode(), + Arc::new(RuntimeClient::new(blob_handle.client.clone())).as_ref(), + announced_finalized_hash, ) { - Some(v) => v, - None => { - log::error!(target: LOG_TARGET, "No keys found while trying to get this node's id"); + Ok(v) => v, + Err(e) => { + log::error!(target: LOG_TARGET, "No keys found while trying to get this node's id: {e}"); return; }, }; @@ -296,6 +355,17 @@ async fn handle_blob_received_notification( let should_store_blob = storing_validators.contains(&my_validator_id); + let prover_index = designated_prover_index( + &blob_received.hash, + &blob_received.finalized_block_hash, + nb_validators_per_blob, + ); + + let should_send_proof = storing_validators + .get(prover_index as usize) + .map_or(false, |id| *id == my_validator_id); + + let mut eval_proof: Option<Vec<u8>> = None; blob_meta.storing_validator_list = storing_validators; if should_store_blob { @@ -392,6 +462,59 @@ async fn handle_blob_received_notification( return; } + // Do the commitment validation and other checks, such as whether the eval_point_seed is correctly provided. + // Also, generate the eval_proof if needed. + let commitment_scheme = match blob_handle + .client + .runtime_api() + .commitement_scheme(blob_received.finalized_block_hash) + { + Ok(scheme) => scheme, + Err(e) => { + log::error!( + "Could not get commitment scheme from runtime at {:?}: {e:?}. Falling back to Fri.", + blob_received.finalized_block_hash + ); + CommitmentScheme::Fri + }, + }; + + match commitment_scheme { + CommitmentScheme::Kzg => { + todo!("KZG commitment validation") + }, + CommitmentScheme::Fri => { + // Check if the eval_point_seed and eval_claim are present in the associated BlobMetadata tx + if blob_received.eval_point_seed.is_none() || blob_received.eval_claim.is_none() + { + log::error!(target: LOG_TARGET, "Missing eval_point_seed or eval_claim for FRI blob"); + return; + } + + let fri_eval_proof = match validate_fri_commitment( + blob_received.hash, + &blob_data, + &blob_received.commitment, + &blob_received.eval_point_seed.expect("checked above"), + &blob_received.eval_claim.expect("checked above"), + ) { + Ok(proof_bytes) => proof_bytes, + Err(e) => { + log::error!(target: LOG_TARGET, "FRI commitment validation failed: {}", e); + return; + }, + }; + + if should_send_proof { + // Send the eval_proof with the stored_blob notification & also update the local metadata with it + log::info!(target: LOG_TARGET, "Designated prover for blob {}, sending eval proof", blob_received.hash); + eval_proof = Some(fri_eval_proof); + blob_meta.fri_eval_proof = eval_proof.clone(); + blob_meta.fri_eval_prover_index = Some(prover_index); + } + }, + } + // Insert the blob in the store if let Err(e) = blob_handle .blob_database @@ -420,6 +543,7 @@ &blob_handle, ownership, announced_finalized_hash, + eval_proof, ) .await; } @@ -730,19 +854,22 @@ pub async fn send_blob_stored_notification( blob_handle: &BlobHandle<Block>, ownership_entry: OwnershipEntry, finalized_block_hash: H256, + eval_proof: Option<Vec<u8>>, ) where Block: BlockT, { let timer = std::time::Instant::now(); log::info!( - "BLOB - send_blob_stored_notification - START - {:?} - {:?}", + "BLOB - send_blob_stored_notification - START - {:?}: eval_proof?: {} - {:?}", blob_hash, + eval_proof.is_some(), timer.elapsed() ); let blob_stored = BlobStored { hash: blob_hash, ownership_entry, finalized_block_hash, + eval_proof, }; if let Err(e) = blob_handle
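`designated_prover_index` is called here and again in `store_and_gossip_blob`, but its body lives in `blob/src/utils.rs`, which is truncated at the end of this diff. The call sites only require that it be a pure function of `(blob_hash, finalized_block_hash, nb_validators_per_blob)` so that every validator independently agrees on the same prover. A plausible sketch under that assumption; the actual hashing scheme may differ:

```rust
use sp_core::{hashing::blake2_256, H256};

// Sketch only: deterministic prover selection every node can recompute locally.
// Assumes nb_validators_per_blob > 0.
fn designated_prover_index(
    blob_hash: &H256,
    finalized_block_hash: &H256,
    nb_validators_per_blob: u32,
) -> u32 {
    let mut input = [0u8; 64];
    input[..32].copy_from_slice(blob_hash.as_bytes());
    input[32..].copy_from_slice(finalized_block_hash.as_bytes());
    let digest = blake2_256(&input);
    // Reduce the first 8 digest bytes into the validator range.
    let n = u64::from_le_bytes(digest[..8].try_into().expect("8 bytes; qed"));
    (n % nb_validators_per_blob as u64) as u32
}
```

Whatever the real construction, note that the `should_send_proof` guard above accesses the list with `.get(prover_index as usize)`, so an out-of-range index degrades to "not the prover" rather than panicking.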
@@ -767,8 +894,9 @@ async fn handle_blob_stored_notification( { let timer = std::time::Instant::now(); log::info!( - "BLOB - handle_blob_stored_notification - START - {:?} - {:?}", + "BLOB - handle_blob_stored_notification - START - {:?}: eval_proof?: {} - {:?}", blob_stored.hash, + blob_stored.eval_proof.is_some(), timer.elapsed() ); let is_authority = blob_handle.role.is_authority(); @@ -874,6 +1002,81 @@ } } + // Check if we have the blob metadata in our store; if so, we can validate the eval proof and store it + if metadata_exists && blob_stored.eval_proof.is_some() { + let mut blob_metadata = match blob_handle + .blob_database + .get_blob_metadata(&blob_stored.hash) + { + Ok(Some(m)) => m, + Ok(None) => { + log::error!( + target: LOG_TARGET, + "Could not find blob metadata while trying to store eval proof for blob: {:?}", + blob_stored.hash + ); + return; + }, + Err(e) => { + log::error!( + target: LOG_TARGET, + "An error has occurred while trying to get blob metadata from the store: {e}" + ); + return; + }, + }; + + match validate_fri_proof( + blob_metadata.size as usize, + &blob_metadata + .eval_point_seed + .as_ref() + .expect("should be present in metadata"), + &blob_metadata + .eval_claim + .as_ref() + .expect("should be present in metadata"), + blob_stored.eval_proof.as_ref().expect("checked above"), + ) { + Ok(_) => { + log::info!( + target: LOG_TARGET, + "Successfully validated eval proof for blob: {:?}", + blob_stored.hash + ); + }, + Err(e) => { + log::error!( + target: LOG_TARGET, + "Failed to validate eval proof for blob {:?}: {}", + blob_stored.hash, + e + ); + return; + }, + }; + blob_metadata.fri_eval_proof = blob_stored.eval_proof; + + if let Err(e) = blob_handle + .blob_database + .insert_blob_metadata(&blob_metadata) + { + log::error!( + target: LOG_TARGET, + "An error has occurred while trying to add the eval_proof to blob metadata in the store: {e}" + ); + } + } + // If we received a notification with an eval_proof but no metadata, we do nothing for now; just log it + // TODO: handle this case better, maybe queue it for later processing + else if !metadata_exists && blob_stored.eval_proof.is_some() { + log::warn!( + target: LOG_TARGET, + "Received eval_proof for blob {:?} but no metadata found, skipping for now", + blob_stored.hash + ); + } + log::info!( "BLOB - handle_blob_stored_notification - END - {:?} - {:?}", blob_stored.hash,
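Both notification paths above enforce the same invariant before touching `validate_fri_proof`: a proof is only checkable together with the seed and claim it was produced against. A hedged sketch of that shared guard, written against the `validate_fri_proof(size, seed, claim, proof)` signature visible at the call sites (the function itself is in `blob/src/validation.rs`, outside this diff):

```rust
// Illustrative consolidation of the repeated precondition checks.
fn checked_validate_eval_proof(
    size: u64,
    eval_point_seed: Option<&[u8; 32]>,
    eval_claim: Option<&[u8; 16]>,
    eval_proof: Option<&Vec<u8>>,
) -> Result<(), String> {
    let proof = eval_proof.ok_or("no eval proof attached")?;
    // A proof without its seed and claim is unverifiable; reject early
    // instead of expect()-ing deeper in the handler.
    let seed = eval_point_seed.ok_or("missing eval_point_seed for FRI blob")?;
    let claim = eval_claim.ok_or("missing eval_claim for FRI blob")?;
    validate_fri_proof(size as usize, seed, claim, proof).map_err(|e| e.to_string())
}
```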
da_commitment::build_da_commitments::build_polynomial_grid; +use da_commitment::build_kzg_commitments::build_polynomial_grid; use da_control::{BlobRuntimeParameters, Call}; use da_runtime::apis::KateApi; -use da_runtime::{RuntimeCall, UncheckedExtrinsic}; +use da_runtime::{Runtime, RuntimeCall, UncheckedExtrinsic}; use frame_system::limits::BlockLength; use jsonrpsee::{ core::{async_trait, RpcResult}, proc_macros::rpc, types::error::ErrorObject, }; +use parking_lot::Mutex; use sc_client_api::{BlockBackend, HeaderBackend, StateBackend}; use sc_network::NetworkStateInfo; use sc_network::PeerId; @@ -43,6 +52,7 @@ use sp_runtime::{ transaction_validity::TransactionSource, AccountId32, SaturatedConversion, }; +use std::collections::HashMap; use std::{ marker::{PhantomData, Sync}, str::FromStr, @@ -50,6 +60,18 @@ use std::{ }; use tokio::task; +/// Cached FRI state for a blob at a given block +#[derive(Clone)] +struct FriSamplingCacheEntry { + commit_output: Arc>, + pcs: Arc, +} + +// block_hash, blob_hash +type FriSamplingCacheKey = (H256, H256); + +type RTExtractor = ::HeaderExtensionDataFilter; + pub enum Error { BlobError, } @@ -77,6 +99,34 @@ pub trait BlobApi where Block: BlockT, { + /// Submits a data blob and its metadata transaction to the network. + /// + /// This RPC performs the full client-side submission flow: + /// - validates the metadata transaction + /// - validates the blob size and commitment + /// - verifies (or generates) commitment-related proofs + /// - gossips the blob to designated blob owners + /// - submits the metadata transaction to the transaction pool + /// + /// The blob data itself is **not** included on-chain. Only the metadata + /// transaction is submitted to the chain. + /// + /// ### Parameters + /// - `metadata_signed_transaction`: + /// A SCALE-encoded, signed metadata transaction (base64-encoded), + /// typically a `submit_blob_metadata` call. + /// - `blob`: + /// The raw blob data (base64-encoded). + /// + /// ### Returns + /// - `()` on successful submission. + /// + /// ### Errors + /// - If the blob is empty or exceeds size limits. + /// - If the metadata transaction is invalid or expired. + /// - If the commitment or evaluation data is invalid. + /// - If commitment validation fails. + /// - If submission to the transaction pool fails. #[method(name = "blob_submitBlob")] async fn submit_blob( &self, @@ -84,16 +134,90 @@ where blob: B64Param, ) -> RpcResult<()>; + /// Returns the full blob data for a given blob hash. + /// + /// This RPC retrieves the blob either from local storage or + /// from the network, depending on availability. + /// + /// The RPC operates in **two modes**: + /// + /// ### Mode A: Block-scoped lookup + /// If `at` is provided: + /// - Blob ownership is derived from the DA post-inherent + /// in the specified block. + /// - The node attempts to fetch the blob from the + /// owners listed in that block. + /// + /// ### Mode B: Storage-based lookup + /// If `at` is omitted: + /// - Blob ownership is derived from the local blob indexer. + /// - The node attempts to fetch the blob from known owners. + /// + /// In both cases: + /// - If the blob exists locally, it is returned immediately. + /// - Otherwise, the node queries blob owners via p2p. + /// + /// ### Parameters + /// - `blob_hash`: The hash of the blob to retrieve. + /// - `at`: Optional block hash. + /// - If provided, restricts lookup to blob ownership + /// recorded in that block. + /// - If omitted, uses locally indexed blob ownership. 
+ /// + /// ### Returns + /// - `Blob` containing: + /// - blob hash + /// - blob size + /// - raw blob data + /// + /// ### Errors + /// - If the blob hash is unknown. + /// - If no owners are known for the blob. + /// - If all attempts to fetch the blob from owners fail. + /// - If the block specified by `at` cannot be found or decoded. #[method(name = "blob_getBlob")] - /// This RPC will work in two different modes based on params passed: - /// if 'at' param is passed, it will try to get blob ownership from that block's blob tx summaries - /// if 'at' param is None, it will try to get blob ownership from storage's blob info - /// based on the blob ownership, it will try to get the blob from local storage or from p2p network async fn get_blob(&self, blob_hash: H256, at: Option<Block::Hash>) -> RpcResult<Blob>; + /// Returns metadata and inclusion information for a blob. + /// + /// This RPC queries the local blob indexer and returns information + /// about the block in which the blob was included and the validators + /// that claimed ownership of the blob. + /// + /// This does **not** return the blob data itself. + /// + /// ### Parameters + /// - `blob_hash`: The hash of the blob. + /// + /// ### Returns + /// - `BlobInfo` containing: + /// - blob hash + /// - block hash and block number where the blob was included + /// - ownership information (validators who stored the blob) + /// + /// ### Errors + /// - If the blob hash is unknown to the node. #[method(name = "blob_getBlobInfo")] async fn get_blob_info(&self, blob_hash: H256) -> RpcResult<BlobInfo>; + /// Returns a proof that a blob was included in a specific block. + /// + /// The proof allows a verifier to check that + /// the blob hash was included in the block's data root. + /// + /// ### Parameters + /// - `blob_hash`: The hash of the blob. + /// - `at`: Optional block hash. + /// - If provided, the proof is generated against that block. + /// - If omitted, the node uses the local indexer to get the block where the blob + /// was included. + /// + /// ### Returns + /// - `DataProof` proving inclusion of the blob in the block. + /// + /// ### Errors + /// - If the blob is not found in the specified block. + /// - If the block cannot be retrieved or decoded. #[method(name = "blob_inclusionProof")] async fn inclusion_proof( &self, @@ -101,8 +225,125 @@ at: Option<Block::Hash>, ) -> RpcResult<DataProof>; + // TODO: feature-gate this RPC only for debugging & development #[method(name = "blob_logStuff")] async fn log_stuff(&self) -> RpcResult<()>; + + /// Returns a summary of all successfully included blobs in a block. + /// + /// The summary is derived from the DA post-inherent and does **not** + /// include heavy evaluation data (e.g. FRI proofs). + /// + /// This is suitable for: + /// - Light Clients + /// - Custom Indexers + /// - Explorers + /// + /// ### Parameters + /// - `at`: Optional block hash. + /// - If omitted, uses the node's best block. + /// + /// ### Returns + /// - A list of `BlobSummary`, each containing: + /// - blob hash + /// - transaction index + /// - AppId + /// - blob size (bytes) + /// + /// ### Errors + /// - If the block is not found. + /// - If the block does not contain a DA post-inherent. + #[method(name = "blob_getBlobsSummary")] + async fn get_blobs_summary(&self, at: Option<Block::Hash>) -> RpcResult<Vec<BlobSummary>>; + + /// Returns all blob hashes associated with a given AppId in a block. + /// + /// This RPC is a filtered view over `blob_getBlobsSummary` + /// and is useful for application-specific indexing.
+ /// + /// ### Parameters + /// - `app_id`: The AppId to filter blobs by. + /// - `at`: Optional block hash. + /// - If omitted, uses the node's best block. + /// + /// ### Returns + /// - A list of blob hashes associated with the given AppId. + /// + /// ### Errors + /// - If the block is not found. + /// - If the block does not contain a DA post-inherent. + #[method(name = "blob_getBlobsByAppId")] + async fn get_blobs_by_appid( + &self, + app_id: AppId, + at: Option<Block::Hash>, + ) -> RpcResult<Vec<H256>>; + + /// Returns FRI evaluation data for a blob in a block. + /// + /// This RPC exposes the data required to verify the correctness + /// of the blob's FRI commitment: + /// - evaluation point seed + /// - evaluation claim + /// - evaluation proof + /// + /// This data is included in the block body (post-inherent) and the BlobSummary extrinsic + /// and can be independently verified by Light Clients. + /// + /// ### Parameters + /// - `blob_hash`: The hash of the blob. + /// - `at`: Optional block hash. + /// - If omitted, uses the node's best block. + /// + /// ### Returns + /// - `BlobEvalData` containing: + /// - evaluation point seed + /// - evaluation claim + /// - evaluation proof bytes + /// + /// ### Errors + /// - If the blob is not found in the block. + /// - If the blob does not contain evaluation data + /// (e.g. non-FRI or incomplete data). + #[method(name = "blob_getEvalData")] + async fn get_eval_data( + &self, + blob_hash: H256, + at: Option<Block::Hash>, + ) -> RpcResult<BlobEvalData>; + + /// Returns FRI sampling (inclusion) proofs for specific cells of a blob. + /// + /// Each sampling proof allows a verifier to check that a specific + /// codeword cell belongs to the committed polynomial. + /// + /// This RPC is intended for: + /// - Light Clients performing data availability sampling + /// - External verifiers auditing blob availability + /// + /// ### Parameters + /// - `cells`: A list of codeword indices (`u32`) to sample. + /// - `blob_hash`: The hash of the blob. + /// - `at`: Optional block hash. + /// - If omitted, uses the node's best block. + /// + /// ### Returns + /// - A list of `SamplingProof`, each containing: + /// - cell index + /// - cell value (16 bytes) + /// - serialized inclusion proof transcript + /// + /// ### Errors + /// - If the blob cannot be retrieved. + /// - If any cell index is out of bounds. + /// - If proof generation fails. + #[method(name = "blob_getSamplingProof")] + async fn get_sampling_proof( + &self, + cells: Vec<u32>, + blob_hash: H256, + at: Option<Block::Hash>, + ) -> RpcResult<Vec<SamplingProof>>; } pub struct BlobRpc { @@ -111,10 +352,15 @@ pub struct BlobRpc { blob_handle: Arc>, commitment_queue: Arc, nonce_cache: Arc, + fri_sampling_cache: Arc<Mutex<HashMap<FriSamplingCacheKey, FriSamplingCacheEntry>>>, _block: PhantomData, } -impl BlobRpc { +impl BlobRpc +where + H256: From<<Block as BlockT>::Hash>, + <Block as BlockT>::Hash: From<H256>, +{ pub fn new( blob_handle: Arc>, pool: Arc, @@ -130,9 +376,43 @@ impl BlobRpc { blob_handle, commitment_queue: Arc::new(queue), nonce_cache: Arc::new(NonceCache::new()), + fri_sampling_cache: Arc::new(Mutex::new(HashMap::new())), _block: PhantomData, } } + + fn at_or_best(&self, at: Option<Block::Hash>) -> Block::Hash { + at.unwrap_or_else(|| self.blob_handle.client.info().best_hash.into()) + } + + // The SubmittedData contains info only about successful blobs, from both the BlobMetadata & BlobSummary post-inherents + fn load_da_submissions( + &self, + at: Block::Hash, + ) -> RpcResult<Vec<SubmittedData>> { + let block = self + .blob_handle + .client + .block(at.into()) + .map_err(|e| internal_err!("Failed to get block: {:?}", e))? + .ok_or_else(|| internal_err!("Block not found: {:?}", at))?
+ .block; + + let extrinsics = block.extrinsics(); + if extrinsics.len() < 2 { + return Err(internal_err!( + "Block does not contain post-inherent summary extrinsic" + )); + } + + Ok( + HeaderExtensionBuilderData::from_opaque_extrinsics::( + block.header.number, + &extrinsics, + ) + .data_submissions, + ) + } } #[async_trait] @@ -422,6 +702,140 @@ where let _ = self.blob_handle.blob_database.log_all_entries(); Ok(()) } + + async fn get_blobs_summary(&self, at: Option) -> RpcResult> { + let at = self.at_or_best(at); + let submissions = self.load_da_submissions(at)?; + + Ok(submissions + .iter() + .map(|d| BlobSummary::new(d.hash, d.tx_index, d.id, d.size_bytes)) + .collect()) + } + + async fn get_blobs_by_appid( + &self, + app_id: AppId, + at: Option, + ) -> RpcResult> { + let at = self.at_or_best(at); + let submissions = self.load_da_submissions(at)?; + + Ok(submissions + .iter() + .filter(|d| d.id == app_id) + .map(|d| d.hash) + .collect()) + } + + async fn get_eval_data( + &self, + blob_hash: H256, + at: Option, + ) -> RpcResult { + let at = self.at_or_best(at); + let submissions = self.load_da_submissions(at)?; + + let d = submissions + .iter() + .find(|d| d.hash == blob_hash) + .ok_or_else(|| { + internal_err!( + "Blob submission data not found for blob {:?} in block {:?}", + blob_hash, + at + ) + })?; + + match (&d.eval_point_seed, &d.eval_claim, &d.eval_proof) { + (Some(seed), Some(claim), Some(proof)) => { + Ok(BlobEvalData::new(*seed, *claim, proof.clone())) + }, + _ => Err(internal_err!( + "Blob {:?} does not contain eval data in block {:?}", + blob_hash, + at + )), + } + } + + async fn get_sampling_proof( + &self, + cells: Vec, + blob_hash: H256, + at: Option, + ) -> RpcResult> { + let at = self.at_or_best(at); + let cache_key = (at.into(), blob_hash); + + if let Some(entry) = { + let cache = self.fri_sampling_cache.lock(); + cache.get(&cache_key).cloned() + } { + return build_sampling_proofs(entry, cells); + } + + let blob = self.get_blob(blob_hash, Some(at)).await?; + + let encoder = BytesEncoder::::new(); + let packed = encoder + .bytes_to_packed_mle(&blob.data) + .map_err(|e| internal_err!("bytes_to_packed_mle failed: {e}"))?; + + let cfg = FriParamsVersion(0).to_config(packed.total_n_vars); + let pcs = Arc::new(FriBiniusPCS::new(cfg)); + + let ctx = pcs + .initialize_fri_context::(packed.packed_mle.log_len()) + .map_err(|e| internal_err!("FRI ctx init failed: {e}"))?; + + let commit_output = Arc::new( + pcs.commit(&packed.packed_mle, &ctx) + .map_err(|e| internal_err!("FRI commit failed: {e}"))?, + ); + + let entry = FriSamplingCacheEntry { + pcs: pcs.clone(), + commit_output: commit_output.clone(), + }; + + { + let mut cache = self.fri_sampling_cache.lock(); + cache.insert(cache_key, entry.clone()); + } + + build_sampling_proofs(entry, cells) + } +} + +fn build_sampling_proofs( + entry: FriSamplingCacheEntry, + cells: Vec, +) -> RpcResult> { + let max = entry.commit_output.codeword.len(); + if cells.iter().any(|&c| (c as usize) >= max) { + return Err(internal_err!("One or more cell indices out of bounds")); + } + + let mut proofs = Vec::with_capacity(cells.len()); + + for &cell in &cells { + let idx = cell as usize; + let value = entry.commit_output.codeword[idx]; + + let transcript = entry + .pcs + .inclusion_proof::(&entry.commit_output.committed, idx) + .map_err(|e| internal_err!("Sampling proof failed: {e}"))?; + + proofs.push(SamplingProof::new( + cell, + value.val().to_le_bytes().to_vec(), + transcript_to_bytes(&transcript), + )); + } + + Ok(proofs) } async fn 
check_rpc_store_blob( @@ -481,6 +895,21 @@ async fn check_rpc_store_blob( })) } +fn get_babe_randomness( + backend_client: &Arc, + finalized_block_hash: H256, +) -> RpcResult<[u8; 32]> { + let storage_key = get_babe_randomness_key(); + let maybe_raw = backend_client + .storage(finalized_block_hash, &storage_key.0) + .map_err(|e| internal_err!("Storage query error: {e:?}"))?; + let raw = maybe_raw.ok_or(internal_err!("Randomness not found"))?; + let randomness = + <[u8; 32]>::decode(&mut &raw[..]).map_err(|e| internal_err!("Decode error: {e:?}"))?; + + Ok(randomness) +} + fn get_dynamic_block_length( backend_client: &Arc, finalized_block_hash: H256, @@ -514,6 +943,17 @@ pub async fn submit_blob_main_task( let best_hash = client_info.best_hash; let finalized_block_hash = client_info.finalized_hash; + let commitment_scheme = match runtime_client.commitment_scheme(best_hash) { + Ok(scheme) => scheme, + Err(e) => { + log::error!( + "Could not get commitment scheme from runtime at {:?}: {e:?}. Falling back to Fri.", + best_hash + ); + CommitmentScheme::Fri + }, + }; + let blob_params = match runtime_client.get_blob_runtime_parameters(finalized_block_hash) { Ok(p) => p, Err(e) => { @@ -524,8 +964,8 @@ pub async fn submit_blob_main_task( let max_blob_size = blob_params.max_blob_size as usize; stop_watch.start("Initial Validation"); - let (blob_hash, provided_commitment) = - initial_validation(max_blob_size as usize, &blob, &metadata_signed_transaction) + let (blob_hash, provided_commitment, eval_point_seed, eval_claim) = + initial_validation(max_blob_size, &blob, &metadata_signed_transaction) .map_err(|e| internal_err!("{}", e))?; stop_watch.stop("Initial Validation"); stop_watch.add_extra_information(std::format!("Blob Hash: {:?}", blob_hash)); @@ -545,61 +985,134 @@ pub async fn submit_blob_main_task( .map_err(|e| internal_err!("{}", e))?; stop_watch.stop("TX validation"); - // Commitment Validation can take a long time. 
stop_watch.start("Commitments (Total)"); - let (cols, rows) = get_dynamic_block_length(&friends.backend_client, finalized_block_hash)?; - let blob = Arc::new(blob); - - let start = crate::utils::get_current_timestamp_ms(); - stop_watch.start("Polynominal Grid Gen."); - let grid = build_polynomial_grid(&*blob, cols, rows, Default::default()); - stop_watch.stop("Polynominal Grid Gen."); - let end = crate::utils::get_current_timestamp_ms(); - // Telemetry - crate::telemetry::BlobSubmission::build_poly_grid(blob_hash, start, end); - stop_watch.start("Commitment Validation"); - commitment_validation(blob_hash, &provided_commitment, grid, &commitment_queue) - .await - .map_err(|e| internal_err!("{}", e))?; - stop_watch.stop("Commitment Validation"); + match commitment_scheme { + CommitmentScheme::Kzg => { + let (cols, rows) = + get_dynamic_block_length(&friends.backend_client, finalized_block_hash)?; + let blob = Arc::new(blob); + + let start = crate::utils::get_current_timestamp_ms(); + stop_watch.start("Polynominal Grid Gen."); + let grid = build_polynomial_grid(&*blob, cols, rows, Default::default()); + stop_watch.stop("Polynominal Grid Gen."); + let end = crate::utils::get_current_timestamp_ms(); + // Telemetry + crate::telemetry::BlobSubmission::build_poly_grid(blob_hash, start, end); + + stop_watch.start("Commitment Validation"); + validate_kzg_commitment(blob_hash, &provided_commitment, grid, &commitment_queue) + .await + .map_err(|e| internal_err!("{}", e))?; + stop_watch.stop("Commitment Validation"); + + stop_watch.stop("Commitments (Total)"); + + // After potentially long work, re-validate tx + let client_info = friends.externalities.client_info(); + let best_hash = client_info.best_hash; + + let _ = tx_validation( + best_hash, + &metadata_signed_transaction, + blob_params.min_transaction_validity, + blob_params.max_transaction_validity, + &runtime_client, + &nonce_cache, + ) + .map_err(|e| internal_err!("{}", e))?; - stop_watch.stop("Commitments (Total)"); + let handle = task::spawn(async move { + submit_blob_background_task( + opaque_tx, + blob_hash, + blob, + blob_params, + provided_commitment, + None, + friends, + nonce_cache, + ) + .await + }); - // Because Commitment Validation can take a long time - // the moment it is done minutes can pass. - // Let's check once more to see if the transactions is still valid - // - // TODO Blob we might remove this - let client_info = friends.externalities.client_info(); - let best_hash = client_info.best_hash; + // ideally eval_point_seed and eval_claim should be None here for KZG, but we can let it pass for now + Ok(handle) + }, - let _ = tx_validation( - best_hash, - &metadata_signed_transaction, - blob_params.min_transaction_validity, - blob_params.max_transaction_validity, - &runtime_client, - &nonce_cache, - ) - .map_err(|e| internal_err!("{}", e))?; + CommitmentScheme::Fri => { + // Check if the eval_point_seed and eval_claim are present for Fri + if eval_point_seed.is_none() || eval_claim.is_none() { + return Err(internal_err!( + "eval_point_seed and eval_claim must be present for Fri commitment scheme" + )); + } - // From this point, the transaction should not fail as the user has done everything correctly - // We will spawn a task to finish the work and instantly return to the user. 
- let handle = task::spawn(async move { - submit_blob_background_task( - opaque_tx, - blob_hash, - blob, - blob_params, - provided_commitment, - friends, - nonce_cache, - ) - .await - }); + let eval_point_seed = eval_point_seed.expect("checked above; qed"); + let eval_claim = eval_claim.expect("checked above; qed"); + let babe_randomness = + get_babe_randomness(&friends.backend_client, finalized_block_hash)?; + let derived_eval_seed = derive_seed_from_inputs(&babe_randomness, &blob_hash.0); + if eval_point_seed != derived_eval_seed { + return Err(internal_err!( + "eval_point_seed does not match derived seed!" + )); + } + stop_watch.start("Fri Commitment Validation"); + let fri_eval_proof = match validate_fri_commitment( + blob_hash, + &blob, + &provided_commitment, + &derived_eval_seed, + &eval_claim, + ) { + Ok(proof_bytes) => proof_bytes, + Err(e) => { + stop_watch.stop("Fri Commitment Validation"); + stop_watch.stop("Commitments (Total)"); + return Err(internal_err!("{}", e)); + }, + }; + stop_watch.stop("Fri Commitment Validation"); + stop_watch.stop("Commitments (Total)"); + + let client_info = friends.externalities.client_info(); + let best_hash = client_info.best_hash; + + let _ = tx_validation( + best_hash, + &metadata_signed_transaction, + blob_params.min_transaction_validity, + blob_params.max_transaction_validity, + &runtime_client, + &nonce_cache, + ) + .map_err(|e| internal_err!("{}", e))?; + + let blob = Arc::new(blob); + let fri_data = FriData { + eval_point_seed, + eval_claim, + fri_eval_proof: Some(fri_eval_proof), + }; + let handle = task::spawn(async move { + submit_blob_background_task( + opaque_tx, + blob_hash, + blob, + blob_params, + provided_commitment, + Some(fri_data), + friends, + nonce_cache, + ) + .await + }); - Ok(handle) + Ok(handle) + }, + } } async fn submit_blob_background_task( @@ -608,6 +1121,7 @@ async fn submit_blob_background_task( blob: Arc>, blob_params: BlobRuntimeParameters, commitment: Vec, + fri_data: Option, friends: Friends, nonce_cache: Arc, ) { @@ -617,7 +1131,8 @@ async fn submit_blob_background_task( nonce_cache.commit(&who, nonce); } - let stored = store_and_gossip_blob(blob_hash, blob, blob_params, commitment, &friends).await; + let stored = + store_and_gossip_blob(blob_hash, blob, blob_params, commitment, fri_data, &friends).await; if stored.is_err() { return; } @@ -649,6 +1164,7 @@ pub async fn store_and_gossip_blob( blob: Arc>, blob_params: BlobRuntimeParameters, commitment: Vec, + fri_data: Option, friends: &Friends, ) -> Result<(), ()> { let mut stop_watch = SmartStopwatch::new("😍😍 STORE AND GOSSIP BLOB"); @@ -671,6 +1187,19 @@ pub async fn store_and_gossip_blob( }, }; + let commitment_scheme = match friends + .runtime_client + .commitment_scheme(finalized_block_hash) + { + Ok(scheme) => scheme, + Err(e) => { + log::error!( + "Could not get commitment scheme from runtime at {:?}: {e:?}. 
Falling back to Fri.", + finalized_block_hash + ); + CommitmentScheme::Fri + }, + }; let mut blob_metadata = maybe_blob_metadata.unwrap_or_else(|| { let blob_len = blob.len(); @@ -685,6 +1214,10 @@ pub async fn store_and_gossip_blob( nb_validators_per_blob: 0, nb_validators_per_blob_threshold: 0, storing_validator_list: Default::default(), + eval_point_seed: None, + eval_claim: None, + fri_eval_proof: None, + fri_eval_prover_index: None, } }); @@ -725,6 +1258,41 @@ pub async fn store_and_gossip_blob( return Err(()); }, }; + + if commitment_scheme == CommitmentScheme::Fri { + if fri_data.is_none() { + log::error!("Fri data must be available for Fri commitment scheme"); + return Err(()); + } + let fri_data = fri_data.expect("checked above; qed"); + let prover_index = + designated_prover_index(&blob_hash, &finalized_block_hash, nb_validators_per_blob); + + let (my_validator_id, _babe_key) = match get_my_validator_id( + &friends.externalities.keystore(), + friends.runtime_client.as_ref(), + finalized_block_hash, + ) { + Ok(v) => v, + Err(e) => { + log::error!("No keys found while trying to get this node's id: {e}"); + return Err(()); + }, + }; + if storing_validators[prover_index as usize] == my_validator_id { + log::info!( + "I am the designated prover for blob {:?} including eval_proof? {}", + blob_hash, + fri_data.fri_eval_proof.is_some() + ); + // I am the designated prover, maybe also do the sanity check whether we have the eval proof or not + blob_metadata.fri_eval_proof = fri_data.fri_eval_proof; + blob_metadata.fri_eval_prover_index = Some(prover_index); + } + blob_metadata.eval_point_seed = Some(fri_data.eval_point_seed); + blob_metadata.eval_claim = Some(fri_data.eval_claim); + } + blob_metadata.is_notified = true; blob_metadata.expires_at = finalized_block_number.saturating_add(blob_params.temp_blob_ttl); blob_metadata.finalized_block_hash = finalized_block_hash.into(); @@ -783,6 +1351,10 @@ pub async fn store_and_gossip_blob( original_peer_id: my_peer_id_base58.clone(), finalized_block_hash: finalized_block_hash.into(), finalized_block_number, + eval_point_seed: blob_metadata.eval_point_seed, + eval_claim: blob_metadata.eval_claim.clone(), + fri_eval_proof: blob_metadata.fri_eval_proof.clone(), + fri_eval_prover_index: blob_metadata.fri_eval_prover_index, }); let gossip_cmd_sender = friends.externalities.gossip_cmd_sender(); diff --git a/blob/src/store/doublerocksdb.rs b/blob/src/store/doublerocksdb.rs index 09c038bdc..11ad3fe3c 100644 --- a/blob/src/store/doublerocksdb.rs +++ b/blob/src/store/doublerocksdb.rs @@ -66,7 +66,13 @@ impl Default for DoubleRocksdbBlobStore { impl StorageApiT for DoubleRocksdbBlobStore { fn insert_blob_metadata(&self, blob_metadata: &BlobMetadata) -> Result<()> { if let Some(existing) = self.get_blob_metadata(&blob_metadata.hash).ok().flatten() { - if existing.is_notified { + // existing already has eval proof + if existing.fri_eval_proof.is_some() { + return Ok(()); + } + + // existing has no eval proof AND incoming also has no eval proof + if existing.is_notified && blob_metadata.fri_eval_proof.is_none() { return Ok(()); } } diff --git a/blob/src/store/rocksdb.rs b/blob/src/store/rocksdb.rs index 64a48d56a..aaf4260e9 100644 --- a/blob/src/store/rocksdb.rs +++ b/blob/src/store/rocksdb.rs @@ -54,7 +54,13 @@ impl Default for RocksdbBlobStore { impl StorageApiT for RocksdbBlobStore { fn insert_blob_metadata(&self, blob_metadata: &BlobMetadata) -> Result<()> { if let Some(existing) = self.get_blob_metadata(&blob_metadata.hash).ok().flatten() { - if 
existing.is_notified { + // existing already has eval proof + if existing.fri_eval_proof.is_some() { + return Ok(()); + } + + // existing has no eval proof AND incoming also has no eval proof + if existing.is_notified && blob_metadata.fri_eval_proof.is_none() { return Ok(()); } } diff --git a/blob/src/traits.rs b/blob/src/traits.rs index 34a1d29d5..1b9711a5e 100644 --- a/blob/src/traits.rs +++ b/blob/src/traits.rs @@ -1,5 +1,6 @@ use crate::utils::CommitmentQueueMessage; use crate::{BlobHandle, BlobNotification}; +use avail_core::header::extension::CommitmentScheme; use da_runtime::{apis::BlobApi, AccountId, UncheckedExtrinsic}; use jsonrpsee::core::async_trait; use sc_client_api::{BlockBackend, HeaderBackend, StateBackend, TrieCacheContext}; @@ -46,6 +47,8 @@ pub trait RuntimeApiT: Send + Sync { fn account_nonce(&self, block_hash: H256, who: AccountId) -> Result; fn get_blob_vouch_fee_reserve(&self, block_hash: H256) -> Result; + + fn commitment_scheme(&self, block_hash: H256) -> Result; } pub struct RuntimeClient(Arc, PhantomData); @@ -111,6 +114,10 @@ where .runtime_api() .get_blob_vouch_fee_reserve(block_hash.into()) } + + fn commitment_scheme(&self, block_hash: H256) -> Result { + self.0.runtime_api().commitment_scheme(block_hash.into()) + } } pub trait BackendApiT: Send + Sync { diff --git a/blob/src/types.rs b/blob/src/types.rs index bd9c3ae66..5283f8675 100644 --- a/blob/src/types.rs +++ b/blob/src/types.rs @@ -2,6 +2,7 @@ use crate::{ utils::{zstd_compress, zstd_decompress}, LOG_TARGET, }; +use avail_core::AppId; use codec::{Decode, Encode}; use da_runtime::{apis::RuntimeApi, NodeBlock as Block}; use parking_lot::Mutex; @@ -193,6 +194,25 @@ pub struct BlobMetadata { pub finalized_block_number: u64, /// The list of storing validators pub storing_validator_list: Vec, + /// Evaluation point seed for FRI blobs + pub eval_point_seed: Option<[u8; 32]>, + /// Evaluation claim for FRI blobs + pub eval_claim: Option<[u8; 16]>, + /// Evaluation proof for FRI blobs + pub fri_eval_proof: Option>, + /// Index of the designated prover among storing_validator_list (maybe we don't need to store this?)
+ pub fri_eval_prover_index: Option, +} + +/// FriData will store Fri scheme related data for blob +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize, Deserialize)] +pub struct FriData { + /// Evaluation point seed + pub eval_point_seed: [u8; 32], + /// Evaluation claim for specific eval point + pub eval_claim: [u8; 16], + /// Evaluation proof for specific eval point + pub fri_eval_proof: Option>, } /// Blob object that will get store by each validator @@ -224,6 +244,14 @@ pub struct BlobReceived { pub finalized_block_hash: H256, /// The finalized block number for other nodes reference pub finalized_block_number: u64, + /// Evaluation point seed for FRI blobs + pub eval_point_seed: Option<[u8; 32]>, + /// Evaluation claim for FRI blobs + pub eval_claim: Option<[u8; 16]>, + /// Evaluation proof for FRI blobs + pub fri_eval_proof: Option>, + /// Index of the designated prover among storing_validator_list + pub fri_eval_prover_index: Option, } /// Structure for the request when a blob is requested from a validator @@ -253,6 +281,8 @@ pub struct BlobStored { pub ownership_entry: OwnershipEntry, /// The finalized block hash for other nodes reference pub finalized_block_hash: H256, + /// Evaluation proof + pub eval_proof: Option>, } /// Structure for the signature that validator sends when sending notification / requests @@ -288,6 +318,8 @@ pub struct BlobTxSummary { pub reason: Option, /// The vector of ownership entries pub ownership: Vec, + /// Evaluation proof for FRI + pub eval_proof: Option>, } impl BlobTxSummary { pub fn convert_to_primitives( @@ -303,6 +335,7 @@ impl BlobTxSummary { String, // Encoded Peer id Vec, // Signature )>, + Option>, // Evaluation proof )> { input .into_iter() @@ -326,12 +359,54 @@ impl BlobTxSummary { summary.success, summary.reason, ownership, + summary.eval_proof, ) }) .collect() } } +/// A lightweight summary of blob to be used by LC's +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, TypeInfo, Serialize, Deserialize)] +pub struct BlobSummary { + /// The hash of the blob + hash: BlobHash, + /// The transaction index in the block + tx_index: u32, + /// App id + app_id: AppId, + /// Size of the blob in bytes + size_bytes: u64, +} + +impl BlobSummary { + pub fn new(hash: BlobHash, tx_index: u32, app_id: AppId, size_bytes: u64) -> Self { + Self { + hash, + tx_index, + app_id, + size_bytes, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, TypeInfo, Serialize, Deserialize)] +pub struct BlobEvalData { + eval_point_seed: [u8; 32], + eval_claim: [u8; 16], + eval_proof: Vec, +} + +impl BlobEvalData { + pub fn new(eval_point_seed: [u8; 32], eval_claim: [u8; 16], eval_proof: Vec) -> Self { + Self { + eval_point_seed, + eval_claim, + eval_proof, + } + } +} + /// Blob info used to store info about blobs which were included in blocks #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, TypeInfo, Serialize, Deserialize)] pub struct BlobInfo { diff --git a/blob/src/utils.rs b/blob/src/utils.rs index 64dd8546c..920c4413f 100644 --- a/blob/src/utils.rs +++ b/blob/src/utils.rs @@ -1,4 +1,4 @@ -use crate::traits::CommitmentQueueApiT; +use crate::traits::{CommitmentQueueApiT, RuntimeApiT}; use crate::{ store::StorageApiT, types::{BlobHash, BlobMetadata, BlobSignatureData, BlobTxSummary, OwnershipEntry}, @@ -7,7 +7,7 @@ use anyhow::{anyhow, Context, Result}; use avail_observability::metrics::BlobMetrics; use base64::Engine; use codec::{Decode, Encode}; -use da_commitment::build_da_commitments::build_commitments_from_polynomial_grid; +use 
da_commitment::build_kzg_commitments::build_commitments_from_polynomial_grid; use da_control::{BlobRuntimeParameters, Call}; use da_runtime::UncheckedExtrinsic; use da_runtime::{apis::BlobApi, RuntimeCall}; @@ -53,28 +53,24 @@ pub fn get_my_validator_public_account( } /// Get this node's Address -pub fn get_my_validator_id( +pub fn get_my_validator_id( keystore: &Arc, - client: &Arc, - at: &[u8], -) -> Option<(AccountId32, AuthorityId)> -where - Block: BlockT, - Client: ProvideRuntimeApi, - Client::Api: BlobApi, -{ - let key_type = key_types::BABE; + runtime_client: &dyn RuntimeApiT, + at: H256, +) -> Result<(AccountId32, AuthorityId), String> { + let Some((authority_id, key_type_id)) = get_my_validator_public_account(keystore) else { + return Err("Validator BABE key not found in keystore".into()); + }; - // Get keys from the keystore - let keys = keystore.sr25519_public_keys(key_type); + let owner_opt = runtime_client + .get_validator_from_key(at, key_type_id, authority_id.encode()) + .map_err(|e| format!("runtime error: {e:?}"))?; - // Return None if no keys are in the keystore - if keys.len() == 0 { - return None; - } - let k = keys[keys.len() - 1]; + let Some(validator_id) = owner_opt else { + return Err("No validator found for local BABE key".into()); + }; - get_validator_id_from_key(&k.into(), client, at) + Ok((validator_id, authority_id)) } pub fn get_validator_id_from_key( @@ -126,6 +122,20 @@ where } } +/// Deterministically select designated prover index for a blob. +pub fn designated_prover_index( + blob_hash: &H256, + finalized_block_hash: &H256, + nb_validators_per_blob: u32, +) -> u32 { + let mut input = [0u8; 64]; + input[..32].copy_from_slice(finalized_block_hash.as_bytes()); + input[32..].copy_from_slice(blob_hash.as_bytes()); + + let h = sp_core::blake2_256(&input); + u32::from_le_bytes(h[..4].try_into().unwrap()) % nb_validators_per_blob +} + /// Return the list of storing validators for a blob pub fn validators_for_blob( blob_hash: H256, @@ -449,6 +459,7 @@ fn get_block_tx_summary( success: false, reason: None, ownership: Vec::new(), + eval_proof: None, }; let (meta, ownerships) = match blob_metadata { @@ -479,6 +490,8 @@ fn get_block_tx_summary( return blob_summary; } + blob_summary.eval_proof = meta.fri_eval_proof.clone(); + blob_summary.success = true; return blob_summary; @@ -563,6 +576,14 @@ pub fn verify_signed_blob_data( Ok(valid) } +pub fn get_babe_randomness_key() -> StorageKey { + let mut key = Vec::new(); + key.extend(&twox_128(b"Babe")); + key.extend(&twox_128(b"Randomness")); + let storage_key = StorageKey(key); + storage_key +} + pub fn get_dynamic_blocklength_key() -> StorageKey { let mut key = Vec::new(); key.extend(&twox_128(b"System")); diff --git a/blob/src/validation.rs b/blob/src/validation.rs index 8c11463cf..7ffdfc2ef 100644 --- a/blob/src/validation.rs +++ b/blob/src/validation.rs @@ -3,8 +3,16 @@ use crate::{ traits::{NonceCacheApiT, RuntimeApiT}, utils::{extract_signer_and_nonce, CommitmentQueueMessage}, }; +use avail_fri::FriProof; +use avail_fri::{ + core::{FriBiniusPCS, B128}, + encoding::{mle_dims_from_blob_size, BytesEncoder}, + eval_utils::{derive_evaluation_point, eval_claim_from_bytes}, + FriParamsVersion, +}; use avail_observability::metrics::BlobMetrics; use codec::Decode; +// use da_commitment::build_fri_commitments::build_fri_da_commitment; use da_control::Call; use da_runtime::RuntimeCall; use da_runtime::UncheckedExtrinsic; @@ -18,7 +26,7 @@ pub fn initial_validation( max_blob_size: usize, blob: &[u8], metadata: &[u8], -) -> 
Result<(H256, Vec), String> { +) -> Result<(H256, Vec, Option<[u8; 32]>, Option<[u8; 16]>), String> { if blob.len() > max_blob_size { return Err("blob is too big".into()); } @@ -26,19 +34,29 @@ pub fn initial_validation( let mut metadata = metadata; let encoded_metadata_signed_transaction: UncheckedExtrinsic = Decode::decode(&mut metadata) .map_err(|_| String::from("failed to decode concrete metadata call"))?; - let (provided_size, provided_blob_hash, provided_commitment) = + let (provided_size, provided_blob_hash, provided_commitment, eval_point_seed, eval_claim) = match encoded_metadata_signed_transaction.function { RuntimeCall::DataAvailability(Call::submit_blob_metadata { app_id: _, size, blob_hash, commitment, - }) => (size as usize, blob_hash, commitment), + eval_point_seed, + eval_claim, + }) => ( + size as usize, + blob_hash, + commitment, + eval_point_seed, + eval_claim, + ), _ => { return Err("metadata extrinsic must be dataAvailability.submitBlobMetadata".into()) }, }; + // TODO: add a basic check: if the current commitment scheme is Fri, eval_point_seed and eval_claim must be present + // Check size if provided_size != blob.len() { return Err(std::format!( @@ -53,7 +71,7 @@ pub fn initial_validation( return Err(std::format!("submitted blob: {provided_blob_hash:?} does not correspond to generated blob {blob_hash:?}")); } - Ok((blob_hash, provided_commitment)) + Ok((blob_hash, provided_commitment, eval_point_seed, eval_claim)) } pub fn tx_validation( @@ -101,7 +119,7 @@ pub fn tx_validation( Ok(opaque_tx) } -pub async fn commitment_validation( +pub async fn validate_kzg_commitment( hash: H256, provided_commitment: &Vec, grid: PolynomialGrid, @@ -131,3 +149,87 @@ Ok(()) } + +/// Validate FRI commitment for the given blob and verify the evaluation proof for the given evaluation point and claim.
+pub fn validate_fri_commitment( + blob_hash: H256, + blob: &[u8], + provided_commitment: &[u8], + eval_point_seed: &[u8; 32], + eval_claim: &[u8; 16], +) -> Result, String> { + const FRI_COMMITMENT_SIZE: usize = 32; + + if provided_commitment.len() != FRI_COMMITMENT_SIZE { + return Err(format!( + "Fri commitment must be {} bytes, got {}", + FRI_COMMITMENT_SIZE, + provided_commitment.len() + )); + } + + // let expected = build_fri_da_commitment(blob, FriParamsVersion(0)); + let params_version = FriParamsVersion(0); + // Encode bytes → multilinear extension over B128 + let encoder = BytesEncoder::::new(); + let packed = encoder + .bytes_to_packed_mle(blob) + .map_err(|e| e.to_string())?; + + let n_vars = packed.total_n_vars; + + // Map version + n_vars → concrete FriParamsConfig + let cfg = params_version.to_config(n_vars); + + // Build PCS + FRI context + let pcs = FriBiniusPCS::new(cfg); + let ctx = pcs + .initialize_fri_context::(packed.packed_mle.log_len()) + .map_err(|e| e.to_string())?; + + // Commit to the blob MLE: returns a 32-byte digest in `commitment` + let commit_output = pcs + .commit(&packed.packed_mle, &ctx) + .map_err(|e| e.to_string())?; + + if commit_output.commitment.as_slice() != provided_commitment { + return Err(format!("Fri commitment mismatch for blob {blob_hash:?}")); + } + + let eval_point = derive_evaluation_point(*eval_point_seed, n_vars); + let eval_claim = eval_claim_from_bytes(eval_claim) + .map_err(|e| format!("Failed to deserialize evaluation claim: {}", e))?; + + let proof = pcs + .prove::(packed.packed_mle.clone(), &ctx, &commit_output, &eval_point) + .map_err(|e| e.to_string())?; + + pcs.verify(&proof, eval_claim, &eval_point, &ctx) + .map_err(|e| e.to_string())?; + let proof_bytes = proof.transcript_bytes; + Ok(proof_bytes) +} + +pub fn validate_fri_proof( + blob_size: usize, + eval_point_seed: &[u8; 32], + eval_claim: &[u8; 16], + eval_proof: &[u8], +) -> Result<(), String> { + let params_version = FriParamsVersion(0); + let (log_len, n_vars) = mle_dims_from_blob_size(blob_size); + let cfg = params_version.to_config(n_vars); + let pcs = FriBiniusPCS::new(cfg); + let ctx = pcs + .initialize_fri_context::(log_len) + .map_err(|e| e.to_string())?; + + let eval_point = derive_evaluation_point(*eval_point_seed, n_vars); + let eval_claim = eval_claim_from_bytes(eval_claim) + .map_err(|e| format!("Failed to deserialize evaluation claim: {}", e))?; + let proof = FriProof { + transcript_bytes: eval_proof.to_vec(), + }; + pcs.verify(&proof, eval_claim, &eval_point, &ctx) + .map_err(|e| e.to_string()) +} diff --git a/da-commitment/Cargo.toml b/da-commitment/Cargo.toml index 627422c79..4150fabea 100644 --- a/da-commitment/Cargo.toml +++ b/da-commitment/Cargo.toml @@ -10,9 +10,11 @@ repository.workspace = true workspace = true [dependencies] +avail-fri = { workspace = true, default-features = false } kate = { workspace = true, default-features = false } +primitive-types.workspace = true thiserror-no-std.workspace = true -log.workspace = true +log = { workspace = true, default-features = false } anyhow = { version = "1.0", default-features = false} [dev-dependencies] @@ -20,7 +22,7 @@ divan = { version = "0.1"} [features] default = [ "std" ] -std = [ "anyhow/std", "kate/std" ] +std = [ "avail-fri/std", "kate/std", "log/std" ] [[bench]] name = "commitment" diff --git a/da-commitment/benches/commitment.rs b/da-commitment/benches/commitment.rs index d84505ada..c38ca39ec 100644 --- a/da-commitment/benches/commitment.rs +++ b/da-commitment/benches/commitment.rs @@ -1,4 
+1,4 @@ -use da_commitment::build_da_commitments::{ +use da_commitment::build_kzg_commitments::{ build_commitments_from_polynomial_grid, build_polynomial_grid, }; use divan::Bencher; diff --git a/da-commitment/src/build_fri_commitments.rs b/da-commitment/src/build_fri_commitments.rs new file mode 100644 index 000000000..51a54cfac --- /dev/null +++ b/da-commitment/src/build_fri_commitments.rs @@ -0,0 +1,64 @@ +#![cfg(feature = "std")] + +use anyhow::Result; +use avail_fri::{ + core::{FriBiniusPCS, B128}, + encoding::BytesEncoder, +}; +use log; +use thiserror_no_std::Error; +// re-export for convenience +pub use avail_fri::core::FriParamsVersion; + +/// Single Fri commitment for a DA blob (32 bytes) +pub type FriDaCommitment = Vec; + +#[derive(Error, Debug)] +pub enum FriDaCommitmentError { + #[error("Bytes → packed MLE encoding failed: {0}")] + EncodingFailed(String), + #[error("FRI context initialization failed: {0}")] + ContextInitFailed(String), + #[error("PCS commitment failed: {0}")] + CommitFailed(String), +} + +fn build_fri_commitment_internal( + data: &[u8], + params_version: FriParamsVersion, +) -> Result { + // Encode bytes → multilinear extension over B128 + let encoder = BytesEncoder::::new(); + let packed = encoder + .bytes_to_packed_mle(data) + .map_err(|e| FriDaCommitmentError::EncodingFailed(e.to_string()))?; + + let n_vars = packed.total_n_vars; + + // Map version + n_vars → concrete FriParamsConfig + let cfg = params_version.to_config(n_vars); + + // Build PCS + FRI context + let pcs = FriBiniusPCS::new(cfg); + let ctx = pcs + .initialize_fri_context::(packed.packed_mle.log_len()) + .map_err(|e| FriDaCommitmentError::ContextInitFailed(e.to_string()))?; + + // Commit to the blob MLE: returns a 32-byte digest in `commitment` + let commit_output = pcs + .commit(&packed.packed_mle, &ctx) + .map_err(|e| FriDaCommitmentError::CommitFailed(e.to_string()))?; + + Ok(commit_output.commitment) +} + +/// Build commitment using Fri PCS with given version configuration +pub fn build_fri_da_commitment(data: &[u8], params_version: FriParamsVersion) -> FriDaCommitment { + match build_fri_commitment_internal(data, params_version) { + Ok(c) => c, + Err(e) => { + log::error!("Fri DA commitment generation failed: {:?}", e); + FriDaCommitment::new() + }, + } +} diff --git a/da-commitment/src/build_da_commitments.rs b/da-commitment/src/build_kzg_commitments.rs similarity index 100% rename from da-commitment/src/build_da_commitments.rs rename to da-commitment/src/build_kzg_commitments.rs diff --git a/da-commitment/src/lib.rs b/da-commitment/src/lib.rs index 160393d03..8e8bd4a1f 100644 --- a/da-commitment/src/lib.rs +++ b/da-commitment/src/lib.rs @@ -1 +1,2 @@ -pub mod build_da_commitments; +pub mod build_fri_commitments; +pub mod build_kzg_commitments; diff --git a/e2e/Cargo.lock b/e2e/Cargo.lock index 9a5ed0bd2..d11ca1a16 100644 --- a/e2e/Cargo.lock +++ b/e2e/Cargo.lock @@ -685,6 +685,15 @@ version = "6.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d5dde061bd34119e902bbb2d9b90c5692635cf59fb91d582c2b68043f1b8293" +[[package]] +name = "array-util" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e509844de8f09b90a2c3444684a2b6695f4071360e13d2fda0af9f749cc2ed6" +dependencies = [ + "arrayvec 0.7.6", +] + [[package]] name = "arrayref" version = "0.3.9" @@ -848,6 +857,17 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +[[package]] +name = "auto_impl" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.99", +] + [[package]] name = "autocfg" version = "1.5.0" @@ -899,8 +919,6 @@ dependencies = [ "frame-support 43.0.0", "hash-db", "hash256-std-hasher", - "hex", - "impl-serde 0.5.0", "log", "num-traits", "parity-scale-codec", @@ -918,10 +936,27 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "avail-fri" +version = "0.1.0" +source = "git+https://github.com/availproject/avail-core.git?rev=5e786ae65302610b2db7df1c834b40f857cfa36a#5e786ae65302610b2db7df1c834b40f857cfa36a" +dependencies = [ + "avail-core 0.6.2 (git+https://github.com/availproject/avail-core.git?rev=5e786ae65302610b2db7df1c834b40f857cfa36a)", + "binius-field", + "binius-math", + "binius-prover", + "binius-transcript", + "binius-verifier", + "blake2b_simd", + "log", + "parity-scale-codec", + "rand_chacha 0.3.1", +] + [[package]] name = "avail-rust-client" -version = "0.4.0" -source = "git+https://github.com/availproject/avail-rust.git?rev=610db1c5bcad3e636e6c3192b80cec95f002c737#610db1c5bcad3e636e6c3192b80cec95f002c737" +version = "0.5.0" +source = "git+https://github.com/availproject/avail-rust.git?rev=9599f05851b52b822e6bc818810773a31794aab9#9599f05851b52b822e6bc818810773a31794aab9" dependencies = [ "avail-rust-core", "const-hex", @@ -939,8 +974,8 @@ dependencies = [ [[package]] name = "avail-rust-core" -version = "0.4.0" -source = "git+https://github.com/availproject/avail-rust.git?rev=610db1c5bcad3e636e6c3192b80cec95f002c737#610db1c5bcad3e636e6c3192b80cec95f002c737" +version = "0.5.0" +source = "git+https://github.com/availproject/avail-rust.git?rev=9599f05851b52b822e6bc818810773a31794aab9#9599f05851b52b822e6bc818810773a31794aab9" dependencies = [ "base64", "const-hex", @@ -950,6 +985,7 @@ dependencies = [ "scale-decode", "scale-encode", "scale-info", + "schnorrkel", "serde", "serde_json", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1047,6 +1083,120 @@ dependencies = [ "serde", ] +[[package]] +name = "binius-core" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "binius-utils", + "bytemuck", + "bytes", + "thiserror 2.0.12", +] + +[[package]] +name = "binius-field" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "binius-utils", + "bytemuck", + "cfg-if", + "derive_more 0.99.19", + "rand 0.9.2", + "seq-macro", + "thiserror 2.0.12", +] + +[[package]] +name = "binius-math" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "binius-field", + "binius-utils", + "bytemuck", + "getset", + "itertools 0.14.0", + "rand 0.9.2", + "thiserror 2.0.12", + "tracing", + "uninit", +] + +[[package]] +name = "binius-prover" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "binius-core", + "binius-field", + "binius-math", + "binius-transcript", + 
"binius-utils", + "binius-verifier", + "bytemuck", + "bytes", + "derive_more 0.99.19", + "digest 0.10.7", + "either", + "getset", + "itertools 0.14.0", + "rand 0.9.2", + "thiserror 2.0.12", + "tracing", +] + +[[package]] +name = "binius-transcript" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "auto_impl", + "binius-field", + "binius-utils", + "bytes", + "digest 0.10.7", + "thiserror 2.0.12", + "tracing", +] + +[[package]] +name = "binius-utils" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "array-util", + "bytemuck", + "bytes", + "cfg-if", + "generic-array", + "itertools 0.14.0", + "rayon", + "thiserror 2.0.12", + "trait-set", +] + +[[package]] +name = "binius-verifier" +version = "0.1.0" +source = "git+https://github.com/binius-zk/binius64.git?rev=41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896#41cda4a3eeb3fcb57bcd324e20a0ffe0b653f896" +dependencies = [ + "binius-core", + "binius-field", + "binius-math", + "binius-transcript", + "binius-utils", + "bytemuck", + "bytes", + "digest 0.10.7", + "getset", + "itertools 0.14.0", + "sha2 0.10.8", + "thiserror 2.0.12", + "tracing", +] + [[package]] name = "bip39" version = "2.2.0" @@ -1068,9 +1218,9 @@ checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" [[package]] name = "bitcoin-io" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" +checksum = "2dee39a0ee5b4095224a0cfc6bf4cc1baf0f9624b96b367e53b66d974e51d953" [[package]] name = "bitcoin_hashes" @@ -1084,9 +1234,9 @@ dependencies = [ [[package]] name = "bitcoin_hashes" -version = "0.14.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +checksum = "26ec84b80c482df901772e931a9a681e26a1b9ee2302edeff23cb30328745c8b" dependencies = [ "bitcoin-io", "hex-conservative 0.2.2", @@ -1303,7 +1453,7 @@ checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ "iana-time-zone", "num-traits", - "windows-link", + "windows-link 0.1.3", ] [[package]] @@ -1874,10 +2024,10 @@ checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" name = "e2e" version = "0.1.0" dependencies = [ + "avail-fri", "avail-rust-client", - "da-commitment", "hex", - "kate 0.9.2 (git+https://github.com/availproject/avail-core?branch=ghali%2Finfinity-da)", + "kate", "parity-scale-codec", "serde", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2587,6 +2737,18 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.99", +] + [[package]] name = "gimli" version = "0.27.3" @@ -2885,7 +3047,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.1", "tokio", "tower-service", "tracing", @@ -3199,6 +3361,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.14.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.15" @@ -4405,6 +4576,28 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.99", +] + [[package]] name = "proc-macro-warning" version = "1.84.1" @@ -4501,6 +4694,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ + "rand_chacha 0.9.0", "rand_core 0.9.3", ] @@ -4538,6 +4732,9 @@ name = "rand_core" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.1", +] [[package]] name = "rand_xorshift" @@ -4942,9 +5139,9 @@ dependencies = [ [[package]] name = "scale-encode" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64901733157f9d25ef86843bd783eda439fac7efb0ad5a615d12d2cf3a29464b" +checksum = "f2a976d73564a59e482b74fd5d95f7518b79ca8c8ca5865398a4d629dd15ee50" dependencies = [ "parity-scale-codec", "primitive-types 0.13.1", @@ -4957,9 +5154,9 @@ dependencies = [ [[package]] name = "scale-encode-derive" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78a3993a13b4eafa89350604672c8757b7ea84c7c5947d4b3691e3169c96379b" +checksum = "17020f2d59baabf2ddcdc20a4e567f8210baf089b8a8d4785f5fd5e716f92038" dependencies = [ "darling", "proc-macro-crate", @@ -5106,7 +5303,7 @@ version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b50c5943d326858130af85e049f2661ba3c78b26589b8ab98e65e80ae44a1252" dependencies = [ - "bitcoin_hashes 0.14.0", + "bitcoin_hashes 0.14.1", "rand 0.8.5", "secp256k1-sys 0.10.1", ] @@ -5189,6 +5386,12 @@ version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" +[[package]] +name = "seq-macro" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc711410fbe7399f390ca1c3b60ad0f53f80e95c5eb935e52268a0e2cd49acc" + [[package]] name = "serde" version = "1.0.228" @@ -5511,6 +5714,16 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + [[package]] name = "soketto" version = "0.8.1" @@ -6984,7 +7197,7 @@ dependencies = [ "mio", "parking_lot", "pin-project-lite", - "socket2", + "socket2 0.5.10", "tokio-macros", "windows-sys 0.61.2", ] @@ -7267,6 +7480,17 @@ dependencies = [ "tracing-log 0.2.0", ] +[[package]] +name = "trait-set" 
+version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b79e2e9c9ab44c6d7c20d5976961b47e8f49ac199154daa514b77cd1ab536625" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "trie-db" version = "0.28.0" @@ -7388,6 +7612,12 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" +[[package]] +name = "uninit" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "359fdaaabedff944f100847f2e0ea88918d8012fe64baf5b54c191ad010168c9" + [[package]] name = "universal-hash" version = "0.5.1" @@ -7868,7 +8098,7 @@ checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", - "windows-link", + "windows-link 0.1.3", "windows-result", "windows-strings", ] @@ -7901,13 +8131,19 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + [[package]] name = "windows-result" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ - "windows-link", + "windows-link 0.1.3", ] [[package]] @@ -7916,7 +8152,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-link", + "windows-link 0.1.3", ] [[package]] diff --git a/e2e/Cargo.toml b/e2e/Cargo.toml index 0e439bf3d..2ada9be4e 100644 --- a/e2e/Cargo.toml +++ b/e2e/Cargo.toml @@ -6,8 +6,13 @@ edition = "2021" [workspace] [dependencies] -avail-rust = { package = "avail-rust-client", git = "https://github.com/availproject/avail-rust.git", rev = "610db1c5bcad3e636e6c3192b80cec95f002c737", default-features = false, features = ["native", "reqwest", "next"] } -da-commitment = { path = "../da-commitment" } +# avail-core = { path = "../../avail-core/core", default-features = false, features = [ "runtime"] } +# avail-fri = { path = "../../avail-core/fri", default-features = false } +# avail-rust = { package = "avail-rust-client", path = "../../avail-rust/client", default-features = false, features = ["native", "reqwest"] } +# avail-core = { git = "https://github.com/availproject/avail-core.git", rev = "5e786ae65302610b2db7df1c834b40f857cfa36a", default-features = false, features = [ "runtime"] } +avail-fri = { git = "https://github.com/availproject/avail-core.git", rev = "5e786ae65302610b2db7df1c834b40f857cfa36a", default-features = false } +avail-rust = { package = "avail-rust-client", git = "https://github.com/availproject/avail-rust.git", rev = "9599f05851b52b822e6bc818810773a31794aab9", default-features = false, features = ["native", "reqwest"] } +# da-commitment = { path = "../da-commitment" } sp-std = { git = "https://github.com/availproject/polkadot-sdk.git", branch = "ghali/infinity-da", default-features = false, features = ["std"] } kate = { git = "https://github.com/availproject/avail-core", branch = "ghali/infinity-da", default-features = false, features = ["std"] } hex = "0.4" diff --git 
a/e2e/src/infinity_da_automatic_test.rs b/e2e/src/infinity_da_automatic_test.rs index 6c69307e6..0fafe953b 100644 --- a/e2e/src/infinity_da_automatic_test.rs +++ b/e2e/src/infinity_da_automatic_test.rs @@ -1,86 +1,86 @@ -#![allow(dead_code)] +// #![allow(dead_code)] -use avail_rust::{ - avail_rust_core::rpc::blob::submit_blob, - block_api::{BlockExtOptionsExpanded, BlockWithRawExt}, - prelude::*, -}; -use da_commitment::build_da_commitments::build_da_commitments; -use kate::Seed; -use sp_crypto_hashing::keccak_256; -use sp_std::iter::repeat; +// use avail_rust::{ +// avail_rust_core::rpc::blob::submit_blob, +// block_api::{BlockExtOptionsExpanded, BlockWithRawExt}, +// prelude::*, +// }; +// use da_commitment::build_kzg_commitments::build_da_commitments; +// use kate::Seed; +// use sp_crypto_hashing::keccak_256; +// use sp_std::iter::repeat; -pub async fn run() -> Result<(), Error> { - println!("---------- START Submission ---------- "); - let len = 1 * 1024 * 1024; - let client = Client::new(LOCAL_ENDPOINT).await?; - let signer = alice(); - let byte = b'A'; - let nonce = client.chain().account_nonce(signer.account_id()).await?; +// pub async fn run() -> Result<(), Error> { +// println!("---------- START Submission ---------- "); +// let len = 1 * 1024 * 1024; +// let client = Client::new(LOCAL_ENDPOINT).await?; +// let signer = alice(); +// let byte = b'A'; +// let nonce = client.chain().account_nonce(signer.account_id()).await?; - let mut blobs: Vec<(Vec, H256, Vec)> = Vec::new(); - let nb_tx: u32 = 10; - println!("---------- START Commitments generation ---------- "); - for i in 0..(nb_tx as usize) { - println!("---------- START Commitment generation {i} ---------- "); - let blob: Vec = repeat(byte).take(len - i).collect::>(); - let blob_hash = H256::from(keccak_256(&blob)); - let commitments = build_da_commitments(&blob, 1024, 4096, Seed::default()); - println!("blob len = {:?}", blob.len()); - println!("blob_hash = {:?}", blob_hash); - println!("commitments len = {:?}", commitments.len()); - blobs.push((blob, blob_hash, commitments)); - } +// let mut blobs: Vec<(Vec, H256, Vec)> = Vec::new(); +// let nb_tx: u32 = 10; +// println!("---------- START Commitments generation ---------- "); +// for i in 0..(nb_tx as usize) { +// println!("---------- START Commitment generation {i} ---------- "); +// let blob: Vec = repeat(byte).take(len - i).collect::>(); +// let blob_hash = H256::from(keccak_256(&blob)); +// let commitments = build_da_commitments(&blob, 1024, 4096, Seed::default()); +// println!("blob len = {:?}", blob.len()); +// println!("blob_hash = {:?}", blob_hash); +// println!("commitments len = {:?}", commitments.len()); +// blobs.push((blob, blob_hash, commitments)); +// } - let block_height_before = client.finalized().block_header().await?.number; - let mut sub = Sub::new(client.clone()); - sub.use_best_block(true); - sub.set_block_height(block_height_before); +// let block_height_before = client.finalized().block_header().await?.number; +// let mut sub = Sub::new(client.clone()); +// sub.use_best_block(true); +// sub.set_block_height(block_height_before); - for (i, (blob, hash, commitments)) in blobs.into_iter().enumerate() { - println!("---------- START Submission {i} ---------- "); - let options = Options::default().nonce(nonce + i as u32); - let unsigned_tx = client.tx().data_availability().submit_blob_metadata( - 5, - hash, - blob.len() as u64, - commitments, - ); +// for (i, (blob, hash, commitments)) in blobs.into_iter().enumerate() { +// println!("---------- START 
Submission {i} ---------- "); +// let options = Options::default().nonce(nonce + i as u32); +// let unsigned_tx = client.tx().data_availability().submit_blob_metadata( +// 5, +// hash, +// blob.len() as u64, +// commitments, +// ); - let tx = unsigned_tx.sign(&signer, options).await.unwrap().encode(); +// let tx = unsigned_tx.sign(&signer, options).await.unwrap().encode(); - if let Err(e) = submit_blob(&client.rpc_client, &tx, &blob).await { - println!("An error has occured: {e}"); - } - println!("---------- END Submission {i} ---------- "); - } +// if let Err(e) = submit_blob(&client.rpc_client, &tx, &blob).await { +// println!("An error has occured: {e}"); +// } +// println!("---------- END Submission {i} ---------- "); +// } - let mut found_blob_count = 0; - let mut block_searched = 0; +// let mut found_blob_count = 0; +// let mut block_searched = 0; - loop { - let block_ref = sub.next().await?; - let block = BlockWithRawExt::new(client.clone(), block_ref.height); - let regular_count = block - .count(BlockExtOptionsExpanded::default()) - .await - .unwrap(); - let count = (regular_count - 3).max(0); - println!( - "Searched in block {}, found {} blobs", - block_ref.height, count - ); - found_blob_count += count; - block_searched += 1; - if found_blob_count >= nb_tx as usize { - println!("Successfully found all blobs"); - break; - } - if block_searched > 10 { - println!("Failed to find blobs, stopped at {found_blob_count} blob(s)"); - break; - } - } +// loop { +// let block_ref = sub.next().await?; +// let block = BlockWithRawExt::new(client.clone(), block_ref.height); +// let regular_count = block +// .count(BlockExtOptionsExpanded::default()) +// .await +// .unwrap(); +// let count = (regular_count - 3).max(0); +// println!( +// "Searched in block {}, found {} blobs", +// block_ref.height, count +// ); +// found_blob_count += count; +// block_searched += 1; +// if found_blob_count >= nb_tx as usize { +// println!("Successfully found all blobs"); +// break; +// } +// if block_searched > 10 { +// println!("Failed to find blobs, stopped at {found_blob_count} blob(s)"); +// break; +// } +// } - Ok(()) -} +// Ok(()) +// } diff --git a/e2e/src/infinity_da_test.rs b/e2e/src/infinity_da_test.rs index 4e711eaa3..ce14a08a5 100644 --- a/e2e/src/infinity_da_test.rs +++ b/e2e/src/infinity_da_test.rs @@ -1,14 +1,30 @@ #![allow(dead_code)] use avail_rust::{avail_rust_core::rpc::blob::submit_blob, prelude::*}; -use da_commitment::build_da_commitments::build_da_commitments; -use kate::Seed; +// use avail_core::FriParamsVersion; +use avail_fri::{ + core::{FriBiniusPCS, B128}, + encoding::BytesEncoder, + eval_utils::{derive_evaluation_point, derive_seed_from_inputs, eval_claim_to_bytes}, + FriParamsVersion, +}; +// use da_commitment::build_kzg_commitments::build_da_commitments; +// use da_commitment::build_fri_commitments::build_fri_da_commitment; +// use kate::Seed; use sp_crypto_hashing::keccak_256; use sp_std::iter::repeat; +pub struct BabeRandomness; +impl StorageValue for BabeRandomness { + type VALUE = [u8; 32]; + + const PALLET_NAME: &str = "Babe"; + const STORAGE_NAME: &str = "Randomness"; +} + pub async fn run() -> Result<(), Error> { println!("---------- START Submission ---------- "); - let len = 31 * 1024 * 1024; + let len = 31 * 1024; let mode = 1; let local_endpoint: &str = if mode == 1 { @@ -44,19 +60,61 @@ pub async fn run() -> Result<(), Error> { let nonce = client.chain().account_nonce(signer.account_id()).await?; println!("Nonce: {nonce}"); - let mut blobs: Vec<(Vec, H256, Vec)> = 
Vec::new(); + let mut blobs: Vec<(Vec, H256, Vec, Option<[u8; 32]>, Option<[u8; 16]>)> = Vec::new(); println!("---------- START Commitments generation ---------- "); - for i in 0..50 { + for i in 0..2 { println!("---------- START Commitment generation {i} ---------- "); let blob: Vec = repeat(byte).take(len - i).collect::>(); let blob_hash = H256::from(keccak_256(&blob)); - let commitments = build_da_commitments(&blob, 1024, 4096, Seed::default()); + // let commitments = build_da_commitments(&blob, 1024, 4096, Seed::default()); + // let commitments = build_fri_da_commitment(&blob, FriParamsVersion(0)); + let params_version = FriParamsVersion(0); + // Encode bytes → multilinear extension over B128 + let encoder = BytesEncoder::::new(); + let packed = encoder + .bytes_to_packed_mle(&blob) + .expect("Failed to encode blob to packed MLE"); + + let n_vars = packed.total_n_vars; + + // Map version + n_vars → concrete FriParamsConfig + let cfg = params_version.to_config(n_vars); + + // Build PCS + FRI context + let pcs = FriBiniusPCS::new(cfg); + let ctx = pcs + .initialize_fri_context::(packed.packed_mle.log_len()) + .expect("Failed to initialize FRI context"); + + // Commit to the blob MLE: returns a 32-byte digest in `commitment` + let commit_output = pcs + .commit(&packed.packed_mle, &ctx) + .expect("Failed to commit to blob MLE"); + let commitments = commit_output.commitment; + // Fetch the current epoch randomness from the chain and use it to derive the eval point seed + let rpc_client = &client.rpc_client; + let babe_randomness = BabeRandomness::fetch(&rpc_client, None) + .await? + .expect("Babe Randomness should be available for every epoch except the genesis era"); + let eval_point_seed = derive_seed_from_inputs(&babe_randomness, &blob_hash.0); + let eval_point = derive_evaluation_point(eval_point_seed, n_vars); + let eval_claim = pcs + .calculate_evaluation_claim(&packed.packed_values, &eval_point) + .expect("Failed to calculate evaluation claim"); + let eval_claim_bytes = eval_claim_to_bytes(eval_claim); println!("blob len = {:?}", blob.len()); println!("blob_hash = {:?}", blob_hash); println!("commitments len = {:?}", commitments.len()); - blobs.push((blob, blob_hash, commitments)); + blobs.push(( + blob, + blob_hash, + commitments, + Some(eval_point_seed), + Some(eval_claim_bytes), + )); } - for (i, (blob, hash, commitments)) in blobs.into_iter().enumerate() { + for (i, (blob, hash, commitments, eval_point_seed, eval_claim)) in blobs.into_iter().enumerate() + { println!("---------- START Submission {i} ---------- "); let options = Options::default().nonce(nonce + i as u32); let unsigned_tx = client.tx().data_availability().submit_blob_metadata( @@ -64,6 +122,8 @@ pub async fn run() -> Result<(), Error> { hash, blob.len() as u64, commitments, + eval_point_seed, + eval_claim, ); let tx = unsigned_tx.sign(&signer, options).await.unwrap().encode(); diff --git a/e2e/src/max_block_submit.rs b/e2e/src/max_block_submit.rs index 78a5bbca6..05a534e25 100644 --- a/e2e/src/max_block_submit.rs +++ b/e2e/src/max_block_submit.rs @@ -23,7 +23,7 @@ pub async fn run() -> Result<(), Error> { let client = Client::new(LOCAL_ENDPOINT).await?; let alice = alice(); - let options = Options::new(5); + let options = Options::new(); // Testing if MAX_TX_SIZE is viable println!("{}: Submitting transaction...", TAG); @@ -58,16 +58,17 @@ pub async fn run() -> Result<(), Error> { println!("{}: Checking transactions...", TAG); // Get details - let mut expected_block_id = None; + // let mut expected_block_id = None; for 
submitted_tx in submitted_txs { let receipt = submitted_tx.receipt(true).await.unwrap().unwrap(); - if expected_block_id.is_none() { - expected_block_id = Some(receipt.block_ref) - } - let expected_id = expected_block_id.unwrap(); + // if expected_block_id.is_none() { + // expected_block_id = Some(receipt.block_ref) + // } + // let expected_id = expected_block_id.unwrap(); - assert_eq!(expected_id.height, receipt.block_ref.height); - assert_eq!(expected_id.hash, receipt.block_ref.hash); + println!("Submitted tx at block: {:?}", receipt.block_hash); + // assert_eq!(expected_id.height, receipt.block_height); + // assert_eq!(expected_id.hash, receipt.block_hash); } println!("{}: Done", TAG); diff --git a/node/src/da_block_import.rs b/node/src/da_block_import.rs index 9617412a9..3276be476 100644 --- a/node/src/da_block_import.rs +++ b/node/src/da_block_import.rs @@ -11,10 +11,14 @@ use avail_blob::{ }; use avail_core::{ ensure, - header::{extension as he, HeaderExtension}, - kate::COMMITMENT_SIZE, - kate_commitment as kc, AppId, BlockLengthColumns, BlockLengthRows, DataLookup, HeaderVersion, - BLOCK_CHUNK_SIZE, + header::{ + extension::{ + fri::{FriHeader, FriHeaderVersion}, + kzg::{KzgHeader, KzgHeaderVersion}, + }, + HeaderExtension, + }, + BlockLengthColumns, BlockLengthRows, BLOCK_CHUNK_SIZE, }; use avail_observability::metrics::avail::{MetricObserver, ObserveKind}; use da_control::BlobTxSummaryRuntime; @@ -22,9 +26,8 @@ use da_runtime::{ apis::{DataAvailApi, ExtensionBuilder}, Header as DaHeader, Runtime, }; -use frame_system::limits::BlockLength; -use sp_runtime::OpaqueExtrinsic; - +use frame_system::limits::{BlockLength, BlockLengthError}; +use frame_system::native::build_extension; use sc_consensus::{ block_import::{BlockCheckParams, BlockImport as BlockImportT, BlockImportParams}, ImportResult, @@ -34,7 +37,8 @@ use sp_blockchain::HeaderBackend; use sp_consensus::{BlockOrigin, Error as ConsensusError}; use sp_core::H256; use sp_runtime::traits::Block as BlockT; -use std::{marker::PhantomData, sync::Arc, time::Instant}; +use sp_runtime::OpaqueExtrinsic; +use std::{marker::PhantomData, sync::Arc}; type RTExtractor = ::HeaderExtensionDataFilter; @@ -66,8 +70,8 @@ where client, inner, unsafe_da_sync, - _block: PhantomData, blob_store, + _block: PhantomData, } } @@ -75,93 +79,127 @@ where &self, block: &BlockImportParams, ) -> Result<(), ConsensusError> { - let err = block_doesnt_contain_vector_post_inherent(); - - let maybe_body = block.body.as_ref(); - let Some(body) = maybe_body else { - return Err(err); - }; + let body = block + .body + .as_ref() + .ok_or_else(block_doesnt_contain_vector_post_inherent)?; - let Some(last_extrinsic) = body.last() else { - return Err(err); - }; + let last = body + .last() + .ok_or_else(block_doesnt_contain_vector_post_inherent)?; let parent_hash = ::Hash::from(block.header.parent_hash); let api = self.client.runtime_api(); - let Ok(found) = api.check_if_extrinsic_is_vector_post_inherent(parent_hash, last_extrinsic) - else { - return Err(err); - }; - - ensure!(found, err); + let found = api + .check_if_extrinsic_is_vector_post_inherent(parent_hash, last) + .map_err(|_| block_doesnt_contain_vector_post_inherent())?; + ensure!(found, block_doesnt_contain_vector_post_inherent()); Ok(()) } - // Now this ALWAYS runs and returns the decoded summaries (if any) - // by calling the new runtime API that both checks and decodes the - // DA post-inherent. 
- fn ensure_before_last_extrinsic_is_blob_summary_tx( + fn extract_blob_summaries( &self, block: &BlockImportParams, ) -> Result, ConsensusError> { - let err = block_doesnt_contain_da_post_inherent(); + let body = block + .body + .as_ref() + .ok_or_else(block_doesnt_contain_da_post_inherent)?; - let maybe_body = block.body.as_ref(); - let Some(body) = maybe_body else { - return Err(err); - }; - - let Some(da_summary_extrinsic) = body.get(body.len().wrapping_sub(2)) else { - return Err(err); - }; + let da_xt = body + .get(body.len().wrapping_sub(2)) + .ok_or_else(block_doesnt_contain_da_post_inherent)?; let parent_hash = ::Hash::from(block.header.parent_hash); let api = self.client.runtime_api(); - let Ok(extracted) = api.extract_post_inherent_summaries(parent_hash, da_summary_extrinsic) - else { - return Err(err); - }; - - ensure!(extracted.is_some(), err); + let extracted = api + .extract_post_inherent_summaries(parent_hash, da_xt) + .map_err(|_| block_doesnt_contain_da_post_inherent())?; - Ok(extracted.expect("Checked above; qed")) + ensure!(extracted.is_some(), block_doesnt_contain_da_post_inherent()); + Ok(extracted.expect("checked above; qed")) } fn ensure_valid_header_extension( &self, block: &BlockImportParams, + extracted: &HeaderExtensionBuilderData, + skip_sync: bool, ) -> Result<(), ConsensusError> { - let block_len = extension_block_len(&block.header.extension); - let extrinsics = || block.body.clone().unwrap_or_default(); + let extrinsics = block.body.clone().unwrap_or_default(); let block_number: u32 = block.header.number; let parent_hash = ::Hash::from(block.header.parent_hash); let api = self.client.runtime_api(); - // Calculate data root and extension. + let block_length = extension_block_len(&block.header.extension)?; let data_root = api - .build_data_root(parent_hash, block_number, extrinsics()) + .build_data_root(parent_hash, block_number, extrinsics.clone()) .map_err(data_root_fail)?; - let version = block.header.extension.get_header_version(); - - let extension = match version { - // Since V3 has AppExtrinsics which is derived from the AppId SignedExtension, We cant support it GOING FORWARD - HeaderVersion::V3 => todo!(), - HeaderVersion::V4 => build_extension_with_comms( - extrinsics(), - data_root, - block_len, - block_number, - block.header.extension.get_header_version(), - )?, + let submitted_blobs = extracted.data_submissions.clone(); + let regenerated_extension = match &block.header.extension { + HeaderExtension::Kzg(kzg_hdr) => { + let kzg_version = match kzg_hdr { + KzgHeader::V4(_) => KzgHeaderVersion::V4, + }; + + build_extension::build_kzg_extension( + submitted_blobs, + data_root, + block_length, + kzg_version, + ) + }, + + HeaderExtension::Fri(fri_hdr) => { + // Extract params_version + version from the header itself + let (params_version, fri_version) = match fri_hdr { + FriHeader::V1(inner) => (inner.params_version, FriHeaderVersion::V1), + }; + + // Verify FRI proofs unless syncing + if !skip_sync { + for da in submitted_blobs.iter() { + if da.eval_point_seed.is_none() + || da.eval_claim.is_none() + || da.eval_proof.is_none() + { + return Err(ConsensusError::ClientImport(format!( + "Missing FRI proof data for blob {:?}", + da.hash + ))); + } + + avail_blob::validation::validate_fri_proof( + da.size_bytes as usize, + &da.eval_point_seed.expect("checked above; qed"), + &da.eval_claim.expect("checked above; qed"), + da.eval_proof.as_ref().expect("checked above; qed"), + ) + .map_err(|e| { + ConsensusError::ClientImport(format!( + "FRI proof validation 
failed for blob {:?}: {e}", + da.hash + )) + })?; + } + } + + build_extension::build_fri_extension( + submitted_blobs, + data_root, + params_version, + fri_version, + ) + }, }; // Check equality between calculated and imported extensions. ensure!( - block.header.extension == extension, - extension_mismatch(&block.header.extension, &extension) + block.header.extension == regenerated_extension, + extension_mismatch(&block.header.extension, ®enerated_extension) ); Ok(()) } @@ -192,18 +230,21 @@ where ); let skip_sync = self.unsafe_da_sync && is_sync; - // Always extract blob summaries (if any) from DA post-inherent extrinsic. - // we know that it will add small overheasd but simplifies the code flow. - let pre_extracted_summaries = - self.ensure_before_last_extrinsic_is_blob_summary_tx(&block)?; + let blob_summaries = self.extract_blob_summaries(&block)?; + + let extrinsics = block.body.clone().unwrap_or_default(); + let extracted = HeaderExtensionBuilderData::from_opaque_extrinsics::( + block.header.number, + &extrinsics, + ); if !is_own && !skip_sync && !block.with_state() { self.ensure_last_extrinsic_is_failed_send_message_txs(&block)?; - self.ensure_valid_header_extension(&block)?; + self.ensure_valid_header_extension(&block, &extracted, skip_sync)?; } - let candidate_block_number: u32 = block.header.number; - let candidate_block_hash = block.post_hash(); + let block_number: u32 = block.header.number; + let block_hash = block.post_hash(); // Next import block stage & metrics let result = self.inner.import_block(block).await; @@ -211,10 +252,10 @@ where // On successful import of block, write to our blob indexer. if let Ok(ImportResult::Imported(_imported)) = &result { // filter out successful blobs only and collect BlobInfo entries - let mut blob_infos: Vec = Vec::new(); + let mut blob_infos = Vec::new(); - for s in pre_extracted_summaries.iter().filter(|s| s.success) { - let ownership_entries: Vec = s + for s in blob_summaries.iter().filter(|s| s.success) { + let ownership = s .ownership .iter() .map(|(a, b, c, d)| OwnershipEntry { @@ -225,83 +266,21 @@ where }) .collect(); - let blob_info = BlobInfo { + blob_infos.push(BlobInfo { hash: s.hash, - block_hash: candidate_block_hash, - block_number: candidate_block_number, - ownership: ownership_entries, - }; - - blob_infos.push(blob_info); + block_hash, + block_number, + ownership, + }); } - // If there are none, skip DB work and logs - if blob_infos.is_empty() { - log::debug!( - "No successful blob summaries to write for block #{}/{}", - candidate_block_number, - candidate_block_hash - ); - } else { - // Batch insert per-block history (blob_by_hash_block + blob_by_block) - let write_start = std::time::Instant::now(); - let mut written_history = 0usize; - if let Err(e) = self + if !blob_infos.is_empty() { + let _ = self .blob_store - .insert_blob_infos_by_block_batch(&blob_infos) - { - log::warn!( - "Failed batch insert_blob_infos_by_block_batch for block #{}/{}: {}", - candidate_block_number, - candidate_block_hash, - e - ); - } else { - written_history = blob_infos.len(); - } - let history_ns = write_start.elapsed().as_nanos(); - - // Append pending pending_by_block - let pending_start = std::time::Instant::now(); - let mut written_pending = 0usize; - if let Err(e) = self + .insert_blob_infos_by_block_batch(&blob_infos); + let _ = self .blob_store - .append_pending_blob_infos_batch(&candidate_block_hash, &blob_infos) - { - log::warn!( - "Failed append_pending_blob_infos_batch for block #{}/{}: {}", - candidate_block_number, - 
candidate_block_hash, - e - ); - } else { - written_pending = blob_infos.len(); - } - let pending_ns = pending_start.elapsed().as_nanos(); - - // Logging aggregated stats - if written_history > 0 || written_pending > 0 { - let total_written = std::cmp::max(written_history, written_pending); - let total_ns = history_ns + pending_ns; - let avg_us = (total_ns as f64 / total_written as f64) / 1_000.0_f64; - log::info!( - "⏱️ Persisted {} BlobInfo entries for block #{}/{} (history={}, pending={}), total_time = {} ms, avg = {:.3} µs", - total_written, - candidate_block_number, - candidate_block_hash, - written_history, - written_pending, - (total_ns as f64) / 1_000_000.0_f64, - avg_us - ); - } else { - log::warn!( - "Attempted writes for blob_info in block #{}/{} took total {} ms (all failed)", - candidate_block_number, - candidate_block_hash, - ((history_ns + pending_ns) as f64) / 1_000_000.0_f64 - ); - } + .append_pending_blob_infos_batch(&block_hash, &blob_infos); } } @@ -325,164 +304,43 @@ impl Clone for BlockImport { } } -/// builds header extension by regenerating the commitments for DA txs -fn build_extension_with_comms( - extrinsics: Vec, - data_root: H256, - block_length: BlockLength, - block_number: u32, - version: HeaderVersion, -) -> Result { - let timer_total = Instant::now(); - let timer_app_ext = Instant::now(); - let app_extrinsics = HeaderExtensionBuilderData::from_opaque_extrinsics::( - block_number, - &extrinsics, - block_length.cols.0, - block_length.rows.0, - ) - .data_submissions; - log::info!( - "⏱️ Extracting app extrinsics took {:?}", - timer_app_ext.elapsed() - ); - log::info!("Ext length: {}", extrinsics.len()); - - // Blocks with non-DA extrinsics will have empty commitments - if app_extrinsics.is_empty() { - log::info!( - "✅ No DA extrinsics, returning empty header. Total time: {:?}", - timer_total.elapsed() - ); - return Ok(HeaderExtension::get_empty_header(data_root, version)); - } - - let max_columns = block_length.cols.0 as usize; - if max_columns == 0 { - log::info!( - "⚠️ Max columns = 0, returning empty header. 
Total time: {:?}", - timer_total.elapsed() - ); - return Ok(HeaderExtension::get_empty_header(data_root, version)); - } - - let timer_commitment_prep = Instant::now(); - let total_commitments_len: usize = app_extrinsics - .iter() - .map(|da_call| da_call.commitments.len()) - .sum(); - let mut commitment = Vec::with_capacity(total_commitments_len); - - let mut app_rows: Vec<(AppId, usize)> = Vec::with_capacity(app_extrinsics.len()); - - for da_call in app_extrinsics.iter() { - // Commitments from blob submission where checked - // Commitments from regular submit data are computed by the node - commitment.extend(da_call.commitments.clone()); - let rows_taken = da_call.commitments.len() / COMMITMENT_SIZE; - - // Update app_rows - app_rows.push((da_call.id, rows_taken)); - } - log::info!( - "⏱️ Collecting commitments + app_rows took {:?}", - timer_commitment_prep.elapsed() - ); - - let timer_lookup = Instant::now(); - let app_lookup = DataLookup::from_id_and_len_iter(app_rows.clone().into_iter()) - .map_err(|_| data_lookup_failed())?; - log::info!("⏱️ Building DataLookup took {:?}", timer_lookup.elapsed()); - - let timer_padding = Instant::now(); - let original_rows = app_lookup.len(); - let padded_rows = original_rows.next_power_of_two(); - if padded_rows > original_rows { - let (_, padded_row_commitment) = - kate::gridgen::core::get_pregenerated_row_and_commitment(max_columns) - .map_err(|_| pregenerated_comms_failed())?; - commitment = commitment - .into_iter() - .chain( - std::iter::repeat_n( - padded_row_commitment, - (padded_rows - original_rows) as usize, - ) - .flatten(), +fn extension_block_len(extension: &HeaderExtension) -> Result { + match extension { + HeaderExtension::Kzg(kzg_hdr) => { + let (rows, cols) = match kzg_hdr { + KzgHeader::V4(ext) => (ext.rows() as u32, ext.cols() as u32), + }; + + BlockLength::with_normal_ratio( + BlockLengthRows(rows), + BlockLengthColumns(cols), + BLOCK_CHUNK_SIZE, + sp_runtime::Perbill::from_percent(90), ) - .collect(); + .map_err(block_contains_invalid_block_length) + }, + HeaderExtension::Fri(_) => Ok(BlockLength::default()), } - log::info!("⏱️ Padding commitments took {:?}", timer_padding.elapsed()); - - let timer_kate = Instant::now(); - let commitment = kc::v3::KateCommitment::new( - padded_rows.try_into().unwrap_or_default(), - max_columns.try_into().unwrap_or_default(), - data_root, - commitment, - ); - log::info!("⏱️ Building KateCommitment took {:?}", timer_kate.elapsed()); - - log::info!( - "✅ Finished build_extension_with_comms in {:?}", - timer_total.elapsed() - ); - - Ok(he::v4::HeaderExtension { - app_lookup, - commitment, - } - .into()) } -/// Calculate block length from `extension`. 
-fn extension_block_len(extension: &HeaderExtension) -> BlockLength { - BlockLength::with_normal_ratio( - BlockLengthRows(extension.rows() as u32), - BlockLengthColumns(extension.cols() as u32), - BLOCK_CHUNK_SIZE, - sp_runtime::Perbill::from_percent(90), - ) - .expect("Valid BlockLength at genesis .qed") -} - -fn extension_mismatch(imported: &HeaderExtension, generated: &HeaderExtension) -> ConsensusError { - let msg = - format!("DA Extension does NOT match\nExpected: {imported:#?}\nGenerated:{generated:#?}"); - ConsensusError::ClientImport(msg) -} - -// fn commitments_mismatch(tx_id: u32) -> ConsensusError { -// let msg = format!("DA Commitments does NOT match for tx_id: {tx_id}."); -// ConsensusError::ClientImport(msg) -// } - -fn pregenerated_comms_failed() -> ConsensusError { - let msg = "Failed to get pregenerated rows & commitments.".to_string(); - ConsensusError::ClientImport(msg) -} - -fn data_lookup_failed() -> ConsensusError { - let msg = "Failed to construct DataLookup.".to_string(); - ConsensusError::ClientImport(msg) +fn extension_mismatch(a: &HeaderExtension, b: &HeaderExtension) -> ConsensusError { + ConsensusError::ClientImport(format!( + "DA extension mismatch\nExpected: {a:#?}\nGenerated: {b:#?}" + )) } fn data_root_fail(e: ApiError) -> ConsensusError { - let msg = format!("Data root cannot be calculated: {e:?}"); - ConsensusError::ClientImport(msg) + ConsensusError::ClientImport(format!("Data root build failed: {e:?}")) } -// fn build_ext_fail(e: ApiError) -> ConsensusError { -// let msg = format!("Build extension fails due to: {e:?}"); -// ConsensusError::ClientImport(msg) -// } - fn block_doesnt_contain_vector_post_inherent() -> ConsensusError { - let msg = "Block does not contain vector post inherent".to_string(); - ConsensusError::ClientImport(msg) + ConsensusError::ClientImport("Missing vector post inherent".into()) } fn block_doesnt_contain_da_post_inherent() -> ConsensusError { - let msg = "Block does not contain da post inherent".to_string(); - ConsensusError::ClientImport(msg) + ConsensusError::ClientImport("Missing DA post inherent".into()) +} + +fn block_contains_invalid_block_length(err: BlockLengthError) -> ConsensusError { + ConsensusError::ClientImport(format!("Invalid block length: {err:?}")) } diff --git a/observability/Cargo.toml b/observability/Cargo.toml index ab3d2f0c5..bfa182465 100644 --- a/observability/Cargo.toml +++ b/observability/Cargo.toml @@ -11,6 +11,6 @@ sc-telemetry = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } substrate-prometheus-endpoint = { workspace = true } -log = { workspace = true } +log = { workspace = true, default-features = false } once_cell = { workspace = true } tokio = { version = "1.43.0", features = ["sync"] } diff --git a/pallets/dactr/Cargo.toml b/pallets/dactr/Cargo.toml index 4db423731..77019694f 100644 --- a/pallets/dactr/Cargo.toml +++ b/pallets/dactr/Cargo.toml @@ -16,7 +16,7 @@ avail-base = { workspace = true, default-features = false } avail-core = { workspace = true, default-features = false } kate = { workspace = true, default-features = false } frame-system = { workspace = true, default-features = false } -da-commitment = { workspace = true, default-features = false } +da-commitment = { workspace = true, default-features = false, optional = true } pallet-transaction-payment = { workspace = true, default-features = false } pallet-vector = { workspace = true, default-features = false } pallet-multisig = { workspace = true, default-features = false } @@ -25,7 +25,7 @@ 
pallet-scheduler = { workspace = true, default-features = false } pallet-preimage = { workspace = true, default-features = false } # Others -log.workspace = true +log = { workspace = true, default-features = false } thiserror-no-std.workspace = true derive_more.workspace = true @@ -56,6 +56,7 @@ std = [ "avail-core/std", "codec/std", "da-commitment/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "kate/std", diff --git a/pallets/dactr/src/extensions/native.rs b/pallets/dactr/src/extensions/native.rs index 6c9db052a..b9da0ec79 100644 --- a/pallets/dactr/src/extensions/native.rs +++ b/pallets/dactr/src/extensions/native.rs @@ -1,3 +1,4 @@ +use avail_core::FriParamsVersion; use kate::Seed; use sp_runtime::SaturatedConversion; use sp_runtime_interface::{ @@ -11,22 +12,13 @@ pub type DaCommitments = AllocateAndReturnFatPointer>; /// Hosted function to build the DA commitments. #[runtime_interface] pub trait HostedCommitmentBuilder { - fn build_da_commitments( - data: PassFatPointerAndRead<&[u8]>, - cols: u32, - rows: u32, - seed: PassFatPointerAndDecode, - ) -> DaCommitments { + fn build_kzg_commitments(data: PassFatPointerAndRead<&[u8]>, cols: u32, rows: u32, seed: PassFatPointerAndDecode) -> DaCommitments { let cols: usize = cols.saturated_into(); let rows: usize = rows.saturated_into(); - #[cfg(feature = "std")] - { - da_commitment::build_da_commitments::build_da_commitments(data, cols, rows, seed) - } - #[cfg(not(feature = "std"))] - { - // one should never reach here - Vec::new() - } + da_commitment::build_kzg_commitments::build_da_commitments(data, cols, rows, seed) + } + + fn build_fri_commitments(data: PassFatPointerAndRead<&[u8]>, params_version: PassFatPointerAndDecode) -> DaCommitments { + da_commitment::build_fri_commitments::build_fri_da_commitment(data, params_version) } } diff --git a/pallets/dactr/src/lib.rs b/pallets/dactr/src/lib.rs index 009afc100..977f3047c 100644 --- a/pallets/dactr/src/lib.rs +++ b/pallets/dactr/src/lib.rs @@ -18,7 +18,6 @@ use frame_support::{ use frame_system::{limits::BlockLength, pallet::DynamicBlockLength}; use sp_arithmetic::traits::SaturatedConversion; use sp_core::H256; -use sp_io::hashing::keccak_256; use sp_runtime::traits::Convert; use sp_runtime::Perbill; use sp_std::{mem::replace, vec, vec::Vec}; @@ -371,28 +370,12 @@ pub mod pallet { DispatchClass::Normal, SubmitDataFeeModifier::::get() ))] + /// TODO: Remove this. 
Currently no-op pub fn submit_data( - origin: OriginFor, - app_id: AppId, - data: AppDataFor, + _origin: OriginFor, + _app_id: AppId, + #[allow(unused_variables)] data: AppDataFor, ) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin)?; - ensure!( - app_id < Self::peek_next_application_id(), - Error::::InvalidAppId - ); - ensure!(!data.is_empty(), Error::::DataCannotBeEmpty); - ensure!( - !BlobRuntimeParams::::get().disable_old_da_submission, - Error::::OldDaSubmissionDisabled - ); - - let data_hash = keccak_256(&data); - Self::deposit_event(Event::DataSubmitted { - who, - data_hash: H256(data_hash), - }); - Ok(().into()) } @@ -510,6 +493,8 @@ pub mod pallet { blob_hash: H256, size: u64, commitment: Vec, + _eval_point_seed: Option<[u8; 32]>, + _eval_claim: Option<[u8; 16]>, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!( diff --git a/pallets/dactr/src/tests.rs b/pallets/dactr/src/tests.rs index 6e87a4c5e..21fdd5b04 100644 --- a/pallets/dactr/src/tests.rs +++ b/pallets/dactr/src/tests.rs @@ -76,38 +76,38 @@ mod create_application_key { } mod submit_data { - use avail_core::AppId; + // use avail_core::AppId; use super::*; - #[test] - fn submit_data() { - new_test_ext().execute_with(|| { - let alice: RuntimeOrigin = RawOrigin::Signed(ALICE).into(); - let max_app_key_length: usize = MaxAppDataLength::get().try_into().unwrap(); - let data = AppDataFor::::try_from(vec![b'X'; max_app_key_length]).unwrap(); - let data_hash = H256(sp_io::hashing::keccak_256(&data)); - - assert_ok!(DataAvailability::submit_data(alice, AppId(1), data)); - - let event = RuntimeEvent::DataAvailability(Event::DataSubmitted { - who: ALICE, - data_hash, - }); - System::assert_last_event(event); - }) - } - - #[test] - fn data_cannot_be_empty() { - new_test_ext().execute_with(|| { - let alice: RuntimeOrigin = RawOrigin::Signed(ALICE).into(); - let data = AppDataFor::::try_from(vec![]).unwrap(); - - let err = DataAvailability::submit_data(alice, AppId(1), data); - assert_noop!(err, Error::DataCannotBeEmpty); - }) - } + // #[test] + // fn submit_data() { + // new_test_ext().execute_with(|| { + // let alice: RuntimeOrigin = RawOrigin::Signed(ALICE).into(); + // let max_app_key_length: usize = MaxAppDataLength::get().try_into().unwrap(); + // let data = AppDataFor::::try_from(vec![b'X'; max_app_key_length]).unwrap(); + // let data_hash = H256(sp_io::hashing::keccak_256(&data)); + + // assert_ok!(DataAvailability::submit_data(alice, AppId(1), data)); + + // let event = RuntimeEvent::DataAvailability(Event::DataSubmitted { + // who: ALICE, + // data_hash, + // }); + // System::assert_last_event(event); + // }) + // } + + // #[test] + // fn data_cannot_be_empty() { + // new_test_ext().execute_with(|| { + // let alice: RuntimeOrigin = RawOrigin::Signed(ALICE).into(); + // let data = AppDataFor::::try_from(vec![]).unwrap(); + + // let err = DataAvailability::submit_data(alice, AppId(1), data); + // assert_noop!(err, Error::DataCannotBeEmpty); + // }) + // } #[test] fn submit_data_too_long() { @@ -402,7 +402,9 @@ mod submit_blob_metadata { AppId(1), blob_hash, size, - commitment + commitment, + None, + None, )); let event = RuntimeEvent::DataAvailability(Event::SubmitBlobMetadataRequest { @@ -427,6 +429,8 @@ mod submit_blob_metadata { blob_hash, size, commitment, + None, + None, ); assert_noop!(err, Error::CommitmentCannotBeEmpty); }) @@ -446,6 +450,8 @@ mod submit_blob_metadata { blob_hash, size, commitment, + None, + None, ); assert_noop!(err, Error::DataCannotBeEmpty); }) @@ -465,6 +471,8 
@@ mod submit_blob_metadata { blob_hash, size, commitment, + None, + None, ); assert_noop!(err, Error::DataCannotBeEmpty); }) @@ -485,6 +493,7 @@ mod submit_blob_txs_summary { success: true, reason: None, ownership: Vec::new(), + eval_proof: None, }; let s2 = crate::BlobTxSummaryRuntime { hash: H256::random(), @@ -492,6 +501,7 @@ mod submit_blob_txs_summary { success: false, reason: Some("example".into()), ownership: Vec::new(), + eval_proof: None, }; let total_blob_size: u64 = (2 * H256::random().0.len()) as u64; diff --git a/pallets/dactr/src/types.rs b/pallets/dactr/src/types.rs index 77d30076c..6edb8a98d 100644 --- a/pallets/dactr/src/types.rs +++ b/pallets/dactr/src/types.rs @@ -61,6 +61,7 @@ pub struct BlobTxSummaryRuntime { pub success: bool, pub reason: Option, pub ownership: Vec<(AccountId32, AuthorityId, String, Vec)>, + pub eval_proof: Option>, } impl BlobTxSummaryRuntime { pub fn convert_into( @@ -70,17 +71,19 @@ impl BlobTxSummaryRuntime { bool, Option, Vec<(AccountId32, AuthorityId, String, Vec)>, + Option>, )>, ) -> Vec { input .into_iter() .map( - |(hash, tx_index, success, reason, ownership)| BlobTxSummaryRuntime { + |(hash, tx_index, success, reason, ownership, eval_proof)| BlobTxSummaryRuntime { hash, tx_index, success, reason, ownership, + eval_proof, }, ) .collect() diff --git a/pallets/system/Cargo.toml b/pallets/system/Cargo.toml index ed697c4ac..288c6c085 100644 --- a/pallets/system/Cargo.toml +++ b/pallets/system/Cargo.toml @@ -25,7 +25,7 @@ avail-observability = { workspace = true, optional = true } # Other cfg-if.workspace = true static_assertions.workspace = true -log.workspace = true +log = { workspace = true, default-features = false } docify.workspace = true derive_more.workspace = true hex-literal.workspace = true @@ -94,7 +94,7 @@ header_commitment_corruption = [] # Enables secure seed generation using for padding fill during the matrix # generation for Kate commitment. -# NOTE: WIP because that affects the Data Availability Protocol used during +# NOTE: WIP because that affects the Data Availability Protocol used during # the block import process. secure_padding_fill = [] diff --git a/pallets/system/src/lib.rs b/pallets/system/src/lib.rs index 142e45665..aa104c732 100644 --- a/pallets/system/src/lib.rs +++ b/pallets/system/src/lib.rs @@ -99,11 +99,14 @@ #![warn(unused_extern_crates)] use avail_base::{HeaderExtensionBuilderData, HeaderExtensionDataFilter}; +use avail_core::header::extension::fri::FriHeaderVersion; +use avail_core::header::extension::kzg::KzgHeaderVersion; +use avail_core::header::extension::CommitmentScheme; +use avail_core::FriParamsVersion; use avail_core::{ ensure, header::{Header as DaHeader, HeaderExtension}, traits::{ExtendedBlock, ExtendedHeader}, - HeaderVersion, }; extern crate alloc; @@ -312,8 +315,11 @@ pub mod pallet { /// Default implementations of [`DefaultConfig`], which can be used to implement [`Config`]. 
pub mod config_preludes { - use super::{inject_runtime_type, AccountInfo, BlakeTwo256, DaHeader, DefaultConfig}; - use frame_support::{derive_impl, traits::ConstU32}; + use super::{ + inject_runtime_type, AccountInfo, BlakeTwo256, CommitmentScheme, DaHeader, + DefaultConfig, + }; + use frame_support::{derive_impl, parameter_types, traits::ConstU32}; /// Provides a viable default config that can be used with /// [`derive_impl`](`frame_support::derive_impl`) to derive a testing pallet config @@ -323,6 +329,10 @@ pub mod pallet { /// a downstream user of this particular `TestDefaultConfig` pub struct TestDefaultConfig; + parameter_types! { + pub const DefaultDaCommitmentScheme: CommitmentScheme = CommitmentScheme::Fri; + } + #[frame_support::register_default_impl(TestDefaultConfig)] impl DefaultConfig for TestDefaultConfig { type Nonce = u32; @@ -358,6 +368,7 @@ pub mod pallet { type MaxDiffAppIdPerBlock = ConstU32<1_024>; type MaxTxPerAppIdPerBlock = ConstU32<8_192>; type HeaderExtensionDataFilter = (); + type DaCommitmentScheme = DefaultDaCommitmentScheme; type SingleBlockMigrations = (); type MultiBlockMigrator = (); type PreInherents = (); @@ -465,6 +476,7 @@ pub mod pallet { type MaxDiffAppIdPerBlock = ConstU32<1_024>; type MaxTxPerAppIdPerBlock = ConstU32<8_192>; type HeaderExtensionDataFilter = (); + type DaCommitmentScheme = DefaultDaCommitmentScheme; type SingleBlockMigrations = (); type MultiBlockMigrator = (); type PreInherents = (); @@ -688,6 +700,9 @@ pub mod pallet { #[pallet::constant] type MaxTxPerAppIdPerBlock: Get; + /// Commitment Scheme to be used + #[pallet::constant] + type DaCommitmentScheme: Get; /// All migrations that should run in the next runtime upgrade. /// /// These used to be formerly configured in `Executive`. Parachains need to ensure that @@ -2088,25 +2103,31 @@ impl Pallet { // Code beyond is custom added code for computing the extension. // - let block_length = Self::block_length(); - - let header_extension_builder_data = - HeaderExtensionBuilderData::from_raw_extrinsics::( - block_number, - &extrinsics, - block_length.cols.0, - block_length.rows.0, - ); + let header_extension_builder_data = HeaderExtensionBuilderData::from_raw_extrinsics::< + T::HeaderExtensionDataFilter, + >(block_number, &extrinsics); let extrinsics_root = extrinsics_data_root::(extrinsics); let data_root = header_extension_builder_data.data_root(); - let extension = - native::hosted_header_builder::da::HeaderExtensionBuilder::::build_extension( - header_extension_builder_data.data_submissions, - data_root, - block_length, - HeaderVersion::V4, - ); + let extension = match T::DaCommitmentScheme::get() { + CommitmentScheme::Kzg => { + let block_length = Self::block_length(); + native::hosted_header_builder::da::HeaderExtensionBuilder::::build_kzg_extension( + header_extension_builder_data.data_submissions, + data_root, + block_length, + KzgHeaderVersion::V4, + ) + }, + CommitmentScheme::Fri => { + native::hosted_header_builder::da::HeaderExtensionBuilder::::build_fri_extension( + header_extension_builder_data.data_submissions, + data_root, + FriParamsVersion(0), + FriHeaderVersion::V1, + ) + }, + }; let header = as ExtendedHeader>::new( number, diff --git a/pallets/system/src/native/build_extension.rs b/pallets/system/src/native/build_extension.rs new file mode 100644 index 000000000..7f1b7af69 --- /dev/null +++ b/pallets/system/src/native/build_extension.rs @@ -0,0 +1,152 @@ +// !!!! +// If the logic is changed in this file it will break Turing/Mainnet. Do not change it. 
+// If the logic is changed in avail-core it will break Turing/Mainnet as well. Do not change it.
+// !!!!
+#![cfg(feature = "std")]
+
+use crate::limits::BlockLength;
+use avail_base::header_extension::SubmittedData;
+use avail_core::FriParamsVersion;
+use avail_core::{
+    header::extension as he, kate::COMMITMENT_SIZE, kate_commitment as kc, AppId, DataLookup,
+};
+use he::fri::{FriHeader, FriHeaderVersion};
+use he::fri_v1::{FriBlobCommitment, HeaderExtension as FriV1HeaderExtension};
+use he::{
+    kzg::{KzgHeader, KzgHeaderVersion},
+    HeaderExtension,
+};
+use sp_core::H256;
+use std::vec::Vec;
+
+#[cfg(feature = "testing-environment")]
+use avail_base::testing_env::*;
+
+/// Build a KZG v4 header extension
+pub fn build_kzg_extension(
+    submitted: Vec<SubmittedData>,
+    data_root: H256,
+    block_length: BlockLength,
+    kzg_version: KzgHeaderVersion,
+) -> HeaderExtension {
+    // Blocks with non-DA extrinsics will have empty commitments
+    if submitted.is_empty() {
+        return HeaderExtension::get_empty_kzg(data_root, kzg_version);
+    }
+
+    let max_columns = block_length.cols.0 as usize;
+    if max_columns == 0 {
+        // Blocks with 0 columns will have empty commitments; ideally we should never reach here
+        return HeaderExtension::get_empty_kzg(data_root, kzg_version);
+    }
+
+    let total_commitments: usize = submitted
+        .iter()
+        .map(|da_call| da_call.commitments.len())
+        .sum();
+    let mut commitment = Vec::with_capacity(total_commitments);
+
+    let mut app_rows: Vec<(AppId, usize)> = Vec::with_capacity(submitted.len());
+
+    for da_call in submitted.iter() {
+        commitment.extend(da_call.commitments.clone());
+        // Commitment correctness against the data has already been checked, so the
+        // commitments can safely be assumed correct here
+        let rows_taken = da_call.commitments.len() / COMMITMENT_SIZE;
+
+        // Update app_rows
+        app_rows.push((da_call.id, rows_taken));
+    }
+
+    let app_lookup = match DataLookup::from_id_and_len_iter(app_rows.into_iter()) {
+        Ok(lookup) => lookup,
+        Err(_) => return HeaderExtension::get_faulty_kzg(data_root, kzg_version),
+    };
+
+    let original_rows = app_lookup.len();
+    let padded_rows = original_rows.next_power_of_two();
+
+    // We could reduce the header size further by letting the verification clients do this
+    // padding, since they extend the commitments anyway.
+    if padded_rows > original_rows {
+        let (_, padded_row_commitment) =
+            match kate::gridgen::core::get_pregenerated_row_and_commitment(max_columns) {
+                Ok(result) => result,
+                Err(e) => {
+                    log::error!("NODE_CRITICAL_ERROR_003 - A critical error has occurred: {e:?}.");
+                    log::error!(
+                        "NODE_CRITICAL_ERROR_003 - If you see this, please warn the Avail team and raise an issue."
+                    );
+                    return HeaderExtension::get_faulty_kzg(data_root, kzg_version);
+                },
+            };
+        commitment = commitment
+            .into_iter()
+            .chain(
+                std::iter::repeat(padded_row_commitment)
+                    .take((padded_rows - original_rows) as usize)
+                    .flatten(),
+            )
+            .collect();
+    }
+
+    let commitment = kc::v3::KateCommitment::new(
+        padded_rows.try_into().unwrap_or_default(),
+        max_columns.try_into().unwrap_or_default(),
+        data_root,
+        commitment,
+    );
+
+    // Build the v4 KZG header extension
+    let v4_ext = he::v4::HeaderExtension {
+        app_lookup,
+        commitment,
+    };
+
+    HeaderExtension::Kzg(KzgHeader::from(v4_ext))
+}
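Aside: a self-contained sketch (hypothetical helper, not part of this patch) of the power-of-two padding rule that `build_kzg_extension` applies above. The production code keeps one flat byte vector and `flatten`s the repeated pregenerated commitment; this version pushes whole per-row commitments to keep the arithmetic visible.

fn pad_row_commitments(mut rows: Vec<Vec<u8>>, pregenerated: Vec<u8>) -> Vec<Vec<u8>> {
    let original = rows.len();
    // `next_power_of_two()` returns `original` unchanged when it is already
    // a power of two, so the extend below is a no-op in that case.
    let padded = original.next_power_of_two();
    rows.extend(std::iter::repeat(pregenerated).take(padded - original));
    rows
}

fn main() {
    // 3 rows pad up to 4; commitment bytes are illustrative only.
    let rows = vec![vec![1u8; 4]; 3];
    let padded = pad_row_commitments(rows, vec![0u8; 4]);
    assert_eq!(padded.len(), 4);
}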
+/// Build a Fri header extension (V1) from submitted blobs.
+///
+/// - We expect `submitted[i].commitments` to contain exactly one 32-byte Fri commitment
+///   (Merkle root of the RS codewords). If any entry has len != 32, we log and return a *faulty* header.
+pub fn build_fri_extension(
+    submitted: Vec<SubmittedData>,
+    data_root: H256,
+    params_version: FriParamsVersion,
+    fri_version: FriHeaderVersion,
+) -> HeaderExtension {
+    if submitted.is_empty() {
+        return HeaderExtension::get_empty_fri(data_root, fri_version);
+    }
+
+    // Just do a sanity check, as we can't actually verify the commitments here
+    let fri_v1 = match fri_version {
+        FriHeaderVersion::V1 => {
+            let mut blobs: Vec<FriBlobCommitment> = Vec::with_capacity(submitted.len());
+
+            for (idx, s) in submitted.into_iter().enumerate() {
+                if s.commitments.len() != 32 {
+                    log::error!(
+                        "Fri header: expected 32-byte commitment for blob #{idx}, got {} bytes",
+                        s.commitments.len()
+                    );
+                    return HeaderExtension::get_faulty_fri(data_root, fri_version);
+                }
+
+                blobs.push(FriBlobCommitment {
+                    blob_hash: s.hash,
+                    size_bytes: s.size_bytes,
+                    commitment: s.commitments,
+                });
+            }
+
+            FriV1HeaderExtension {
+                blobs,
+                data_root,
+                params_version,
+            }
+        },
+    };
+
+    HeaderExtension::Fri(FriHeader::V1(fri_v1))
+}
diff --git a/pallets/system/src/native/build_extension_v2.rs b/pallets/system/src/native/build_extension_v2.rs
deleted file mode 100644
index caed9ea0f..000000000
--- a/pallets/system/src/native/build_extension_v2.rs
+++ /dev/null
@@ -1,96 +0,0 @@
-// !!!!
-// If the logic is changed in this file it will break Turing/Mainnet. Do not change it.
-// If the logic is changed in avail-core it will break Turing/Mainnet as well. Do no change it.
-// !!!!
-#![cfg(feature = "std")]
-
-use crate::limits::BlockLength;
-use avail_base::header_extension::SubmittedData;
-use avail_core::{
-    header::{extension as he, HeaderExtension},
-    kate::COMMITMENT_SIZE,
-    kate_commitment as kc, AppId, DataLookup, HeaderVersion,
-};
-use sp_core::H256;
-use std::vec::Vec;
-
-#[cfg(feature = "testing-environment")]
-use avail_base::testing_env::*;
-
-#[allow(unused_mut)]
-pub fn build_extension_v4(
-    mut submitted: Vec<SubmittedData>,
-    data_root: H256,
-    block_length: BlockLength,
-    version: HeaderVersion,
-) -> HeaderExtension {
-    // Blocks with non-DA extrinsics will have empty commitments
-    if submitted.is_empty() {
-        return HeaderExtension::get_empty_header(data_root, version);
-    }
-
-    let max_columns = block_length.cols.0 as usize;
-    if max_columns == 0 {
-        // Blocks with 0 columns will have empty commitments, ideally we should never reach here
-        return HeaderExtension::get_empty_header(data_root, version);
-    }
-
-    let total_commitments: usize = submitted
-        .iter()
-        .map(|da_call| da_call.commitments.len())
-        .sum();
-    let mut commitment = Vec::with_capacity(total_commitments);
-
-    let mut app_rows: Vec<(AppId, usize)> = Vec::with_capacity(submitted.len());
-
-    for da_call in submitted.iter() {
-        commitment.extend(da_call.commitments.clone());
-        // As we have already correctness of commitments against data, we can safely assume that the commitments are correct
-        let rows_taken = da_call.commitments.len() / COMMITMENT_SIZE;
-
-        // Update app_rows
-        app_rows.push((da_call.id, rows_taken));
-    }
-
-    let app_lookup = match DataLookup::from_id_and_len_iter(app_rows.into_iter()) {
-        Ok(lookup) => lookup,
-        Err(_) => return HeaderExtension::get_faulty_header(data_root, version),
-    };
-
-    let original_rows = app_lookup.len();
-    let padded_rows = original_rows.next_power_of_two();
-
-    // We can reduce the header size further letting the verification clients to do this padding since anyway they're extending the commitments
-    if padded_rows > original_rows {
-        let (_, padded_row_commitment) =
-            match
kate::gridgen::core::get_pregenerated_row_and_commitment(max_columns) { - Ok(result) => result, - Err(e) => { - log::error!("NODE_CRITICAL_ERROR_003 - A critical error has occured: {e:?}."); - log::error!("NODE_CRITICAL_ERROR_003 - If you see this, please warn Avail team and raise an issue."); - return HeaderExtension::get_faulty_header(data_root, version); - }, - }; - commitment = commitment - .into_iter() - .chain( - std::iter::repeat(padded_row_commitment) - .take((padded_rows - original_rows) as usize) - .flatten(), - ) - .collect(); - } - - let commitment = kc::v3::KateCommitment::new( - padded_rows.try_into().unwrap_or_default(), - max_columns.try_into().unwrap_or_default(), - data_root, - commitment, - ); - - he::v4::HeaderExtension { - app_lookup, - commitment, - } - .into() -} diff --git a/pallets/system/src/native/hosted_header_builder.rs b/pallets/system/src/native/hosted_header_builder.rs index 77a30ba0d..8a8c52fc9 100644 --- a/pallets/system/src/native/hosted_header_builder.rs +++ b/pallets/system/src/native/hosted_header_builder.rs @@ -1,11 +1,22 @@ -// !!!! -// More info about how runtime interfaces work: (https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/attr.runtime_interface.html -// !!!! +//! Runtime interface and abstraction for building DA header extensions. +//! +//! More info about runtime interfaces: +//! https://docs.rs/sp-runtime-interface/latest/sp_runtime_interface/attr.runtime_interface.html use crate::{limits::BlockLength, Config}; use avail_base::header_extension::SubmittedData; -use avail_core::HeaderVersion; -use avail_core::{header::HeaderExtension, traits::ExtendedHeader}; +use avail_core::{ + header::{ + extension::{ + fri::FriHeaderVersion, + kzg::KzgHeaderVersion, + }, + HeaderExtension, + }, + traits::ExtendedHeader, + FriParamsVersion, +}; + pub use kate::{ metrics::{IgnoreMetrics, Metrics}, Seed, @@ -20,76 +31,109 @@ use sp_std::vec::Vec; pub const MIN_WIDTH: usize = 4; -pub mod da { - use core::marker::PhantomData; +/// Trait used by the runtime to build the DA header extension. +/// +/// This is a thin abstraction over the host call exposed via +/// the `HostedHeaderBuilder` runtime interface. +pub trait HeaderExtensionBuilder { + type Header: ExtendedHeader; - use avail_base::header_extension::SubmittedData; - use avail_core::header::{Header as DaHeader, HeaderExtension}; - use sp_runtime::traits::BlakeTwo256; + /// Build the KZG header extension. + fn build_kzg_extension( + submitted: Vec, + data_root: H256, + block_length: BlockLength, + kzg_version: KzgHeaderVersion, + ) -> HeaderExtension; + + /// Build the FRI header extension. + fn build_fri_extension( + submitted: Vec, + data_root: H256, + params_version: FriParamsVersion, + fri_version: FriHeaderVersion, + ) -> HeaderExtension; +} +/// Runtime-side DA header builder. +pub mod da { use super::*; + use avail_core::header::Header as DaHeader; + use core::marker::PhantomData; + use sp_runtime::traits::BlakeTwo256; - pub type Hash = sp_core::H256; pub type BlockNumber = u32; - /// avail-node Header builder. + /// Runtime-facing header extension builder. 
pub struct HeaderExtensionBuilder(PhantomData); impl super::HeaderExtensionBuilder for HeaderExtensionBuilder { type Header = DaHeader; #[inline] - fn build_extension( + fn build_kzg_extension( submitted: Vec, data_root: H256, block_length: BlockLength, - version: HeaderVersion, + kzg_version: KzgHeaderVersion, ) -> HeaderExtension { - super::hosted_header_builder::build_extension( + super::hosted_header_builder::build_kzg_extension( submitted, data_root, block_length, - version, + kzg_version, ) + .into() } - } -} - -/// Trait for header builder. -pub trait HeaderExtensionBuilder { - type Header: ExtendedHeader; - /// Creates the header using the given parameters. - fn build_extension( - app_extrinsics: Vec, - data_root: H256, - block_length: BlockLength, - version: HeaderVersion, - ) -> HeaderExtension; + #[inline] + fn build_fri_extension( + submitted: Vec, + data_root: H256, + params_version: FriParamsVersion, + fri_version: FriHeaderVersion, + ) -> HeaderExtension { + super::hosted_header_builder::build_fri_extension( + submitted, + data_root, + params_version, + fri_version, + ) + .into() + } + } } -/// Hosted function to build the header using `kate` commitments. +/// Runtime interface forwarding header construction to the host. +/// +/// The actual implementation lives in `crate::native::build_extension`. #[runtime_interface] pub trait HostedHeaderBuilder { - fn build_extension( + fn build_kzg_extension( submitted: PassFatPointerAndDecode>, data_root: PassFatPointerAndDecode, block_length: PassFatPointerAndDecode, - version: PassFatPointerAndDecode, + kzg_version: PassFatPointerAndDecode, ) -> AllocateAndReturnByCodec { - #[cfg(feature = "std")] - { - return crate::native::build_extension_v2::build_extension_v4( - submitted.to_vec(), - data_root, - block_length, - version, - ); - } + crate::native::build_extension::build_kzg_extension( + submitted.to_vec(), + data_root, + block_length, + kzg_version, + ) + } - #[cfg(not(feature = "std"))] - { - return HeaderExtension::get_faulty_header(data_root, version); - } + fn build_fri_extension( + submitted: PassFatPointerAndDecode>, + data_root: PassFatPointerAndDecode, + params_version: PassFatPointerAndDecode, + fri_version: PassFatPointerAndDecode, + ) -> AllocateAndReturnByCodec { + crate::native::build_extension::build_fri_extension( + submitted.to_vec(), + data_root, + params_version, + fri_version, + ) } -} +} \ No newline at end of file diff --git a/pallets/system/src/native/mod.rs b/pallets/system/src/native/mod.rs index bf4d8cfce..0bf811a2d 100644 --- a/pallets/system/src/native/mod.rs +++ b/pallets/system/src/native/mod.rs @@ -1,4 +1,4 @@ #[cfg(feature = "std")] -pub mod build_extension_v2; +pub mod build_extension; pub mod hosted_header_builder; diff --git a/pallets/vector/Cargo.toml b/pallets/vector/Cargo.toml index ee358af70..2725f0d73 100644 --- a/pallets/vector/Cargo.toml +++ b/pallets/vector/Cargo.toml @@ -29,7 +29,7 @@ ark-std.workspace = true ark-ff.workspace = true serde.workspace = true serde_json.workspace = true -log.workspace = true +log = { workspace = true, default-features = false } hex-literal.workspace = true trie-db.workspace = true rlp.workspace = true @@ -51,6 +51,7 @@ std = [ "frame-benchmarking?/std", "frame-support/std", "frame-system/std", + "log/std", "scale-info/std", "sp-core/std", "sp-io/std", diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 8bd09f055..e3efa6835 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -25,7 +25,7 @@ pallet-vector = { workspace = true, default-features = 
false }

# External
static_assertions.workspace = true
-log.workspace = true
+log = { workspace = true, default-features = false }
hex-literal.workspace = true
rayon = { workspace = true, optional = true }
serde = { workspace = true, optional = true, features = ["derive"] }
@@ -35,7 +35,7 @@ derive_more.workspace = true

# Substrate
scale-info = { workspace = true, default-features = false, features = [ "derive" ] }
-codec = { package = "parity-scale-codec", version = "3", default-features = false, features = [ "derive", ] }
+codec = { package = "parity-scale-codec", version = "3", default-features = false, features = [ "derive", "max-encoded-len"] }
binary-merkle-tree = { workspace = true, default-features = false }

## primitives
@@ -107,6 +107,7 @@ frame-system-benchmarking = { workspace = true, default-features = false, option
substrate-wasm-builder = { workspace = true, default-features = false }

[dev-dependencies]
+da-commitment.workspace = true
bounded-collections.workspace = true
test-case.workspace = true
hex.workspace = true
@@ -192,6 +193,7 @@ std = [
 	"rayon",
 	"scale-info/std",
 	"serde",
+	"serde_json/std",
 	"sp-api/std",
 	"sp-authority-discovery/std",
 	"sp-block-builder/std",
diff --git a/runtime/src/apis.rs b/runtime/src/apis.rs
index 9746d6359..60f826e3e 100644
--- a/runtime/src/apis.rs
+++ b/runtime/src/apis.rs
@@ -1,36 +1,15 @@
-// use super::kate::{Error as RTKateError, GDataProof, GRow};
 use crate::LOG_TARGET;
 use crate::{
-	constants,
-	// kate::{GCellBlock, GMultiProof},
-	mmr,
-	version::VERSION,
-	AccountId,
-	AuthorityDiscovery,
-	Babe,
-	Block,
-	BlockNumber,
-	EpochDuration,
-	Executive,
-	Grandpa,
-	Historical,
-	Index,
-	InherentDataExt,
-	Mmr,
-	NominationPools,
-	OpaqueMetadata,
-	Runtime,
-	RuntimeCall,
-	RuntimeGenesisConfig,
-	SessionKeys,
-	Staking,
-	System,
+	constants, mmr, version::VERSION, AccountId, AuthorityDiscovery, Babe, Block, BlockNumber,
+	EpochDuration, Executive, Grandpa, Historical, Index, InherentDataExt, Mmr, NominationPools,
+	OpaqueMetadata, Runtime, RuntimeCall, RuntimeGenesisConfig, SessionKeys, Staking, System,
 	TransactionPayment,
 };
 use avail_base::{HeaderExtensionBuilderData, ProvidePostInherent};
 use avail_core::{
 	currency::Balance,
 	data_proof::{DataProof, ProofResponse, SubTrie},
+	header::extension::CommitmentScheme,
 	header::HeaderExtension,
 };
 use sp_runtime::OpaqueExtrinsic;
@@ -72,7 +51,6 @@ decl_runtime_apis! {
 		block_length: BlockLength,
 		block_number: u32,
 	) -> HeaderExtension;
-
 	fn build_data_root(block: u32, extrinsics: Vec) -> H256;
 	fn check_if_extrinsic_is_vector_post_inherent(uxt: &::Extrinsic) -> bool;
 	fn check_if_extrinsic_is_da_post_inherent(uxt: &::Extrinsic) -> bool;
@@ -108,6 +86,9 @@ decl_runtime_apis! {
 		/// Get the blob vouch fee reserve amount
 		fn get_blob_vouch_fee_reserve() -> u128;
+
+		/// Get the commitment scheme active in the Runtime
+		fn commitement_scheme() -> CommitmentScheme;
 	}
 }
@@ -460,10 +441,7 @@
 	#[api_version(4)]
 	impl crate::apis::ExtensionBuilder for Runtime {
 		fn build_data_root(block: u32, extrinsics: Vec) -> H256 {
-			let bl = frame_system::Pallet::::block_length();
-			let cols = bl.cols.0;
-			let rows = bl.rows.0;
-			HeaderExtensionBuilderData::from_opaque_extrinsics::(block, &extrinsics, cols, rows).data_root()
+			HeaderExtensionBuilderData::from_opaque_extrinsics::(block, &extrinsics).data_root()
 		}

 		fn build_extension(
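Aside: a self-contained sketch (hypothetical stand-ins, not part of this patch) of the `Get`-style constant behind `T::DaCommitmentScheme`, which the new runtime API above surfaces and which the pallet's `build_extension` matches on to pick the KZG or FRI builder.

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum CommitmentScheme {
    Kzg,
    Fri,
}

// Stand-in for `frame_support::traits::Get`.
trait Get<T> {
    fn get() -> T;
}

// Mirrors the `parameter_types!`-generated type in the test default config,
// which selects FRI.
struct DefaultDaCommitmentScheme;

impl Get<CommitmentScheme> for DefaultDaCommitmentScheme {
    fn get() -> CommitmentScheme {
        CommitmentScheme::Fri
    }
}

fn main() {
    match DefaultDaCommitmentScheme::get() {
        CommitmentScheme::Kzg => println!("would call build_kzg_extension"),
        CommitmentScheme::Fri => println!("would call build_fri_extension"),
    }
}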
@@ -559,14 +537,15 @@ impl_runtime_apis! {
 		fn get_blob_vouch_fee_reserve() -> u128 {
 			crate::constants::da::BlobVouchFeeReserve::get()
 		}
+
+		fn commitement_scheme() -> CommitmentScheme {
+			::DaCommitmentScheme::get()
+		}
 	}

 	impl crate::apis::KateApi for Runtime {
 		fn data_proof(block_number: u32, extrinsics: Vec, tx_idx: u32) -> Option {
-			let bl = frame_system::Pallet::::block_length();
-			let cols = bl.cols.0;
-			let rows = bl.rows.0;
-			let data = HeaderExtensionBuilderData::from_opaque_extrinsics::(block_number, &extrinsics, cols, rows);
+			let data = HeaderExtensionBuilderData::from_opaque_extrinsics::(block_number, &extrinsics);
 			let (leaf_idx, sub_trie) = data.leaf_idx(tx_idx)?;
 			log::trace!(
 				target: LOG_TARGET,
@@ -595,8 +574,7 @@ impl_runtime_apis! {
 		}

 		fn inclusion_proof(extrinsics: Vec, blob_hash: H256) -> Option {
-			// TODO: block_number, rows & cols has no significance in this case, should be refactored later
-			let builder_data = HeaderExtensionBuilderData::from_opaque_extrinsics::(0, &extrinsics, 0, 0);
+			let builder_data = HeaderExtensionBuilderData::from_opaque_extrinsics::(0, &extrinsics);

 			let (leaf_idx, sub_trie) = builder_data.leaf_idx_by_hash(blob_hash)?;
@@ -643,9 +621,8 @@ impl_runtime_apis! {
 			bool,
 			Option,
 			Vec<(AccountId32, AuthorityDiscoveryId, String, Vec)>,
-		)>,
-		total_blob_size: u64,
-		) -> Vec<::Extrinsic> {
+			Option>,
+		)>, total_blob_size: u64) -> Vec<::Extrinsic> {
 			// 1. Vector pallet post-inherent extrinsics (unsigned, bare)
 			let mut post_inherent_extrinsics: Vec<::Extrinsic> =
 				pallet_vector::Pallet::::create_inherent(&data)
diff --git a/runtime/src/extensions/check_batch_transactions.rs b/runtime/src/extensions/check_batch_transactions.rs
index b56fffcfb..2e0af2e5c 100644
--- a/runtime/src/extensions/check_batch_transactions.rs
+++ b/runtime/src/extensions/check_batch_transactions.rs
@@ -71,12 +71,11 @@ where
 	+ IsSubType>,
 	[u8; 32]: From<::AccountId>,
 {
-	pub fn is_submit_data_call(&self) -> bool {
-		matches!(self.0.is_sub_type(), Some(DACall::::submit_data { .. }))
-			|| matches!(
-				self.0.is_sub_type(),
-				Some(DACall::::submit_blob_metadata { .. })
-			)
+	pub fn is_da_call(&self) -> bool {
+		matches!(
+			self.0.is_sub_type(),
+			Some(DACall::::submit_blob_metadata { ..
}) + ) } pub fn is_send_message_call(&self) -> bool { @@ -244,7 +243,7 @@ where let call = WrappedCall::(call); ensure!( - !call.is_submit_data_call(), + !call.is_da_call(), InvalidTransaction::Custom(UnexpectedSubmitDataCall as u8) ); ensure!( @@ -639,10 +638,14 @@ mod tests { RuntimeCall::System(SysCall::remark { remark: vec![] }) } - fn submit_data_call() -> RuntimeCall { - RuntimeCall::DataAvailability(DACall::submit_data { + fn submit_blob_metadata_call() -> RuntimeCall { + RuntimeCall::DataAvailability(DACall::submit_blob_metadata { app_id: AppId(1), - data: vec![].try_into().unwrap(), + blob_hash: H256::zero(), + size: 0, + commitment: Vec::new(), + eval_claim: None, + eval_point_seed: None, }) } @@ -697,14 +700,14 @@ mod tests { ); } - #[test_case(submit_data_call() => Ok(ValidTransaction::default()); "Single Submit Data call should be allowed" )] + #[test_case(submit_blob_metadata_call() => Ok(ValidTransaction::default()); "Single Submit Data call should be allowed" )] #[test_case(send_message_call() => Ok(ValidTransaction::default()); "Single Send Message call should be allowed" )] #[test_case(remark_call() => Ok(ValidTransaction::default()); "Single Non-Submit-Data and Non-Send-Message call should be allowed" )] fn test_single_call(call: RuntimeCall) -> TransactionValidity { validate(call) } - #[test_case(vec![remark_call(), submit_data_call()] => to_invalid_tx(UnexpectedSubmitDataCall); "Submit Data call inside a Batch call should be blocked" )] + #[test_case(vec![remark_call(), submit_blob_metadata_call()] => to_invalid_tx(UnexpectedSubmitDataCall); "Submit Data call inside a Batch call should be blocked" )] #[test_case(vec![remark_call(), send_message_call()] => to_invalid_tx(UnexpectedSendMessageCall); "Send Message call inside a Batch call should be blocked" )] #[test_case(vec![remark_call(), remark_call()] => Ok(ValidTransaction::default()); "Non-Submit-Data and Non-Send-Message call inside a Batch call should be allowed" )] fn test_batch_call(calls: Vec) -> TransactionValidity { diff --git a/runtime/src/header_extension_builder_data_tests.rs b/runtime/src/header_extension_builder_data_tests.rs index 47e64f61c..22860aefd 100644 --- a/runtime/src/header_extension_builder_data_tests.rs +++ b/runtime/src/header_extension_builder_data_tests.rs @@ -4,24 +4,25 @@ use crate::{Runtime, SignedExtra, UncheckedExtrinsic}; use avail_base::HeaderExtensionBuilderData; use avail_core::data_proof::{BoundedData, Message, TxDataRoots}; -use da_control::{AppDataFor, Call as DaCall}; -use frame_system::limits::BlockLength; +use da_control::Call as DaCall; use frame_system::{ CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, CheckSpecVersion, CheckTxVersion, CheckWeight, }; -use pallet_balances::Call as BalancesCall; +// use pallet_balances::Call as BalancesCall; use pallet_vector::Call as VectorCall; use avail_core::data_proof::AddressedMessage; use avail_core::data_proof::SubTrie; use binary_merkle_tree::{verify_proof, Leaf, MerkleProof}; use codec::{Compact, Encode}; +use da_commitment::build_kzg_commitments::build_da_commitments; use derive_more::Constructor; use hex_literal::hex; use pallet_transaction_payment::ChargeTransactionPayment; +use sp_core::keccak_256; use sp_core::H256; -use sp_keyring::Sr25519Keyring::{Alice, Bob}; +use sp_keyring::AccountKeyring::Alice; use sp_runtime::traits::Keccak256; use sp_runtime::{ generic::Era, @@ -57,11 +58,8 @@ pub fn calls_proof( extrinsics: &[Vec], leaf_idx: usize, call_type: SubTrie, - cols: u32, - rows: u32, ) -> Option { - let tx_data = - 
HeaderExtensionBuilderData::from_raw_extrinsics::(block, &extrinsics, cols, rows);
+	let tx_data = HeaderExtensionBuilderData::from_raw_extrinsics::(block, &extrinsics);
 	let message = tx_data
 		.bridge_messages
 		.get(leaf_idx)
@@ -138,28 +136,33 @@
 fn signed_extrinsic(function: RuntimeCall) -> Vec {
 	UncheckedExtrinsic::new_signed(function, alice.into(), signature, extra).encode()
 }

-fn submit_data(data: Vec) -> Vec {
-	let data = AppDataFor::::truncate_from(data);
-	let function = DaCall::submit_data {
+fn submit_blob_metadata(data: Vec) -> Vec {
+	let blob_hash = H256::from(keccak_256(&data));
+	let commitment = build_da_commitments(&data, 1024, 4096, Seed::default());
+	let function = DaCall::submit_blob_metadata {
 		app_id: AppId(0),
-		data,
+		blob_hash,
+		size: data.len() as u64,
+		commitment,
+		eval_point_seed: None,
+		eval_claim: None,
 	}
 	.into();

 	signed_extrinsic(function)
 }

-fn transfer_keep_alive() -> Vec {
-	let bob = Bob.to_account_id();
-	let amount = 1 * AVAIL;
-	let function = BalancesCall::transfer_keep_alive {
-		dest: bob.into(),
-		value: amount,
-	}
-	.into();
+// fn transfer_keep_alive() -> Vec {
+// 	let bob = Bob.to_account_id();
+// 	let amount = 1 * AVAIL;
+// 	let function = BalancesCall::transfer_keep_alive {
+// 		dest: bob.into(),
+// 		value: amount,
+// 	}
+// 	.into();

-	signed_extrinsic(function)
-}
+// 	signed_extrinsic(function)
+// }

 fn bridge_msg(data: Vec) -> Vec {
 	let message = Message::ArbitraryMessage(BoundedData::truncate_from(data));
@@ -201,19 +204,16 @@
 fn empty_root() -> H256 {
 }

 // Data root tests
-#[test_case(&[submit_data(hex!("abcd").to_vec())] => H256(hex!("f1f399f7e0d8c8ed712df0c21b4ec78f3b8533f1c3d0215e4023e1b7c80bfd91")); "submitted")]
-#[test_case(&[submit_data(vec![])] => empty_root(); "empty submitted")]
+#[test_case(&[submit_blob_metadata(hex!("abcd").to_vec())] => H256(hex!("f1f399f7e0d8c8ed712df0c21b4ec78f3b8533f1c3d0215e4023e1b7c80bfd91")); "submitted")]
+// We won't allow an empty blob to be submitted, so this case is not possible
+// #[test_case(&[submit_blob_metadata(vec![])] => empty_root(); "empty submitted")]
 #[test_case(&[] => empty_root(); "empty submitted 2")]
 #[test_case(&[bridge_msg(hex!("47").to_vec())] => H256(hex!("df93f65f9f5adf3ac0d46e5a08432b96ef362bf229e1737939051884c5506e02")); "bridged data")]
 #[test_case(&[bridge_fungible_msg(H256::repeat_byte(1), 1_000_000)] => H256(hex!("e93394eeaedb2158a154a29b9333fe06451fbe82c9cff5b961a6d701782450bc")) ; "bridged fungible")]
-#[test_case(&[submit_data(hex!("abcd").to_vec()), bridge_msg(hex!("47").to_vec())] => H256(hex!("c925bfccfc86f15523c5b40b2bd6d8a66fc51f3d41176d77be7928cb9e3831a7")); "submitted and bridged")]
+#[test_case(&[submit_blob_metadata(hex!("abcd").to_vec()), bridge_msg(hex!("47").to_vec())] => H256(hex!("c925bfccfc86f15523c5b40b2bd6d8a66fc51f3d41176d77be7928cb9e3831a7")); "submitted and bridged")]
 fn data_root_filter(extrinsics: &[Vec]) -> H256 {
 	new_test_ext().execute_with(|| {
-		let bl = BlockLength::default();
-		HeaderExtensionBuilderData::from_raw_extrinsics::(
-			0, extrinsics, bl.cols.0, bl.rows.0,
-		)
-		.data_root()
+		HeaderExtensionBuilderData::from_raw_extrinsics::(0, extrinsics).data_root()
 	})
 }

@@ -392,15 +392,9 @@ mod bridge_tests {
 		let extrinsics = vec![
 			bridge_msg(b"123".to_vec()),
 			bridge_fungible_msg(H256::zero(), 42_000_000_000_000_000_000u128),
-			submit_data(hex!("abcd").to_vec()),
+			submit_blob_metadata(hex!("abcd").to_vec()),
 		];
-		let bl = BlockLength::default();
-		let data = HeaderExtensionBuilderData::from_raw_extrinsics::(
-			0,
-			&extrinsics,
-			bl.cols.0,
-
bl.rows.0, - ); + let data = HeaderExtensionBuilderData::from_raw_extrinsics::(0, &extrinsics); let expected = expected_send_arbitrary_data(); assert_eq!(data.bridge_messages, expected.bridge_messages); assert_eq!(data.roots().bridge_root, expected.roots().bridge_root); @@ -411,14 +405,8 @@ mod bridge_tests { #[test] fn bridge_message_is_empty() { new_test_ext().execute_with(|| { - let extrinsics: Vec> = vec![submit_data(hex!("abcd").to_vec())]; - let bl = BlockLength::default(); - let data = HeaderExtensionBuilderData::from_raw_extrinsics::( - 0, - &extrinsics, - bl.cols.0, - bl.rows.0, - ); + let extrinsics: Vec> = vec![submit_blob_metadata(hex!("abcd").to_vec())]; + let data = HeaderExtensionBuilderData::from_raw_extrinsics::(0, &extrinsics); let expected = HeaderExtensionBuilderData::default(); assert_eq!(data.bridge_messages, expected.bridge_messages); assert_eq!(data.roots().bridge_root, expected.roots().bridge_root); @@ -432,16 +420,10 @@ mod bridge_tests { let extrinsics = vec![ bridge_msg(b"123".to_vec()), bridge_fungible_msg(H256::zero(), 42_000_000_000_000_000_000u128), - submit_data(hex!("abcd").to_vec()), + submit_blob_metadata(hex!("abcd").to_vec()), bridge_failed_send_message_txs(vec![0, 1]), ]; - let bl = BlockLength::default(); - let data = HeaderExtensionBuilderData::from_raw_extrinsics::( - 0, - &extrinsics, - bl.cols.0, - bl.rows.0, - ); + let data = HeaderExtensionBuilderData::from_raw_extrinsics::(0, &extrinsics); let expected: HeaderExtensionBuilderData = HeaderExtensionBuilderData::default(); assert_eq!(data.bridge_messages, expected.bridge_messages); assert_eq!(data.roots().bridge_root, expected.roots().bridge_root); @@ -456,28 +438,19 @@ mod bridge_tests { bridge_msg(b"123".to_vec()), bridge_fungible_msg(H256::zero(), 42_000_000_000_000_000_000u128), bridge_fungible_msg(H256::zero(), 42_000_000_000_000_000_000u128), - submit_data(hex!("abcd").to_vec()), + submit_blob_metadata(hex!("abcd").to_vec()), bridge_failed_send_message_txs(vec![1, 2]), ]; - let bl = BlockLength::default(); - let data_1 = HeaderExtensionBuilderData::from_raw_extrinsics::( - 0, - &extrinsics_1, - bl.cols.0, - bl.rows.0, - ); + let data_1 = + HeaderExtensionBuilderData::from_raw_extrinsics::(0, &extrinsics_1); let extrinsics_2 = vec![ bridge_msg(b"123".to_vec()), - submit_data(hex!("abcd").to_vec()), + submit_blob_metadata(hex!("abcd").to_vec()), bridge_failed_send_message_txs(vec![]), ]; - let data_2 = HeaderExtensionBuilderData::from_raw_extrinsics::( - 0, - &extrinsics_2, - bl.cols.0, - bl.rows.0, - ); + let data_2 = + HeaderExtensionBuilderData::from_raw_extrinsics::(0, &extrinsics_2); assert_eq!(data_1.bridge_messages, data_2.bridge_messages); assert_eq!(data_1.roots(), data_2.roots()); @@ -493,13 +466,7 @@ mod bridge_tests { bridge_msg(b"123".to_vec()), bridge_msg(b"123".to_vec()), ]; - let bl = BlockLength::default(); - let data = HeaderExtensionBuilderData::from_raw_extrinsics::( - 0, - &extrinsics, - bl.cols.0, - bl.rows.0, - ); + let data = HeaderExtensionBuilderData::from_raw_extrinsics::(0, &extrinsics); let merkle_proof = data.bridged_proof_of(2).unwrap(); let proof = merkle_proof.proof; let expected_first_item_in_proof = H256(keccak_256(H256::zero().as_bytes())); @@ -521,11 +488,8 @@ mod data_root { fn get_calls_proof( extrinsics: &[Vec], leaf_idx: usize, - cols: u32, - rows: u32, ) -> Option<(MerkleProof>, H256)> { - let calls_proof = - calls_proof(0, &extrinsics, leaf_idx, SubTrie::DataSubmit, cols, rows).unwrap(); + let calls_proof = calls_proof(0, &extrinsics, leaf_idx, 
SubTrie::DataSubmit).unwrap();
 		let proof = calls_proof.proof;
 		let root = calls_proof.root;
@@ -548,10 +512,9 @@
 			hex!("40105d5bc10105c17fd72b93a8f73369e2ee6eee4d4714b7bf7bf3c2f156e601")
 		);

-		let extrinsics: Vec> = vec![submit_data("0".into())];
+		let extrinsics: Vec> = vec![submit_blob_metadata("0".into())];

-		let bl = BlockLength::default();
-		let (da_proof, root) = get_calls_proof(&extrinsics, 0, bl.cols.0, bl.rows.0)
+		let (da_proof, root) = get_calls_proof(&extrinsics, 0)
 			.expect("Proof not generated for the transaction index 0!");

 		assert_eq!(root, H256::zero());
@@ -569,14 +532,16 @@
 			hex!("db0ccc7a2d6559682303cc9322d4b79a7ad619f0c87d5f94723a33015550a64e");
 		let exp_proof_0 = hex!("4aeff0db81e3146828378be230d377356e57b6d599286b4b517dbf8941b3e1b2");

-		let extrinsics: Vec> = vec![submit_data("0".into()), submit_data("1".into())];
+		let extrinsics: Vec> = vec![
+			submit_blob_metadata("0".into()),
+			submit_blob_metadata("1".into()),
+		];

 		// leaf 0 keccak256(044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116d)
 		// 40105d5bc10105c17fd72b93a8f73369e2ee6eee4d4714b7bf7bf3c2f156e601
 		// leaf 1 keccak256(c89efdaa54c0f20c7adf612882df0950f5a951637e0307cdcb4c672f298b8bc6)
 		// 4aeff0db81e3146828378be230d377356e57b6d599286b4b517dbf8941b3e1b2

-		let bl = BlockLength::default();
-		let (da_proof, root) = get_calls_proof(&extrinsics, 0, bl.cols.0, bl.rows.0)
+		let (da_proof, root) = get_calls_proof(&extrinsics, 0)
 			.expect("Proof not generated for the transaction index 0!");

 		assert_eq!(root, H256::zero());
@@ -592,10 +557,11 @@
 	fn test_left_data_proof_with_skipped_tx() {
 		new_test_ext().execute_with(|| {
 			let extrinsics: Vec> = vec![
-				submit_data("0".into()),
-				submit_data("".into()),
-				submit_data("1".into()),
-				submit_data("2".into()),
+				submit_blob_metadata("0".into()),
+				// Again, this kind of tx is not allowed, so we can ignore it
+				// submit_blob_metadata("".into()),
+				submit_blob_metadata("1".into()),
+				submit_blob_metadata("2".into()),
 			];

 			// leaf 0 keccak256(044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116d)
@@ -613,8 +579,7 @@
 			// data_root keccak256(db0ccc7a2d6559682303cc9322d4b79a7ad619f0c87d5f94723a33015550a64e, 3c86bde3a90d18efbcf23e27e9b6714012aa055263fe903a72333aa9caa37f1b)
 			// (877f9ed6aa67f160e9b9b7794bb851998d15b65d11bab3efc6ff444339a3d750)

-			let bl = BlockLength::default();
-			let (da_proof, root) = get_calls_proof(&extrinsics, 0, bl.cols.0, bl.rows.0)
+			let (da_proof, root) = get_calls_proof(&extrinsics, 0)
 				.expect("Proof not generated for the transaction index 0!");

 			let exp_proof_root =
@@ -633,7 +598,7 @@
 			assert_eq!(exp_leaf_0, da_proof.leaf.as_slice());
 			assert_eq!(da_proof.number_of_leaves, 4);

-			let (da_proof, root) = get_calls_proof(&extrinsics, 1, bl.cols.0, bl.rows.0)
+			let (da_proof, root) = get_calls_proof(&extrinsics, 1)
 				.expect("Proof not generated for the transaction index 0!");
 			let exp_proof_2: [H256; 2] = [
 				hex!("40105d5bc10105c17fd72b93a8f73369e2ee6eee4d4714b7bf7bf3c2f156e601").into(),
@@ -646,7 +611,7 @@
 			assert_eq!(exp_proof_2, da_proof.proof.as_slice());
 			assert_eq!(da_proof.number_of_leaves, 4);

-			let (da_proof, root) = get_calls_proof(&extrinsics, 2, bl.cols.0, bl.rows.0)
+			let (da_proof, root) = get_calls_proof(&extrinsics, 2)
 				.expect("Proof not generated for the transaction index 0!");

 			assert_eq!(root, H256::zero());
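Aside: a minimal sketch (assuming the `binary-merkle-tree` crate and the `Keccak256` hasher this test module already imports) of how the data-root tests above derive their expected values: each leaf is keccak-hashed and folded into a balanced binary Merkle root.

use binary_merkle_tree::merkle_root;
use sp_runtime::traits::Keccak256;

fn main() {
    // Illustrative leaves only; the real tests feed encoded blob data.
    let leaves: Vec<Vec<u8>> = vec![b"0".to_vec(), b"1".to_vec()];
    let root = merkle_root::<Keccak256, _>(leaves);
    println!("data sub-trie root: {root:?}");
}

diff --git a/runtime/src/impls_tests.rs b/runtime/src/impls_tests.rs
index 64750480a..9b45be21c 100644
--- a/runtime/src/impls_tests.rs
+++ 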
b/runtime/src/impls_tests.rs @@ -626,7 +626,7 @@ mod measure_full_block_size { use avail_core::{currency::AVAIL, from_substrate::keccak_256, AppId}; use codec::Encode; use da_control::{ - extensions::native::hosted_commitment_builder::build_da_commitments, BlobTxSummaryRuntime, + extensions::native::hosted_commitment_builder::build_kzg_commitments, BlobTxSummaryRuntime, }; use frame_support::{ // dispatch::GetDispatchInfo, @@ -719,7 +719,7 @@ mod measure_full_block_size { let rows = block_length.rows.0; let seed = kate::Seed::default(); let blob_hash = H256(keccak_256(&blob)); - let commitment = build_da_commitments(&blob, cols, rows, seed); + let commitment = build_kzg_commitments(&blob, cols, rows, seed); let mut blob_txs_summary: Vec = vec![]; let ownership = sample_ownerships(); @@ -731,6 +731,8 @@ mod measure_full_block_size { blob_hash, size: tx_size, commitment: commitment.clone(), + eval_point_seed: None, + eval_claim: None, }, ); @@ -770,6 +772,7 @@ mod measure_full_block_size { success: true, reason: None, ownership: ownership.clone(), + eval_proof: None, }); }, Err(e) => match e { diff --git a/runtime/src/transaction_filter.rs b/runtime/src/transaction_filter.rs index 7d84a9fbe..250fb1af6 100644 --- a/runtime/src/transaction_filter.rs +++ b/runtime/src/transaction_filter.rs @@ -1,17 +1,16 @@ use crate::{opaque_to_unchecked, unchecked_get_caller, AccountId, Runtime, RuntimeCall as Call}; use avail_base::header_extension::{ - BridgedData, ExtractedTxData, HeaderExtensionDataFilter, SubmittedData, + BridgedData, ExtractedTxData, HeaderExtensionDataFilter, PostInherentInfo, SubmittedData, }; use avail_core::data_proof::{tx_uid, AddressedMessage}; use sp_runtime::OpaqueExtrinsic; use da_control::Call as DACall; -use kate::Seed; use pallet_multisig::Call as MultisigCall; use pallet_proxy::Call as ProxyCall; use pallet_vector::Call as VectorCall; use sp_core::H256; -use sp_io::hashing::keccak_256; +use sp_std::collections::btree_map::BTreeMap; use sp_std::vec::Vec; const MAX_FILTER_ITERATIONS: usize = 3; @@ -20,12 +19,10 @@ const MAX_FILTER_ITERATIONS: usize = 3; /// Handles N levels of nesting in case those calls are wrapped in proxy / multisig calls. 
impl HeaderExtensionDataFilter for Runtime { fn filter( - failed_transactions: &[u32], + post_inherent_info: PostInherentInfo, opaque: OpaqueExtrinsic, block: u32, tx_index: usize, - cols: u32, - rows: u32, ) -> Option { let res = opaque_to_unchecked(&opaque); match res { @@ -37,7 +34,7 @@ impl HeaderExtensionDataFilter for Runtime { if nb_iterations > 0 { match final_call { Call::Vector(call) => filter_vector_call( - failed_transactions, + &post_inherent_info.failed, maybe_caller.as_ref(), call, block, @@ -48,14 +45,14 @@ impl HeaderExtensionDataFilter for Runtime { } else { match final_call { Call::Vector(call) => filter_vector_call( - failed_transactions, + &post_inherent_info.failed, maybe_caller.as_ref(), call, block, tx_index, ), Call::DataAvailability(call) => { - filter_da_call(call, tx_index, failed_transactions, cols, rows) + filter_da_call(call, tx_index, post_inherent_info) }, _ => None, } @@ -70,11 +67,12 @@ impl HeaderExtensionDataFilter for Runtime { } } - fn get_failed_transaction_ids(opaques: &[OpaqueExtrinsic]) -> Vec { - let mut failed_tx = Vec::new(); + fn get_data_from_post_inherents(opaques: &[OpaqueExtrinsic]) -> PostInherentInfo { + let mut failed = Vec::new(); + let mut eval_proofs = BTreeMap::new(); let len = opaques.len(); if len == 0 { - return failed_tx; + return PostInherentInfo::default(); } // Vector failed transactions @@ -83,7 +81,7 @@ impl HeaderExtensionDataFilter for Runtime { &unchecked_extrinsic.function { let failed_vector_tx = failed_txs.iter().map(|c| c.0).collect::>(); - failed_tx.extend(failed_vector_tx); + failed.extend(failed_vector_tx); }; }; @@ -96,18 +94,22 @@ impl HeaderExtensionDataFilter for Runtime { blob_txs_summary, }) = &unchecked_extrinsic.function { - let failed_tx_da: Vec = blob_txs_summary - .iter() - .filter(|summary| !summary.success) - .map(|summary| summary.tx_index) - .collect(); - - failed_tx.extend(failed_tx_da); + for summary in blob_txs_summary { + if let Some(proof) = &summary.eval_proof { + eval_proofs.insert(summary.tx_index, proof.clone()); + } + if !summary.success { + failed.push(summary.tx_index); + } + } }; } } - failed_tx + PostInherentInfo { + failed, + eval_proofs, + } } } @@ -115,46 +117,48 @@ impl HeaderExtensionDataFilter for Runtime { fn filter_da_call( call: &DACall, tx_index: usize, - failed_transactions: &[u32], - cols: u32, - rows: u32, + post_inherent_info: PostInherentInfo, ) -> Option { let tx_index = u32::try_from(tx_index).ok()?; - if failed_transactions.contains(&tx_index) { + if post_inherent_info.failed.contains(&tx_index) { return None; } - let (app_id, blob_hash, commitment) = match call { + let (app_id, blob_hash, size_bytes, commitment, eval_point_seed, eval_claim) = match call { DACall::submit_blob_metadata { app_id, blob_hash, commitment, - size: _, + size, + eval_point_seed, + eval_claim, } => { if commitment.is_empty() { return None; } - (*app_id, *blob_hash, commitment.clone()) - }, - DACall::submit_data { app_id, data } => { - if data.is_empty() { - return None; - } - let blob_hash = H256(keccak_256(data)); - let commitment = - da_control::extensions::native::hosted_commitment_builder::build_da_commitments( - data, - cols, - rows, - Seed::default(), - ); - (*app_id, blob_hash, commitment) + ( + *app_id, + *blob_hash, + *size, + commitment.clone(), + *eval_point_seed, + *eval_claim, + ) }, _ => return None, }; let tx_index = u32::try_from(tx_index).ok()?; - let submitted_data = Some(SubmittedData::new(app_id, tx_index, blob_hash, commitment)); + let submitted_data = 
Some(SubmittedData::new( + app_id, + tx_index, + blob_hash, + size_bytes, + commitment, + eval_point_seed, + eval_claim, + post_inherent_info.eval_proofs.get(&tx_index).cloned(), + )); Some(ExtractedTxData { submitted_data,