From ee302ec78c363f0384011d35c76907a9deebe93a Mon Sep 17 00:00:00 2001
From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com>
Date: Thu, 26 Sep 2024 14:47:03 -0300
Subject: [PATCH 01/36] Send uncoded commitment

---
 core/lib/l1_contract_interface/Cargo.toml     |   5 +-
 .../src/i_executor/structures/blob_info.rs    | 140 ++++++++++++++++++
 .../structures/commit_batch_info.rs           |  32 +++-
 .../src/i_executor/structures/mod.rs          |   1 +
 4 files changed, 169 insertions(+), 9 deletions(-)
 create mode 100644 core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs

diff --git a/core/lib/l1_contract_interface/Cargo.toml b/core/lib/l1_contract_interface/Cargo.toml
index 8b68df854e7..e7bcf19d4ca 100644
--- a/core/lib/l1_contract_interface/Cargo.toml
+++ b/core/lib/l1_contract_interface/Cargo.toml
@@ -23,8 +23,11 @@ sha2.workspace = true
 sha3.workspace = true
 hex.workspace = true
 once_cell.workspace = true
+rlp.workspace = true
+serde.workspace = true
+bincode.workspace = true
 
 [dev-dependencies]
-serde.workspace = true
 serde_json.workspace = true
 serde_with = { workspace = true, features = ["base64", "hex"] }
+
diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs
new file mode 100644
index 00000000000..18a433fdc7a
--- /dev/null
+++ b/core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs
@@ -0,0 +1,140 @@
+use rlp::Decodable;
+use rlp::DecoderError;
+use rlp::Rlp;
+use serde::{Serialize, Deserialize};
+
+#[derive(Debug, Serialize, Deserialize)]
+struct G1Commitment {
+    pub x: Vec<u8>,
+    pub y: Vec<u8>,
+}
+
+impl Decodable for G1Commitment {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let x: Vec<u8> = rlp.val_at(0)?; // Decode first element as Vec<u8>
+        let y: Vec<u8> = rlp.val_at(1)?; // Decode second element as Vec<u8>
+
+        Ok(G1Commitment { x, y })
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct BlobQuorumParam {
+    pub quorum_number: u32,
+    pub adversary_threshold_percentage: u32,
+    pub confirmation_threshold_percentage: u32,
+    pub chunk_length: u32
+}
+
+impl Decodable for BlobQuorumParam {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BlobQuorumParam {
+            quorum_number: rlp.val_at(0)?,
+            adversary_threshold_percentage: rlp.val_at(1)?,
+            confirmation_threshold_percentage: rlp.val_at(2)?,
+            chunk_length: rlp.val_at(3)?,
+        })
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct BlobHeader {
+    pub commitment: G1Commitment,
+    pub data_length: u32,
+    pub blob_quorum_params: Vec<BlobQuorumParam>
+}
+
+impl Decodable for BlobHeader {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let commitment: G1Commitment = rlp.val_at(0)?;
+        let data_length: u32 = rlp.val_at(1)?;
+        let blob_quorum_params: Vec<BlobQuorumParam> = rlp.list_at(2)?;
+
+        Ok(BlobHeader {
+            commitment,
+            data_length,
+            blob_quorum_params,
+        })
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct BatchHeader {
+    pub batch_root: Vec<u8>,
+    pub quorum_numbers: Vec<u8>,
+    pub quorum_signed_percentages: Vec<u8>,
+    pub reference_block_number: u32
+}
+
+impl Decodable for BatchHeader {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BatchHeader {
+            batch_root: rlp.val_at(0)?,
+            quorum_numbers: rlp.val_at(1)?,
+            quorum_signed_percentages: rlp.val_at(2)?,
+            reference_block_number: rlp.val_at(3)?,
+        })
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct BatchMetadata {
+    pub batch_header: BatchHeader,
+    pub signatory_record_hash: Vec<u8>,
+    pub fee: Vec<u8>,
+    pub confirmation_block_number: u32,
+    pub batch_header_hash: Vec<u8>
+}
+
+impl Decodable for BatchMetadata {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let batch_header: BatchHeader = rlp.val_at(0)?;
+
+        Ok(BatchMetadata {
+            batch_header,
+            signatory_record_hash: rlp.val_at(1)?,
+            fee: rlp.val_at(2)?,
+            confirmation_block_number: rlp.val_at(3)?,
+            batch_header_hash: rlp.val_at(4)?,
+        })
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct BlobVerificationProof {
+    pub batch_id: u32,
+    pub blob_index: u32,
+    pub batch_medatada: BatchMetadata,
+    pub inclusion_proof: Vec<u8>,
+    pub quorum_indexes: Vec<u8>
+}
+
+impl Decodable for BlobVerificationProof {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BlobVerificationProof {
+            batch_id: rlp.val_at(0)?,
+            blob_index: rlp.val_at(1)?,
+            batch_medatada: rlp.val_at(2)?,
+            inclusion_proof: rlp.val_at(3)?,
+            quorum_indexes: rlp.val_at(4)?,
+        })
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct BlobInfo {
+    pub blob_header: BlobHeader,
+    pub blob_verification_proof: BlobVerificationProof
+}
+
+impl Decodable for BlobInfo {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let blob_header: BlobHeader = rlp.val_at(0)?;
+        let blob_verification_proof: BlobVerificationProof = rlp.val_at(1)?;
+
+        Ok(BlobInfo {
+            blob_header,
+            blob_verification_proof,
+        })
+    }
+}
diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
index 0d1ff91163f..42bcc3526a1 100644
--- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
+++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
@@ -1,3 +1,4 @@
+use rlp::decode;
 use zksync_types::{
     commitment::{
         pre_boojum_serialize_commitments, serialize_commitments, L1BatchCommitmentMode,
@@ -13,6 +14,8 @@ use crate::{
     i_executor::commit::kzg::{KzgInfo, ZK_SYNC_BYTES_PER_BLOB},
     Tokenizable,
 };
+use bincode;
+use super::blob_info::BlobInfo;
 
 /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata
 const PUBDATA_SOURCE_CALLDATA: u8 = 0;
@@ -217,14 +220,27 @@ impl Tokenizable for CommitBatchInfo<'_> {
             }
             (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => {
                 let mut operator_da_input = vec![PUBDATA_SOURCE_CUSTOM];
-                operator_da_input.extend(
-                    &self
-                        .l1_batch_with_metadata
-                        .metadata
-                        .da_blob_id
-                        .clone()
-                        .unwrap_or_default()
-                );
+
+                let commitment = &self
+                    .l1_batch_with_metadata
+                    .metadata
+                    .da_blob_id
+                    .clone()
+                    .unwrap_or_default();
+
+                let data = &hex::decode(commitment).unwrap()[3..];
+
+                let blob_info: BlobInfo = match decode(&data) {
+                    Ok(blob_info) => blob_info,
+                    Err(e) => panic!("Error decoding commitment: {}", e)
+                };
+
+                let bytes = match bincode::serialize(&blob_info) {
+                    Ok(bytes) => bytes,
+                    Err(e) => panic!("Error serializing commitment: {}", e)
+                };
+
+                operator_da_input.extend(bytes);
 
                 operator_da_input
             }
diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs
index d1ed57e41f2..90c16d37c57 100644
--- a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs
+++ b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs
@@ -2,5 +2,6 @@
 mod commit_batch_info;
 mod stored_batch_info;
+mod blob_info;
 
 pub use self::{commit_batch_info::CommitBatchInfo, stored_batch_info::StoredBatchInfo};

From bc8c30e41fab2e29881a70920c135f402f090e0b Mon Sep 17 00:00:00 2001
From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com>
Date: Thu, 26 Sep 2024 17:44:49 -0300
Subject: [PATCH 02/36] Add call to verify

---
 Cargo.lock                            | 1277 +++++++++++++++++++++++--
 core/node/da_clients/Cargo.toml
| 4 + core/node/da_clients/src/blob_info.rs | 221 +++++ core/node/da_clients/src/eigen_da.rs | 36 + core/node/da_clients/src/lib.rs | 1 + 5 files changed, 1461 insertions(+), 78 deletions(-) create mode 100644 core/node/da_clients/src/blob_info.rs diff --git a/Cargo.lock b/Cargo.lock index 7a032bea5e8..005769ae56d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,16 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + [[package]] name = "addchain" version = "0.2.0" @@ -233,6 +243,15 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "ascii-canvas" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" +dependencies = [ + "term", +] + [[package]] name = "assert_matches" version = "1.5.0" @@ -404,6 +423,17 @@ dependencies = [ "syn 2.0.72", ] +[[package]] +name = "async_io_stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +dependencies = [ + "futures 0.3.30", + "pharos", + "rustc_version", +] + [[package]] name = "atoi" version = "2.0.0" @@ -436,6 +466,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "auto_impl" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.72", +] + [[package]] name = "autocfg" version = "1.1.0" @@ -603,6 +644,12 @@ dependencies = [ "serde", ] +[[package]] +name = "bech32" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" + [[package]] name = "beef" version = "0.5.2" @@ -689,6 +736,15 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + [[package]] name = "bit-vec" version = "0.6.3" @@ -942,6 +998,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ + "sha2 0.10.8", "tinyvec", ] @@ -1002,6 +1059,9 @@ name = "bytes" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +dependencies = [ + "serde", +] [[package]] name = "bytesize" @@ -1009,6 +1069,16 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" +[[package]] +name = "bzip2" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8" +dependencies = [ + "bzip2-sys", + "libc", +] + [[package]] name = "bzip2-sys" version = "0.1.11+1.0.8" @@ -1051,6 
+1121,20 @@ dependencies = [ "serde_json", ] +[[package]] +name = "cargo_metadata" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d886547e41f740c616ae73108f6eb70afe6d940c7bc697cb30f13daec073037" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "cast" version = "0.3.0" @@ -1393,6 +1477,58 @@ dependencies = [ "indexmap 1.9.3", ] +[[package]] +name = "coins-bip32" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b6be4a5df2098cd811f3194f64ddb96c267606bffd9689ac7b0160097b01ad3" +dependencies = [ + "bs58", + "coins-core", + "digest 0.10.7", + "hmac 0.12.1", + "k256 0.13.3", + "serde", + "sha2 0.10.8", + "thiserror", +] + +[[package]] +name = "coins-bip39" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8fba409ce3dc04f7d804074039eb68b960b0829161f8e06c95fea3f122528" +dependencies = [ + "bitvec", + "coins-bip32", + "hmac 0.12.1", + "once_cell", + "pbkdf2 0.12.2", + "rand 0.8.5", + "sha2 0.10.8", + "thiserror", +] + +[[package]] +name = "coins-core" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5286a0843c21f8367f7be734f89df9b822e0321d8bcce8d6e735aadff7d74979" +dependencies = [ + "base64 0.21.5", + "bech32", + "bs58", + "digest 0.10.7", + "generic-array", + "hex", + "ripemd", + "serde", + "serde_derive", + "sha2 0.10.8", + "sha3 0.10.8", + "thiserror", +] + [[package]] name = "colorchoice" version = "1.0.0" @@ -1436,6 +1572,19 @@ dependencies = [ "windows-sys 0.45.0", ] +[[package]] +name = "const-hex" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" +dependencies = [ + "cfg-if", + "cpufeatures", + "hex", + "proptest", + "serde", +] + [[package]] name = "const-oid" version = "0.9.5" @@ -1867,6 +2016,12 @@ dependencies = [ "parking_lot_core", ] +[[package]] +name = "data-encoding" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" + [[package]] name = "debugid" version = "0.8.0" @@ -1874,7 +2029,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" dependencies = [ "serde", - "uuid", + "uuid 1.5.0", ] [[package]] @@ -1980,6 +2135,48 @@ dependencies = [ "subtle", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "dotenvy" version = "0.15.7" @@ -2127,6 +2324,15 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "ena" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" +dependencies = [ + "log", +] + [[package]] name = "encode_unicode" version = "0.3.6" @@ -2142,6 +2348,24 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enr" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a3d8dc56e02f954cac8eb489772c552c473346fc34f67412bb6244fd647f7e4" +dependencies = [ + "base64 0.21.5", + "bytes", + "hex", + "k256 0.13.3", + "log", + "rand 0.8.5", + "rlp", + "serde", + "sha3 0.10.8", + "zeroize", +] + [[package]] name = "enum_dispatch" version = "0.3.13" @@ -2199,67 +2423,341 @@ dependencies = [ ] [[package]] -name = "error-chain" -version = "0.12.4" +name = "error-chain" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" +dependencies = [ + "version_check", +] + +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if", + "home", + "windows-sys 0.48.0", +] + +[[package]] +name = "eth-keystore" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" +dependencies = [ + "aes", + "ctr", + "digest 0.10.7", + "hex", + "hmac 0.12.1", + "pbkdf2 0.11.0", + "rand 0.8.5", + "scrypt", + "serde", + "serde_json", + "sha2 0.10.8", + "sha3 0.10.8", + "thiserror", + "uuid 0.8.2", +] + +[[package]] +name = "ethabi" +version = "18.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" +dependencies = [ + "ethereum-types", + "hex", + "once_cell", + "regex", + "serde", + "serde_json", + "sha3 0.10.8", + "thiserror", + "uint", +] + +[[package]] +name = "ethbloom" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" +dependencies = [ + "crunchy", + "fixed-hash", + "impl-codec", + "impl-rlp", + "impl-serde", + "scale-info", + "tiny-keccak 2.0.2", +] + +[[package]] +name = "ethereum-types" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" +dependencies = [ + "ethbloom", + "fixed-hash", + "impl-codec", + "impl-rlp", + "impl-serde", + "primitive-types", + "scale-info", + "uint", +] + +[[package]] +name = "ethers" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "816841ea989f0c69e459af1cf23a6b0033b19a55424a1ea3a30099becdb8dec0" +dependencies = [ + "ethers-addressbook", + "ethers-contract", + "ethers-core", + "ethers-etherscan", + "ethers-middleware", + "ethers-providers", + "ethers-signers", + "ethers-solc", +] + +[[package]] +name = "ethers-addressbook" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5495afd16b4faa556c3bba1f21b98b4983e53c1755022377051a975c3b021759" +dependencies = [ + "ethers-core", + "once_cell", + "serde", + "serde_json", +] + +[[package]] +name = "ethers-contract" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fceafa3578c836eeb874af87abacfb041f92b4da0a78a5edd042564b8ecdaaa" +dependencies = [ + "const-hex", + "ethers-contract-abigen", + "ethers-contract-derive", + "ethers-core", + "ethers-providers", + "futures-util", + "once_cell", + "pin-project", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "ethers-contract-abigen" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04ba01fbc2331a38c429eb95d4a570166781f14290ef9fdb144278a90b5a739b" +dependencies = [ + "Inflector", + "const-hex", + "dunce", + "ethers-core", + "ethers-etherscan", + "eyre", + "prettyplease", + "proc-macro2 1.0.86", + "quote 1.0.36", + "regex", + "reqwest 0.11.22", + "serde", + "serde_json", + "syn 2.0.72", + "toml", + "walkdir", +] + +[[package]] +name = "ethers-contract-derive" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87689dcabc0051cde10caaade298f9e9093d65f6125c14575db3fd8c669a168f" +dependencies = [ + "Inflector", + "const-hex", + "ethers-contract-abigen", + "ethers-core", + "proc-macro2 1.0.86", + "quote 1.0.36", + "serde_json", + "syn 2.0.72", +] + +[[package]] +name = "ethers-core" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82d80cc6ad30b14a48ab786523af33b37f28a8623fc06afd55324816ef18fb1f" +dependencies = [ + "arrayvec 0.7.4", + "bytes", + "cargo_metadata 0.18.1", + "chrono", + "const-hex", + "elliptic-curve 0.13.8", + "ethabi", + "generic-array", + "k256 0.13.3", + "num_enum 0.7.2", + "once_cell", + "open-fastrlp", + "rand 0.8.5", + "rlp", + "serde", + "serde_json", + "strum", + "syn 2.0.72", + "tempfile", + "thiserror", + "tiny-keccak 2.0.2", + "unicode-xid 0.2.4", +] + +[[package]] +name = "ethers-etherscan" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79e5973c26d4baf0ce55520bd732314328cabe53193286671b47144145b9649" +dependencies = [ + "chrono", + "ethers-core", + "reqwest 0.11.22", + "semver", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "ethers-middleware" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48f9fdf09aec667c099909d91908d5eaf9be1bd0e2500ba4172c1d28bfaa43de" +dependencies = [ + "async-trait", + "auto_impl", + "ethers-contract", + "ethers-core", + "ethers-etherscan", + "ethers-providers", + "ethers-signers", + "futures-channel", + "futures-locks", + "futures-util", + "instant", + "reqwest 0.11.22", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-futures", + "url", +] + +[[package]] +name = "ethers-providers" +version = "2.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6434c9a33891f1effc9c75472e12666db2fa5a0fec4b29af6221680a6fe83ab2" +dependencies = [ + "async-trait", + "auto_impl", + "base64 0.21.5", + "bytes", + "const-hex", + "enr", + "ethers-core", + "futures-core", + "futures-timer", + "futures-util", + "hashers", + "http 0.2.9", + "instant", + "jsonwebtoken 8.3.0", + "once_cell", + "pin-project", + "reqwest 0.11.22", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-tungstenite", + "tracing", + "tracing-futures", + "url", + 
"wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "ws_stream_wasm", +] + +[[package]] +name = "ethers-signers" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" +checksum = "228875491c782ad851773b652dd8ecac62cda8571d3bc32a5853644dd26766c2" dependencies = [ - "version_check", + "async-trait", + "coins-bip32", + "coins-bip39", + "const-hex", + "elliptic-curve 0.13.8", + "eth-keystore", + "ethers-core", + "rand 0.8.5", + "sha2 0.10.8", + "thiserror", + "tracing", ] [[package]] -name = "etcetera" -version = "0.8.0" +name = "ethers-solc" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +checksum = "66244a771d9163282646dbeffe0e6eca4dda4146b6498644e678ac6089b11edd" dependencies = [ "cfg-if", + "const-hex", + "dirs", + "dunce", + "ethers-core", + "glob", "home", - "windows-sys 0.48.0", -] - -[[package]] -name = "ethabi" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" -dependencies = [ - "ethereum-types", - "hex", + "md-5", + "num_cpus", "once_cell", + "path-slash", + "rayon", "regex", + "semver", "serde", "serde_json", - "sha3 0.10.8", + "solang-parser", + "svm-rs", "thiserror", - "uint", -] - -[[package]] -name = "ethbloom" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", "tiny-keccak 2.0.2", -] - -[[package]] -name = "ethereum-types" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", + "tokio", + "tracing", + "walkdir", + "yansi", ] [[package]] @@ -2293,6 +2791,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + [[package]] name = "fastrand" version = "2.0.1" @@ -2397,7 +2905,7 @@ checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" dependencies = [ "futures-core", "futures-sink", - "spin", + "spin 0.9.8", ] [[package]] @@ -2496,6 +3004,16 @@ dependencies = [ "zksync_bellman", ] +[[package]] +name = "fs2" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "fs_extra" version = "1.3.0" @@ -2593,6 +3111,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "futures-locks" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" +dependencies = [ + "futures-channel", + "futures-task", +] + [[package]] name = "futures-macro" version = "0.3.30" @@ -2623,7 +3151,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" dependencies = [ "gloo-timers", - "send_wrapper", + "send_wrapper 0.4.0", ] [[package]] @@ -2645,6 +3173,15 @@ dependencies = [ "slab", ] +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -2782,7 +3319,7 @@ dependencies = [ "google-cloud-metadata", "google-cloud-token", "home", - "jsonwebtoken", + "jsonwebtoken 9.3.0", "reqwest 0.12.5", "serde", "serde_json", @@ -2826,7 +3363,7 @@ dependencies = [ "regex", "reqwest 0.12.5", "reqwest-middleware", - "ring", + "ring 0.17.7", "serde", "serde_json", "sha2 0.10.8", @@ -2963,6 +3500,15 @@ dependencies = [ "serde", ] +[[package]] +name = "hashers" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2bca93b15ea5a746f220e56587f71e73c6165eab783df9e26590069953e3c30" +dependencies = [ + "fxhash", +] + [[package]] name = "hashlink" version = "0.9.1" @@ -3385,6 +3931,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + [[package]] name = "indexmap" version = "1.9.3" @@ -3467,6 +4019,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.12.0" @@ -3785,6 +4346,20 @@ dependencies = [ "url", ] +[[package]] +name = "jsonwebtoken" +version = "8.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" +dependencies = [ + "base64 0.21.5", + "pem 1.1.1", + "ring 0.16.20", + "serde", + "serde_json", + "simple_asn1", +] + [[package]] name = "jsonwebtoken" version = "9.3.0" @@ -3793,8 +4368,8 @@ checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" dependencies = [ "base64 0.21.5", "js-sys", - "pem", - "ring", + "pem 3.0.4", + "ring 0.17.7", "serde", "serde_json", "simple_asn1", @@ -3835,13 +4410,43 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "lalrpop" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cb077ad656299f160924eb2912aa147d7339ea7d69e1b5517326fdcec3c1ca" +dependencies = [ + "ascii-canvas", + "bit-set", + "ena", + "itertools 0.11.0", + "lalrpop-util", + "petgraph", + "regex", + "regex-syntax 0.8.2", + "string_cache", + "term", + "tiny-keccak 2.0.2", + "unicode-xid 0.2.4", + "walkdir", +] + +[[package]] +name = "lalrpop-util" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553" +dependencies = [ + "regex-automata 0.4.7", +] + [[package]] name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin", + "spin 0.9.8", ] [[package]] @@ -3878,6 +4483,16 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.6.0", + "libc", +] + [[package]] name = "librocksdb-sys" version = "0.11.0+8.1.1" @@ -4248,7 +4863,7 @@ dependencies = [ "httparse", "memchr", "mime", - "spin", + "spin 0.9.8", "version_check", ] @@ -4276,6 +4891,12 @@ dependencies = [ "tempfile", ] +[[package]] +name = "new_debug_unreachable" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" + [[package]] name = "nix" version = "0.27.1" @@ -4542,6 +5163,31 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "open-fastrlp" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" +dependencies = [ + "arrayvec 0.7.4", + "auto_impl", + "bytes", + "ethereum-types", + "open-fastrlp-derive", +] + +[[package]] +name = "open-fastrlp-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" +dependencies = [ + "bytes", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", +] + [[package]] name = "openssl" version = "0.10.66" @@ -4684,6 +5330,12 @@ dependencies = [ "tokio-stream", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "ordered-float" version = "2.10.1" @@ -4783,12 +5435,41 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "password-hash" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", +] + [[package]] name = "paste" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +[[package]] +name = "path-slash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e91099d4268b0e11973f036e885d652fb0b21fedcf69738c627f94db6a44f42" + +[[package]] +name = "pbkdf2" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a0692ec44e4cf1ef28ca317f14f8f07da2d95ec3fa01f86e4467b725e60917" +dependencies = [ + "digest 0.10.7", + "hmac 0.12.1", + "password-hash", + "sha2 0.10.8", +] + [[package]] name = "pbkdf2" version = "0.12.2" @@ -4796,6 +5477,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" dependencies = [ "digest 0.10.7", + "hmac 0.12.1", ] [[package]] @@ -4804,6 +5486,15 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +[[package]] +name = "pem" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +dependencies = [ + "base64 0.13.1", +] + [[package]] name = "pem" version = "3.0.4" @@ -4884,6 +5575,67 @@ dependencies = [ "indexmap 2.1.0", ] +[[package]] +name = "pharos" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +dependencies = [ + "futures 0.3.30", + "rustc_version", +] + +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_macros", + "phf_shared 0.11.2", +] + +[[package]] +name = "phf_generator" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +dependencies = [ + "phf_shared 0.11.2", + "rand 0.8.5", +] + +[[package]] +name = "phf_macros" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +dependencies = [ + "phf_generator", + "phf_shared 0.11.2", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.72", +] + +[[package]] +name = "phf_shared" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" +dependencies = [ + "siphasher 0.3.11", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher 0.3.11", +] + [[package]] name = "pin-project" version = "1.1.3" @@ -5042,6 +5794,12 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "precomputed-hash" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" + [[package]] name = "pretty_assertions" version = "1.4.0" @@ -5184,6 +5942,22 @@ dependencies = [ "syn 2.0.72", ] +[[package]] +name = "proptest" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +dependencies = [ + "bitflags 2.6.0", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha", + "rand_xorshift", + "regex-syntax 0.8.2", + "unarray", +] + [[package]] name = "prost" version = "0.12.1" @@ -5448,6 +6222,15 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "rand_xoshiro" version = "0.6.0" @@ -5504,6 +6287,17 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom", + "libredox", + "thiserror", +] + [[package]] name = "regex" version = "1.10.6" @@ -5572,6 +6366,7 @@ dependencies = [ "http 0.2.9", 
"http-body 0.4.6", "hyper 0.14.29", + "hyper-rustls 0.24.2", "hyper-tls 0.5.0", "ipnet", "js-sys", @@ -5581,17 +6376,21 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "rustls 0.21.12", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", "system-configuration", "tokio", "tokio-native-tls", + "tokio-rustls 0.24.1", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots 0.25.4", "winreg 0.50.0", ] @@ -5703,6 +6502,21 @@ dependencies = [ "subtle", ] +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin 0.5.2", + "untrusted 0.7.1", + "web-sys", + "winapi", +] + [[package]] name = "ring" version = "0.17.7" @@ -5712,11 +6526,20 @@ dependencies = [ "cc", "getrandom", "libc", - "spin", - "untrusted", + "spin 0.9.8", + "untrusted 0.9.0", "windows-sys 0.48.0", ] +[[package]] +name = "ripemd" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "rkyv" version = "0.7.43" @@ -5732,7 +6555,7 @@ dependencies = [ "rkyv_derive", "seahash", "tinyvec", - "uuid", + "uuid 1.5.0", ] [[package]] @@ -5753,9 +6576,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ "bytes", + "rlp-derive", "rustc-hex", ] +[[package]] +name = "rlp-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", +] + [[package]] name = "rocksdb" version = "0.21.0" @@ -5855,7 +6690,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", - "ring", + "ring 0.17.7", "rustls-webpki 0.101.7", "sct", ] @@ -5867,7 +6702,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", - "ring", + "ring 0.17.7", "rustls-pki-types", "rustls-webpki 0.102.4", "subtle", @@ -5883,7 +6718,7 @@ dependencies = [ "aws-lc-rs", "log", "once_cell", - "ring", + "ring 0.17.7", "rustls-pki-types", "rustls-webpki 0.102.4", "subtle", @@ -5957,7 +6792,7 @@ dependencies = [ "rustls-webpki 0.102.4", "security-framework", "security-framework-sys", - "webpki-roots", + "webpki-roots 0.26.0", "winapi", ] @@ -5973,8 +6808,8 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.7", + "untrusted 0.9.0", ] [[package]] @@ -5984,9 +6819,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ "aws-lc-rs", - "ring", + "ring 0.17.7", "rustls-pki-types", - "untrusted", + "untrusted 0.9.0", ] [[package]] @@ -6012,6 +6847,15 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +[[package]] +name = "salsa20" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" +dependencies = [ + "cipher", +] + [[package]] name = "same-file" version = "1.0.6" @@ -6181,14 +7025,26 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "scrypt" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" +dependencies = [ + "hmac 0.12.1", + "pbkdf2 0.11.0", + "salsa20", + "sha2 0.10.8", +] + [[package]] name = "sct" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring", - "untrusted", + "ring 0.17.7", + "untrusted 0.9.0", ] [[package]] @@ -6306,6 +7162,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" +[[package]] +name = "send_wrapper" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" + [[package]] name = "sentry" version = "0.31.7" @@ -6411,7 +7273,7 @@ dependencies = [ "thiserror", "time", "url", - "uuid", + "uuid 1.5.0", ] [[package]] @@ -6481,6 +7343,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -6689,6 +7560,12 @@ dependencies = [ "time", ] +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + [[package]] name = "siphasher" version = "1.0.1" @@ -6712,7 +7589,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" dependencies = [ "bytecount", - "cargo_metadata", + "cargo_metadata 0.14.2", "error-chain", "glob", "pulldown-cmark", @@ -6789,7 +7666,7 @@ dependencies = [ "num-bigint 0.4.6", "num-rational", "num-traits", - "pbkdf2", + "pbkdf2 0.12.2", "pin-project", "poly1305", "rand 0.8.5", @@ -6800,7 +7677,7 @@ dependencies = [ "serde_json", "sha2 0.10.8", "sha3 0.10.8", - "siphasher", + "siphasher 1.0.1", "slab", "smallvec", "soketto 0.7.1", @@ -6839,7 +7716,7 @@ dependencies = [ "rand_chacha", "serde", "serde_json", - "siphasher", + "siphasher 1.0.1", "slab", "smol", "smoldot", @@ -6924,6 +7801,20 @@ dependencies = [ "sha1", ] +[[package]] +name = "solang-parser" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c425ce1c59f4b154717592f0bdf4715c3a1d55058883622d3157e1f0908a5b26" +dependencies = [ + "itertools 0.11.0", + "lalrpop", + "lalrpop-util", + "phf", + "thiserror", + "unicode-xid 0.2.4", +] + [[package]] name = "sp-core-hashing" version = "15.0.0" @@ -6938,6 +7829,12 @@ dependencies = [ "twox-hash", ] +[[package]] +name = "spin" +version = "0.5.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + [[package]] name = "spin" version = "0.9.8" @@ -7204,6 +8101,19 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "string_cache" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" +dependencies = [ + "new_debug_unreachable", + "once_cell", + "parking_lot", + "phf_shared 0.10.0", + "precomputed-hash", +] + [[package]] name = "stringprep" version = "0.1.4" @@ -7397,7 +8307,7 @@ dependencies = [ "hex", "hmac 0.12.1", "parity-scale-codec", - "pbkdf2", + "pbkdf2 0.12.2", "regex", "schnorrkel", "secrecy", @@ -7408,6 +8318,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "svm-rs" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11297baafe5fa0c99d5722458eac6a5e25c01eb1b8e5cd137f54079093daa7a4" +dependencies = [ + "dirs", + "fs2", + "hex", + "once_cell", + "reqwest 0.11.22", + "semver", + "serde", + "serde_json", + "sha2 0.10.8", + "thiserror", + "url", + "zip", +] + [[package]] name = "syn" version = "0.15.44" @@ -7524,6 +8454,17 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "term" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" +dependencies = [ + "dirs-next", + "rustversion", + "winapi", +] + [[package]] name = "termcolor" version = "1.4.1" @@ -7818,6 +8759,21 @@ dependencies = [ "tokio-util", ] +[[package]] +name = "tokio-tungstenite" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" +dependencies = [ + "futures-util", + "log", + "rustls 0.21.12", + "tokio", + "tokio-rustls 0.24.1", + "tungstenite", + "webpki-roots 0.25.4", +] + [[package]] name = "tokio-util" version = "0.7.12" @@ -7832,11 +8788,26 @@ dependencies = [ "tokio", ] +[[package]] +name = "toml" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac2caab0bf757388c6c0ae23b3293fdb463fee59434529014f85e3263b995c28" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.22.16", +] + [[package]] name = "toml_datetime" version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +dependencies = [ + "serde", +] [[package]] name = "toml_edit" @@ -7846,7 +8817,7 @@ checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ "indexmap 2.1.0", "toml_datetime", - "winnow", + "winnow 0.5.17", ] [[package]] @@ -7857,7 +8828,7 @@ checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ "indexmap 2.1.0", "toml_datetime", - "winnow", + "winnow 0.5.17", ] [[package]] @@ -7868,7 +8839,20 @@ checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ "indexmap 2.1.0", "toml_datetime", - "winnow", + "winnow 0.5.17", +] + +[[package]] +name = "toml_edit" +version = "0.22.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"278f3d518e152219c994ce877758516bca5e118eaed6996192a774fb9fbf0788" +dependencies = [ + "indexmap 2.1.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow 0.6.20", ] [[package]] @@ -7983,6 +8967,16 @@ dependencies = [ "valuable", ] +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project", + "tracing", +] + [[package]] name = "tracing-log" version = "0.1.4" @@ -8082,6 +9076,26 @@ dependencies = [ "termcolor", ] +[[package]] +name = "tungstenite" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http 0.2.9", + "httparse", + "log", + "rand 0.8.5", + "rustls 0.21.12", + "sha1", + "thiserror", + "url", + "utf-8", +] + [[package]] name = "twox-hash" version = "1.6.3" @@ -8132,6 +9146,12 @@ dependencies = [ "libc", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicase" version = "2.7.0" @@ -8218,6 +9238,12 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + [[package]] name = "untrusted" version = "0.9.0" @@ -8255,12 +9281,28 @@ version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8parse" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +[[package]] +name = "uuid" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +dependencies = [ + "getrandom", + "serde", +] + [[package]] name = "uuid" version = "1.5.0" @@ -8486,7 +9528,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a8281d1d660cdf54c76a3efa9ddd0c270cada1383a995db3ccb43d166456c7" dependencies = [ "smallvec", - "spin", + "spin 0.9.8", "wasmi_arena", "wasmi_core", "wasmparser-nostd", @@ -8539,6 +9581,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.25.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" + [[package]] name = "webpki-roots" version = "0.26.0" @@ -8833,6 +9881,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -8853,6 
+9910,25 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ws_stream_wasm" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" +dependencies = [ + "async_io_stream", + "futures 0.3.30", + "js-sys", + "log", + "pharos", + "rustc_version", + "send_wrapper 0.6.0", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wyz" version = "0.5.1" @@ -8935,6 +10011,26 @@ dependencies = [ "syn 2.0.72", ] +[[package]] +name = "zip" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" +dependencies = [ + "aes", + "byteorder", + "bzip2", + "constant_time_eq 0.1.5", + "crc32fast", + "crossbeam-utils", + "flate2", + "hmac 0.12.1", + "pbkdf2 0.11.0", + "sha1", + "time", + "zstd", +] + [[package]] name = "zk_evm" version = "0.131.0-rc.2" @@ -9670,6 +10766,7 @@ dependencies = [ "base58", "blake2 0.10.6", "blake2b_simd", + "ethers", "flate2", "futures 0.3.30", "hex", @@ -9677,6 +10774,7 @@ dependencies = [ "jsonrpsee 0.23.2", "parity-scale-codec", "reqwest 0.12.5", + "rlp", "scale-encode", "serde", "serde_json", @@ -9685,8 +10783,10 @@ dependencies = [ "tokio", "tracing", "zksync_config", + "zksync_contracts", "zksync_da_client", "zksync_env_config", + "zksync_eth_client", "zksync_object_store", "zksync_types", ] @@ -10026,8 +11126,10 @@ dependencies = [ name = "zksync_l1_contract_interface" version = "0.1.0" dependencies = [ + "bincode", "hex", "once_cell", + "rlp", "serde", "serde_json", "serde_with", @@ -11076,6 +12178,25 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.9+zstd.1.5.5" diff --git a/core/node/da_clients/Cargo.toml b/core/node/da_clients/Cargo.toml index b7e5510600f..5b5567a8aab 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -23,6 +23,8 @@ zksync_types.workspace = true zksync_object_store.workspace = true zksync_da_client.workspace = true zksync_env_config.workspace = true +zksync_eth_client.workspace = true +zksync_contracts.workspace = true futures.workspace = true # Avail dependencies @@ -41,3 +43,5 @@ subxt-signer = { workspace = true, features = ["sr25519", "native"] } # EigenDA dependencies reqwest = { version = "0.12" } http = "1" +rlp.workspace = true +ethers = { version = "2.0.0-rc.1", features = ["abigen", "etherscan"] } diff --git a/core/node/da_clients/src/blob_info.rs b/core/node/da_clients/src/blob_info.rs new file mode 100644 index 00000000000..f437b02db1b --- /dev/null +++ b/core/node/da_clients/src/blob_info.rs @@ -0,0 +1,221 @@ +use rlp::Decodable; +use rlp::DecoderError; +use rlp::Rlp; +use zksync_types::web3::contract::Tokenizable; +use zksync_types::web3::contract::Tokenize; +use zksync_types::ethabi::Token; +use zksync_types::U256; + +#[derive(Debug)] +pub struct G1Commitment { + pub x: Vec, + pub y: Vec, +} + +impl Decodable for 
G1Commitment {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let x: Vec<u8> = rlp.val_at(0)?; // Decode first element as Vec<u8>
+        let y: Vec<u8> = rlp.val_at(1)?; // Decode second element as Vec<u8>
+
+        Ok(G1Commitment { x, y })
+    }
+}
+
+impl Tokenize for G1Commitment {
+    fn into_tokens(self) -> Vec<Token> {
+
+        let x = self.x.into_token();
+        let y = self.y.into_token();
+
+        vec![x, y]
+    }
+}
+
+#[derive(Debug)]
+pub struct BlobQuorumParam {
+    pub quorum_number: u32,
+    pub adversary_threshold_percentage: u32,
+    pub confirmation_threshold_percentage: u32,
+    pub chunk_length: u32
+}
+
+impl Decodable for BlobQuorumParam {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BlobQuorumParam {
+            quorum_number: rlp.val_at(0)?,
+            adversary_threshold_percentage: rlp.val_at(1)?,
+            confirmation_threshold_percentage: rlp.val_at(2)?,
+            chunk_length: rlp.val_at(3)?,
+        })
+    }
+}
+
+impl Tokenize for BlobQuorumParam {
+    fn into_tokens(self) -> Vec<Token> {
+
+        let quorum_number = Token::Uint(U256::from(self.quorum_number));
+        let adversary_threshold_percentage = Token::Uint(U256::from(self.adversary_threshold_percentage));
+        let confirmation_threshold_percentage = Token::Uint(U256::from(self.confirmation_threshold_percentage));
+        let chunk_length = Token::Uint(U256::from(self.chunk_length));
+
+        vec![quorum_number, adversary_threshold_percentage, confirmation_threshold_percentage, chunk_length]
+    }
+}
+
+#[derive(Debug)]
+pub struct BlobHeader {
+    pub commitment: G1Commitment,
+    pub data_length: u32,
+    pub blob_quorum_params: Vec<BlobQuorumParam>
+}
+
+impl Decodable for BlobHeader {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let commitment: G1Commitment = rlp.val_at(0)?;
+        let data_length: u32 = rlp.val_at(1)?;
+        let blob_quorum_params: Vec<BlobQuorumParam> = rlp.list_at(2)?;
+
+        Ok(BlobHeader {
+            commitment,
+            data_length,
+            blob_quorum_params,
+        })
+    }
+}
+
+impl Tokenize for BlobHeader {
+    fn into_tokens(self) -> Vec<Token> {
+        let commitment = self.commitment.into_tokens();
+        let data_length = Token::Uint(U256::from(self.data_length));
+        let blob_quorum_params = self.blob_quorum_params.into_iter().map(|quorum| Token::Array(quorum.into_tokens())).collect();
+
+        vec![Token::Array(commitment), data_length, Token::Array(blob_quorum_params)]
+    }
+}
+
+#[derive(Debug)]
+pub struct BatchHeader {
+    pub batch_root: Vec<u8>,
+    pub quorum_numbers: Vec<u8>,
+    pub quorum_signed_percentages: Vec<u8>,
+    pub reference_block_number: u32
+}
+
+impl Decodable for BatchHeader {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BatchHeader {
+            batch_root: rlp.val_at(0)?,
+            quorum_numbers: rlp.val_at(1)?,
+            quorum_signed_percentages: rlp.val_at(2)?,
+            reference_block_number: rlp.val_at(3)?,
+        })
+    }
+}
+
+impl Tokenize for BatchHeader {
+    fn into_tokens(self) -> Vec<Token> {
+        let batch_root = self.batch_root.into_token();
+        let quorum_numbers = self.quorum_numbers.into_token();
+        let quorum_signed_percentages = self.quorum_signed_percentages.into_token();
+        let reference_block_number = Token::Uint(U256::from(self.reference_block_number));
+
+        vec![batch_root, quorum_numbers, quorum_signed_percentages, reference_block_number]
+    }
+}
+
+#[derive(Debug)]
+pub struct BatchMetadata {
+    pub batch_header: BatchHeader,
+    pub signatory_record_hash: Vec<u8>,
+    pub fee: Vec<u8>,
+    pub confirmation_block_number: u32,
+    pub batch_header_hash: Vec<u8>
+}
+
+impl Decodable for BatchMetadata {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let batch_header: BatchHeader = rlp.val_at(0)?;
+
+        Ok(BatchMetadata {
+            batch_header,
+            signatory_record_hash: rlp.val_at(1)?,
+            fee: rlp.val_at(2)?,
+            confirmation_block_number: rlp.val_at(3)?,
+            batch_header_hash: rlp.val_at(4)?,
+        })
+    }
+}
+
+impl Tokenize for BatchMetadata {
+    fn into_tokens(self) -> Vec<Token> {
+        let batch_header = self.batch_header.into_tokens();
+        let signatory_record_hash = self.signatory_record_hash.into_token();
+        let fee = self.fee.into_token();
+        let confirmation_block_number = Token::Uint(U256::from(self.confirmation_block_number));
+        let batch_header_hash = self.batch_header_hash.into_token();
+
+        vec![Token::Array(batch_header), signatory_record_hash, fee, confirmation_block_number, batch_header_hash]
+    }
+}
+
+#[derive(Debug)]
+pub struct BlobVerificationProof {
+    pub batch_id: u32,
+    pub blob_index: u32,
+    pub batch_medatada: BatchMetadata,
+    pub inclusion_proof: Vec<u8>,
+    pub quorum_indexes: Vec<u8>
+}
+
+impl Decodable for BlobVerificationProof {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BlobVerificationProof {
+            batch_id: rlp.val_at(0)?,
+            blob_index: rlp.val_at(1)?,
+            batch_medatada: rlp.val_at(2)?,
+            inclusion_proof: rlp.val_at(3)?,
+            quorum_indexes: rlp.val_at(4)?,
+        })
+    }
+}
+
+impl Tokenize for BlobVerificationProof {
+    fn into_tokens(self) -> Vec<Token> {
+        let batch_id = Token::Uint(U256::from(self.batch_id));
+        let blob_index = Token::Uint(U256::from(self.blob_index));
+        let batch_medatada = self.batch_medatada.into_tokens();
+        let inclusion_proof = self.inclusion_proof.into_token();
+        let quorum_indexes = self.quorum_indexes.into_token();
+
+        vec![batch_id, blob_index, Token::Array(batch_medatada), inclusion_proof, quorum_indexes]
+    }
+}
+
+#[derive(Debug)]
+pub struct BlobInfo {
+    pub blob_header: BlobHeader,
+    pub blob_verification_proof: BlobVerificationProof
+}
+
+impl Decodable for BlobInfo {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let blob_header: BlobHeader = rlp.val_at(0)?;
+        let blob_verification_proof: BlobVerificationProof = rlp.val_at(1)?;
+
+        Ok(BlobInfo {
+            blob_header,
+            blob_verification_proof,
+        })
+    }
+}
+
+impl Tokenize for BlobInfo {
+    fn into_tokens(self) -> Vec<Token> {
+        let blob_header = self.blob_header.into_tokens();
+        let blob_verification_proof = self.blob_verification_proof.into_tokens();
+
+        vec![Token::Array(blob_header), Token::Array(blob_verification_proof)]
+    }
+}
+
+
diff --git a/core/node/da_clients/src/eigen_da.rs b/core/node/da_clients/src/eigen_da.rs
index f2eff70faa3..375fb62f250 100644
--- a/core/node/da_clients/src/eigen_da.rs
+++ b/core/node/da_clients/src/eigen_da.rs
@@ -1,11 +1,18 @@
 use std::fmt::Debug;
 
 use async_trait::async_trait;
+use rlp::decode;
 use zksync_config::configs::da_client::eigen_da::EigenDAConfig;
 use zksync_da_client::{
     types::{self, DAError, InclusionData},
     DataAvailabilityClient,
 };
+use zksync_eth_client::{
+    CallFunctionArgs, ContractCallError, EthInterface,
+};
+use zksync_types::{blob, Address, U256};
+
+use crate::blob_info::BlobInfo;
 
 #[derive(Clone, Debug)]
 pub struct EigenDAClient {
@@ -23,6 +30,29 @@ impl EigenDAClient {
         })
     }
 }
+impl EigenDAClient {
+    pub async fn verify_blob(
+        &self,
+        verifier_address: Address,
+        eth_client: &dyn EthInterface,
+        commitment: String,
+    ) -> Result<U256, ContractCallError> {
+        let data = &hex::decode(commitment).unwrap()[3..];
+
+        let blob_info: BlobInfo = match decode(&data) {
+            Ok(blob_info) => blob_info,
+            Err(e) => panic!("Error decoding commitment: {}", e)
+        };
+
+        CallFunctionArgs::new("verifyBlob", blob_info)
+            .for_contract(
+                verifier_address,
+                &zksync_contracts::hyperchain_contract(),
+            )
+            .call(eth_client)
+            .await
+    }
+}
 
 #[async_trait]
 impl DataAvailabilityClient for EigenDAClient {
@@ -45,6 +75,12 @@ impl DataAvailabilityClient for EigenDAClient {
             .await
             .map_err(to_non_retriable_da_error)?
.to_vec(); + + self.verify_blob( + self.config.verifier_address, //todo + self.config.eth_client.as_ref(), //todo + hex::encode(request_id), + ); Ok(types::DispatchResponse { blob_id: hex::encode(request_id), }) diff --git a/core/node/da_clients/src/lib.rs b/core/node/da_clients/src/lib.rs index 6b2622c64e2..07bac46e923 100644 --- a/core/node/da_clients/src/lib.rs +++ b/core/node/da_clients/src/lib.rs @@ -2,3 +2,4 @@ pub mod avail; pub mod eigen_da; pub mod no_da; pub mod object_store; +pub mod blob_info; From a8b625817cae05c10e7c446f8aa3fc41ec3623da Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Fri, 27 Sep 2024 11:11:06 -0300 Subject: [PATCH 03/36] Add verify blob to eigenda client --- core/bin/zksync_server/src/node_builder.rs | 2 +- core/lib/config/src/configs/contracts.rs | 2 ++ core/node/da_clients/src/eigen_da.rs | 18 +++++++++--------- .../layers/da_clients/eigen_da.rs | 10 ++++++---- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index cfc499a7cdc..9f2bc3e90dc 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -519,7 +519,7 @@ impl MainNodeBuilder { .add_layer(ObjectStorageClientWiringLayer::new(config)); } DAClient::EigenDA(config) => { - self.node.add_layer(EigenDAWiringLayer::new(config)); + self.node.add_layer(EigenDAWiringLayer::new(config, self.contracts_config.eigenda_verifier_addr)); } } diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs index b68720ebaef..fa99b4139ef 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -40,6 +40,7 @@ pub struct ContractsConfig { // Used by the RPC API and by the node builder in wiring the BaseTokenRatioProvider layer. pub base_token_addr: Option
<Address>, pub chain_admin_addr: Option<Address>
, + pub eigenda_verifier_addr: Option<Address>
, } impl ContractsConfig { @@ -61,6 +62,7 @@ impl ContractsConfig { base_token_addr: Some(Address::repeat_byte(0x14)), ecosystem_contracts: Some(EcosystemContracts::for_tests()), chain_admin_addr: Some(Address::repeat_byte(0x18)), + eigenda_verifier_addr: Some(Address::repeat_byte(0x19)), } } } diff --git a/core/node/da_clients/src/eigen_da.rs b/core/node/da_clients/src/eigen_da.rs index 375fb62f250..be655f37f3f 100644 --- a/core/node/da_clients/src/eigen_da.rs +++ b/core/node/da_clients/src/eigen_da.rs @@ -8,7 +8,7 @@ use zksync_da_client::{ DataAvailabilityClient, }; use zksync_eth_client::{ - CallFunctionArgs, ContractCallError, EthInterface, + clients::{DynClient, L1}, CallFunctionArgs, ContractCallError, EthInterface }; use zksync_types::{blob, Address, U256}; @@ -18,23 +18,25 @@ use crate::blob_info::BlobInfo; pub struct EigenDAClient { client: reqwest::Client, config: EigenDAConfig, + eth_client: Box>, + verifier_address: Address, } impl EigenDAClient { pub const BLOB_SIZE_LIMIT_IN_BYTES: usize = 2 * 1024 * 1024; // 2MB - pub async fn new(config: EigenDAConfig) -> anyhow::Result { + pub async fn new(config: EigenDAConfig, eth_client: Box>, verifier_address: Address) -> anyhow::Result { Ok(Self { client: reqwest::Client::new(), config, + eth_client, + verifier_address, }) } } impl EigenDAClient { pub async fn verify_blob( &self, - verifier_address: Address, - eth_client: &dyn EthInterface, commitment: String, ) -> Result { let data = &hex::decode(commitment).unwrap()[3..]; @@ -46,10 +48,10 @@ impl EigenDAClient { CallFunctionArgs::new("verifyBlob", blob_info) .for_contract( - verifier_address, - &zksync_contracts::hyperchain_contract(), + self.verifier_address, //todo + &zksync_contracts::hyperchain_contract(), // todo ) - .call(eth_client) + .call(&self.eth_client) .await } } @@ -77,8 +79,6 @@ impl DataAvailabilityClient for EigenDAClient { .to_vec(); self.verify_blob( - self.config.verifier_address, //todo - self.config.eth_client.as_ref(), //todo hex::encode(request_id), ); Ok(types::DispatchResponse { diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs index b7c1025309c..4a91305f0cc 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs @@ -11,11 +11,12 @@ use crate::{ #[derive(Debug, Default)] pub struct EigenDAWiringLayer { config: EigenDAConfig, + verifier_address: String, } impl EigenDAWiringLayer { - pub fn new(config: EigenDAConfig) -> Self { - Self { config } + pub fn new(config: EigenDAConfig, verifier_address: Address) -> Self { + Self { config, verifier_address } } } @@ -34,9 +35,10 @@ impl WiringLayer for EigenDAWiringLayer { "eigen_da_client_layer" } - async fn wire(self, _: Self::Input) -> Result { + async fn wire(self, input: Self::Input) -> Result { + let EthInterfaceResource(query_client) = input.eth_client; let client: Box = - Box::new(EigenDAClient::new(self.config).await?); + Box::new(EigenDAClient::new(self.config,query_client, self.verifier_address).await?); Ok(Self::Output { client: DAClientResource(client), From d40d0335ed0b394b28cf212b3e962b179081d148 Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Fri, 27 Sep 2024 11:11:17 -0300 Subject: [PATCH 04/36] Revert "Send uncoded commitment" This reverts commit ee302ec78c363f0384011d35c76907a9deebe93a. 
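For orientation, a minimal usage sketch of the client as changed by "Add verify blob to eigenda client" above (not part of any patch in this series): the wiring layer resolves an L1 query client and the verifier address, and `dispatch_blob` now verifies the returned request id on chain before reporting the blob id. All variable names below are placeholders.

```rust
// Hypothetical call site inside an async context; resource names are placeholders.
let client = EigenDAClient::new(
    eigen_da_config,          // EigenDAConfig from the general config
    l1_query_client,          // Box<DynClient<L1>> taken from EthInterfaceResource
    eigenda_verifier_address, // Address of the EigenDA verifier contract
)
.await
.expect("failed to build EigenDA client");

// `dispatch_blob` posts the pubdata to the eigenda-proxy, then `verify_blob`
// hex-decodes the returned request id, RLP-decodes it into a `BlobInfo` and
// calls `verifyBlob` on the verifier contract.
let response = client
    .dispatch_blob(l1_batch_number, pubdata)
    .await
    .expect("dispatch or on-chain verification failed");
println!("EigenDA blob id: {}", response.blob_id);
```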
--- core/lib/l1_contract_interface/Cargo.toml | 5 +- .../src/i_executor/structures/blob_info.rs | 140 ------------------ .../structures/commit_batch_info.rs | 32 +--- .../src/i_executor/structures/mod.rs | 1 - 4 files changed, 9 insertions(+), 169 deletions(-) delete mode 100644 core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs diff --git a/core/lib/l1_contract_interface/Cargo.toml b/core/lib/l1_contract_interface/Cargo.toml index e7bcf19d4ca..8b68df854e7 100644 --- a/core/lib/l1_contract_interface/Cargo.toml +++ b/core/lib/l1_contract_interface/Cargo.toml @@ -23,11 +23,8 @@ sha2.workspace = true sha3.workspace = true hex.workspace = true once_cell.workspace = true -rlp.workspace = true -serde.workspace = true -bincode.workspace = true [dev-dependencies] +serde.workspace = true serde_json.workspace = true serde_with = { workspace = true, features = ["base64", "hex"] } - diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs deleted file mode 100644 index 18a433fdc7a..00000000000 --- a/core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs +++ /dev/null @@ -1,140 +0,0 @@ -use rlp::Decodable; -use rlp::DecoderError; -use rlp::Rlp; -use serde::{Serialize, Deserialize}; - -#[derive(Debug,Serialize, Deserialize)] -struct G1Commitment { - pub x: Vec, - pub y: Vec, -} - -impl Decodable for G1Commitment { - fn decode(rlp: &Rlp) -> Result { - let x: Vec = rlp.val_at(0)?; // Decode first element as Vec - let y: Vec = rlp.val_at(1)?; // Decode second element as Vec - - Ok(G1Commitment { x, y }) - } -} - -#[derive(Debug,Serialize, Deserialize)] -struct BlobQuorumParam { - pub quorum_number: u32, - pub adversary_threshold_percentage: u32, - pub confirmation_threshold_percentage: u32, - pub chunk_length: u32 -} - -impl Decodable for BlobQuorumParam { - fn decode(rlp: &Rlp) -> Result { - Ok(BlobQuorumParam { - quorum_number: rlp.val_at(0)?, - adversary_threshold_percentage: rlp.val_at(1)?, - confirmation_threshold_percentage: rlp.val_at(2)?, - chunk_length: rlp.val_at(3)?, - }) - } -} - -#[derive(Debug,Serialize, Deserialize)] -struct BlobHeader { - pub commitment: G1Commitment, - pub data_length: u32, - pub blob_quorum_params: Vec -} - -impl Decodable for BlobHeader { - fn decode(rlp: &Rlp) -> Result { - let commitment: G1Commitment = rlp.val_at(0)?; - let data_length: u32 = rlp.val_at(1)?; - let blob_quorum_params: Vec = rlp.list_at(2)?; - - Ok(BlobHeader { - commitment, - data_length, - blob_quorum_params, - }) - } -} - -#[derive(Debug,Serialize, Deserialize)] -struct BatchHeader { - pub batch_root: Vec, - pub quorum_numbers: Vec, - pub quorum_signed_percentages: Vec, - pub reference_block_number: u32 -} - -impl Decodable for BatchHeader { - fn decode(rlp: &Rlp) -> Result { - Ok(BatchHeader { - batch_root: rlp.val_at(0)?, - quorum_numbers: rlp.val_at(1)?, - quorum_signed_percentages: rlp.val_at(2)?, - reference_block_number: rlp.val_at(3)?, - }) - } -} - -#[derive(Debug,Serialize, Deserialize)] -struct BatchMetadata { - pub batch_header: BatchHeader, - pub signatory_record_hash: Vec, - pub fee: Vec, - pub confirmation_block_number: u32, - pub batch_header_hash: Vec -} - -impl Decodable for BatchMetadata { - fn decode(rlp: &Rlp) -> Result { - let batch_header: BatchHeader = rlp.val_at(0)?; - - Ok(BatchMetadata { - batch_header, - signatory_record_hash: rlp.val_at(1)?, - fee: rlp.val_at(2)?, - confirmation_block_number: rlp.val_at(3)?, - batch_header_hash: 
rlp.val_at(4)?, - }) - } -} - -#[derive(Debug,Serialize, Deserialize)] -struct BlobVerificationProof { - pub batch_id: u32, - pub blob_index: u32, - pub batch_medatada: BatchMetadata, - pub inclusion_proof: Vec, - pub quorum_indexes: Vec -} - -impl Decodable for BlobVerificationProof { - fn decode(rlp: &Rlp) -> Result { - Ok(BlobVerificationProof { - batch_id: rlp.val_at(0)?, - blob_index: rlp.val_at(1)?, - batch_medatada: rlp.val_at(2)?, - inclusion_proof: rlp.val_at(3)?, - quorum_indexes: rlp.val_at(4)?, - }) - } -} - -#[derive(Debug,Serialize, Deserialize)] -pub struct BlobInfo { - pub blob_header: BlobHeader, - pub blob_verification_proof: BlobVerificationProof -} - -impl Decodable for BlobInfo { - fn decode(rlp: &Rlp) -> Result { - let blob_header: BlobHeader = rlp.val_at(0)?; - let blob_verification_proof: BlobVerificationProof = rlp.val_at(1)?; - - Ok(BlobInfo { - blob_header, - blob_verification_proof, - }) - } -} diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index 42bcc3526a1..0d1ff91163f 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -1,4 +1,3 @@ -use rlp::decode; use zksync_types::{ commitment::{ pre_boojum_serialize_commitments, serialize_commitments, L1BatchCommitmentMode, @@ -14,8 +13,6 @@ use crate::{ i_executor::commit::kzg::{KzgInfo, ZK_SYNC_BYTES_PER_BLOB}, Tokenizable, }; -use bincode; -use super::blob_info::BlobInfo; /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata const PUBDATA_SOURCE_CALLDATA: u8 = 0; @@ -220,27 +217,14 @@ impl Tokenizable for CommitBatchInfo<'_> { } (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { let mut operator_da_input = vec![PUBDATA_SOURCE_CUSTOM]; - - let commitment = &self - .l1_batch_with_metadata - .metadata - .da_blob_id - .clone() - .unwrap_or_default(); - - let data = &hex::decode(commitment).unwrap()[3..]; - - let blob_info: BlobInfo = match decode(&data) { - Ok(blob_info) => blob_info, - Err(e) => panic!("Error decoding commitment: {}", e) - }; - - let bytes = match bincode::serialize(&blob_info) { - Ok(bytes) => bytes, - Err(e) => panic!("Error serializing commitment: {}", e) - }; - - operator_da_input.extend(bytes); + operator_da_input.extend( + &self + .l1_batch_with_metadata + .metadata + .da_blob_id + .clone() + .unwrap_or_default() + ); operator_da_input } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs index 90c16d37c57..d1ed57e41f2 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs @@ -2,6 +2,5 @@ mod commit_batch_info; mod stored_batch_info; -mod blob_info; pub use self::{commit_batch_info::CommitBatchInfo, stored_batch_info::StoredBatchInfo}; From a0b21d8c2d97f4f229aec84ed36d67a97717356d Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Fri, 27 Sep 2024 18:21:01 -0300 Subject: [PATCH 05/36] Update submodule --- Cargo.lock | 2 -- contracts | 2 +- .../src/implementations/layers/da_clients/eigen_da.rs | 3 ++- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 005769ae56d..ac134d1286c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11126,10 +11126,8 @@ 
dependencies = [ name = "zksync_l1_contract_interface" version = "0.1.0" dependencies = [ - "bincode", "hex", "once_cell", - "rlp", "serde", "serde_json", "serde_with", diff --git a/contracts b/contracts index 071aa0e4e9d..c3da19643d5 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 071aa0e4e9d98f7869c38d88792386bf639b901d +Subproject commit c3da19643d5ea823006679ede6ec925b27ad1f37 diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs index 4a91305f0cc..8ef5e68ac11 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs @@ -1,6 +1,7 @@ use zksync_config::configs::da_client::eigen_da::EigenDAConfig; use zksync_da_client::DataAvailabilityClient; use zksync_da_clients::eigen_da::EigenDAClient; +use zksync_types::Address; use crate::{ implementations::resources::da_client::DAClientResource, @@ -11,7 +12,7 @@ use crate::{ #[derive(Debug, Default)] pub struct EigenDAWiringLayer { config: EigenDAConfig, - verifier_address: String, + verifier_address: Address, } impl EigenDAWiringLayer { From 66ee57c820609f5cd05497f94b2d811e3d95a342 Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Mon, 30 Sep 2024 10:42:47 -0300 Subject: [PATCH 06/36] Update submodule --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index c3da19643d5..28989078aef 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit c3da19643d5ea823006679ede6ec925b27ad1f37 +Subproject commit 28989078aef3ff360b13aa988687f7a06c56f54a From df7be16e268eec807ce4e82c669249d63392424b Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Mon, 30 Sep 2024 11:28:31 -0300 Subject: [PATCH 07/36] Fix submodule --- contracts | 2 +- core/lib/config/src/testonly.rs | 1 + core/lib/env_config/src/contracts.rs | 2 ++ core/lib/protobuf_config/src/contracts.rs | 7 +++++++ core/lib/protobuf_config/src/proto/config/contracts.proto | 1 + core/node/da_clients/src/eigen_da.rs | 4 ++-- 6 files changed, 14 insertions(+), 3 deletions(-) diff --git a/contracts b/contracts index 28989078aef..ebb94203bcb 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 28989078aef3ff360b13aa988687f7a06c56f54a +Subproject commit ebb94203bcb75b224d74b0522d7870545a68d1e4 diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 4a2858b9cbf..a4640c1e0b0 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -258,6 +258,7 @@ impl Distribution for EncodeDist { ecosystem_contracts: self.sample(rng), base_token_addr: self.sample_opt(|| rng.gen()), chain_admin_addr: self.sample_opt(|| rng.gen()), + eigenda_verifier_addr: self.sample_opt(|| rng.gen()), } } } diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 298c43b80cc..2f3b64db8a3 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -72,6 +72,7 @@ mod tests { }), base_token_addr: Some(SHARED_BRIDGE_ETHER_TOKEN_ADDRESS), chain_admin_addr: Some(addr("0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff")), + eigenda_verifier_addr: None, } } @@ -98,6 +99,7 @@ CONTRACTS_STATE_TRANSITION_PROXY_ADDR="0xd90f1c081c6117241624e97cb6147257c3cb209 
CONTRACTS_TRANSPARENT_PROXY_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347e5" CONTRACTS_BASE_TOKEN_ADDR="0x0000000000000000000000000000000000000001" CONTRACTS_CHAIN_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff" +CONTRACTS_EIGENDA_VERIFIER_ADDR="" "#; lock.set_env(config); diff --git a/core/lib/protobuf_config/src/contracts.rs b/core/lib/protobuf_config/src/contracts.rs index 84c40436750..cd904b63202 100644 --- a/core/lib/protobuf_config/src/contracts.rs +++ b/core/lib/protobuf_config/src/contracts.rs @@ -107,6 +107,12 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("chain_admin_addr")?, + eigenda_verifier_addr: l1 + .eigenda_verifier_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("eigenda_verifier_addr")?, }) } @@ -139,6 +145,7 @@ impl ProtoRepr for proto::Contracts { multicall3_addr: Some(format!("{:?}", this.l1_multicall3_addr)), base_token_addr: this.base_token_addr.map(|a| format!("{:?}", a)), chain_admin_addr: this.chain_admin_addr.map(|a| format!("{:?}", a)), + eigenda_verifier_addr: this.eigenda_verifier_addr.map(|a| format!("{:?}", a)), }), l2: Some(proto::L2 { testnet_paymaster_addr: this.l2_testnet_paymaster_addr.map(|a| format!("{:?}", a)), diff --git a/core/lib/protobuf_config/src/proto/config/contracts.proto b/core/lib/protobuf_config/src/proto/config/contracts.proto index f4488c7901a..d4f8d561160 100644 --- a/core/lib/protobuf_config/src/proto/config/contracts.proto +++ b/core/lib/protobuf_config/src/proto/config/contracts.proto @@ -17,6 +17,7 @@ message L1 { optional string multicall3_addr = 6; // required; H160 optional string base_token_addr = 7; // required; H160 optional string chain_admin_addr = 8; // required; H160 + optional string eigenda_verifier_addr = 9 ; // optional; h160 } message L2 { diff --git a/core/node/da_clients/src/eigen_da.rs b/core/node/da_clients/src/eigen_da.rs index be655f37f3f..ab38ca7eead 100644 --- a/core/node/da_clients/src/eigen_da.rs +++ b/core/node/da_clients/src/eigen_da.rs @@ -79,8 +79,8 @@ impl DataAvailabilityClient for EigenDAClient { .to_vec(); self.verify_blob( - hex::encode(request_id), - ); + hex::encode(request_id.clone()), + ).await?; Ok(types::DispatchResponse { blob_id: hex::encode(request_id), }) From ebee0988fb4e2a6e22f73a75b6993af5badb032f Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:46:04 -0300 Subject: [PATCH 08/36] Deploy eigenda verifier --- contracts | 2 +- .../config/src/forge_interface/deploy_ecosystem/input.rs | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/contracts b/contracts index ebb94203bcb..69730940956 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit ebb94203bcb75b224d74b0522d7870545a68d1e4 +Subproject commit 6973094095635397686dd404499d47d4e301c316 diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs index 30ec0eeb9c4..1db56a210ca 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs @@ -31,6 +31,7 @@ pub struct InitialDeploymentConfig { pub validator_timelock_execution_delay: u64, pub token_weth_address: Address, pub bridgehub_create_new_chain_salt: u64, + pub eigen_service_manager: Address, } impl Default for InitialDeploymentConfig { @@ -53,6 +54,7 @@ impl Default for InitialDeploymentConfig { // toml crate u64 
support is backed by i64 implementation // https://github.com/toml-rs/toml/issues/705 bridgehub_create_new_chain_salt: rand::thread_rng().gen_range(0..=i64::MAX) as u64, + eigen_service_manager: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(), } } } @@ -161,6 +163,7 @@ impl DeployL1Config { priority_tx_max_gas_limit: initial_deployment_config.priority_tx_max_gas_limit, validator_timelock_execution_delay: initial_deployment_config .validator_timelock_execution_delay, + eigen_service_manager: initial_deployment_config.eigen_service_manager, }, tokens: TokensDeployL1Config { token_weth_address: initial_deployment_config.token_weth_address, @@ -194,6 +197,7 @@ pub struct ContractsDeployL1Config { pub diamond_init_minimal_l2_gas_price: u64, pub bootloader_hash: H256, pub default_aa_hash: H256, + pub eigen_service_manager: Address, } #[derive(Debug, Deserialize, Serialize, Clone)] From 4dc9e16263a0e74bd568ad03c52c0f7eeb4532ab Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Mon, 30 Sep 2024 15:35:36 -0300 Subject: [PATCH 09/36] Add call to verify --- contracts | 2 +- core/bin/zksync_server/src/node_builder.rs | 2 +- core/lib/contracts/src/lib.rs | 9 ++++++++- core/node/da_clients/src/eigen_da.rs | 12 ++++++++---- .../implementations/layers/da_clients/eigen_da.rs | 11 +++++++++-- zk_toolbox/crates/config/src/contracts.rs | 2 ++ .../src/forge_interface/deploy_ecosystem/output.rs | 1 + 7 files changed, 30 insertions(+), 9 deletions(-) diff --git a/contracts b/contracts index 69730940956..57152774692 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 6973094095635397686dd404499d47d4e301c316 +Subproject commit 5715277469275a78b99813fb900fce14443cacb4 diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 9f2bc3e90dc..a6c58aa2d84 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -519,7 +519,7 @@ impl MainNodeBuilder { .add_layer(ObjectStorageClientWiringLayer::new(config)); } DAClient::EigenDA(config) => { - self.node.add_layer(EigenDAWiringLayer::new(config, self.contracts_config.eigenda_verifier_addr)); + self.node.add_layer(EigenDAWiringLayer::new(config, self.contracts_config.eigenda_verifier_addr.unwrap())); } } diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index a60d9fbf181..7536b3e01f0 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -63,7 +63,10 @@ const LOADNEXT_CONTRACT_FILE: &str = "etc/contracts-test-data/artifacts-zk/contracts/loadnext/loadnext_contract.sol/LoadnextContract.json"; const LOADNEXT_SIMPLE_CONTRACT_FILE: &str = "etc/contracts-test-data/artifacts-zk/contracts/loadnext/loadnext_contract.sol/Foo.json"; - +const EIGENDA_VERIFIER_CONTRACT_FILE: (&str, &str) = ( + "eigenda", + "EigendaVerifier.sol/EigendaVerifier.json", +); fn home_path() -> PathBuf { Workspace::locate().core() } @@ -162,6 +165,10 @@ pub fn verifier_contract() -> Contract { load_contract_for_both_compilers(VERIFIER_CONTRACT_FILE) } +pub fn eigenda_verifier_contract() -> Contract { + load_contract_for_both_compilers(EIGENDA_VERIFIER_CONTRACT_FILE) +} + #[derive(Debug, Clone)] pub struct TestContract { /// Contract bytecode to be used for sending deploy transaction. 
diff --git a/core/node/da_clients/src/eigen_da.rs b/core/node/da_clients/src/eigen_da.rs index ab38ca7eead..8b57eabbdc4 100644 --- a/core/node/da_clients/src/eigen_da.rs +++ b/core/node/da_clients/src/eigen_da.rs @@ -38,7 +38,7 @@ impl EigenDAClient { pub async fn verify_blob( &self, commitment: String, - ) -> Result { + ) -> Result { let data = &hex::decode(commitment).unwrap()[3..]; let blob_info: BlobInfo = match decode(&data) { @@ -48,11 +48,14 @@ impl EigenDAClient { CallFunctionArgs::new("verifyBlob", blob_info) .for_contract( - self.verifier_address, //todo - &zksync_contracts::hyperchain_contract(), // todo + self.verifier_address, + &zksync_contracts::eigenda_verifier_contract(), ) .call(&self.eth_client) - .await + .await.map_err(|e| DAError { + error: e.into(), + is_retriable: true, + }) } } @@ -81,6 +84,7 @@ impl DataAvailabilityClient for EigenDAClient { self.verify_blob( hex::encode(request_id.clone()), ).await?; + Ok(types::DispatchResponse { blob_id: hex::encode(request_id), }) diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs index 8ef5e68ac11..f29f4e1d312 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs @@ -1,10 +1,11 @@ use zksync_config::configs::da_client::eigen_da::EigenDAConfig; use zksync_da_client::DataAvailabilityClient; use zksync_da_clients::eigen_da::EigenDAClient; +use zksync_node_framework_derive::FromContext; use zksync_types::Address; use crate::{ - implementations::resources::da_client::DAClientResource, + implementations::resources::{da_client::DAClientResource, eth_interface::EthInterfaceResource}, wiring_layer::{WiringError, WiringLayer}, IntoContext, }; @@ -27,9 +28,15 @@ pub struct Output { pub client: DAClientResource, } +#[derive(Debug, FromContext)] +#[context(crate = crate)] +pub struct Input { + pub eth_client: EthInterfaceResource, +} + #[async_trait::async_trait] impl WiringLayer for EigenDAWiringLayer { - type Input = (); + type Input = Input; type Output = Output; fn layer_name(&self) -> &'static str { diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zk_toolbox/crates/config/src/contracts.rs index 8296aa18852..43fcf314470 100644 --- a/zk_toolbox/crates/config/src/contracts.rs +++ b/zk_toolbox/crates/config/src/contracts.rs @@ -71,6 +71,7 @@ impl ContractsConfig { .diamond_cut_data .clone_from(&deploy_l1_output.contracts_config.diamond_cut_data); self.l1.chain_admin_addr = deploy_l1_output.deployed_addresses.chain_admin; + self.l1.eigenda_verifier_addr = deploy_l1_output.deployed_addresses.eigenda_verifier_addr; } pub fn set_chain_contracts(&mut self, register_chain_output: &RegisterChainOutput) { @@ -151,6 +152,7 @@ pub struct L1Contracts { pub verifier_addr: Address, pub validator_timelock_addr: Address, pub base_token_addr: Address, + pub eigenda_verifier_addr: Address, } #[derive(Debug, Serialize, Deserialize, Clone, Default)] diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs index 7f35cf0357c..5e51634356c 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs @@ -48,6 +48,7 @@ pub struct DeployL1DeployedAddressesOutput { pub bridgehub: L1BridgehubOutput, pub bridges: L1BridgesOutput, pub 
state_transition: L1StateTransitionOutput, + pub eigenda_verifier_addr: Address, } #[derive(Debug, Deserialize, Serialize, Clone)] From 93fa37334238b2f5f225cf9b680afe54d2445b5b Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Mon, 30 Sep 2024 16:23:55 -0300 Subject: [PATCH 10/36] Fix tokenize params --- core/node/da_clients/src/blob_info.rs | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/core/node/da_clients/src/blob_info.rs b/core/node/da_clients/src/blob_info.rs index f437b02db1b..8460fc399fd 100644 --- a/core/node/da_clients/src/blob_info.rs +++ b/core/node/da_clients/src/blob_info.rs @@ -24,8 +24,8 @@ impl Decodable for G1Commitment { impl Tokenize for G1Commitment { fn into_tokens(self) -> Vec { - let x = self.x.into_token(); - let y = self.y.into_token(); + let x = Token::Uint(U256::from_big_endian(&self.x)); + let y = Token::Uint(U256::from_big_endian(&self.y)); vec![x, y] } @@ -87,9 +87,9 @@ impl Tokenize for BlobHeader { fn into_tokens(self) -> Vec { let commitment = self.commitment.into_tokens(); let data_length = Token::Uint(U256::from(self.data_length)); - let blob_quorum_params = self.blob_quorum_params.into_iter().map(|quorum| Token::Array(quorum.into_tokens())).collect(); + let blob_quorum_params = self.blob_quorum_params.into_iter().map(|quorum| Token::Tuple(quorum.into_tokens())).collect(); - vec![Token::Array(commitment), data_length,Token::Array(blob_quorum_params)] + vec![Token::Tuple(commitment), data_length,Token::Array(blob_quorum_params)] } } @@ -114,7 +114,7 @@ impl Decodable for BatchHeader { impl Tokenize for BatchHeader { fn into_tokens(self) -> Vec { - let batch_root = self.batch_root.into_token(); + let batch_root = Token::FixedBytes(self.batch_root); let quorum_numbers = self.quorum_numbers.into_token(); let quorum_signed_percentages = self.quorum_signed_percentages.into_token(); let reference_block_number = Token::Uint(U256::from(self.reference_block_number)); @@ -149,12 +149,10 @@ impl Decodable for BatchMetadata { impl Tokenize for BatchMetadata { fn into_tokens(self) -> Vec { let batch_header = self.batch_header.into_tokens(); - let signatory_record_hash = self.signatory_record_hash.into_token(); - let fee = self.fee.into_token(); + let signatory_record_hash = Token::FixedBytes(self.signatory_record_hash); let confirmation_block_number = Token::Uint(U256::from(self.confirmation_block_number)); - let batch_header_hash = self.batch_header_hash.into_token(); - vec![Token::Array(batch_header), signatory_record_hash,fee,confirmation_block_number,batch_header_hash] + vec![Token::Tuple(batch_header), signatory_record_hash,confirmation_block_number] } } @@ -187,7 +185,7 @@ impl Tokenize for BlobVerificationProof { let inclusion_proof = self.inclusion_proof.into_token(); let quorum_indexes = self.quorum_indexes.into_token(); - vec![batch_id, blob_index,Token::Array(batch_medatada),inclusion_proof,quorum_indexes] + vec![batch_id, blob_index,Token::Tuple(batch_medatada),inclusion_proof,quorum_indexes] } } @@ -214,7 +212,7 @@ impl Tokenize for BlobInfo { let blob_header = self.blob_header.into_tokens(); let blob_verification_proof = self.blob_verification_proof.into_tokens(); - vec![Token::Array(blob_header),Token::Array(blob_verification_proof)] + vec![Token::Tuple(vec![Token::Tuple(blob_header),Token::Tuple(blob_verification_proof)])] } } From df8b5721ab284c8036110e1c354b868d46c4bcee Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> 
Date: Mon, 30 Sep 2024 18:36:40 -0300 Subject: [PATCH 11/36] Update contracts --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 57152774692..aec2b24ae69 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 5715277469275a78b99813fb900fce14443cacb4 +Subproject commit aec2b24ae693a3381653c7b718c829034d879393 From d7f92c3ef16f1585af12c0fc0b8a5d76ec658e52 Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Fri, 4 Oct 2024 17:18:36 -0300 Subject: [PATCH 12/36] Remove panic --- core/node/da_clients/src/eigen_da.rs | 31 ++++++++++++++-------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/core/node/da_clients/src/eigen_da.rs b/core/node/da_clients/src/eigen_da.rs index 8b57eabbdc4..05d79b730a5 100644 --- a/core/node/da_clients/src/eigen_da.rs +++ b/core/node/da_clients/src/eigen_da.rs @@ -8,7 +8,8 @@ use zksync_da_client::{ DataAvailabilityClient, }; use zksync_eth_client::{ - clients::{DynClient, L1}, CallFunctionArgs, ContractCallError, EthInterface + clients::{DynClient, L1}, + CallFunctionArgs, ContractCallError, EthInterface, }; use zksync_types::{blob, Address, U256}; @@ -25,7 +26,11 @@ pub struct EigenDAClient { impl EigenDAClient { pub const BLOB_SIZE_LIMIT_IN_BYTES: usize = 2 * 1024 * 1024; // 2MB - pub async fn new(config: EigenDAConfig, eth_client: Box>, verifier_address: Address) -> anyhow::Result { + pub async fn new( + config: EigenDAConfig, + eth_client: Box>, + verifier_address: Address, + ) -> anyhow::Result { Ok(Self { client: reqwest::Client::new(), config, @@ -35,24 +40,22 @@ impl EigenDAClient { } } impl EigenDAClient { - pub async fn verify_blob( - &self, - commitment: String, - ) -> Result { + pub async fn verify_blob(&self, commitment: String) -> Result { let data = &hex::decode(commitment).unwrap()[3..]; - let blob_info: BlobInfo = match decode(&data) { - Ok(blob_info) => blob_info, - Err(e) => panic!("Error decoding commitment: {}", e) - }; + let blob_info: BlobInfo = decode(&data).map_err(|e| DAError { + error: e.into(), + is_retriable: true, + })?; CallFunctionArgs::new("verifyBlob", blob_info) .for_contract( - self.verifier_address, + self.verifier_address, &zksync_contracts::eigenda_verifier_contract(), ) .call(&self.eth_client) - .await.map_err(|e| DAError { + .await + .map_err(|e| DAError { error: e.into(), is_retriable: true, }) @@ -81,9 +84,7 @@ impl DataAvailabilityClient for EigenDAClient { .map_err(to_non_retriable_da_error)? 
.to_vec(); - self.verify_blob( - hex::encode(request_id.clone()), - ).await?; + self.verify_blob(hex::encode(request_id.clone())).await?; Ok(types::DispatchResponse { blob_id: hex::encode(request_id), From e46b31ff9b9639e809ab8a7b1b271c86554ecdb2 Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Mon, 7 Oct 2024 11:36:41 -0300 Subject: [PATCH 13/36] Update contracts --- contracts | 2 +- core/node/da_clients/src/eigen_da.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contracts b/contracts index aec2b24ae69..9f98e7bb897 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit aec2b24ae693a3381653c7b718c829034d879393 +Subproject commit 9f98e7bb8973c59f280c4bf177fdafff8cbfb477 diff --git a/core/node/da_clients/src/eigen_da.rs b/core/node/da_clients/src/eigen_da.rs index 05d79b730a5..fcdcde9192d 100644 --- a/core/node/da_clients/src/eigen_da.rs +++ b/core/node/da_clients/src/eigen_da.rs @@ -40,7 +40,7 @@ impl EigenDAClient { } } impl EigenDAClient { - pub async fn verify_blob(&self, commitment: String) -> Result { + pub async fn verify_blob(&self, commitment: String) -> Result { let data = &hex::decode(commitment).unwrap()[3..]; let blob_info: BlobInfo = decode(&data).map_err(|e| DAError { From 894ff204edcc19c3b35d3fcf4dbf31cc9072db4b Mon Sep 17 00:00:00 2001 From: Juan Munoz Date: Tue, 8 Oct 2024 11:28:47 -0300 Subject: [PATCH 14/36] update contracts submodule --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 3ec30fa71cb..ece4eb73efd 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 3ec30fa71cb123d2b25ade27a8248d0b320e505f +Subproject commit ece4eb73efd31ce73ead0936a2fa43b0f777d41a From b4282345f87c03d91495ee9975aa4d0d9281e440 Mon Sep 17 00:00:00 2001 From: Juan Munoz Date: Tue, 8 Oct 2024 11:49:14 -0300 Subject: [PATCH 15/36] remove eigen proxy from general setup, add it to doc steps --- docker-compose.yml | 6 ------ eigenda-integration.md | 24 +++++++++++++++++++++++- etc/env/file_based/general.yaml | 12 ++---------- 3 files changed, 25 insertions(+), 17 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index d805ccc844e..0046b0d5db9 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -55,9 +55,3 @@ services: profiles: - runner network_mode: host - - eigenda-proxy: - image: ghcr.io/layr-labs/eigenda-proxy - ports: - - "4242:4242" - command: ./eigenda-proxy --addr 0.0.0.0 --port 4242 --memstore.enabled --eigenda-max-blob-length "2MiB" diff --git a/eigenda-integration.md b/eigenda-integration.md index 7291971b304..a80676deea1 100644 --- a/eigenda-integration.md +++ b/eigenda-integration.md @@ -1,5 +1,27 @@ # Zksync-era <> EigenDA Integration +## Common changes + +Changes needed both for local and mainnet/testnet setup. + +1. Add `da_client` to general config: + +```yaml +da_client: + eigen_da: + api_node_url: http://127.0.0.1:4242 +``` + +2. Add `eigenda-proxy` to the `docker-compose.yml` file: + +```yaml +eigenda-proxy: + image: ghcr.io/layr-labs/eigenda-proxy + ports: + - "4242:4242" + command: ./eigenda-proxy --addr 0.0.0.0 --port 4242 --memstore.enabled --eigenda-max-blob-length "2MiB" +``` + ## Local Setup 1. Install `zk_inception` & `zk_supervisor` @@ -72,7 +94,7 @@ zk_supervisor test integration --chain eigen_da Access Grafana at [http://localhost:3000/](http://localhost:3000/), go to dashboards and select `EigenDA`. 
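The `da_client` snippet added to the documentation above maps onto the Rust configuration type consumed by the client throughout this series. A rough sketch of its shape follows; the canonical definition lives in `zksync_config::configs::da_client::eigen_da` and may carry more fields, so treat the derive and the field type as assumptions.

```rust
// Hypothetical shape only; see zksync_config::configs::da_client::eigen_da
// for the canonical EigenDAConfig definition.
#[derive(Clone, Debug, serde::Deserialize)]
pub struct EigenDAConfig {
    /// Base URL of the eigenda-proxy sidecar, e.g. "http://127.0.0.1:4242".
    pub api_node_url: String,
}
```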
-## Holesky Setup +## Mainnet/Testnet setup ### Modify localhost chain id number diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 8f1792e8907..28b421ad915 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -41,7 +41,7 @@ api: estimate_gas_scale_factor: 1.3 estimate_gas_acceptable_overestimation: 5000 max_tx_size: 1000000 - api_namespaces: [ en,eth,net,web3,zks,pubsub,debug ] + api_namespaces: [en, eth, net, web3, zks, pubsub, debug] state_keeper: transaction_slots: 8192 max_allowed_l2_tx_gas_limit: 15000000000 @@ -104,7 +104,7 @@ eth: aggregated_block_execute_deadline: 10 timestamp_criteria_max_allowed_lag: 30 max_eth_tx_data_size: 120000 - aggregated_proof_sizes: [ 1 ] + aggregated_proof_sizes: [1] max_aggregated_tx_gas: 15000000 max_acceptable_priority_fee_in_gwei: 100000000000 pubdata_sending_mode: BLOBS @@ -121,7 +121,6 @@ eth: confirmations_for_eth_event: 0 eth_node_poll_interval: 300 - snapshot_creator: object_store: file_backed: @@ -130,7 +129,6 @@ snapshot_creator: concurrent_queries_count: 25 storage_logs_chunk_size: 1000000 - prover: prover_object_store: file_backed: @@ -289,7 +287,6 @@ prover_job_monitor: witness_job_queuer_run_interval_ms: 10000 http_port: 3074 - base_token_adjuster: price_polling_interval_ms: 30000 price_cache_update_interval_ms: 2000 @@ -301,7 +298,6 @@ external_price_api_client: forced_numerator: 3 forced_denominator: 2 - house_keeper: l1_batch_metrics_reporting_interval_ms: 10000 @@ -375,7 +371,3 @@ da_dispatcher: external_proof_integration_api: http_port: 3073 - -da_client: - eigen_da: - api_node_url: http://127.0.0.1:4242 From 5bea05d7080e52f501ff4c3f8fb605d7e7b83e0b Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Tue, 8 Oct 2024 12:15:34 -0300 Subject: [PATCH 16/36] Remove formats (#293) * Remove unneeded formatting * Add script explanations * Remove observability changes --- backup-ecosystem.sh | 3 + core/lib/dal/src/base_token_dal.rs | 8 +- core/lib/dal/src/blocks_web3_dal.rs | 63 +- core/lib/dal/src/consensus_dal.rs | 22 +- core/lib/dal/src/contract_verification_dal.rs | 74 +- core/lib/dal/src/data_availability_dal.rs | 8 +- core/lib/dal/src/eth_sender_dal.rs | 70 +- core/lib/dal/src/eth_watcher_dal.rs | 20 +- core/lib/dal/src/events_dal.rs | 8 +- core/lib/dal/src/events_web3_dal.rs | 45 +- core/lib/dal/src/factory_deps_dal.rs | 6 +- core/lib/dal/src/proof_generation_dal.rs | 8 +- core/lib/dal/src/protocol_versions_dal.rs | 24 +- core/lib/dal/src/pruning_dal/mod.rs | 121 +-- core/lib/dal/src/snapshot_recovery_dal.rs | 26 +- core/lib/dal/src/snapshots_creator_dal.rs | 24 +- core/lib/dal/src/snapshots_dal.rs | 28 +- core/lib/dal/src/storage_logs_dal.rs | 57 +- core/lib/dal/src/storage_logs_dedup_dal.rs | 12 +- core/lib/dal/src/storage_web3_dal.rs | 2 +- core/lib/dal/src/system_dal.rs | 20 +- core/lib/dal/src/tee_proof_generation_dal.rs | 22 +- .../src/tee_verifier_input_producer_dal.rs | 12 +- core/lib/dal/src/tokens_dal.rs | 2 +- core/lib/dal/src/tokens_web3_dal.rs | 4 +- core/lib/dal/src/transactions_dal.rs | 716 +++++++++--------- core/lib/dal/src/transactions_web3_dal.rs | 48 +- core/lib/dal/src/vm_runner_dal.rs | 82 +- infrastructure/zk/src/init.ts | 42 +- .../crates/lib/prover_dal/src/cli_test_dal.rs | 60 +- .../src/fri_gpu_prover_queue_dal.rs | 40 +- .../src/fri_proof_compressor_dal.rs | 46 +- .../src/fri_protocol_versions_dal.rs | 6 +- .../lib/prover_dal/src/fri_prover_dal.rs | 166 ++-- .../src/fri_witness_generator_dal.rs | 
276 +++---- restore-ecosystem.sh | 3 + zk_toolbox/crates/common/src/git.rs | 6 - zk_toolbox/crates/config/src/consts.rs | 2 +- .../commands/ecosystem/setup_observability.rs | 2 - 39 files changed, 1117 insertions(+), 1067 deletions(-) diff --git a/backup-ecosystem.sh b/backup-ecosystem.sh index dbdf82e3a38..848884ae2d7 100755 --- a/backup-ecosystem.sh +++ b/backup-ecosystem.sh @@ -1,5 +1,8 @@ #!/bin/bash +# This script backs up the Postgres databases and chain configuration files for a given ecosystem. +# With it you can make a testnet deployment and save the L2 state for later recovery. + # Check if the ecosystem name was provided as an argument if [ -z "$1" ]; then echo "Usage: ./backup-ecosystem ECOSYSTEM_NAME" diff --git a/core/lib/dal/src/base_token_dal.rs b/core/lib/dal/src/base_token_dal.rs index a8bf51d0c60..52389bdcb62 100644 --- a/core/lib/dal/src/base_token_dal.rs +++ b/core/lib/dal/src/base_token_dal.rs @@ -21,11 +21,13 @@ impl BaseTokenDal<'_, '_> { let row = sqlx::query!( r#" INSERT INTO - base_token_ratios (numerator, denominator, ratio_timestamp, created_at, updated_at) + base_token_ratios ( + numerator, denominator, ratio_timestamp, created_at, updated_at + ) VALUES - ($1, $2, $3, NOW(), NOW()) + ($1, $2, $3, NOW(), NOW()) RETURNING - id + id "#, BigDecimal::from_u64(numerator.get()), BigDecimal::from_u64(denominator.get()), diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 3d17a919a07..904e167d1a6 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -52,9 +52,11 @@ impl BlocksWeb3Dal<'_, '_> { transactions.hash AS "tx_hash?" FROM miniblocks - LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1 - LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number - LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number + LEFT JOIN + miniblocks prev_miniblock + ON prev_miniblock.number = miniblocks.number - 1 + LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number + LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number WHERE miniblocks.number = $1 ORDER BY @@ -184,8 +186,10 @@ impl BlocksWeb3Dal<'_, '_> { transactions.refunded_gas AS "transaction_refunded_gas?" 
FROM miniblocks - LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1 - LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number + LEFT JOIN + miniblocks prev_miniblock + ON prev_miniblock.number = miniblocks.number - 1 + LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number WHERE miniblocks.number > $1 ORDER BY @@ -559,7 +563,7 @@ impl BlocksWeb3Dal<'_, '_> { call_trace FROM call_traces - INNER JOIN transactions ON tx_hash = transactions.hash + INNER JOIN transactions ON tx_hash = transactions.hash WHERE transactions.miniblock_number = $1 ORDER BY @@ -673,16 +677,19 @@ impl BlocksWeb3Dal<'_, '_> { miniblocks.fee_account_address FROM miniblocks - LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number - LEFT JOIN eth_txs_history AS commit_tx ON ( + LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number + LEFT JOIN eth_txs_history AS commit_tx + ON ( l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL ) - LEFT JOIN eth_txs_history AS prove_tx ON ( + LEFT JOIN eth_txs_history AS prove_tx + ON ( l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL ) - LEFT JOIN eth_txs_history AS execute_tx ON ( + LEFT JOIN eth_txs_history AS execute_tx + ON ( l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL ) @@ -708,18 +715,19 @@ impl BlocksWeb3Dal<'_, '_> { StorageL1BatchDetails, r#" WITH - mb AS ( - SELECT - l1_gas_price, - l2_fair_gas_price, - fair_pubdata_price - FROM - miniblocks - WHERE - l1_batch_number = $1 - LIMIT - 1 - ) + mb AS ( + SELECT + l1_gas_price, + l2_fair_gas_price, + fair_pubdata_price + FROM + miniblocks + WHERE + l1_batch_number = $1 + LIMIT + 1 + ) + SELECT l1_batches.number, l1_batches.timestamp, @@ -739,16 +747,19 @@ impl BlocksWeb3Dal<'_, '_> { l1_batches.default_aa_code_hash FROM l1_batches - INNER JOIN mb ON TRUE - LEFT JOIN eth_txs_history AS commit_tx ON ( + INNER JOIN mb ON TRUE + LEFT JOIN eth_txs_history AS commit_tx + ON ( l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL ) - LEFT JOIN eth_txs_history AS prove_tx ON ( + LEFT JOIN eth_txs_history AS prove_tx + ON ( l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL ) - LEFT JOIN eth_txs_history AS execute_tx ON ( + LEFT JOIN eth_txs_history AS execute_tx + ON ( l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL ) diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index f2e499ce562..dd976f22086 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -165,9 +165,9 @@ impl ConsensusDal<'_, '_> { sqlx::query!( r#" INSERT INTO - consensus_replica_state (fake_key, global_config, genesis, state) + consensus_replica_state (fake_key, global_config, genesis, state) VALUES - (TRUE, $1, $2, $3) + (TRUE, $1, $2, $3) "#, global_config, genesis, @@ -479,9 +479,9 @@ impl ConsensusDal<'_, '_> { sqlx::query!( r#" INSERT INTO - miniblocks_consensus (number, certificate) + miniblocks_consensus (number, certificate) VALUES - ($1, $2) + ($1, $2) "#, i64::try_from(header.number.0).context("overflow")?, zksync_protobuf::serde::Serialize @@ -507,15 +507,15 @@ impl ConsensusDal<'_, '_> { sqlx::query!( r#" INSERT INTO - l1_batches_consensus_committees (l1_batch_number, attesters, updated_at) + l1_batches_consensus_committees (l1_batch_number, attesters, 
updated_at) VALUES - ($1, $2, NOW()) + ($1, $2, NOW()) ON CONFLICT (l1_batch_number) DO UPDATE SET - l1_batch_number = $1, - attesters = $2, - updated_at = NOW() + l1_batch_number = $1, + attesters = $2, + updated_at = NOW() "#, i64::try_from(number.0).context("overflow")?, committee @@ -582,9 +582,9 @@ impl ConsensusDal<'_, '_> { sqlx::query!( r#" INSERT INTO - l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at) + l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at) VALUES - ($1, $2, NOW(), NOW()) + ($1, $2, NOW(), NOW()) "#, i64::try_from(cert.message.number.0).context("overflow")?, // Unwrap is ok, because serialization should always succeed. diff --git a/core/lib/dal/src/contract_verification_dal.rs b/core/lib/dal/src/contract_verification_dal.rs index 97af7880d16..291e60a50d9 100644 --- a/core/lib/dal/src/contract_verification_dal.rs +++ b/core/lib/dal/src/contract_verification_dal.rs @@ -67,25 +67,25 @@ impl ContractVerificationDal<'_, '_> { sqlx::query!( r#" INSERT INTO - contract_verification_requests ( - contract_address, - source_code, - contract_name, - zk_compiler_version, - compiler_version, - optimization_used, - optimizer_mode, - constructor_arguments, - is_system, - force_evmla, - status, - created_at, - updated_at - ) + contract_verification_requests ( + contract_address, + source_code, + contract_name, + zk_compiler_version, + compiler_version, + optimization_used, + optimizer_mode, + constructor_arguments, + is_system, + force_evmla, + status, + created_at, + updated_at + ) VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, 'queued', NOW(), NOW()) + ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, 'queued', NOW(), NOW()) RETURNING - id + id "#, query.contract_address.as_bytes(), // Serialization should always succeed. 
@@ -143,20 +143,20 @@ impl ContractVerificationDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - id, - contract_address, - source_code, - contract_name, - zk_compiler_version, - compiler_version, - optimization_used, - optimizer_mode, - constructor_arguments, - is_system, - force_evmla + id, + contract_address, + source_code, + contract_name, + zk_compiler_version, + compiler_version, + optimization_used, + optimizer_mode, + constructor_arguments, + is_system, + force_evmla "#, &processing_timeout ) @@ -198,13 +198,13 @@ impl ContractVerificationDal<'_, '_> { sqlx::query!( r#" INSERT INTO - contracts_verification_info (address, verification_info) + contracts_verification_info (address, verification_info) VALUES - ($1, $2) + ($1, $2) ON CONFLICT (address) DO UPDATE SET - verification_info = $2 + verification_info = $2 "#, address.as_bytes(), &verification_info_json @@ -313,8 +313,8 @@ impl ContractVerificationDal<'_, '_> { LIMIT 1 ) deploy_event - JOIN factory_deps ON factory_deps.bytecode_hash = deploy_event.topic3 - LEFT JOIN transactions ON transactions.hash = deploy_event.tx_hash + JOIN factory_deps ON factory_deps.bytecode_hash = deploy_event.topic3 + LEFT JOIN transactions ON transactions.hash = deploy_event.tx_hash WHERE deploy_event.miniblock_number <= ( SELECT @@ -433,15 +433,15 @@ impl ContractVerificationDal<'_, '_> { sqlx::query!( r#" INSERT INTO - compiler_versions (VERSION, compiler, created_at, updated_at) + compiler_versions (version, compiler, created_at, updated_at) SELECT u.version, $2, NOW(), NOW() FROM - UNNEST($1::TEXT[]) AS u (VERSION) - ON CONFLICT (VERSION, compiler) DO NOTHING + UNNEST($1::TEXT []) AS u (version) + ON CONFLICT (version, compiler) DO NOTHING "#, &versions, &compiler, diff --git a/core/lib/dal/src/data_availability_dal.rs b/core/lib/dal/src/data_availability_dal.rs index 24048ec4fa1..41dd7efe273 100644 --- a/core/lib/dal/src/data_availability_dal.rs +++ b/core/lib/dal/src/data_availability_dal.rs @@ -28,9 +28,9 @@ impl DataAvailabilityDal<'_, '_> { let update_result = sqlx::query!( r#" INSERT INTO - data_availability (l1_batch_number, blob_id, sent_at, created_at, updated_at) + data_availability (l1_batch_number, blob_id, sent_at, created_at, updated_at) VALUES - ($1, $2, $3, NOW(), NOW()) + ($1, $2, $3, NOW(), NOW()) ON CONFLICT DO NOTHING "#, i64::from(number.0), @@ -187,7 +187,9 @@ impl DataAvailabilityDal<'_, '_> { pubdata_input FROM l1_batches - LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE eth_commit_tx_id IS NULL AND number != 0 diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index a66a513175b..4ce76547ac9 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -43,7 +43,7 @@ impl EthSenderDal<'_, '_> { COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history - JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id + JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id WHERE eth_txs_history.sent_at_block IS NOT NULL AND eth_txs.from_addr IS NOT DISTINCT FROM $1 @@ -174,7 +174,7 @@ impl EthSenderDal<'_, '_> { COALESCE(MAX(eth_tx_id), 0) FROM eth_txs_history - JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id + JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id WHERE eth_txs_history.sent_at_block IS NOT NULL AND eth_txs.from_addr IS NOT DISTINCT FROM $2 @@ -208,7 +208,7 @@ impl EthSenderDal<'_, '_> { 
eth_txs.nonce FROM eth_txs_history - JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id + JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id WHERE eth_txs_history.sent_at_block IS NULL AND eth_txs.confirmed_eth_tx_history_id IS NULL @@ -238,22 +238,22 @@ impl EthSenderDal<'_, '_> { StorageEthTx, r#" INSERT INTO - eth_txs ( - raw_tx, - nonce, - tx_type, - contract_address, - predicted_gas_cost, - created_at, - updated_at, - from_addr, - blob_sidecar, - is_gateway - ) + eth_txs ( + raw_tx, + nonce, + tx_type, + contract_address, + predicted_gas_cost, + created_at, + updated_at, + from_addr, + blob_sidecar, + is_gateway + ) VALUES - ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, $8) + ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, $8) RETURNING - * + * "#, raw_tx, nonce as i64, @@ -290,23 +290,23 @@ impl EthSenderDal<'_, '_> { Ok(sqlx::query!( r#" INSERT INTO - eth_txs_history ( - eth_tx_id, - base_fee_per_gas, - priority_fee_per_gas, - tx_hash, - signed_raw_tx, - created_at, - updated_at, - blob_base_fee_per_gas, - sent_at_block, - sent_at - ) + eth_txs_history ( + eth_tx_id, + base_fee_per_gas, + priority_fee_per_gas, + tx_hash, + signed_raw_tx, + created_at, + updated_at, + blob_base_fee_per_gas, + sent_at_block, + sent_at + ) VALUES - ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, NOW()) + ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, NOW()) ON CONFLICT (tx_hash) DO NOTHING RETURNING - id + id "#, eth_tx_id as i32, base_fee_per_gas, @@ -376,8 +376,8 @@ impl EthSenderDal<'_, '_> { WHERE tx_hash = $1 RETURNING - id, - eth_tx_id + id, + eth_tx_id "#, tx_hash, ) @@ -551,7 +551,7 @@ impl EthSenderDal<'_, '_> { eth_txs.blob_sidecar FROM eth_txs_history - LEFT JOIN eth_txs ON eth_tx_id = eth_txs.id + LEFT JOIN eth_txs ON eth_tx_id = eth_txs.id WHERE eth_tx_id = $1 ORDER BY @@ -602,7 +602,7 @@ impl EthSenderDal<'_, '_> { eth_txs.blob_sidecar FROM eth_txs_history - LEFT JOIN eth_txs ON eth_tx_id = eth_txs.id + LEFT JOIN eth_txs ON eth_tx_id = eth_txs.id WHERE eth_tx_id = $1 ORDER BY diff --git a/core/lib/dal/src/eth_watcher_dal.rs b/core/lib/dal/src/eth_watcher_dal.rs index 3220868decf..bdfc7f24c7b 100644 --- a/core/lib/dal/src/eth_watcher_dal.rs +++ b/core/lib/dal/src/eth_watcher_dal.rs @@ -30,8 +30,8 @@ impl EthWatcherDal<'_, '_> { FROM processed_events WHERE - TYPE = $1 - AND chain_id = $2 + type = $1 + AND chain_id = $2 "#, event_type as EventType, chain_id.0 as i64 @@ -48,13 +48,13 @@ impl EthWatcherDal<'_, '_> { sqlx::query!( r#" INSERT INTO - processed_events ( - TYPE, - chain_id, - next_block_to_process - ) + processed_events ( + type, + chain_id, + next_block_to_process + ) VALUES - ($1, $2, $3) + ($1, $2, $3) "#, event_type as EventType, chain_id.0 as i64, @@ -82,8 +82,8 @@ impl EthWatcherDal<'_, '_> { SET next_block_to_process = $3 WHERE - TYPE = $1 - AND chain_id = $2 + type = $1 + AND chain_id = $2 "#, event_type as EventType, chain_id.0 as i64, diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index 4050acf7135..487871e3be7 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -217,17 +217,17 @@ impl EventsDal<'_, '_> { topic4, value, NULL::bytea AS "block_hash", - NULL::BIGINT AS "l1_batch_number?", + NULL::bigint AS "l1_batch_number?", miniblock_number, tx_hash, tx_index_in_block, event_index_in_block, event_index_in_tx, - NULL::BIGINT AS "block_timestamp?" + NULL::bigint AS "block_timestamp?" 
FROM events WHERE - tx_hash = ANY ($1) + tx_hash = ANY($1) ORDER BY miniblock_number ASC, event_index_in_block ASC @@ -319,7 +319,7 @@ impl EventsDal<'_, '_> { FROM l2_to_l1_logs WHERE - tx_hash = ANY ($1) + tx_hash = ANY($1) ORDER BY tx_index_in_l1_batch ASC, log_index_in_tx ASC diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs index fc21cc36460..8b7eb96b714 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -189,27 +189,28 @@ impl EventsWeb3Dal<'_, '_> { StorageWeb3Log, r#" WITH - events_select AS ( - SELECT - address, - topic1, - topic2, - topic3, - topic4, - value, - miniblock_number, - tx_hash, - tx_index_in_block, - event_index_in_block, - event_index_in_tx - FROM - events - WHERE - miniblock_number > $1 - ORDER BY - miniblock_number ASC, - event_index_in_block ASC - ) + events_select AS ( + SELECT + address, + topic1, + topic2, + topic3, + topic4, + value, + miniblock_number, + tx_hash, + tx_index_in_block, + event_index_in_block, + event_index_in_tx + FROM + events + WHERE + miniblock_number > $1 + ORDER BY + miniblock_number ASC, + event_index_in_block ASC + ) + SELECT miniblocks.hash AS "block_hash?", address AS "address!", @@ -227,7 +228,7 @@ impl EventsWeb3Dal<'_, '_> { miniblocks.timestamp AS "block_timestamp" FROM events_select - INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number + INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number ORDER BY miniblock_number ASC, event_index_in_block ASC diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs index 02ce32306cf..36dfaa1a466 100644 --- a/core/lib/dal/src/factory_deps_dal.rs +++ b/core/lib/dal/src/factory_deps_dal.rs @@ -31,7 +31,7 @@ impl FactoryDepsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - factory_deps (bytecode_hash, bytecode, miniblock_number, created_at, updated_at) + factory_deps (bytecode_hash, bytecode, miniblock_number, created_at, updated_at) SELECT u.bytecode_hash, u.bytecode, @@ -39,7 +39,7 @@ impl FactoryDepsDal<'_, '_> { NOW(), NOW() FROM - UNNEST($1::bytea[], $2::bytea[]) AS u (bytecode_hash, bytecode) + UNNEST($1::bytea [], $2::bytea []) AS u (bytecode_hash, bytecode) ON CONFLICT (bytecode_hash) DO NOTHING "#, &bytecode_hashes as &[&[u8]], @@ -136,7 +136,7 @@ impl FactoryDepsDal<'_, '_> { FROM factory_deps WHERE - bytecode_hash = ANY ($1) + bytecode_hash = ANY($1) "#, &hashes_as_bytes as &[&[u8]], ) diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index dada6c69ed3..7943a930277 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -55,7 +55,7 @@ impl ProofGenerationDal<'_, '_> { l1_batch_number FROM proof_generation_details - LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number + LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number WHERE ( vm_run_data_blob_url IS NOT NULL @@ -75,7 +75,7 @@ impl ProofGenerationDal<'_, '_> { 1 ) RETURNING - proof_generation_details.l1_batch_number + proof_generation_details.l1_batch_number "#, &processing_timeout, ) @@ -254,9 +254,9 @@ impl ProofGenerationDal<'_, '_> { let result = sqlx::query!( r#" INSERT INTO - proof_generation_details (l1_batch_number, status, created_at, updated_at) + proof_generation_details (l1_batch_number, status, created_at, updated_at) VALUES - ($1, 'unpicked', NOW(), NOW()) + ($1, 'unpicked', NOW(), NOW()) ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::from(l1_batch_number.0), 
diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 8cb5094fd49..72ae811ce76 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -40,16 +40,16 @@ impl ProtocolVersionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - protocol_versions ( - id, - timestamp, - bootloader_code_hash, - default_account_code_hash, - upgrade_tx_hash, - created_at - ) + protocol_versions ( + id, + timestamp, + bootloader_code_hash, + default_account_code_hash, + upgrade_tx_hash, + created_at + ) VALUES - ($1, $2, $3, $4, $5, NOW()) + ($1, $2, $3, $4, $5, NOW()) ON CONFLICT DO NOTHING "#, version.minor as i32, @@ -71,9 +71,9 @@ impl ProtocolVersionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - protocol_patches (minor, patch, snark_wrapper_vk_hash, created_at) + protocol_patches (minor, patch, snark_wrapper_vk_hash, created_at) VALUES - ($1, $2, $3, NOW()) + ($1, $2, $3, NOW()) ON CONFLICT DO NOTHING "#, version.minor as i32, @@ -236,7 +236,7 @@ impl ProtocolVersionsDal<'_, '_> { protocol_patches.snark_wrapper_vk_hash FROM protocol_versions - JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id + JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id WHERE id = $1 ORDER BY diff --git a/core/lib/dal/src/pruning_dal/mod.rs b/core/lib/dal/src/pruning_dal/mod.rs index 0d1584ebba3..bcd9fdcfc3e 100644 --- a/core/lib/dal/src/pruning_dal/mod.rs +++ b/core/lib/dal/src/pruning_dal/mod.rs @@ -45,32 +45,34 @@ impl PruningDal<'_, '_> { let pruning_info = sqlx::query!( r#" WITH - soft AS ( - SELECT - pruned_l1_batch, - pruned_miniblock - FROM - pruning_log - WHERE - TYPE = 'Soft' - ORDER BY - pruned_l1_batch DESC - LIMIT - 1 - ), - hard AS ( - SELECT - pruned_l1_batch, - pruned_miniblock - FROM - pruning_log - WHERE - TYPE = 'Hard' - ORDER BY - pruned_l1_batch DESC - LIMIT - 1 - ) + soft AS ( + SELECT + pruned_l1_batch, + pruned_miniblock + FROM + pruning_log + WHERE + type = 'Soft' + ORDER BY + pruned_l1_batch DESC + LIMIT + 1 + ), + + hard AS ( + SELECT + pruned_l1_batch, + pruned_miniblock + FROM + pruning_log + WHERE + type = 'Hard' + ORDER BY + pruned_l1_batch DESC + LIMIT + 1 + ) + SELECT soft.pruned_l1_batch AS last_soft_pruned_l1_batch, soft.pruned_miniblock AS last_soft_pruned_miniblock, @@ -78,7 +80,7 @@ impl PruningDal<'_, '_> { hard.pruned_miniblock AS last_hard_pruned_miniblock FROM soft - FULL JOIN hard ON TRUE + FULL JOIN hard ON TRUE "# ) .map(|row| PruningInfo { @@ -110,15 +112,15 @@ impl PruningDal<'_, '_> { sqlx::query!( r#" INSERT INTO - pruning_log ( - pruned_l1_batch, - pruned_miniblock, - TYPE, - created_at, - updated_at - ) + pruning_log ( + pruned_l1_batch, + pruned_miniblock, + type, + created_at, + updated_at + ) VALUES - ($1, $2, $3, NOW(), NOW()) + ($1, $2, $3, NOW(), NOW()) "#, i64::from(last_l1_batch_to_prune.0), i64::from(last_l2_block_to_prune.0), @@ -319,25 +321,28 @@ impl PruningDal<'_, '_> { let execution_result = sqlx::query!( r#" WITH - new_logs AS MATERIALIZED ( - SELECT DISTINCT - ON (hashed_key) hashed_key, - miniblock_number, - operation_number - FROM - storage_logs - WHERE - miniblock_number BETWEEN $1 AND $2 - ORDER BY - hashed_key, - miniblock_number DESC, - operation_number DESC - ) + new_logs AS MATERIALIZED ( + SELECT DISTINCT + ON (hashed_key) + hashed_key, + miniblock_number, + operation_number + FROM + storage_logs + WHERE + miniblock_number BETWEEN $1 AND $2 + ORDER BY + hashed_key, + miniblock_number DESC, + operation_number DESC + ) + DELETE 
FROM storage_logs USING new_logs WHERE storage_logs.hashed_key = new_logs.hashed_key AND storage_logs.miniblock_number <= $2 - AND (storage_logs.miniblock_number, storage_logs.operation_number) < (new_logs.miniblock_number, new_logs.operation_number) + AND (storage_logs.miniblock_number, storage_logs.operation_number) + < (new_logs.miniblock_number, new_logs.operation_number) "#, i64::from(l2_blocks_to_prune.start().0), i64::from(l2_blocks_to_prune.end().0) @@ -392,15 +397,15 @@ impl PruningDal<'_, '_> { sqlx::query!( r#" INSERT INTO - pruning_log ( - pruned_l1_batch, - pruned_miniblock, - TYPE, - created_at, - updated_at - ) + pruning_log ( + pruned_l1_batch, + pruned_miniblock, + type, + created_at, + updated_at + ) VALUES - ($1, $2, $3, NOW(), NOW()) + ($1, $2, $3, NOW(), NOW()) "#, i64::from(last_l1_batch_to_prune.0), i64::from(last_l2_block_to_prune.0), diff --git a/core/lib/dal/src/snapshot_recovery_dal.rs b/core/lib/dal/src/snapshot_recovery_dal.rs index 0b0c0e1735f..72cde14b9c5 100644 --- a/core/lib/dal/src/snapshot_recovery_dal.rs +++ b/core/lib/dal/src/snapshot_recovery_dal.rs @@ -18,20 +18,20 @@ impl SnapshotRecoveryDal<'_, '_> { sqlx::query!( r#" INSERT INTO - snapshot_recovery ( - l1_batch_number, - l1_batch_timestamp, - l1_batch_root_hash, - miniblock_number, - miniblock_timestamp, - miniblock_hash, - protocol_version, - storage_logs_chunks_processed, - updated_at, - created_at - ) + snapshot_recovery ( + l1_batch_number, + l1_batch_timestamp, + l1_batch_root_hash, + miniblock_number, + miniblock_timestamp, + miniblock_hash, + protocol_version, + storage_logs_chunks_processed, + updated_at, + created_at + ) VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), NOW()) + ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), NOW()) "#, i64::from(status.l1_batch_number.0), status.l1_batch_timestamp as i64, diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs index 700e3812018..fdfc2ccf469 100644 --- a/core/lib/dal/src/snapshots_creator_dal.rs +++ b/core/lib/dal/src/snapshots_creator_dal.rs @@ -64,7 +64,7 @@ impl SnapshotsCreatorDal<'_, '_> { ( SELECT hashed_key, - MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op + MAX(ARRAY[miniblock_number, operation_number]::INT []) AS op FROM storage_logs WHERE @@ -76,10 +76,12 @@ impl SnapshotsCreatorDal<'_, '_> { ORDER BY hashed_key ) AS keys - INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key - AND storage_logs.miniblock_number = keys.op[1] - AND storage_logs.operation_number = keys.op[2] - INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key + INNER JOIN storage_logs + ON + keys.hashed_key = storage_logs.hashed_key + AND storage_logs.miniblock_number = keys.op[1] + AND storage_logs.operation_number = keys.op[2] + INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key WHERE initial_writes.l1_batch_number <= $2 "#, @@ -130,7 +132,7 @@ impl SnapshotsCreatorDal<'_, '_> { ( SELECT hashed_key, - MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op + MAX(ARRAY[miniblock_number, operation_number]::INT []) AS op FROM storage_logs WHERE @@ -142,10 +144,12 @@ impl SnapshotsCreatorDal<'_, '_> { ORDER BY hashed_key ) AS keys - INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key - AND storage_logs.miniblock_number = keys.op[1] - AND storage_logs.operation_number = keys.op[2] - INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key + INNER JOIN storage_logs + ON + keys.hashed_key = storage_logs.hashed_key + AND 
storage_logs.miniblock_number = keys.op[1] + AND storage_logs.operation_number = keys.op[2] + INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key WHERE initial_writes.l1_batch_number <= $2 "#, diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs index 950fb6501a4..5ddad933aad 100644 --- a/core/lib/dal/src/snapshots_dal.rs +++ b/core/lib/dal/src/snapshots_dal.rs @@ -54,16 +54,16 @@ impl SnapshotsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - snapshots ( - VERSION, - l1_batch_number, - storage_logs_filepaths, - factory_deps_filepath, - created_at, - updated_at - ) + snapshots ( + version, + l1_batch_number, + storage_logs_filepaths, + factory_deps_filepath, + created_at, + updated_at + ) VALUES - ($1, $2, ARRAY_FILL(''::TEXT, ARRAY[$3::INTEGER]), $4, NOW(), NOW()) + ($1, $2, ARRAY_FILL(''::TEXT, ARRAY[$3::INTEGER]), $4, NOW(), NOW()) "#, version as i32, l1_batch_number.0 as i32, @@ -115,7 +115,7 @@ impl SnapshotsDal<'_, '_> { FROM snapshots WHERE - NOT (''::TEXT = ANY (storage_logs_filepaths)) + NOT (''::TEXT = ANY(storage_logs_filepaths)) ORDER BY l1_batch_number DESC "# @@ -198,10 +198,10 @@ impl SnapshotsDal<'_, '_> { WHERE l1_batch_number > $1 RETURNING - VERSION, - l1_batch_number, - factory_deps_filepath, - storage_logs_filepaths + version, + l1_batch_number, + factory_deps_filepath, + storage_logs_filepaths "#, last_retained_l1_batch_number.0 as i32 ) diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index 50f41f31333..adad6eb7e1d 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -186,7 +186,8 @@ impl StorageLogsDal<'_, '_> { ) -> DalResult> { let rows = sqlx::query!( r#" - SELECT DISTINCT + SELECT + DISTINCT hashed_key FROM storage_logs @@ -290,13 +291,14 @@ impl StorageLogsDal<'_, '_> { let rows = sqlx::query!( r#" SELECT DISTINCT - ON (hashed_key) hashed_key, + ON (hashed_key) + hashed_key, miniblock_number, value FROM storage_logs WHERE - hashed_key = ANY ($1) + hashed_key = ANY($1) AND miniblock_number <= $2 AND miniblock_number <= COALESCE( ( @@ -539,11 +541,11 @@ impl StorageLogsDal<'_, '_> { SELECT hashed_key, l1_batch_number, - INDEX + index FROM initial_writes WHERE - hashed_key = ANY ($1::bytea[]) + hashed_key = ANY($1::bytea []) "#, &hashed_keys as &[&[u8]], ) @@ -621,7 +623,7 @@ impl StorageLogsDal<'_, '_> { 1 ) AS "value?" 
FROM - UNNEST($1::bytea[]) AS u (hashed_key) + UNNEST($1::bytea []) AS u (hashed_key) "#, &hashed_keys as &[&[u8]], i64::from(l2_block_number.0) @@ -717,32 +719,33 @@ impl StorageLogsDal<'_, '_> { let rows = sqlx::query!( r#" WITH - sl AS ( - SELECT - ( - SELECT - ARRAY[hashed_key, value] AS kv - FROM - storage_logs - WHERE - storage_logs.miniblock_number = $1 - AND storage_logs.hashed_key >= u.start_key - AND storage_logs.hashed_key <= u.end_key - ORDER BY - storage_logs.hashed_key - LIMIT - 1 - ) - FROM - UNNEST($2::bytea[], $3::bytea[]) AS u (start_key, end_key) - ) + sl AS ( + SELECT + ( + SELECT + ARRAY[hashed_key, value] AS kv + FROM + storage_logs + WHERE + storage_logs.miniblock_number = $1 + AND storage_logs.hashed_key >= u.start_key + AND storage_logs.hashed_key <= u.end_key + ORDER BY + storage_logs.hashed_key + LIMIT + 1 + ) + FROM + UNNEST($2::bytea [], $3::bytea []) AS u (start_key, end_key) + ) + SELECT sl.kv[1] AS "hashed_key?", sl.kv[2] AS "value?", initial_writes.index FROM sl - LEFT OUTER JOIN initial_writes ON initial_writes.hashed_key = sl.kv[1] + LEFT OUTER JOIN initial_writes ON initial_writes.hashed_key = sl.kv[1] "#, i64::from(l2_block_number.0), &start_keys as &[&[u8]], @@ -779,7 +782,7 @@ impl StorageLogsDal<'_, '_> { initial_writes.index FROM storage_logs - INNER JOIN initial_writes ON storage_logs.hashed_key = initial_writes.hashed_key + INNER JOIN initial_writes ON storage_logs.hashed_key = initial_writes.hashed_key WHERE storage_logs.miniblock_number = $1 AND storage_logs.hashed_key >= $2::bytea diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index f02ac6c4cf4..a94c237bc5b 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -93,7 +93,7 @@ impl StorageLogsDedupDal<'_, '_> { sqlx::query!( r#" INSERT INTO - initial_writes (hashed_key, INDEX, l1_batch_number, created_at, updated_at) + initial_writes (hashed_key, index, l1_batch_number, created_at, updated_at) SELECT u.hashed_key, u.index, @@ -101,7 +101,7 @@ impl StorageLogsDedupDal<'_, '_> { NOW(), NOW() FROM - UNNEST($1::bytea[], $2::BIGINT[]) AS u (hashed_key, INDEX) + UNNEST($1::bytea [], $2::bigint []) AS u (hashed_key, index) "#, &hashed_keys as &[&[u8]], &indices, @@ -202,13 +202,13 @@ impl StorageLogsDedupDal<'_, '_> { r#" SELECT hashed_key, - INDEX + index FROM initial_writes WHERE l1_batch_number = $1 ORDER BY - INDEX + index "#, i64::from(l1_batch_number.0) ) @@ -279,7 +279,7 @@ impl StorageLogsDedupDal<'_, '_> { FROM initial_writes WHERE - hashed_key = ANY ($1) + hashed_key = ANY($1) "#, &hashed_keys as &[&[u8]], ) @@ -299,7 +299,7 @@ impl StorageLogsDedupDal<'_, '_> { SELECT hashed_key, l1_batch_number, - INDEX + index FROM initial_writes "# diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index f54ac766ee8..f3a20ac39fa 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -253,7 +253,7 @@ impl StorageWeb3Dal<'_, '_> { LIMIT 1 ) t - JOIN factory_deps ON value = factory_deps.bytecode_hash + JOIN factory_deps ON value = factory_deps.bytecode_hash WHERE value != $3 "#, diff --git a/core/lib/dal/src/system_dal.rs b/core/lib/dal/src/system_dal.rs index 9ef375a8d56..105665fa2ec 100644 --- a/core/lib/dal/src/system_dal.rs +++ b/core/lib/dal/src/system_dal.rs @@ -30,8 +30,8 @@ impl SystemDal<'_, '_> { EXTRACT( seconds FROM - NOW() - PG_LAST_XACT_REPLAY_TIMESTAMP() - )::INT AS LAG + NOW() - PG_LAST_XACT_REPLAY_TIMESTAMP() 
+ )::INT AS lag "# ) .instrument("get_replication_lag") @@ -49,10 +49,18 @@ impl SystemDal<'_, '_> { r#" SELECT table_name, - PG_TABLE_SIZE(('public.' || QUOTE_IDENT(table_name))::regclass) AS table_size, - PG_INDEXES_SIZE(('public.' || QUOTE_IDENT(table_name))::regclass) AS indexes_size, - PG_RELATION_SIZE(('public.' || QUOTE_IDENT(table_name))::regclass) AS relation_size, - PG_TOTAL_RELATION_SIZE(('public.' || QUOTE_IDENT(table_name))::regclass) AS total_size + PG_TABLE_SIZE( + ('public.' || QUOTE_IDENT(table_name))::regclass + ) AS table_size, + PG_INDEXES_SIZE( + ('public.' || QUOTE_IDENT(table_name))::regclass + ) AS indexes_size, + PG_RELATION_SIZE( + ('public.' || QUOTE_IDENT(table_name))::regclass + ) AS relation_size, + PG_TOTAL_RELATION_SIZE( + ('public.' || QUOTE_IDENT(table_name))::regclass + ) AS total_size FROM information_schema.tables WHERE diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index cc6b87a07ac..db56b9d0e3e 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -53,7 +53,9 @@ impl TeeProofGenerationDal<'_, '_> { proofs.l1_batch_number FROM tee_proof_generation_details AS proofs - JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number + JOIN + tee_verifier_input_producer_jobs AS inputs + ON proofs.l1_batch_number = inputs.l1_batch_number WHERE inputs.status = $3 AND ( @@ -69,10 +71,10 @@ impl TeeProofGenerationDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - tee_proof_generation_details.l1_batch_number + tee_proof_generation_details.l1_batch_number "#, TeeProofGenerationJobStatus::PickedByProver.to_string(), tee_type.to_string(), @@ -183,9 +185,11 @@ impl TeeProofGenerationDal<'_, '_> { let query = sqlx::query!( r#" INSERT INTO - tee_proof_generation_details (l1_batch_number, tee_type, status, created_at, updated_at) + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at + ) VALUES - ($1, $2, $3, NOW(), NOW()) + ($1, $2, $3, NOW(), NOW()) ON CONFLICT (l1_batch_number, tee_type) DO NOTHING "#, batch_number, @@ -208,9 +212,9 @@ impl TeeProofGenerationDal<'_, '_> { let query = sqlx::query!( r#" INSERT INTO - tee_attestations (pubkey, attestation) + tee_attestations (pubkey, attestation) VALUES - ($1, $2) + ($1, $2) ON CONFLICT (pubkey) DO NOTHING "#, pubkey, @@ -274,7 +278,9 @@ impl TeeProofGenerationDal<'_, '_> { proofs.l1_batch_number FROM tee_proof_generation_details AS proofs - JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number + JOIN + tee_verifier_input_producer_jobs AS inputs + ON proofs.l1_batch_number = inputs.l1_batch_number WHERE inputs.status = $1 AND proofs.status = $2 diff --git a/core/lib/dal/src/tee_verifier_input_producer_dal.rs b/core/lib/dal/src/tee_verifier_input_producer_dal.rs index 4a178fd5225..dddb451a2d7 100644 --- a/core/lib/dal/src/tee_verifier_input_producer_dal.rs +++ b/core/lib/dal/src/tee_verifier_input_producer_dal.rs @@ -55,9 +55,11 @@ impl TeeVerifierInputProducerDal<'_, '_> { sqlx::query!( r#" INSERT INTO - tee_verifier_input_producer_jobs (l1_batch_number, status, created_at, updated_at) + tee_verifier_input_producer_jobs ( + l1_batch_number, status, created_at, updated_at + ) VALUES - ($1, $2, NOW(), NOW()) + ($1, $2, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::from(l1_batch_number.0), @@ -104,10 +106,10 @@ impl TeeVerifierInputProducerDal<'_, 
'_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - tee_verifier_input_producer_jobs.l1_batch_number + tee_verifier_input_producer_jobs.l1_batch_number "#, TeeVerifierInputProducerJobStatus::InProgress as TeeVerifierInputProducerJobStatus, TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, @@ -197,7 +199,7 @@ impl TeeVerifierInputProducerDal<'_, '_> { l1_batch_number = $2 AND status != $5 RETURNING - tee_verifier_input_producer_jobs.attempts + tee_verifier_input_producer_jobs.attempts "#, TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, i64::from(l1_batch_number.0), diff --git a/core/lib/dal/src/tokens_dal.rs b/core/lib/dal/src/tokens_dal.rs index b34b913c45c..218e152fa82 100644 --- a/core/lib/dal/src/tokens_dal.rs +++ b/core/lib/dal/src/tokens_dal.rs @@ -110,7 +110,7 @@ impl TokensDal<'_, '_> { r#" DELETE FROM tokens WHERE - l2_address = ANY ($1) + l2_address = ANY($1) "#, &token_addresses_to_be_removed as &[_] ) diff --git a/core/lib/dal/src/tokens_web3_dal.rs b/core/lib/dal/src/tokens_web3_dal.rs index 00c7a69385d..794de18163e 100644 --- a/core/lib/dal/src/tokens_web3_dal.rs +++ b/core/lib/dal/src/tokens_web3_dal.rs @@ -43,7 +43,7 @@ impl TokensWeb3Dal<'_, '_> { SELECT l1_address, l2_address, - NAME, + name, symbol, decimals FROM @@ -95,7 +95,7 @@ impl TokensWeb3Dal<'_, '_> { SELECT l1_address, l2_address, - NAME, + name, symbol, decimals FROM diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 9f63427491a..67c965312bd 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -88,53 +88,53 @@ impl TransactionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - transactions ( - hash, - is_priority, - initiator_address, - gas_limit, - max_fee_per_gas, - gas_per_pubdata_limit, - data, - priority_op_id, - full_fee, - layer_2_tip_fee, - contract_address, - l1_block_number, - value, - paymaster, - paymaster_input, - tx_format, - l1_tx_mint, - l1_tx_refund_recipient, - received_at, - created_at, - updated_at - ) + transactions ( + hash, + is_priority, + initiator_address, + gas_limit, + max_fee_per_gas, + gas_per_pubdata_limit, + data, + priority_op_id, + full_fee, + layer_2_tip_fee, + contract_address, + l1_block_number, + value, + paymaster, + paymaster_input, + tx_format, + l1_tx_mint, + l1_tx_refund_recipient, + received_at, + created_at, + updated_at + ) VALUES - ( - $1, - TRUE, - $2, - $3, - $4, - $5, - $6, - $7, - $8, - $9, - $10, - $11, - $12, - $13, - $14, - $15, - $16, - $17, - $18, - NOW(), - NOW() - ) + ( + $1, + TRUE, + $2, + $3, + $4, + $5, + $6, + $7, + $8, + $9, + $10, + $11, + $12, + $13, + $14, + $15, + $16, + $17, + $18, + NOW(), + NOW() + ) ON CONFLICT (hash) DO NOTHING "#, tx_hash_bytes, @@ -190,49 +190,49 @@ impl TransactionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - transactions ( - hash, - is_priority, - initiator_address, - gas_limit, - max_fee_per_gas, - gas_per_pubdata_limit, - data, - upgrade_id, - contract_address, - l1_block_number, - value, - paymaster, - paymaster_input, - tx_format, - l1_tx_mint, - l1_tx_refund_recipient, - received_at, - created_at, - updated_at - ) + transactions ( + hash, + is_priority, + initiator_address, + gas_limit, + max_fee_per_gas, + gas_per_pubdata_limit, + data, + upgrade_id, + contract_address, + l1_block_number, + value, + paymaster, + paymaster_input, + tx_format, + l1_tx_mint, + l1_tx_refund_recipient, + received_at, + created_at, + updated_at + ) VALUES - ( - $1, - TRUE, - $2, 
- $3, - $4, - $5, - $6, - $7, - $8, - $9, - $10, - $11, - $12, - $13, - $14, - $15, - $16, - NOW(), - NOW() - ) + ( + $1, + TRUE, + $2, + $3, + $4, + $5, + $6, + $7, + $8, + $9, + $10, + $11, + $12, + $13, + $14, + $15, + $16, + NOW(), + NOW() + ) ON CONFLICT (hash) DO NOTHING "#, tx_hash, @@ -326,86 +326,101 @@ impl TransactionsDal<'_, '_> { let query_result = sqlx::query!( r#" INSERT INTO - transactions ( - hash, - is_priority, - initiator_address, - nonce, - signature, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - gas_per_pubdata_limit, - input, - data, - tx_format, - contract_address, - value, - paymaster, - paymaster_input, - execution_info, - received_at, - created_at, - updated_at - ) + transactions ( + hash, + is_priority, + initiator_address, + nonce, + signature, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + gas_per_pubdata_limit, + input, + data, + tx_format, + contract_address, + value, + paymaster, + paymaster_input, + execution_info, + received_at, + created_at, + updated_at + ) VALUES - ( - $1, - FALSE, - $2, - $3, - $4, - $5, - $6, - $7, - $8, - $9, - $10, - $11, - $12, - $13, - $14, - $15, - JSONB_BUILD_OBJECT('gas_used', $16::BIGINT, 'storage_writes', $17::INT, 'contracts_used', $18::INT), - $19, - NOW(), - NOW() - ) + ( + $1, + FALSE, + $2, + $3, + $4, + $5, + $6, + $7, + $8, + $9, + $10, + $11, + $12, + $13, + $14, + $15, + JSONB_BUILD_OBJECT( + 'gas_used', + $16::BIGINT, + 'storage_writes', + $17::INT, + 'contracts_used', + $18::INT + ), + $19, + NOW(), + NOW() + ) ON CONFLICT (initiator_address, nonce) DO UPDATE SET - hash = $1, - signature = $4, - gas_limit = $5, - max_fee_per_gas = $6, - max_priority_fee_per_gas = $7, - gas_per_pubdata_limit = $8, - input = $9, - data = $10, - tx_format = $11, - contract_address = $12, - value = $13, - paymaster = $14, - paymaster_input = $15, - execution_info = JSONB_BUILD_OBJECT('gas_used', $16::BIGINT, 'storage_writes', $17::INT, 'contracts_used', $18::INT), - in_mempool = FALSE, - received_at = $19, - created_at = NOW(), - updated_at = NOW(), - error = NULL + hash = $1, + signature = $4, + gas_limit = $5, + max_fee_per_gas = $6, + max_priority_fee_per_gas = $7, + gas_per_pubdata_limit = $8, + input = $9, + data = $10, + tx_format = $11, + contract_address = $12, + value = $13, + paymaster = $14, + paymaster_input = $15, + execution_info + = JSONB_BUILD_OBJECT( + 'gas_used', + $16::BIGINT, + 'storage_writes', + $17::INT, + 'contracts_used', + $18::INT + ), + in_mempool = FALSE, + received_at = $19, + created_at = NOW(), + updated_at = NOW(), + error = NULL WHERE - transactions.is_priority = FALSE - AND transactions.miniblock_number IS NULL + transactions.is_priority = FALSE + AND transactions.miniblock_number IS NULL RETURNING - ( - SELECT - hash - FROM - transactions - WHERE - transactions.initiator_address = $2 - AND transactions.nonce = $3 - ) IS NOT NULL AS "is_replaced!" + ( + SELECT + hash + FROM + transactions + WHERE + transactions.initiator_address = $2 + AND transactions.nonce = $3 + ) IS NOT NULL AS "is_replaced!" 
"#, tx_hash.as_bytes(), initiator_address.as_bytes(), @@ -489,8 +504,8 @@ impl TransactionsDal<'_, '_> { FROM ( SELECT - UNNEST($1::INT[]) AS l1_batch_tx_index, - UNNEST($2::BYTEA[]) AS hash + UNNEST($1::INT []) AS l1_batch_tx_index, + UNNEST($2::BYTEA []) AS hash ) AS data_table WHERE transactions.hash = data_table.hash @@ -535,7 +550,7 @@ impl TransactionsDal<'_, '_> { r#" DELETE FROM transactions WHERE - hash = ANY ($1) + hash = ANY($1) "#, &tx_hashes as &[&[u8]], ) @@ -604,12 +619,12 @@ impl TransactionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - call_traces (tx_hash, call_trace) + call_traces (tx_hash, call_trace) SELECT u.tx_hash, u.call_trace FROM - UNNEST($1::bytea[], $2::bytea[]) AS u (tx_hash, call_trace) + UNNEST($1::bytea [], $2::bytea []) AS u (tx_hash, call_trace) "#, &call_traces_tx_hashes as &[&[u8]], &bytea_call_traces @@ -735,33 +750,33 @@ impl TransactionsDal<'_, '_> { let query = sqlx::query!( r#" INSERT INTO - transactions ( - hash, - is_priority, - initiator_address, - nonce, - signature, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - gas_per_pubdata_limit, - input, - data, - tx_format, - contract_address, - value, - paymaster, - paymaster_input, - execution_info, - miniblock_number, - index_in_block, - error, - effective_gas_price, - refunded_gas, - received_at, - created_at, - updated_at - ) + transactions ( + hash, + is_priority, + initiator_address, + nonce, + signature, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + gas_per_pubdata_limit, + input, + data, + tx_format, + contract_address, + value, + paymaster, + paymaster_input, + execution_info, + miniblock_number, + index_in_block, + error, + effective_gas_price, + refunded_gas, + received_at, + created_at, + updated_at + ) SELECT data_table.hash, FALSE, @@ -791,26 +806,26 @@ impl TransactionsDal<'_, '_> { FROM ( SELECT - UNNEST($1::bytea[]) AS hash, - UNNEST($2::bytea[]) AS initiator_address, - UNNEST($3::INT[]) AS nonce, - UNNEST($4::bytea[]) AS signature, - UNNEST($5::NUMERIC[]) AS gas_limit, - UNNEST($6::NUMERIC[]) AS max_fee_per_gas, - UNNEST($7::NUMERIC[]) AS max_priority_fee_per_gas, - UNNEST($8::NUMERIC[]) AS gas_per_pubdata_limit, - UNNEST($9::bytea[]) AS input, - UNNEST($10::jsonb[]) AS data, - UNNEST($11::INT[]) AS tx_format, - UNNEST($12::bytea[]) AS contract_address, - UNNEST($13::NUMERIC[]) AS value, - UNNEST($14::bytea[]) AS paymaster, - UNNEST($15::bytea[]) AS paymaster_input, - UNNEST($16::jsonb[]) AS new_execution_info, - UNNEST($17::INTEGER[]) AS index_in_block, - UNNEST($18::VARCHAR[]) AS error, - UNNEST($19::NUMERIC[]) AS effective_gas_price, - UNNEST($20::BIGINT[]) AS refunded_gas + UNNEST($1::bytea []) AS hash, + UNNEST($2::bytea []) AS initiator_address, + UNNEST($3::int []) AS nonce, + UNNEST($4::bytea []) AS signature, + UNNEST($5::numeric []) AS gas_limit, + UNNEST($6::numeric []) AS max_fee_per_gas, + UNNEST($7::numeric []) AS max_priority_fee_per_gas, + UNNEST($8::numeric []) AS gas_per_pubdata_limit, + UNNEST($9::bytea []) AS input, + UNNEST($10::jsonb []) AS data, + UNNEST($11::int []) AS tx_format, + UNNEST($12::bytea []) AS contract_address, + UNNEST($13::numeric []) AS value, + UNNEST($14::bytea []) AS paymaster, + UNNEST($15::bytea []) AS paymaster_input, + UNNEST($16::jsonb []) AS new_execution_info, + UNNEST($17::integer []) AS index_in_block, + UNNEST($18::varchar []) AS error, + UNNEST($19::numeric []) AS effective_gas_price, + UNNEST($20::bigint []) AS refunded_gas ) AS data_table "#, &l2_hashes as &[&[u8]], @@ -974,29 +989,32 @@ impl 
TransactionsDal<'_, '_> { FROM ( SELECT - UNNEST($1::bytea[]) AS initiator_address, - UNNEST($2::INT[]) AS nonce, - UNNEST($3::bytea[]) AS hash, - UNNEST($4::bytea[]) AS signature, - UNNEST($5::NUMERIC[]) AS gas_limit, - UNNEST($6::NUMERIC[]) AS max_fee_per_gas, - UNNEST($7::NUMERIC[]) AS max_priority_fee_per_gas, - UNNEST($8::NUMERIC[]) AS gas_per_pubdata_limit, - UNNEST($9::INT[]) AS tx_format, - UNNEST($10::INTEGER[]) AS index_in_block, - UNNEST($11::VARCHAR[]) AS error, - UNNEST($12::NUMERIC[]) AS effective_gas_price, - UNNEST($13::jsonb[]) AS new_execution_info, - UNNEST($14::bytea[]) AS input, - UNNEST($15::jsonb[]) AS data, - UNNEST($16::BIGINT[]) AS refunded_gas, - UNNEST($17::NUMERIC[]) AS value, - UNNEST($18::bytea[]) AS contract_address, - UNNEST($19::bytea[]) AS paymaster, - UNNEST($20::bytea[]) AS paymaster_input + UNNEST($1::bytea []) AS initiator_address, + UNNEST($2::int []) AS nonce, + UNNEST($3::bytea []) AS hash, + UNNEST($4::bytea []) AS signature, + UNNEST($5::numeric []) AS gas_limit, + UNNEST($6::numeric []) AS max_fee_per_gas, + UNNEST($7::numeric []) AS max_priority_fee_per_gas, + UNNEST($8::numeric []) AS gas_per_pubdata_limit, + UNNEST($9::int []) AS tx_format, + UNNEST($10::integer []) AS index_in_block, + UNNEST($11::varchar []) AS error, + UNNEST($12::numeric []) AS effective_gas_price, + UNNEST($13::jsonb []) AS new_execution_info, + UNNEST($14::bytea []) AS input, + UNNEST($15::jsonb []) AS data, + UNNEST($16::bigint []) AS refunded_gas, + UNNEST($17::numeric []) AS value, + UNNEST($18::bytea []) AS contract_address, + UNNEST($19::bytea []) AS paymaster, + UNNEST($20::bytea []) AS paymaster_input ) AS data_table_temp - JOIN transactions ON transactions.initiator_address = data_table_temp.initiator_address - AND transactions.nonce = data_table_temp.nonce + JOIN transactions + ON + transactions.initiator_address + = data_table_temp.initiator_address + AND transactions.nonce = data_table_temp.nonce ORDER BY transactions.hash ) AS data_table @@ -1122,35 +1140,35 @@ impl TransactionsDal<'_, '_> { let query = sqlx::query!( r#" INSERT INTO - transactions ( - hash, - is_priority, - initiator_address, - gas_limit, - max_fee_per_gas, - gas_per_pubdata_limit, - data, - priority_op_id, - full_fee, - layer_2_tip_fee, - contract_address, - l1_block_number, - value, - paymaster, - paymaster_input, - tx_format, - l1_tx_mint, - l1_tx_refund_recipient, - miniblock_number, - index_in_block, - error, - execution_info, - refunded_gas, - effective_gas_price, - received_at, - created_at, - updated_at - ) + transactions ( + hash, + is_priority, + initiator_address, + gas_limit, + max_fee_per_gas, + gas_per_pubdata_limit, + data, + priority_op_id, + full_fee, + layer_2_tip_fee, + contract_address, + l1_block_number, + value, + paymaster, + paymaster_input, + tx_format, + l1_tx_mint, + l1_tx_refund_recipient, + miniblock_number, + index_in_block, + error, + execution_info, + refunded_gas, + effective_gas_price, + received_at, + created_at, + updated_at + ) SELECT data_table.hash, TRUE, @@ -1182,26 +1200,26 @@ impl TransactionsDal<'_, '_> { FROM ( SELECT - UNNEST($1::bytea[]) AS hash, - UNNEST($2::bytea[]) AS initiator_address, - UNNEST($3::NUMERIC[]) AS gas_limit, - UNNEST($4::NUMERIC[]) AS max_fee_per_gas, - UNNEST($5::NUMERIC[]) AS gas_per_pubdata_limit, - UNNEST($6::jsonb[]) AS data, - UNNEST($7::BIGINT[]) AS priority_op_id, - UNNEST($8::NUMERIC[]) AS full_fee, - UNNEST($9::NUMERIC[]) AS layer_2_tip_fee, - UNNEST($10::bytea[]) AS contract_address, - UNNEST($11::INT[]) AS 
l1_block_number, - UNNEST($12::NUMERIC[]) AS value, - UNNEST($13::INTEGER[]) AS tx_format, - UNNEST($14::NUMERIC[]) AS l1_tx_mint, - UNNEST($15::bytea[]) AS l1_tx_refund_recipient, - UNNEST($16::INT[]) AS index_in_block, - UNNEST($17::VARCHAR[]) AS error, - UNNEST($18::jsonb[]) AS execution_info, - UNNEST($19::BIGINT[]) AS refunded_gas, - UNNEST($20::NUMERIC[]) AS effective_gas_price + UNNEST($1::bytea []) AS hash, + UNNEST($2::bytea []) AS initiator_address, + UNNEST($3::numeric []) AS gas_limit, + UNNEST($4::numeric []) AS max_fee_per_gas, + UNNEST($5::numeric []) AS gas_per_pubdata_limit, + UNNEST($6::jsonb []) AS data, + UNNEST($7::bigint []) AS priority_op_id, + UNNEST($8::numeric []) AS full_fee, + UNNEST($9::numeric []) AS layer_2_tip_fee, + UNNEST($10::bytea []) AS contract_address, + UNNEST($11::int []) AS l1_block_number, + UNNEST($12::numeric []) AS value, + UNNEST($13::integer []) AS tx_format, + UNNEST($14::numeric []) AS l1_tx_mint, + UNNEST($15::bytea []) AS l1_tx_refund_recipient, + UNNEST($16::int []) AS index_in_block, + UNNEST($17::varchar []) AS error, + UNNEST($18::jsonb []) AS execution_info, + UNNEST($19::bigint []) AS refunded_gas, + UNNEST($20::numeric []) AS effective_gas_price ) AS data_table "#, &l1_hashes as &[&[u8]], @@ -1300,12 +1318,12 @@ impl TransactionsDal<'_, '_> { FROM ( SELECT - UNNEST($2::bytea[]) AS hash, - UNNEST($3::INTEGER[]) AS index_in_block, - UNNEST($4::VARCHAR[]) AS error, - UNNEST($5::jsonb[]) AS new_execution_info, - UNNEST($6::BIGINT[]) AS refunded_gas, - UNNEST($7::NUMERIC[]) AS effective_gas_price + UNNEST($2::bytea []) AS hash, + UNNEST($3::integer []) AS index_in_block, + UNNEST($4::varchar []) AS error, + UNNEST($5::jsonb []) AS new_execution_info, + UNNEST($6::bigint []) AS refunded_gas, + UNNEST($7::numeric []) AS effective_gas_price ) AS data_table WHERE transactions.hash = data_table.hash @@ -1413,33 +1431,33 @@ impl TransactionsDal<'_, '_> { let query = sqlx::query!( r#" INSERT INTO - transactions ( - hash, - is_priority, - initiator_address, - gas_limit, - max_fee_per_gas, - gas_per_pubdata_limit, - data, - upgrade_id, - contract_address, - l1_block_number, - value, - paymaster, - paymaster_input, - tx_format, - l1_tx_mint, - l1_tx_refund_recipient, - miniblock_number, - index_in_block, - error, - execution_info, - refunded_gas, - effective_gas_price, - received_at, - created_at, - updated_at - ) + transactions ( + hash, + is_priority, + initiator_address, + gas_limit, + max_fee_per_gas, + gas_per_pubdata_limit, + data, + upgrade_id, + contract_address, + l1_block_number, + value, + paymaster, + paymaster_input, + tx_format, + l1_tx_mint, + l1_tx_refund_recipient, + miniblock_number, + index_in_block, + error, + execution_info, + refunded_gas, + effective_gas_price, + received_at, + created_at, + updated_at + ) SELECT data_table.hash, TRUE, @@ -1469,24 +1487,24 @@ impl TransactionsDal<'_, '_> { FROM ( SELECT - UNNEST($1::bytea[]) AS hash, - UNNEST($2::bytea[]) AS initiator_address, - UNNEST($3::NUMERIC[]) AS gas_limit, - UNNEST($4::NUMERIC[]) AS max_fee_per_gas, - UNNEST($5::NUMERIC[]) AS gas_per_pubdata_limit, - UNNEST($6::jsonb[]) AS data, - UNNEST($7::INT[]) AS upgrade_id, - UNNEST($8::bytea[]) AS contract_address, - UNNEST($9::INT[]) AS l1_block_number, - UNNEST($10::NUMERIC[]) AS value, - UNNEST($11::INTEGER[]) AS tx_format, - UNNEST($12::NUMERIC[]) AS l1_tx_mint, - UNNEST($13::bytea[]) AS l1_tx_refund_recipient, - UNNEST($14::INT[]) AS index_in_block, - UNNEST($15::VARCHAR[]) AS error, - UNNEST($16::jsonb[]) AS 
execution_info, - UNNEST($17::BIGINT[]) AS refunded_gas, - UNNEST($18::NUMERIC[]) AS effective_gas_price + UNNEST($1::bytea []) AS hash, + UNNEST($2::bytea []) AS initiator_address, + UNNEST($3::numeric []) AS gas_limit, + UNNEST($4::numeric []) AS max_fee_per_gas, + UNNEST($5::numeric []) AS gas_per_pubdata_limit, + UNNEST($6::jsonb []) AS data, + UNNEST($7::int []) AS upgrade_id, + UNNEST($8::bytea []) AS contract_address, + UNNEST($9::int []) AS l1_block_number, + UNNEST($10::numeric []) AS value, + UNNEST($11::integer []) AS tx_format, + UNNEST($12::numeric []) AS l1_tx_mint, + UNNEST($13::bytea []) AS l1_tx_refund_recipient, + UNNEST($14::int []) AS index_in_block, + UNNEST($15::varchar []) AS error, + UNNEST($16::jsonb []) AS execution_info, + UNNEST($17::bigint []) AS refunded_gas, + UNNEST($18::numeric []) AS effective_gas_price ) AS data_table "#, &upgrade_hashes as &[&[u8]], @@ -1585,12 +1603,12 @@ impl TransactionsDal<'_, '_> { FROM ( SELECT - UNNEST($2::bytea[]) AS hash, - UNNEST($3::INTEGER[]) AS index_in_block, - UNNEST($4::VARCHAR[]) AS error, - UNNEST($5::jsonb[]) AS new_execution_info, - UNNEST($6::BIGINT[]) AS refunded_gas, - UNNEST($7::NUMERIC[]) AS effective_gas_price + UNNEST($2::bytea []) AS hash, + UNNEST($3::integer []) AS index_in_block, + UNNEST($4::varchar []) AS error, + UNNEST($5::jsonb []) AS new_execution_info, + UNNEST($6::bigint []) AS refunded_gas, + UNNEST($7::numeric []) AS effective_gas_price ) AS data_table WHERE transactions.hash = data_table.hash @@ -1651,7 +1669,7 @@ impl TransactionsDal<'_, '_> { WHERE miniblock_number > $1 RETURNING - hash + hash "#, i64::from(l2_block_number.0) ) @@ -1665,7 +1683,7 @@ impl TransactionsDal<'_, '_> { r#" DELETE FROM call_traces WHERE - tx_hash = ANY ($1) + tx_hash = ANY($1) "#, &tx_hashes as &[&[u8]] ) @@ -1688,7 +1706,7 @@ impl TransactionsDal<'_, '_> { AND is_priority = FALSE AND error IS NULL RETURNING - hash + hash "#, stuck_tx_timeout ) @@ -1717,7 +1735,7 @@ impl TransactionsDal<'_, '_> { SET in_mempool = FALSE FROM - UNNEST($1::bytea[]) AS s (address) + UNNEST($1::bytea []) AS s (address) WHERE transactions.in_mempool = TRUE AND transactions.initiator_address = s.address @@ -1735,7 +1753,7 @@ impl TransactionsDal<'_, '_> { DELETE FROM transactions WHERE in_mempool = TRUE - AND initiator_address = ANY ($1) + AND initiator_address = ANY($1) "#, &purged_addresses as &[&[u8]] ) @@ -1786,7 +1804,7 @@ impl TransactionsDal<'_, '_> { WHERE transactions.hash = subquery2.hash RETURNING - transactions.* + transactions.* "#, limit as i32, BigDecimal::from(fee_per_gas), @@ -2121,7 +2139,7 @@ impl TransactionsDal<'_, '_> { index_in_block FROM transactions - INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number + INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number WHERE transactions.hash = $1 "#, diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index 3ec1f7e6d0c..dcf5f25f104 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -49,19 +49,20 @@ impl TransactionsWeb3Dal<'_, '_> { StorageTransactionReceipt, r#" WITH - events AS ( - SELECT DISTINCT - ON (events.tx_hash) * - FROM - events - WHERE - events.address = $1 - AND events.topic1 = $2 - AND events.tx_hash = ANY ($3) - ORDER BY - events.tx_hash, - events.event_index_in_tx DESC - ) + events AS ( + SELECT DISTINCT + ON (events.tx_hash) * + FROM + events + WHERE + events.address = $1 + AND events.topic1 = $2 + AND events.tx_hash = 
ANY($3) + ORDER BY + events.tx_hash, + events.event_index_in_tx DESC + ) + SELECT transactions.hash AS tx_hash, transactions.index_in_block, @@ -81,10 +82,10 @@ impl TransactionsWeb3Dal<'_, '_> { miniblocks.timestamp AS "block_timestamp?" FROM transactions - JOIN miniblocks ON miniblocks.number = transactions.miniblock_number - LEFT JOIN events ON events.tx_hash = transactions.hash + JOIN miniblocks ON miniblocks.number = transactions.miniblock_number + LEFT JOIN events ON events.tx_hash = transactions.hash WHERE - transactions.hash = ANY ($3) + transactions.hash = ANY($3) AND transactions.data != '{}'::jsonb "#, // ^ Filter out transactions with pruned data, which would lead to potentially incomplete / bogus @@ -302,17 +303,20 @@ impl TransactionsWeb3Dal<'_, '_> { execute_tx.tx_hash AS "eth_execute_tx_hash?" FROM transactions - LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number - LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number - LEFT JOIN eth_txs_history AS commit_tx ON ( + LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number + LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number + LEFT JOIN eth_txs_history AS commit_tx + ON ( l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id AND commit_tx.confirmed_at IS NOT NULL ) - LEFT JOIN eth_txs_history AS prove_tx ON ( + LEFT JOIN eth_txs_history AS prove_tx + ON ( l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id AND prove_tx.confirmed_at IS NOT NULL ) - LEFT JOIN eth_txs_history AS execute_tx ON ( + LEFT JOIN eth_txs_history AS execute_tx + ON ( l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id AND execute_tx.confirmed_at IS NOT NULL ) @@ -439,7 +443,7 @@ impl TransactionsWeb3Dal<'_, '_> { transactions.* FROM transactions - INNER JOIN miniblocks ON miniblocks.number = transactions.miniblock_number + INNER JOIN miniblocks ON miniblocks.number = transactions.miniblock_number WHERE miniblocks.number BETWEEN $1 AND $2 ORDER BY diff --git a/core/lib/dal/src/vm_runner_dal.rs b/core/lib/dal/src/vm_runner_dal.rs index 37ef1517d54..b12b0218680 100644 --- a/core/lib/dal/src/vm_runner_dal.rs +++ b/core/lib/dal/src/vm_runner_dal.rs @@ -37,25 +37,27 @@ impl VmRunnerDal<'_, '_> { let row = sqlx::query!( r#" WITH - available_batches AS ( - SELECT - MAX(number) AS "last_batch" - FROM - l1_batches - ), - processed_batches AS ( - SELECT - COALESCE(MAX(l1_batch_number), $1) + $2 AS "last_ready_batch" - FROM - vm_runner_protective_reads - WHERE - time_taken IS NOT NULL - ) + available_batches AS ( + SELECT + MAX(number) AS "last_batch" + FROM + l1_batches + ), + + processed_batches AS ( + SELECT + COALESCE(MAX(l1_batch_number), $1) + $2 AS "last_ready_batch" + FROM + vm_runner_protective_reads + WHERE + time_taken IS NOT NULL + ) + SELECT LEAST(last_batch, last_ready_batch) AS "last_ready_batch!" 
FROM available_batches - FULL JOIN processed_batches ON TRUE + FULL JOIN processed_batches ON TRUE "#, default_batch.0 as i32, window_size as i32 @@ -74,14 +76,16 @@ impl VmRunnerDal<'_, '_> { sqlx::query!( r#" INSERT INTO - vm_runner_protective_reads (l1_batch_number, created_at, updated_at, processing_started_at) + vm_runner_protective_reads ( + l1_batch_number, created_at, updated_at, processing_started_at + ) VALUES - ($1, NOW(), NOW(), NOW()) + ($1, NOW(), NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE SET - updated_at = NOW(), - processing_started_at = NOW() + updated_at = NOW(), + processing_started_at = NOW() "#, i64::from(l1_batch_number.0), ) @@ -196,25 +200,27 @@ impl VmRunnerDal<'_, '_> { let row = sqlx::query!( r#" WITH - available_batches AS ( - SELECT - MAX(number) AS "last_batch" - FROM - l1_batches - ), - processed_batches AS ( - SELECT - COALESCE(MAX(l1_batch_number), $1) + $2 AS "last_ready_batch" - FROM - vm_runner_bwip - WHERE - time_taken IS NOT NULL - ) + available_batches AS ( + SELECT + MAX(number) AS "last_batch" + FROM + l1_batches + ), + + processed_batches AS ( + SELECT + COALESCE(MAX(l1_batch_number), $1) + $2 AS "last_ready_batch" + FROM + vm_runner_bwip + WHERE + time_taken IS NOT NULL + ) + SELECT LEAST(last_batch, last_ready_batch) AS "last_ready_batch!" FROM available_batches - FULL JOIN processed_batches ON TRUE + FULL JOIN processed_batches ON TRUE "#, default_batch.0 as i32, window_size as i32 @@ -233,14 +239,14 @@ impl VmRunnerDal<'_, '_> { sqlx::query!( r#" INSERT INTO - vm_runner_bwip (l1_batch_number, created_at, updated_at, processing_started_at) + vm_runner_bwip (l1_batch_number, created_at, updated_at, processing_started_at) VALUES - ($1, NOW(), NOW(), NOW()) + ($1, NOW(), NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE SET - updated_at = NOW(), - processing_started_at = NOW() + updated_at = NOW(), + processing_started_at = NOW() "#, i64::from(l1_batch_number.0), ) diff --git a/infrastructure/zk/src/init.ts b/infrastructure/zk/src/init.ts index 96194b169e9..6dbad67b489 100644 --- a/infrastructure/zk/src/init.ts +++ b/infrastructure/zk/src/init.ts @@ -15,13 +15,9 @@ import * as run from './run'; import * as server from './server'; import { createVolumes, up } from './up'; -const fs = require('fs'); -const yaml = require('yaml'); - // Checks if all required tools are installed with the correct versions -const checkEnv = async (runObservability: boolean): Promise => { +const checkEnv = async (): Promise => { const tools = ['node', 'yarn', 'docker', 'cargo']; - for (const tool of tools) { await utils.exec(`which ${tool}`); } @@ -41,36 +37,6 @@ const submoduleUpdate = async (): Promise => { await utils.exec('git submodule update'); }; -// clone dockprom and zksync-era dashboards -const setupObservability = async (): Promise => { - // clone dockprom, era-observability repos and export era dashboards to dockprom - await utils.spawn( - `rm -rf ./target/dockprom && git clone https://github.com/stefanprodan/dockprom.git ./target/dockprom \ - && rm -rf ./target/era-observability && git clone https://github.com/matter-labs/era-observability.git ./target/era-observability \ - && cp ./target/era-observability/dashboards/* ./target/dockprom/grafana/provisioning/dashboards - ` - ); - - const fileContents = fs.readFileSync('./target/dockprom/prometheus/prometheus.yml', 'utf8'); - let config = yaml.parse(fileContents); - config.scrape_configs.push({ - job_name: 'proxy-blob-retriever', - scrape_interval: '5s', - honor_labels: true, - static_configs: 
[{ targets: ['host.docker.internal:7070'] }] - }); - config.scrape_configs.push({ - job_name: 'zksync', - scrape_interval: '5s', - honor_labels: true, - static_configs: [{ targets: ['host.docker.internal:3312'] }] - }); - const newYaml = yaml.stringify(config); - fs.writeFileSync('./target/dockprom/prometheus/prometheus.yml', newYaml, 'utf8'); - - await utils.spawn('cp EigenDA.json ./target/dockprom/grafana/provisioning/dashboards/EigenDA.json'); -}; - // Sets up docker environment and compiles contracts type InitSetupOptions = { skipEnvSetup: boolean; @@ -84,10 +50,6 @@ const initSetup = async ({ runObservability, deploymentMode }: InitSetupOptions): Promise => { - if (runObservability) { - await announced('Pulling observability repos', setupObservability()); - } - await announced( `Initializing in ${deploymentMode == contract.DeploymentMode.Validium ? 'Validium mode' : 'Roll-up mode'}` ); @@ -96,7 +58,7 @@ const initSetup = async ({ } if (!process.env.CI && !skipEnvSetup) { await announced('Pulling images', docker.pull()); - await announced('Checking environment', checkEnv(runObservability)); + await announced('Checking environment', checkEnv()); await announced('Checking git hooks', env.gitHooks()); await announced('Create volumes', createVolumes()); await announced('Setting up containers', up(runObservability)); diff --git a/prover/crates/lib/prover_dal/src/cli_test_dal.rs b/prover/crates/lib/prover_dal/src/cli_test_dal.rs index d0841820337..850e92b4136 100644 --- a/prover/crates/lib/prover_dal/src/cli_test_dal.rs +++ b/prover/crates/lib/prover_dal/src/cli_test_dal.rs @@ -51,20 +51,20 @@ impl CliTestDal<'_, '_> { sqlx::query!( r#" INSERT INTO - leaf_aggregation_witness_jobs_fri ( - l1_batch_number, - circuit_id, - status, - number_of_basic_circuits, - created_at, - updated_at - ) + leaf_aggregation_witness_jobs_fri ( + l1_batch_number, + circuit_id, + status, + number_of_basic_circuits, + created_at, + updated_at + ) VALUES - ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW()) + ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id) DO UPDATE SET - status = $3 + status = $3 "#, batch_number.0 as i64, circuit_id as i16, @@ -84,13 +84,15 @@ impl CliTestDal<'_, '_> { sqlx::query!( r#" INSERT INTO - node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, status, created_at, updated_at) + node_aggregation_witness_jobs_fri ( + l1_batch_number, circuit_id, status, created_at, updated_at + ) VALUES - ($1, $2, 'waiting_for_proofs', NOW(), NOW()) + ($1, $2, 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id, depth) DO UPDATE SET - status = $3 + status = $3 "#, batch_number.0 as i64, circuit_id as i16, @@ -105,13 +107,15 @@ impl CliTestDal<'_, '_> { sqlx::query!( r#" INSERT INTO - recursion_tip_witness_jobs_fri (l1_batch_number, status, number_of_final_node_jobs, created_at, updated_at) + recursion_tip_witness_jobs_fri ( + l1_batch_number, status, number_of_final_node_jobs, created_at, updated_at + ) VALUES - ($1, 'waiting_for_proofs', 1, NOW(), NOW()) + ($1, 'waiting_for_proofs', 1, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE SET - status = $2 + status = $2 "#, batch_number.0 as i64, status.to_string(), @@ -129,19 +133,19 @@ impl CliTestDal<'_, '_> { sqlx::query!( r#" INSERT INTO - scheduler_witness_jobs_fri ( - l1_batch_number, - scheduler_partial_input_blob_url, - status, - created_at, - updated_at - ) + scheduler_witness_jobs_fri ( + l1_batch_number, + scheduler_partial_input_blob_url, + status, + created_at, + 
updated_at + ) VALUES - ($1, '', 'waiting_for_proofs', NOW(), NOW()) + ($1, '', 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE SET - status = $2 + status = $2 "#, batch_number.0 as i64, status.to_string(), @@ -159,13 +163,13 @@ impl CliTestDal<'_, '_> { sqlx::query!( r#" INSERT INTO - proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at) + proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at) VALUES - ($1, $2, NOW(), NOW()) + ($1, $2, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE SET - status = $2 + status = $2 "#, batch_number.0 as i64, status.to_string(), diff --git a/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs b/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs index 12f719d6401..63ae446d472 100644 --- a/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs @@ -52,10 +52,10 @@ impl FriGpuProverQueueDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - gpu_prover_queue_fri.* + gpu_prover_queue_fri.* "#, &processing_timeout, i16::from(specialized_prover_group_id), @@ -84,28 +84,28 @@ impl FriGpuProverQueueDal<'_, '_> { sqlx::query!( r#" INSERT INTO - gpu_prover_queue_fri ( - instance_host, - instance_port, - instance_status, - specialized_prover_group_id, - zone, - created_at, - updated_at, - protocol_version, - protocol_version_patch - ) + gpu_prover_queue_fri ( + instance_host, + instance_port, + instance_status, + specialized_prover_group_id, + zone, + created_at, + updated_at, + protocol_version, + protocol_version_patch + ) VALUES - (CAST($1::TEXT AS INET), $2, 'available', $3, $4, NOW(), NOW(), $5, $6) + (CAST($1::TEXT AS INET), $2, 'available', $3, $4, NOW(), NOW(), $5, $6) ON CONFLICT (instance_host, instance_port, zone) DO UPDATE SET - instance_status = 'available', - specialized_prover_group_id = $3, - zone = $4, - updated_at = NOW(), - protocol_version = $5, - protocol_version_patch = $6 + instance_status = 'available', + specialized_prover_group_id = $3, + zone = $4, + updated_at = NOW(), + protocol_version = $5, + protocol_version_patch = $6 "#, address.host.to_string(), i32::from(address.port), diff --git a/prover/crates/lib/prover_dal/src/fri_proof_compressor_dal.rs b/prover/crates/lib/prover_dal/src/fri_proof_compressor_dal.rs index 31b121e51e4..205dbc784c4 100644 --- a/prover/crates/lib/prover_dal/src/fri_proof_compressor_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_proof_compressor_dal.rs @@ -27,17 +27,17 @@ impl FriProofCompressorDal<'_, '_> { sqlx::query!( r#" INSERT INTO - proof_compression_jobs_fri ( - l1_batch_number, - fri_proof_blob_url, - status, - created_at, - updated_at, - protocol_version, - protocol_version_patch - ) + proof_compression_jobs_fri ( + l1_batch_number, + fri_proof_blob_url, + status, + created_at, + updated_at, + protocol_version, + protocol_version_patch + ) VALUES - ($1, $2, $3, NOW(), NOW(), $4, $5) + ($1, $2, $3, NOW(), NOW(), $4, $5) ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::from(block_number.0), @@ -80,10 +80,10 @@ impl FriProofCompressorDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - proof_compression_jobs_fri.l1_batch_number + proof_compression_jobs_fri.l1_batch_number "#, ProofCompressionJobStatus::InProgress.to_string(), ProofCompressionJobStatus::Queued.to_string(), @@ -246,11 +246,11 @@ impl FriProofCompressorDal<'_, '_> { protocol_version_patch, COUNT(*) FILTER ( WHERE - status = 'queued' + 
status = 'queued' ) AS queued, COUNT(*) FILTER ( WHERE - status = 'in_progress' + status = 'in_progress' ) AS in_progress FROM proof_compression_jobs_fri @@ -328,11 +328,11 @@ impl FriProofCompressorDal<'_, '_> { AND attempts < $2 ) RETURNING - l1_batch_number, - status, - attempts, - error, - picked_by + l1_batch_number, + status, + attempts, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -435,10 +435,10 @@ impl FriProofCompressorDal<'_, '_> { OR status = 'failed' ) RETURNING - status, - attempts, - error, - picked_by + status, + attempts, + error, + picked_by "#, i64::from(block_number.0), max_attempts as i32, diff --git a/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs b/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs index 8c1c497eede..7128344e5b3 100644 --- a/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs @@ -20,9 +20,11 @@ impl FriProtocolVersionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - prover_fri_protocol_versions (id, snark_wrapper_vk_hash, created_at, protocol_version_patch) + prover_fri_protocol_versions ( + id, snark_wrapper_vk_hash, created_at, protocol_version_patch + ) VALUES - ($1, $2, NOW(), $3) + ($1, $2, NOW(), $3) ON CONFLICT (id, protocol_version_patch) DO NOTHING "#, id.minor as i32, diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs index 71d0c11728b..a0420b05612 100644 --- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -93,16 +93,16 @@ impl FriProverDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - prover_jobs_fri.id, - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.aggregation_round, - prover_jobs_fri.sequence_number, - prover_jobs_fri.depth, - prover_jobs_fri.is_node_final_proof + prover_jobs_fri.id, + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.aggregation_round, + prover_jobs_fri.sequence_number, + prover_jobs_fri.depth, + prover_jobs_fri.is_node_final_proof "#, protocol_version.minor as i32, protocol_version.patch.0 as i32, @@ -154,16 +154,16 @@ impl FriProverDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - prover_jobs_fri.id, - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.aggregation_round, - prover_jobs_fri.sequence_number, - prover_jobs_fri.depth, - prover_jobs_fri.is_node_final_proof + prover_jobs_fri.id, + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.aggregation_round, + prover_jobs_fri.sequence_number, + prover_jobs_fri.depth, + prover_jobs_fri.is_node_final_proof "#, protocol_version.minor as i32, protocol_version.patch.0 as i32, @@ -216,25 +216,25 @@ impl FriProverDal<'_, '_> { SELECT * FROM - UNNEST($1::SMALLINT[], $2::SMALLINT[]) - ) AS tuple (circuit_id, ROUND) - JOIN LATERAL ( - SELECT - * - FROM - prover_jobs_fri AS pj - WHERE - pj.status = 'queued' - AND pj.protocol_version = $3 - AND pj.protocol_version_patch = $4 - AND pj.circuit_id = tuple.circuit_id - AND pj.aggregation_round = tuple.round - ORDER BY - pj.l1_batch_number ASC, - pj.id ASC - LIMIT - 1 - ) AS pj ON TRUE + UNNEST($1::SMALLINT [], $2::SMALLINT []) + ) AS tuple (circuit_id, round) + JOIN LATERAL ( + SELECT + * + FROM + prover_jobs_fri AS pj + WHERE + pj.status = 'queued' + AND pj.protocol_version = $3 + AND 
pj.protocol_version_patch = $4 + AND pj.circuit_id = tuple.circuit_id + AND pj.aggregation_round = tuple.round + ORDER BY + pj.l1_batch_number ASC, + pj.id ASC + LIMIT + 1 + ) AS pj ON TRUE ORDER BY pj.l1_batch_number ASC, pj.aggregation_round DESC, @@ -242,16 +242,16 @@ impl FriProverDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - prover_jobs_fri.id, - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.aggregation_round, - prover_jobs_fri.sequence_number, - prover_jobs_fri.depth, - prover_jobs_fri.is_node_final_proof + prover_jobs_fri.id, + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.aggregation_round, + prover_jobs_fri.sequence_number, + prover_jobs_fri.depth, + prover_jobs_fri.is_node_final_proof "#, &circuit_ids[..], &aggregation_rounds[..], @@ -332,13 +332,13 @@ impl FriProverDal<'_, '_> { WHERE id = $3 RETURNING - prover_jobs_fri.id, - prover_jobs_fri.l1_batch_number, - prover_jobs_fri.circuit_id, - prover_jobs_fri.aggregation_round, - prover_jobs_fri.sequence_number, - prover_jobs_fri.depth, - prover_jobs_fri.is_node_final_proof + prover_jobs_fri.id, + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.aggregation_round, + prover_jobs_fri.sequence_number, + prover_jobs_fri.depth, + prover_jobs_fri.is_node_final_proof "#, duration_to_naive_time(time_taken), blob_url, @@ -394,15 +394,15 @@ impl FriProverDal<'_, '_> { AND attempts < $2 ) FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - id, - status, - attempts, - circuit_id, - error, - picked_by + id, + status, + attempts, + circuit_id, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -438,26 +438,28 @@ impl FriProverDal<'_, '_> { sqlx::query!( r#" INSERT INTO - prover_jobs_fri ( - l1_batch_number, - circuit_id, - circuit_blob_url, - aggregation_round, - sequence_number, - depth, - is_node_final_proof, - protocol_version, - status, - created_at, - updated_at, - protocol_version_patch - ) + prover_jobs_fri ( + l1_batch_number, + circuit_id, + circuit_blob_url, + aggregation_round, + sequence_number, + depth, + is_node_final_proof, + protocol_version, + status, + created_at, + updated_at, + protocol_version_patch + ) VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, 'queued', NOW(), NOW(), $9) - ON CONFLICT (l1_batch_number, aggregation_round, circuit_id, depth, sequence_number) DO + ($1, $2, $3, $4, $5, $6, $7, $8, 'queued', NOW(), NOW(), $9) + ON CONFLICT ( + l1_batch_number, aggregation_round, circuit_id, depth, sequence_number + ) DO UPDATE SET - updated_at = NOW() + updated_at = NOW() "#, i64::from(l1_batch_number.0), i16::from(circuit_id), @@ -530,11 +532,11 @@ impl FriProverDal<'_, '_> { protocol_version_patch AS "protocol_version_patch!", COUNT(*) FILTER ( WHERE - status = 'queued' + status = 'queued' ) AS queued, COUNT(*) FILTER ( WHERE - status = 'in_progress' + status = 'in_progress' ) AS in_progress FROM prover_jobs_fri @@ -828,12 +830,12 @@ impl FriProverDal<'_, '_> { OR status = 'failed' ) RETURNING - id, - status, - attempts, - circuit_id, - error, - picked_by + id, + status, + attempts, + circuit_id, + error, + picked_by "#, i64::from(block_number.0), max_attempts as i32, diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 2040b444044..cd96edc21b0 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ 
-49,17 +49,17 @@ impl FriWitnessGeneratorDal<'_, '_> { sqlx::query!( r#" INSERT INTO - witness_inputs_fri ( - l1_batch_number, - witness_inputs_blob_url, - protocol_version, - status, - created_at, - updated_at, - protocol_version_patch - ) + witness_inputs_fri ( + l1_batch_number, + witness_inputs_blob_url, + protocol_version, + status, + created_at, + updated_at, + protocol_version_patch + ) VALUES - ($1, $2, $3, 'queued', NOW(), NOW(), $4) + ($1, $2, $3, 'queued', NOW(), NOW(), $4) ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::from(block_number.0), @@ -103,10 +103,10 @@ impl FriWitnessGeneratorDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - witness_inputs_fri.l1_batch_number + witness_inputs_fri.l1_batch_number "#, protocol_version.minor as i32, picked_by, @@ -207,11 +207,11 @@ impl FriWitnessGeneratorDal<'_, '_> { AND attempts < $2 ) RETURNING - l1_batch_number, - status, - attempts, - error, - picked_by + l1_batch_number, + status, + attempts, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -256,23 +256,23 @@ impl FriWitnessGeneratorDal<'_, '_> { sqlx::query!( r#" INSERT INTO - leaf_aggregation_witness_jobs_fri ( - l1_batch_number, - circuit_id, - closed_form_inputs_blob_url, - number_of_basic_circuits, - protocol_version, - status, - created_at, - updated_at, - protocol_version_patch - ) + leaf_aggregation_witness_jobs_fri ( + l1_batch_number, + circuit_id, + closed_form_inputs_blob_url, + number_of_basic_circuits, + protocol_version, + status, + created_at, + updated_at, + protocol_version_patch + ) VALUES - ($1, $2, $3, $4, $5, 'waiting_for_proofs', NOW(), NOW(), $6) + ($1, $2, $3, $4, $5, 'waiting_for_proofs', NOW(), NOW(), $6) ON CONFLICT (l1_batch_number, circuit_id) DO UPDATE SET - updated_at = NOW() + updated_at = NOW() "#, i64::from(block_number.0), i16::from(*circuit_id), @@ -299,21 +299,21 @@ impl FriWitnessGeneratorDal<'_, '_> { sqlx::query!( r#" INSERT INTO - recursion_tip_witness_jobs_fri ( - l1_batch_number, - status, - number_of_final_node_jobs, - protocol_version, - created_at, - updated_at, - protocol_version_patch - ) + recursion_tip_witness_jobs_fri ( + l1_batch_number, + status, + number_of_final_node_jobs, + protocol_version, + created_at, + updated_at, + protocol_version_patch + ) VALUES - ($1, 'waiting_for_proofs', $2, $3, NOW(), NOW(), $4) + ($1, 'waiting_for_proofs', $2, $3, NOW(), NOW(), $4) ON CONFLICT (l1_batch_number) DO UPDATE SET - updated_at = NOW() + updated_at = NOW() "#, block_number.0 as i64, closed_form_inputs_and_urls.len() as i32, @@ -327,21 +327,21 @@ impl FriWitnessGeneratorDal<'_, '_> { sqlx::query!( r#" INSERT INTO - scheduler_witness_jobs_fri ( - l1_batch_number, - scheduler_partial_input_blob_url, - protocol_version, - status, - created_at, - updated_at, - protocol_version_patch - ) + scheduler_witness_jobs_fri ( + l1_batch_number, + scheduler_partial_input_blob_url, + protocol_version, + status, + created_at, + updated_at, + protocol_version_patch + ) VALUES - ($1, $2, $3, 'waiting_for_proofs', NOW(), NOW(), $4) + ($1, $2, $3, 'waiting_for_proofs', NOW(), NOW(), $4) ON CONFLICT (l1_batch_number) DO UPDATE SET - updated_at = NOW() + updated_at = NOW() "#, i64::from(block_number.0), scheduler_partial_input_blob_url, @@ -386,10 +386,10 @@ impl FriWitnessGeneratorDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - leaf_aggregation_witness_jobs_fri.* + leaf_aggregation_witness_jobs_fri.* "#, protocol_version.minor as i32, protocol_version.patch.0 as i32, @@ -464,8 
+464,10 @@ impl FriWitnessGeneratorDal<'_, '_> { prover_jobs_fri.circuit_id FROM prover_jobs_fri - JOIN leaf_aggregation_witness_jobs_fri lawj ON prover_jobs_fri.l1_batch_number = lawj.l1_batch_number - AND prover_jobs_fri.circuit_id = lawj.circuit_id + JOIN leaf_aggregation_witness_jobs_fri lawj + ON + prover_jobs_fri.l1_batch_number = lawj.l1_batch_number + AND prover_jobs_fri.circuit_id = lawj.circuit_id WHERE lawj.status = 'waiting_for_proofs' AND prover_jobs_fri.status = 'successful' @@ -478,8 +480,8 @@ impl FriWitnessGeneratorDal<'_, '_> { COUNT(*) = lawj.number_of_basic_circuits ) RETURNING - l1_batch_number, - circuit_id; + l1_batch_number, + circuit_id; "#, ) .fetch_all(self.storage.conn()) @@ -552,10 +554,10 @@ impl FriWitnessGeneratorDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - node_aggregation_witness_jobs_fri.* + node_aggregation_witness_jobs_fri.* "#, protocol_version.minor as i32, protocol_version.patch.0 as i32, @@ -617,24 +619,24 @@ impl FriWitnessGeneratorDal<'_, '_> { sqlx::query!( r#" INSERT INTO - node_aggregation_witness_jobs_fri ( - l1_batch_number, - circuit_id, - depth, - aggregations_url, - number_of_dependent_jobs, - protocol_version, - status, - created_at, - updated_at, - protocol_version_patch - ) + node_aggregation_witness_jobs_fri ( + l1_batch_number, + circuit_id, + depth, + aggregations_url, + number_of_dependent_jobs, + protocol_version, + status, + created_at, + updated_at, + protocol_version_patch + ) VALUES - ($1, $2, $3, $4, $5, $6, 'waiting_for_proofs', NOW(), NOW(), $7) + ($1, $2, $3, $4, $5, $6, 'waiting_for_proofs', NOW(), NOW(), $7) ON CONFLICT (l1_batch_number, circuit_id, depth) DO UPDATE SET - updated_at = NOW() + updated_at = NOW() "#, i64::from(block_number.0), i16::from(circuit_id), @@ -663,9 +665,11 @@ impl FriWitnessGeneratorDal<'_, '_> { prover_jobs_fri.depth FROM prover_jobs_fri - JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number - AND prover_jobs_fri.circuit_id = nawj.circuit_id - AND prover_jobs_fri.depth = nawj.depth + JOIN node_aggregation_witness_jobs_fri nawj + ON + prover_jobs_fri.l1_batch_number = nawj.l1_batch_number + AND prover_jobs_fri.circuit_id = nawj.circuit_id + AND prover_jobs_fri.depth = nawj.depth WHERE nawj.status = 'waiting_for_proofs' AND prover_jobs_fri.status = 'successful' @@ -680,9 +684,9 @@ impl FriWitnessGeneratorDal<'_, '_> { COUNT(*) = nawj.number_of_dependent_jobs ) RETURNING - l1_batch_number, - circuit_id, - depth; + l1_batch_number, + circuit_id, + depth; "#, ) .fetch_all(self.storage.conn()) @@ -707,9 +711,11 @@ impl FriWitnessGeneratorDal<'_, '_> { prover_jobs_fri.depth FROM prover_jobs_fri - JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number - AND prover_jobs_fri.circuit_id = nawj.circuit_id - AND prover_jobs_fri.depth = nawj.depth + JOIN node_aggregation_witness_jobs_fri nawj + ON + prover_jobs_fri.l1_batch_number = nawj.l1_batch_number + AND prover_jobs_fri.circuit_id = nawj.circuit_id + AND prover_jobs_fri.depth = nawj.depth WHERE nawj.status = 'waiting_for_proofs' AND prover_jobs_fri.status = 'successful' @@ -723,9 +729,9 @@ impl FriWitnessGeneratorDal<'_, '_> { COUNT(*) = nawj.number_of_dependent_jobs ) RETURNING - l1_batch_number, - circuit_id, - depth; + l1_batch_number, + circuit_id, + depth; "#, ) .fetch_all(self.storage.conn()) @@ -748,7 +754,9 @@ impl FriWitnessGeneratorDal<'_, '_> { prover_jobs_fri.l1_batch_number FROM prover_jobs_fri - JOIN 
recursion_tip_witness_jobs_fri rtwj ON prover_jobs_fri.l1_batch_number = rtwj.l1_batch_number + JOIN + recursion_tip_witness_jobs_fri rtwj + ON prover_jobs_fri.l1_batch_number = rtwj.l1_batch_number WHERE rtwj.status = 'waiting_for_proofs' AND prover_jobs_fri.status = 'successful' @@ -761,7 +769,7 @@ impl FriWitnessGeneratorDal<'_, '_> { COUNT(*) = rtwj.number_of_final_node_jobs ) RETURNING - l1_batch_number; + l1_batch_number; "#, AggregationRound::NodeAggregation as i64, ) @@ -785,14 +793,16 @@ impl FriWitnessGeneratorDal<'_, '_> { prover_jobs_fri.l1_batch_number FROM prover_jobs_fri - JOIN scheduler_witness_jobs_fri swj ON prover_jobs_fri.l1_batch_number = swj.l1_batch_number + JOIN + scheduler_witness_jobs_fri swj + ON prover_jobs_fri.l1_batch_number = swj.l1_batch_number WHERE swj.status = 'waiting_for_proofs' AND prover_jobs_fri.status = 'successful' AND prover_jobs_fri.aggregation_round = $1 ) RETURNING - l1_batch_number; + l1_batch_number; "#, AggregationRound::RecursionTip as i64, ) @@ -828,12 +838,12 @@ impl FriWitnessGeneratorDal<'_, '_> { AND attempts < $2 ) RETURNING - id, - status, - attempts, - circuit_id, - error, - picked_by + id, + status, + attempts, + circuit_id, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -877,12 +887,12 @@ impl FriWitnessGeneratorDal<'_, '_> { AND attempts < $2 ) RETURNING - id, - status, - attempts, - circuit_id, - error, - picked_by + id, + status, + attempts, + circuit_id, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -926,11 +936,11 @@ impl FriWitnessGeneratorDal<'_, '_> { AND attempts < $2 ) RETURNING - l1_batch_number, - status, - attempts, - error, - picked_by + l1_batch_number, + status, + attempts, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -979,11 +989,11 @@ impl FriWitnessGeneratorDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - recursion_tip_witness_jobs_fri.l1_batch_number, - recursion_tip_witness_jobs_fri.number_of_final_node_jobs + recursion_tip_witness_jobs_fri.l1_batch_number, + recursion_tip_witness_jobs_fri.number_of_final_node_jobs "#, protocol_version.minor as i32, protocol_version.patch.0 as i32, @@ -1042,11 +1052,11 @@ impl FriWitnessGeneratorDal<'_, '_> { AND attempts < $2 ) RETURNING - l1_batch_number, - status, - attempts, - error, - picked_by + l1_batch_number, + status, + attempts, + error, + picked_by "#, &processing_timeout, max_attempts as i32, @@ -1095,10 +1105,10 @@ impl FriWitnessGeneratorDal<'_, '_> { LIMIT 1 FOR UPDATE - SKIP LOCKED + SKIP LOCKED ) RETURNING - scheduler_witness_jobs_fri.* + scheduler_witness_jobs_fri.* "#, protocol_version.minor as i32, picked_by, @@ -1586,11 +1596,11 @@ impl FriWitnessGeneratorDal<'_, '_> { OR status = 'failed' ) RETURNING - l1_batch_number, - status, - attempts, - error, - picked_by + l1_batch_number, + status, + attempts, + error, + picked_by "#, i64::from(block_number.0), max_attempts as i64 @@ -1656,11 +1666,11 @@ impl FriWitnessGeneratorDal<'_, '_> { OR status = 'failed' ) RETURNING - l1_batch_number, - status, - attempts, - error, - picked_by + l1_batch_number, + status, + attempts, + error, + picked_by "#, i64::from(block_number.0), max_attempts as i64 @@ -1700,11 +1710,11 @@ impl FriWitnessGeneratorDal<'_, '_> { OR status = 'failed' ) RETURNING - l1_batch_number, - status, - attempts, - error, - picked_by + l1_batch_number, + status, + attempts, + error, + picked_by "#, i64::from(block_number.0), max_attempts as i64 @@ -1793,7 +1803,7 @@ impl FriWitnessGeneratorDal<'_, '_> 
{ wit.created_at FROM proof_compression_jobs_fri AS comp - JOIN witness_inputs_fri AS wit ON comp.l1_batch_number = wit.l1_batch_number + JOIN witness_inputs_fri AS wit ON comp.l1_batch_number = wit.l1_batch_number WHERE wit.created_at > $1 ORDER BY diff --git a/restore-ecosystem.sh b/restore-ecosystem.sh index 7fbff3e9802..400f8ab69d1 100755 --- a/restore-ecosystem.sh +++ b/restore-ecosystem.sh @@ -1,5 +1,8 @@ #!/bin/bash +# This script restores the Postgres databases and chain configuration files for a given ecosystem. +# It allows you to recover the L2 state from a previous backup. + # Check if the ecosystem name was provided as an argument if [ -z "$1" ]; then echo "Usage: ./restore-ecosystem ECOSYSTEM_NAME" diff --git a/zk_toolbox/crates/common/src/git.rs b/zk_toolbox/crates/common/src/git.rs index e44112842a8..ea6540c20b2 100644 --- a/zk_toolbox/crates/common/src/git.rs +++ b/zk_toolbox/crates/common/src/git.rs @@ -38,9 +38,3 @@ pub fn pull(shell: &Shell, link_to_code: PathBuf) -> anyhow::Result<()> { Cmd::new(cmd!(shell, "git pull origin {current_branch}")).run()?; Ok(()) } - -pub fn checkout(shell: &Shell, path: PathBuf, branch: &str) -> anyhow::Result<()> { - let _dir_guard = shell.push_dir(path); - Cmd::new(cmd!(shell, "git checkout {branch}")).run()?; - Ok(()) -} diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs index ab2935ac02d..80b204cc619 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zk_toolbox/crates/config/src/consts.rs @@ -31,7 +31,7 @@ pub const ERA_OBSERVABILITY_COMPOSE_FILE: &str = "era-observability/docker-compo /// Path to era observability repository pub const ERA_OBSERBAVILITY_DIR: &str = "era-observability"; /// Era observability repo link -pub const ERA_OBSERBAVILITY_GIT_REPO: &str = "https://github.com/lambdaclass/era-observability"; +pub const ERA_OBSERBAVILITY_GIT_REPO: &str = "https://github.com/matter-labs/era-observability"; pub(crate) const LOCAL_APPS_PATH: &str = "apps/"; pub(crate) const LOCAL_CHAINS_PATH: &str = "chains/"; pub(crate) const LOCAL_CONFIGS_PATH: &str = "configs/"; diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs index 9dca67f3b25..f20c3c24157 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs @@ -20,8 +20,6 @@ pub fn run(shell: &Shell) -> anyhow::Result<()> { ERA_OBSERBAVILITY_GIT_REPO, ERA_OBSERBAVILITY_DIR, )?; - - git::checkout(shell, path_to_era_observability, "eigenda")?; spinner.finish(); Ok(()) From 30fdc5903b85b53356f557c6bbd350d5cc3f14da Mon Sep 17 00:00:00 2001 From: Juan Munoz Date: Tue, 8 Oct 2024 12:36:59 -0300 Subject: [PATCH 17/36] remove arguments from ecosystem init --- eigenda-integration.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/eigenda-integration.md b/eigenda-integration.md index a80676deea1..8666392b900 100644 --- a/eigenda-integration.md +++ b/eigenda-integration.md @@ -61,8 +61,6 @@ zk_inception ecosystem init \ --l1-rpc-url http://127.0.0.1:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_eigen_da \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_eigen_da \ --chain eigen_da \ --verbose ``` From 27cebccad9e1dab14a2380634f5f56d4af5ba88b Mon Sep 17 00:00:00 2001 From: Gianbelinche
<39842759+gianbelinche@users.noreply.github.com> Date: Tue, 8 Oct 2024 12:54:35 -0300 Subject: [PATCH 18/36] Update queries (#294) --- ...db7dbe0bb23c4fc87a78be2d01b77da2ecbd3.json | 23 ---- ...772b3002289a05ab23b7dafde2af10c7d88da.json | 24 ++++ ...69d57dbfda4be1f70232afffca82a895d43e0.json | 36 ++++++ ...c3bdae18b88c4c223e374df9a997c271b091b.json | 38 ------ ...b0dd75acbe4248159d2fb62d9e9878c158016.json | 18 +++ ...6f5521c888c5bc9050333dd18a88c098b2d12.json | 15 +++ ...50e42abbaf365a1b041d0e7a809796ef0fe63.json | 22 ---- ...95254f94dddb4ed84a675c2655421758b049.json} | 4 +- ...74ddd4f2422089604768b19e0b6d948c0cf1b.json | 100 ++++++++++++++++ ...3897edf8c868094ad029e2e8fcf286d44fd55.json | 16 --- ...8275cef018c7060c49b9f7a387a14f6a5be8.json} | 4 +- ...3db7a71aca15698bafba051a8d9a91a4dbc76.json | 112 ------------------ ...4032f3d900b74e62dfdec0c9f61bf2bacb595.json | 21 ++++ ...cb2685cda1ae7ecca83062ede7320c3b4a427.json | 15 +++ ...6e94d0b4f902c84e31fca606773cf4c9d17fc.json | 16 +++ ...ecbc877c8e04d47bf16b9c2e59ba652d2955f.json | 14 +++ ...6d922fa1fc9c202072fbc04cae1bbf97195aa.json | 14 --- ...6a12866726e1f3ca9cf9d89f61c4e97373361.json | 16 +++ ...6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json | 16 --- ...6e7a755c4bc6c25c7e6caff5fd6142813d349.json | 16 --- ...3b54a1965e100e0949006212f97e6d11a18b.json} | 4 +- ...aafe4b1aa2250fc535bfcdff39172551d42b.json} | 4 +- ...0ded732839b9f5bf16042205a730fac07c3a.json} | 4 +- ...ddbee82a65c00c5740e9017382c57c279b18.json} | 4 +- ...84d2fcc5d436e52cc82ed2aba664bde3ee6b.json} | 4 +- ...4ec52b3eb68c346492a8fed98f20f2a0381d.json} | 4 +- ...f1a855e76b37b6ed36ae612b551f9a6a55633.json | 18 --- ...3dcb740d1d2750fe17fb5330e50323b96b49.json} | 4 +- ...b9086ba42bac53629793a42d67b353c30210.json} | 4 +- ...c801788d834bc1ea9b8194e4461c3df979f8b.json | 32 ----- ...62f3339768a85aaff9a509901e9f42b09097b.json | 28 +++++ ...feb5f094160627bc09db4bda2dda9a8c11c44.json | 15 --- ...f8dcb1642688940bc52831498479b250de2b1.json | 112 ++++++++++++++++++ ...5c0665ee3ea4086ddb91428fa1b6a00760737.json | 35 ------ ...119a0a60dc9804175b2baf8b45939c74bd583.json | 15 --- ...b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json} | 4 +- ...de8ba36685dde66d1b3fcf549a52c5001d06.json} | 4 +- ...494ce1d8e3b6cfb0b897745fb596f283be79.json} | 4 +- ...0f2ede5e22b0bbd8bc910cb36a91ed992bde1.json | 15 --- ...eee66178533c3c0f51e2bfa9ff201e17e4014.json | 44 +++++++ ...47e1ed84929bf75bb814ee2280d241912d38f.json | 15 +++ ...2c5eeab06ff664141fa542229e7c9a46d4410.json | 34 ------ ...0800383e6b73a8b48dc3e589a29a4367cbb5a.json | 15 --- ...717e73c5e6b063be3553d82bfecb98334980.json} | 4 +- ...16618914d6dedb39a9a40d36484741e8b01f4.json | 15 --- ...1e4657024c66a42e1bbc261c92c9e1c79930.json} | 4 +- ...96ec397767a063dc21aa3add974cb9b070361.json | 16 --- ...b3c0210383d8698f6f84f694fece9fd59f3d5.json | 16 --- ...98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json} | 4 +- ...c32170ea4b47051a100f71a41dd6ed9bb1a9.json} | 4 +- ...d656e2a26752fa71ae936ff2eb821b0734b61.json | 14 +++ ...2484f0a0d76a8097b61ead5f1f20301ce524d.json | 17 --- ...e03def94f4bf4674def1893e2232be80f57a0.json | 31 ----- ...36d50db45560befec6418020b01c55d94a98.json} | 4 +- ...944b2acedce201da4454aadb79f3545713ae.json} | 4 +- ...4fc05eaa158a6f38a87187d7f2c2068a0112a.json | 28 ----- ...375d56609cd6b7f425361acfd0a411dc48bd.json} | 4 +- ...3bb67bd12f2b635af783897f9d2a4f4ead2e.json} | 4 +- ...9250962832a0d95d0d82bdd56271d6a07e497.json | 15 +++ ...6aaa0430eeeb48c19ec3eabe78996baa9b140.json | 28 +++++ ...80eef9af91f5b4fcaebb7beb3a415d4b231d.json} | 4 +- 
...07554ce738a2d7005472e7e76a64a8fbd57ad.json | 14 --- ...1b74555863b0cc8281ef2e9c830fa1d38e227.json | 16 +++ ...5e94ad6bdd84c31e4b2e0c629e51857533974.json | 23 ++++ ...1edf3965f7ce67341d957ec680a18a0c39ea.json} | 4 +- ...d0651174fd63cf6a8950fa6e7c4838ac5abbf.json | 26 ---- ...aea6710351dea1f1e57d73447476c3fcd199.json} | 4 +- ...528f012c2ecaebf6622ca1ae481045604e58d.json | 21 ---- ...6bd6ec3a1cd9db6187e99af40ca8fea4c6ace.json | 35 ++++++ ...aaad910ca46830c470f8fc1d4265f9ed8631.json} | 4 +- ...9973527841e234d8b4635fb337e4b659b7f71.json | 15 +++ ...abfb146e9932d4142139a49599c4bdbd659f.json} | 4 +- ...b671fa559dfdb29b315b17ae2f86533d247e9.json | 16 +++ ...ccc7108538405743fe1ad71451d0f1842561.json} | 4 +- ...fa3245a75736d14b1ff0799f5f05cd4a247a4.json | 34 ------ ...0ff145ac2f388a246b9c7b32193e2d4208dd.json} | 4 +- ...51dfd285628a75a35b152bccb3c73e9cc057.json} | 4 +- ...3be52d97128642d8528535975f8dea798e6c.json} | 4 +- ...84b0dd496700a61569929dcc7602ec678b09.json} | 4 +- ...af767e790a54957218de908c7144658b4681.json} | 4 +- ...c5de2c9f8f3cf22d0f1145ae67879039e28d.json} | 4 +- ...139300ad3b80ac9e70c00864c3d9f6521b028.json | 28 ----- ...7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json} | 4 +- ...7ca44a1f6cb5e46bf9e5ffe38741bf61c5cc0.json | 23 ++++ ...d23f83a260adf56000748c4b19752e2948ab5.json | 34 ++++++ ...ec87cf78f40429222b3163f3c5235c87e007f.json | 31 +++++ ...0c8fdca5c5731d490c1ac3ed56266b1d5a8a5.json | 32 +++++ ...77a428820fdcea9969aff3b29ca16727357b.json} | 4 +- ...a69138206dfeb41f3daff4a3eef1de0bed4e4.json | 16 --- ...d0a586ce73d09ee28887cb1ad1c0392250629.json | 34 ++++++ ...372281e8502002ff9fec82294ea6b972447d8.json | 16 +++ ...c39e774c405508e73e77cdd0c01f924c97c0.json} | 4 +- ...5d3a0eed793f7b64ad3ed035f20e896371721.json | 31 ----- ...bd2225e57ae8d42042ac107b35a36c522f0d8.json | 34 ++++++ ...21c9bacd8badc624de7664b82dba97d6c9b9.json} | 4 +- ...dd13a572f665e0a5c0edba8076438ba9f044c.json | 15 +++ ...6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json | 15 --- ...ddab6569d897e748d39abca9538872d986ad.json} | 4 +- ...d1f5e03924dbf6dd1e824d4aca837e604b7e9.json | 44 ------- ...99bf19b587a16ad70a671b0de48fd608bf31c.json | 23 ++++ ...6d19f7ba92c40566e1d098c435da41e95274.json} | 4 +- ...cab02b0656d863dbfce4b5dad7bc72e52b05d.json | 34 ------ ...709dbee508ad6d1cae43e477cf1bef8cb4aa9.json | 23 ---- ...c5e9dc50b533524bf856740215aacb8ff5bd.json} | 4 +- ...e5a6cca1524adfe99b0cb35662746479dcc1.json} | 4 +- ...9d0e2d571533d4d5f683919987b6f8cbb00e0.json | 15 --- ...33acd2066a5e238088b39b982b10770f51479.json | 100 ---------------- ...8997f003e26768c5de663160d89af54a1ee7.json} | 4 +- ...9713f437db492e2075ca69e11e2ef5728ccaa.json | 24 ---- ...19da280e72546298b615b88662fa4a19f2bdf.json | 14 +++ ...9442f610a6c57280615eb8c982a4afb847aff.json | 17 +++ ...d5cfa6c8498e8a58b8ae6fe3a1584401d3b4b.json | 16 +++ ...309c1b45d0134a6c6e15533443353db3bee6.json} | 4 +- ...dc766262b5bdc58be0444e164b1bd9feed02d.json | 38 ++++++ ...c5223c9d5e2e42d89bb456d24c601edc06a05.json | 40 +++++++ ...1885d320c146cd8fad77c107ef12fa38e6c98.json | 40 ------- ...d6542fd54ceac41dd41b995a8409c5ab046c.json} | 4 +- ...ac9ff144a09e68db07e4ca1c1de105765711f.json | 15 +++ ...eb0539849f7fd590712103db7d45d119caca2.json | 15 +++ ...9411ba30ac67080552279d821d66b1b804db3.json | 14 --- ...2687cfe890fe89c748f836bad6eb0cc0fcf5.json} | 4 +- ...3a61bf579a0dbeb01c545fa61b10a0047297b.json | 26 ++++ ...29b9149fee37c5ef7d69e259ee33cb8ca860.json} | 4 +- ...8e7265828e3ae78e391ef235b700f0adfe5e4.json | 22 ++++ ...7be93b12319c041751bc4138fa64258ecd5c.json} | 4 +- 
...5eb148aa6d1dbd69bf3fe48522101a6ea0bcb.json | 23 ---- ...2fe98364aa684180dd6fbf540bb0b68d96a64.json | 15 --- ...4a8e245ecee4866264d38146938595b07f37.json} | 4 +- ...020b1036f002312271eec76ec3d40f77eb71.json} | 4 +- ...8ed14ea4ed363db1501e7268909bc18f2043.json} | 4 +- ...5ff66f2c3b2b83d45360818a8782e56aa3d66.json | 36 ------ ...b50ebc862a133734382bad8fdfa3a93d8b743.json | 29 ----- ...5371292d0bfbb23eaa2db163ef6fd1e1374d9.json | 31 +++++ ...2745b90c68ebec5706b106ef69cb61333466e.json | 29 +++++ ...c3cec1dbc3e9af406bb900c3ec52590978bc.json} | 4 +- ...ea2b254fed4b5437ee2968707dffe6e6869d.json} | 4 +- ...f6cb289fdec08f4db49ee40ba67371df1927.json} | 4 +- ...52314178f8ab2aea7efb69ae18fbf40e8994.json} | 4 +- ...f113a19feb73c4cf9876855523499998b99c0.json | 19 --- ...c43ffc5f519d35c90c60f89579f0689906df5.json | 18 +++ ...e4b3d55a0fee71105df880ff4f599844d06f.json} | 4 +- ...7def3a97275b66ad33d214054dc9048ddf584.json | 18 --- ...7fadffc1d3091e8c050835d4f178a328fcc8.json} | 4 +- ...c0a4dbcc7b955132056b5ff10f00cf5844874.json | 19 +++ ...2fd05e27e9df6d760173bba37446988d1b8e.json} | 4 +- ...91e08d28ca6796c0bbba95016d1899cf84fe.json} | 4 +- ...68ba43ba128a00a194dce65015bd9eeb1b42f.json | 17 +++ ...ffd15b636d553794900596528e8fb03ca5aed.json | 17 +++ ...e2d3a6ebb3657862b91e3ece34119f098fc2d.json | 32 ----- ...234c19578572973094b21ddbb3876da6bb95.json} | 4 +- ...0ad195b0dd2a8ce56b1a9eb531103130b5e3e.json | 62 ---------- ...ad4da8f523aa84cd047216d8e3cf69c92106.json} | 4 +- ...10c3b232bbaaa12747f72e3b8c5ad64702a79.json | 32 +++++ ...03782558380c48caf8734cacee350e3841dd.json} | 4 +- ...e40978966fb2ad8b342907580dd17c0a52779.json | 17 +++ ...42fde93ffb610bff173f04f1b80e30754ad5.json} | 4 +- ...9f8bea0033f6727c2582f7f141330a47f440.json} | 4 +- ...35d9a865ecbd87d38008a649e8943fdf8a43.json} | 4 +- ...2186aa86751cb3b4de26a60af1cf987ca636.json} | 4 +- ...2c0214469edff6fc74965f1ec1366f8b46b8e.json | 20 ++++ ...41976a264759c4060c1a38e466ee2052fc17d.json | 15 --- ...86d3521faa0fbbfc7ca810c818ca9720b121.json} | 4 +- ...43d01cff938d3aed44c9bb1d864bfdf69e39a.json | 22 ++++ ...d419667f11d80036cda021ecbf23b0b5f7f42.json | 20 ---- ...a81aef0d77e0a4b02307f59e289c0e61717c5.json | 19 --- ...846983cb3f693c02159ba41c1a875aed7e03d.json | 62 ++++++++++ ...a5aec09b39b9d0367b0c29167f8a6a166d18.json} | 4 +- ...f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json | 16 --- ...1426d60c56b526af5ed2e0c602f881c3bbf0.json} | 4 +- ...8594776d64b8b5a4094fef979e35ab76d5bcd.json | 15 +++ ...cd21d4645563f93afd4428734196c2b212276.json | 17 --- ...0767a2cd4488e670674cd9149f7a332c0198d.json | 22 ---- ...3e804b9c2af8c98ae64ccace8c5035b3c499.json} | 4 +- ...ac1e4ed2a372922e55728de083bffb533e11.json} | 4 +- ...8c88cdb52125daedbade044934761fe2147d.json} | 4 +- ...77313681560f538c6524c54648d6308533744.json | 32 +++++ ...08a01b63ae4aa03c983c3a52c802d585e5a80.json | 15 --- ...43c868c63c853edb5c4f41e48a3cc6378eca9.json | 32 ----- ...3027b4c2ce405d3bcc9821440408a394d7f5.json} | 4 +- ...165108e64cc8cf1a2a5604cb180f6d8ac4fe.json} | 4 +- ...23bb83b9fb0422b396cbd201f02ebce7b921.json} | 4 +- ...9314a2ce0180c20126ad22ddaa20c7c32c2c.json} | 4 +- ...b5a4672ad50a9de92c84d939ac4c69842e355.json | 16 --- ...3b6da86d1e693be03936730c340121167341f.json | 17 --- ...599bf65cbf2fd7c27a2dcad43e41a0f43cba0.json | 26 ++++ ...d023d9cce8cd66ad684475ece209832babd5e.json | 16 +++ ...29d1227a213b6cacb4c30a1f94c56f56cb4af.json | 15 +++ ...fb9c8674344921b98b8b26e4d85994b3d72af.json | 22 ++++ ...2757fe394fa096f09c62faa26f728a69d1ae.json} | 4 +- ...fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json | 26 
---- ...5d005d8760c4809b7aef902155196873da66e.json | 15 --- ...38e1010d7389457b3c97e9b238a3a0291a54e.json | 16 --- ...583a7526ae38ceb4bf80543cfd3fb60492fb9.json | 17 --- ...bcaf4e7f673e9d9827427aa4bbedb3cae8b8f.json | 15 +++ ...ea8e04c2c28771aa50a9e3ebb95c5e428be0e.json | 19 +++ ...92d6496147f6425c2485fc8a8f2739e65f80.json} | 4 +- ...bf0344b237bf1914657d36f44b9d21ae966a6.json | 16 +++ ...cbb33d511e07d80a5acd79bc559abdbda49bc.json | 16 +++ ...4f3cc4f9c96c2a51a157b83fff9b411beeb9.json} | 4 +- ...dae905acac53b46eeaeb059d23e48a71df3b4.json | 22 ---- ...218d106d5852fa6f65aca22e12b3a4b7f9c2.json} | 4 +- 201 files changed, 1715 insertions(+), 1715 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-00c0389f4cde049078885cdf05bdb7dbe0bb23c4fc87a78be2d01b77da2ecbd3.json create mode 100644 core/lib/dal/.sqlx/query-0124bad591f7ed156000c515cb0772b3002289a05ab23b7dafde2af10c7d88da.json create mode 100644 core/lib/dal/.sqlx/query-0429f2fa683bdff6fc1ff5069de69d57dbfda4be1f70232afffca82a895d43e0.json delete mode 100644 core/lib/dal/.sqlx/query-046590a310cc7bbfa421d0d8e20c3bdae18b88c4c223e374df9a997c271b091b.json create mode 100644 core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json create mode 100644 core/lib/dal/.sqlx/query-05891e039a54b70ac406a63b73b6f5521c888c5bc9050333dd18a88c098b2d12.json delete mode 100644 core/lib/dal/.sqlx/query-05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63.json rename core/lib/dal/.sqlx/{query-2b1aa207a058f66265acf2c21b8ed5d8007789c0fc1eab948f6d7339dfb69147.json => query-08740b96c883fce75b6238bb6bdc95254f94dddb4ed84a675c2655421758b049.json} (80%) create mode 100644 core/lib/dal/.sqlx/query-0a0dd4a98128c2ef60d385f5c0674ddd4f2422089604768b19e0b6d948c0cf1b.json delete mode 100644 core/lib/dal/.sqlx/query-0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55.json rename core/lib/dal/.sqlx/{query-dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d.json => query-0bae50601f2dc3ea7ba9ab6b9f508275cef018c7060c49b9f7a387a14f6a5be8.json} (80%) delete mode 100644 core/lib/dal/.sqlx/query-1074d0a2e4a4afb9a92f3822e133db7a71aca15698bafba051a8d9a91a4dbc76.json create mode 100644 core/lib/dal/.sqlx/query-150d0219cabbabcc9d0e97e3fd94032f3d900b74e62dfdec0c9f61bf2bacb595.json create mode 100644 core/lib/dal/.sqlx/query-1823e1ac602ce4ba1db06543af9cb2685cda1ae7ecca83062ede7320c3b4a427.json create mode 100644 core/lib/dal/.sqlx/query-18d507f6c9fe051da9899e2e4346e94d0b4f902c84e31fca606773cf4c9d17fc.json create mode 100644 core/lib/dal/.sqlx/query-1a6f4db7fb5ad9b54367c7f9d87ecbc877c8e04d47bf16b9c2e59ba652d2955f.json delete mode 100644 core/lib/dal/.sqlx/query-1bbfac481c402bcb3bb888b84146d922fa1fc9c202072fbc04cae1bbf97195aa.json create mode 100644 core/lib/dal/.sqlx/query-1ed6c99dcaa47f1525499362e316a12866726e1f3ca9cf9d89f61c4e97373361.json delete mode 100644 core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json delete mode 100644 core/lib/dal/.sqlx/query-2169cc7dfb074566ceb0bd5754d6e7a755c4bc6c25c7e6caff5fd6142813d349.json rename core/lib/dal/.sqlx/{query-3490fe0b778a03c73111bf8cbf426b0b3185a231bbf0b8b132a1a95bc157e827.json => query-21acded689c24b653d149ff169f53b54a1965e100e0949006212f97e6d11a18b.json} (75%) rename core/lib/dal/.sqlx/{query-eab36591af61369e36e3dab79025ac6758a0a4e367f93a9bd48ec82c51e09755.json => query-2234d7728d91cefaee792c900448aafe4b1aa2250fc535bfcdff39172551d42b.json} (81%) rename 
core/lib/dal/.sqlx/{query-c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5.json => query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json} (77%) rename core/lib/dal/.sqlx/{query-040eaa878c3473f5edc73b77e572b5ea100f59295cd693d14ee0d5ee089c7981.json => query-229db646c85461858689b81d80f5ddbee82a65c00c5740e9017382c57c279b18.json} (63%) rename core/lib/dal/.sqlx/{query-24722ee4ced7f03e60b1b5ecaaa5234d536b064951a67d826ac49b7a3a095a1a.json => query-24865315c100f495a6e2cefb3b0e84d2fcc5d436e52cc82ed2aba664bde3ee6b.json} (71%) rename core/lib/dal/.sqlx/{query-86dbcf93abdd4206d2d62b140cf5dd11537925d02f5a7f2bae23c8dc48204e3f.json => query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json} (62%) delete mode 100644 core/lib/dal/.sqlx/query-25fb31277591dd7d5d783bd8777f1a855e76b37b6ed36ae612b551f9a6a55633.json rename core/lib/dal/.sqlx/{query-1e3c88b41bc02cb6a116fa930ae85b8b60165ed757ae1044e83fdc877d95cbd8.json => query-2719fa6e4f282535526ef6c3c7c73dcb740d1d2750fe17fb5330e50323b96b49.json} (90%) rename core/lib/dal/.sqlx/{query-69c885498b186f3b7cbb215112ec86783d7da0ec1d008680872f3619cf217923.json => query-28c5a9f99af4d2857346e0df73c9b9086ba42bac53629793a42d67b353c30210.json} (60%) delete mode 100644 core/lib/dal/.sqlx/query-28e03d23835e86f77a27735ac0ec801788d834bc1ea9b8194e4461c3df979f8b.json create mode 100644 core/lib/dal/.sqlx/query-2a2083fd04ebd006eb0aa4e0e5f62f3339768a85aaff9a509901e9f42b09097b.json delete mode 100644 core/lib/dal/.sqlx/query-2d1e0f2e043c193052c9cc20f9efeb5f094160627bc09db4bda2dda9a8c11c44.json create mode 100644 core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json delete mode 100644 core/lib/dal/.sqlx/query-2d8da95804a7a300ff2b756e6785c0665ee3ea4086ddb91428fa1b6a00760737.json delete mode 100644 core/lib/dal/.sqlx/query-2e5b9ae1b81b0abfe7a962c93b3119a0a60dc9804175b2baf8b45939c74bd583.json rename core/lib/dal/.sqlx/{query-5d341e334cbc2d38efc8a060325677d57040037da0e07cef2c7b7246851a3703.json => query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json} (80%) rename core/lib/dal/.sqlx/{query-2506e9edfd4b41ca1e187909631ae942bab5d71daaed7017e3fa62dc5e42ab0a.json => query-2fc088b319ff8f197e50bb379b77de8ba36685dde66d1b3fcf549a52c5001d06.json} (80%) rename core/lib/dal/.sqlx/{query-30e5c8710b1611872da06b72ac681aff512b3a9b2587b8e59848345c07dd8f3b.json => query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json} (89%) delete mode 100644 core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json create mode 100644 core/lib/dal/.sqlx/query-32e8bdbf16fdd2b7617d7a909c3eee66178533c3c0f51e2bfa9ff201e17e4014.json create mode 100644 core/lib/dal/.sqlx/query-3353dd735addf184732843151dd47e1ed84929bf75bb814ee2280d241912d38f.json delete mode 100644 core/lib/dal/.sqlx/query-33a78184559d0c0d13469fa799e2c5eeab06ff664141fa542229e7c9a46d4410.json delete mode 100644 core/lib/dal/.sqlx/query-3518a3e93da8115b54426d72aea0800383e6b73a8b48dc3e589a29a4367cbb5a.json rename core/lib/dal/.sqlx/{query-be2c8e525d6867c0d2bd254c73ef1719fd1284af1dbb60ea128550224b52da93.json => query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json} (79%) delete mode 100644 core/lib/dal/.sqlx/query-37890022be6b5e893cf051266fa16618914d6dedb39a9a40d36484741e8b01f4.json rename core/lib/dal/.sqlx/{query-469db79fcf8d692507317080e0d843620210aff3a9a7aa8a249db50c1c56d927.json => query-37b653ba8a544224ef6fb88e073d1e4657024c66a42e1bbc261c92c9e1c79930.json} (60%) 
delete mode 100644 core/lib/dal/.sqlx/query-3b4d5009ec22f54cc7d305aa11d96ec397767a063dc21aa3add974cb9b070361.json delete mode 100644 core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json rename core/lib/dal/.sqlx/{query-26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448.json => query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json} (79%) rename core/lib/dal/.sqlx/{query-0fede71ed258790cf70d6d6a32dcf9654c06dfef57863281601c947830ad448a.json => query-400457c97449097837d5e31d4476c32170ea4b47051a100f71a41dd6ed9bb1a9.json} (76%) create mode 100644 core/lib/dal/.sqlx/query-403b0b1c93f973aedb41aad7700d656e2a26752fa71ae936ff2eb821b0734b61.json delete mode 100644 core/lib/dal/.sqlx/query-406e51d7884cdff36632ccf97912484f0a0d76a8097b61ead5f1f20301ce524d.json delete mode 100644 core/lib/dal/.sqlx/query-40c17194a2089a7d3fa6b7923c9e03def94f4bf4674def1893e2232be80f57a0.json rename core/lib/dal/.sqlx/{query-546c729829083b7eba94fea742c162d717ffcf46fdf5d2ce5d32555353b6da6b.json => query-41b2d575151bff136037d6f1d75f36d50db45560befec6418020b01c55d94a98.json} (52%) rename core/lib/dal/.sqlx/{query-8b9e5d525c026de97c0a732b1adc8dc4bd57e32dfefe1017acba9a15fc14b895.json => query-442212bb5f28f234cd624f2acc27944b2acedce201da4454aadb79f3545713ae.json} (61%) delete mode 100644 core/lib/dal/.sqlx/query-45a968c6d667b13bbe9d895e7734fc05eaa158a6f38a87187d7f2c2068a0112a.json rename core/lib/dal/.sqlx/{query-ada54322a28012b1b761f3631c4cd6ca26aa2fa565fcf208b6985f461c1868f2.json => query-497e1c08e8460be6e838a47dada8375d56609cd6b7f425361acfd0a411dc48bd.json} (77%) rename core/lib/dal/.sqlx/{query-03e00f422f991f8f12aad0083e1c42cfea253a182ca5df143a183cd522ecac33.json => query-499571f0484f4a54541450f935853bb67bd12f2b635af783897f9d2a4f4ead2e.json} (81%) create mode 100644 core/lib/dal/.sqlx/query-4b6fdc5021c536e622373dca8729250962832a0d95d0d82bdd56271d6a07e497.json create mode 100644 core/lib/dal/.sqlx/query-5017fb273e532be3a093d682d916aaa0430eeeb48c19ec3eabe78996baa9b140.json rename core/lib/dal/.sqlx/{query-e073cfdc7a00559994ce04eca15f35d55901fb1e6805f23413ea43e3637540a0.json => query-50687903977b973d72acac96b08880eef9af91f5b4fcaebb7beb3a415d4b231d.json} (85%) delete mode 100644 core/lib/dal/.sqlx/query-5137159db7d3ff456e368e6246b07554ce738a2d7005472e7e76a64a8fbd57ad.json create mode 100644 core/lib/dal/.sqlx/query-57851c16dce02999bf386e549791b74555863b0cc8281ef2e9c830fa1d38e227.json create mode 100644 core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json rename core/lib/dal/.sqlx/{query-477b6d651c309ed2ab1a361b7f7816c233e16da7330c9327f4020c528f670a33.json => query-6083e429948c139e36cfce2c5ed41edf3965f7ce67341d957ec680a18a0c39ea.json} (57%) delete mode 100644 core/lib/dal/.sqlx/query-6179c3c1a0b2aeb01c0527f6ca4d0651174fd63cf6a8950fa6e7c4838ac5abbf.json rename core/lib/dal/.sqlx/{query-718d29517c100ad9d258a7ee90c48449c1c4bed4d0236fcedc177c9478e72262.json => query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json} (57%) delete mode 100644 core/lib/dal/.sqlx/query-66510caa7683ed90729cb545ac8528f012c2ecaebf6622ca1ae481045604e58d.json create mode 100644 core/lib/dal/.sqlx/query-68327709a740a09b198770ec1746bd6ec3a1cd9db6187e99af40ca8fea4c6ace.json rename core/lib/dal/.sqlx/{query-b42fc86726ac40d0ca38640884da192a143f6a7501b7da65bb7df40a0a4ead70.json => query-6da05c9ac3d0f30b856177e28d28aaad910ca46830c470f8fc1d4265f9ed8631.json} (76%) create mode 100644 
core/lib/dal/.sqlx/query-6e725f38a8f04ca5516ba3128569973527841e234d8b4635fb337e4b659b7f71.json rename core/lib/dal/.sqlx/{query-6e3a3ef443ce8aab55b10eea55f9c8ff11775885aebaf457075c6825305244e5.json => query-6f4463a2f95fec4f8b511d28566babfb146e9932d4142139a49599c4bdbd659f.json} (68%) create mode 100644 core/lib/dal/.sqlx/query-7204d08f6fb83c83c09eb2942ecb671fa559dfdb29b315b17ae2f86533d247e9.json rename core/lib/dal/.sqlx/{query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json => query-7235e50f9ce4b5c4f6f8325117eaccc7108538405743fe1ad71451d0f1842561.json} (67%) delete mode 100644 core/lib/dal/.sqlx/query-785865beac89aff53fe5493d92cfa3245a75736d14b1ff0799f5f05cd4a247a4.json rename core/lib/dal/.sqlx/{query-dac29d7eb16782713acb7aa68aaa8b12b67678e983dc2570be28fe9b1e016c28.json => query-7f3ca3f1d0b3414575e71af98b810ff145ac2f388a246b9c7b32193e2d4208dd.json} (62%) rename core/lib/dal/.sqlx/{query-a71a87d91dcf0f624dbd64eb8828f65ff83204ebab2ea31847ae305a098823b0.json => query-868bfdc5d8ee5eab395fa690891751dfd285628a75a35b152bccb3c73e9cc057.json} (83%) rename core/lib/dal/.sqlx/{query-cb0a9f6137fb6bee5d17d644714b3b22ea2cd184932fcd59f5931239c7a78003.json => query-87e968b1ef6b95203b5d1e41ddf73be52d97128642d8528535975f8dea798e6c.json} (85%) rename core/lib/dal/.sqlx/{query-2cba440c2925631655a7f67486a5a8869da8f10738ba77e3d8e048057b0e7b12.json => query-8ab1634beba74aaef952562a3bcc84b0dd496700a61569929dcc7602ec678b09.json} (70%) rename core/lib/dal/.sqlx/{query-526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3.json => query-8dd9596258c4e9eeb9178b32ffefaf767e790a54957218de908c7144658b4681.json} (83%) rename core/lib/dal/.sqlx/{query-41c9f45d6eb727aafad0d8c18024cee5c602d275bb812022cc8fdabf0a60e151.json => query-8de8fd9aa54e2285a14daf95af18c5de2c9f8f3cf22d0f1145ae67879039e28d.json} (77%) delete mode 100644 core/lib/dal/.sqlx/query-928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028.json rename core/lib/dal/.sqlx/{query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json => query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json} (66%) create mode 100644 core/lib/dal/.sqlx/query-99c6597a6edfb8736a9f2f5a5d77ca44a1f6cb5e46bf9e5ffe38741bf61c5cc0.json create mode 100644 core/lib/dal/.sqlx/query-9badf9f287fd5f8a11cc855b230d23f83a260adf56000748c4b19752e2948ab5.json create mode 100644 core/lib/dal/.sqlx/query-9f7b35311f0afff1be7dc489348ec87cf78f40429222b3163f3c5235c87e007f.json create mode 100644 core/lib/dal/.sqlx/query-a329c468b25d6d5533766b8ad3d0c8fdca5c5731d490c1ac3ed56266b1d5a8a5.json rename core/lib/dal/.sqlx/{query-6621de90a024cc85946f17948e5c171cd0e4d38bd6e9cfec58b2d7f53a3204e1.json => query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json} (97%) delete mode 100644 core/lib/dal/.sqlx/query-a4fcd075b68467bb119e49e6b20a69138206dfeb41f3daff4a3eef1de0bed4e4.json create mode 100644 core/lib/dal/.sqlx/query-a65364d10a20420211022dc8234d0a586ce73d09ee28887cb1ad1c0392250629.json create mode 100644 core/lib/dal/.sqlx/query-a8fcb4d43b702d561a573a30790372281e8502002ff9fec82294ea6b972447d8.json rename core/lib/dal/.sqlx/{query-e3f4af790fca9149f4edff070170294ec464d184ad732692faa61d2ef99c84e9.json => query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json} (53%) delete mode 100644 core/lib/dal/.sqlx/query-acfd5a60fda5782bddb8cba033a5d3a0eed793f7b64ad3ed035f20e896371721.json create mode 100644 core/lib/dal/.sqlx/query-ad8cbc084ef8bc5995031c807bbbd2225e57ae8d42042ac107b35a36c522f0d8.json rename 
core/lib/dal/.sqlx/{query-a1f4334b6bc1642c1bc0ff4ffa34470914b52e2d714e9deb903d62a5d66f0e04.json => query-afe66b4684f2ab1187b49f64f16f21c9bacd8badc624de7664b82dba97d6c9b9.json} (86%) create mode 100644 core/lib/dal/.sqlx/query-b1486215ce220d6d2faa7690a5cdd13a572f665e0a5c0edba8076438ba9f044c.json delete mode 100644 core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json rename core/lib/dal/.sqlx/{query-ef70506e90e8add3b95940a7333f8222bd9fbe8ce82d8963f7da03fe6fcf9225.json => query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json} (79%) delete mode 100644 core/lib/dal/.sqlx/query-b47a2961dc2f1347864682c572dd1f5e03924dbf6dd1e824d4aca837e604b7e9.json create mode 100644 core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json rename core/lib/dal/.sqlx/{query-139c7ea1d6ccfb7d0f56d00cde196f9dd5372b5ef41eaa39af58eed5af777df1.json => query-bba037e1fcffc4415afe3016ff266d19f7ba92c40566e1d098c435da41e95274.json} (52%) delete mode 100644 core/lib/dal/.sqlx/query-bf481c2b498420f80765b837059cab02b0656d863dbfce4b5dad7bc72e52b05d.json delete mode 100644 core/lib/dal/.sqlx/query-bfc84bcf0985446b337467dd1da709dbee508ad6d1cae43e477cf1bef8cb4aa9.json rename core/lib/dal/.sqlx/{query-c02f404ce9b0f92b8052ef6f3eaabda70cb9c56ae3e30dc0a8257e43d6714155.json => query-c013947660883612b83bfd207c26c5e9dc50b533524bf856740215aacb8ff5bd.json} (83%) rename core/lib/dal/.sqlx/{query-afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66.json => query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json} (52%) delete mode 100644 core/lib/dal/.sqlx/query-c2fe6a5476e69c9588eec73baba9d0e2d571533d4d5f683919987b6f8cbb00e0.json delete mode 100644 core/lib/dal/.sqlx/query-c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479.json rename core/lib/dal/.sqlx/{query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json => query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json} (78%) delete mode 100644 core/lib/dal/.sqlx/query-c5aef75dbeb520c965a0996abed9713f437db492e2075ca69e11e2ef5728ccaa.json create mode 100644 core/lib/dal/.sqlx/query-c5dfe49f8042f773ced96f8363819da280e72546298b615b88662fa4a19f2bdf.json create mode 100644 core/lib/dal/.sqlx/query-c69d19005dd28bd0abfc84e29fd9442f610a6c57280615eb8c982a4afb847aff.json create mode 100644 core/lib/dal/.sqlx/query-c8dd57fb86247e67896485c6295d5cfa6c8498e8a58b8ae6fe3a1584401d3b4b.json rename core/lib/dal/.sqlx/{query-6849be0788a509e4e68acc2da4bfadbcfc343374fad173df0cc8db38419a9726.json => query-c9742c7f58c5c0f676cee0d0d3a9309c1b45d0134a6c6e15533443353db3bee6.json} (63%) create mode 100644 core/lib/dal/.sqlx/query-c9a842d04e8b225e43f07f76541dc766262b5bdc58be0444e164b1bd9feed02d.json create mode 100644 core/lib/dal/.sqlx/query-ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05.json delete mode 100644 core/lib/dal/.sqlx/query-cf8cff1e6d277088519ef7dfbdb1885d320c146cd8fad77c107ef12fa38e6c98.json rename core/lib/dal/.sqlx/{query-0385576f1fb3836fc04a6cde3e92c03e1de8292eb0ea1e026ba1b32a3745c261.json => query-cff500ffe0b6586dd96eb2d3620dd6542fd54ceac41dd41b995a8409c5ab046c.json} (55%) create mode 100644 core/lib/dal/.sqlx/query-d0db89030ed6979ecba6786aef1ac9ff144a09e68db07e4ca1c1de105765711f.json create mode 100644 core/lib/dal/.sqlx/query-d1490262c7a2a583928a611ae69eb0539849f7fd590712103db7d45d119caca2.json delete mode 100644 
core/lib/dal/.sqlx/query-d3abe74360732659a1a35a176679411ba30ac67080552279d821d66b1b804db3.json rename core/lib/dal/.sqlx/{query-ba2343a38e37d104786f9276d91f67d2ef1428c61ae84003c9b52b03204d1f0a.json => query-d43ae07cf1539826363573d121392687cfe890fe89c748f836bad6eb0cc0fcf5.json} (70%) create mode 100644 core/lib/dal/.sqlx/query-d47574939bdfb157a621f86a3ea3a61bf579a0dbeb01c545fa61b10a0047297b.json rename core/lib/dal/.sqlx/{query-0fef49a649d20c9fd263c1dfa40daa9b94d398c635c37746736e98f1f18fcca7.json => query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json} (89%) create mode 100644 core/lib/dal/.sqlx/query-ddfb7ba0592f1e6714a4496cee18e7265828e3ae78e391ef235b700f0adfe5e4.json rename core/lib/dal/.sqlx/{query-5f7034d22251a893249208c5ff8fa5c8bf46bc0cea4ac2b25ecde236c30ae32d.json => query-de784f549ceda62e37459519b52e7be93b12319c041751bc4138fa64258ecd5c.json} (87%) delete mode 100644 core/lib/dal/.sqlx/query-e7d0b7c132b80195dae7cbf50355eb148aa6d1dbd69bf3fe48522101a6ea0bcb.json delete mode 100644 core/lib/dal/.sqlx/query-ead71ae66fe4685132c03a973612fe98364aa684180dd6fbf540bb0b68d96a64.json rename core/lib/dal/.sqlx/{query-0f1856e55a370280a078d048f09e2d457914c737660b37e9f66b576bbc9a7904.json => query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json} (91%) rename core/lib/dal/.sqlx/{query-cea9fe027a6a0ada827f23b48ac32432295b2f7ee40bf13522a6edbd236f1970.json => query-ec2942fecedb7ee47279bf0418eb020b1036f002312271eec76ec3d40f77eb71.json} (86%) rename core/lib/dal/.sqlx/{query-4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd.json => query-ee50258050e7a08be67335cddf258ed14ea4ed363db1501e7268909bc18f2043.json} (78%) delete mode 100644 core/lib/dal/.sqlx/query-f012d0922265269746396dac8f25ff66f2c3b2b83d45360818a8782e56aa3d66.json delete mode 100644 core/lib/dal/.sqlx/query-f3a049c7eb0d8903737f02fa154b50ebc862a133734382bad8fdfa3a93d8b743.json create mode 100644 core/lib/dal/.sqlx/query-f90a87a0c8a3ad95d722fbcf1b05371292d0bfbb23eaa2db163ef6fd1e1374d9.json create mode 100644 core/lib/dal/.sqlx/query-fce6fff384875df689dc422153a2745b90c68ebec5706b106ef69cb61333466e.json rename core/lib/dal/.sqlx/{query-0458bb8eb595e470b3283847d6c89fd9567bb72dd8b1c604503050b8dab91521.json => query-fd4b42f90ffe24f76a35a005460cc3cec1dbc3e9af406bb900c3ec52590978bc.json} (73%) rename core/lib/dal/.sqlx/{query-99d9ee2a0d0450acefa0d9b6c031e30606fddf6631c859ab03819ec476bcf005.json => query-fd8aaef58e1b9bf4f389fb2943adea2b254fed4b5437ee2968707dffe6e6869d.json} (75%) rename core/lib/dal/.sqlx/{query-9f29aa31d4698031e9f3fe2eb273724dcce382936af0d4c386143399995cd325.json => query-febd2b039a686631e3c943882864f6cb289fdec08f4db49ee40ba67371df1927.json} (59%) rename prover/crates/lib/prover_dal/.sqlx/{query-41af30620f8a1f20b8a6c46be162601d35fd2881ac1fd070f0f1a8add4bc388d.json => query-095c459e05aa4c7fad0d1fa74b2d52314178f8ab2aea7efb69ae18fbf40e8994.json} (80%) delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-0a1ec4690d6b4a67d6ad16badcbf113a19feb73c4cf9876855523499998b99c0.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-1080b95755b3047747a9fabc7c7c43ffc5f519d35c90c60f89579f0689906df5.json rename prover/crates/lib/prover_dal/.sqlx/{query-eb2a85cb60c680a71203769db7baf89bbd72934e1405e320e746158e6d395d96.json => query-128b9be7a480a1ef132e5d477a43e4b3d55a0fee71105df880ff4f599844d06f.json} (81%) delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-29ff260b02f7b955f9fe0b657b87def3a97275b66ad33d214054dc9048ddf584.json rename 
prover/crates/lib/prover_dal/.sqlx/{query-d0be28042b50199075cb0eca26f6b93bfd5d96fdc68732fe38c79ccd44b84def.json => query-2d11a834e177596113b5ffd634067fadffc1d3091e8c050835d4f178a328fcc8.json} (82%) create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-2f194183f0140eee0dd801b2087c0a4dbcc7b955132056b5ff10f00cf5844874.json rename prover/crates/lib/prover_dal/.sqlx/{query-102b79726652d9150c802350bdca80c233a9fd3e892b5a867a5517c2e04497a8.json => query-30128c0642524295ab05c37c60db2fd05e27e9df6d760173bba37446988d1b8e.json} (84%) rename prover/crates/lib/prover_dal/.sqlx/{query-9895b2ded08be3e81a5357decf76b4d3d6a762761e45af2a73fe96da804e627e.json => query-332f1363f19160f9a7708635fd1691e08d28ca6796c0bbba95016d1899cf84fe.json} (84%) create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-340a84063719f9b837a61cbc63368ba43ba128a00a194dce65015bd9eeb1b42f.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-37fa629a87113f13c89ce5c1a8fffd15b636d553794900596528e8fb03ca5aed.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d.json rename prover/crates/lib/prover_dal/.sqlx/{query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json => query-3b3193bfac70b5fe69bf3bb7ba5a234c19578572973094b21ddbb3876da6bb95.json} (79%) delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-3e0a1ebc684810c09ff83784bdd0ad195b0dd2a8ce56b1a9eb531103130b5e3e.json rename prover/crates/lib/prover_dal/.sqlx/{query-216d0c263539739b53975a96a10332b826708800a2f72f09bd7aea08cf724e1a.json => query-3fead9a82ea277785a9ee5c075a2ad4da8f523aa84cd047216d8e3cf69c92106.json} (82%) create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-3ffc042b13c185ca6963fcb2d4d10c3b232bbaaa12747f72e3b8c5ad64702a79.json rename prover/crates/lib/prover_dal/.sqlx/{query-bcc5d3d35652f49b41d4ee673b171570fc88c17822bebd5b92e3b2f726d9af3a.json => query-4580503b825352de7691478a5de103782558380c48caf8734cacee350e3841dd.json} (82%) create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-48b57a279bfff34d44d1f5a6501e40978966fb2ad8b342907580dd17c0a52779.json rename prover/crates/lib/prover_dal/.sqlx/{query-d5bb897092bce2788fe02f31c9de6dde4142e09330557cc627fee2db278ace50.json => query-54cc92f95c07effcb08fa0b174c742fde93ffb610bff173f04f1b80e30754ad5.json} (82%) rename prover/crates/lib/prover_dal/.sqlx/{query-2b12c5d469e6220cc8ddc997c666e4aa4f797bcc6e05ec2f2e435a7e940d8cf9.json => query-6b7cf7ae3c66c46f4ecec2b0710f9f8bea0033f6727c2582f7f141330a47f440.json} (82%) rename prover/crates/lib/prover_dal/.sqlx/{query-d272c91f1209c277189a31c59ee191a43dc8eafc33ee067bd41e20f25f7625f0.json => query-6c37c8a0a921408e3b812adf77d835d9a865ecbd87d38008a649e8943fdf8a43.json} (92%) rename prover/crates/lib/prover_dal/.sqlx/{query-2dc6b7bf08cced8791354fc47e319d03f894f40d2ec528382b5643c3d51ec8e7.json => query-705e3880df382c3c25f41630d83f2186aa86751cb3b4de26a60af1cf987ca636.json} (92%) create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-7238cfe04ba59967fe5589665ad2c0214469edff6fc74965f1ec1366f8b46b8e.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json rename prover/crates/lib/prover_dal/.sqlx/{query-8720d411e0c9640afd61e927a89c0b6c018e6a4d279acd24a4ea7d81b5cc5123.json => query-7493571411ee2cb4f7b09bcfc4eb86d3521faa0fbbfc7ca810c818ca9720b121.json} (77%) create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-749d18c0fdae16ce0ed4e3c23e543d01cff938d3aed44c9bb1d864bfdf69e39a.json delete 
mode 100644 prover/crates/lib/prover_dal/.sqlx/query-764693ceeb45f8478a20242b592d419667f11d80036cda021ecbf23b0b5f7f42.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-776ef00deb25b0453c1eb38c5eaa81aef0d77e0a4b02307f59e289c0e61717c5.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-7ab760de174d37c04373cf48489846983cb3f693c02159ba41c1a875aed7e03d.json rename prover/crates/lib/prover_dal/.sqlx/{query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json => query-866bffdc527c079d128c1d21595ca5aec09b39b9d0367b0c29167f8a6a166d18.json} (83%) delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json rename prover/crates/lib/prover_dal/.sqlx/{query-28397b5a0b7af832d2a4d3d7011a68a48db6a64afcd41bbe0e17d98fa38fdb19.json => query-8c5aba6ce584c1671f2d65fb47701426d60c56b526af5ed2e0c602f881c3bbf0.json} (52%) create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-926cddf712322b476064a6efb2a8594776d64b8b5a4094fef979e35ab76d5bcd.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-929419ad8dcc70e8ce986f17075cd21d4645563f93afd4428734196c2b212276.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-9b713312b539b4eefa58346f0070767a2cd4488e670674cd9149f7a332c0198d.json rename prover/crates/lib/prover_dal/.sqlx/{query-e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed.json => query-9f5f6d6da7dbb7292b7fe60283993e804b9c2af8c98ae64ccace8c5035b3c499.json} (80%) rename prover/crates/lib/prover_dal/.sqlx/{query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json => query-a4407ac701423bc91505af3e7250ac1e4ed2a372922e55728de083bffb533e11.json} (60%) rename prover/crates/lib/prover_dal/.sqlx/{query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json => query-a6848b0c4cb256ca1d79d83dc5cb8c88cdb52125daedbade044934761fe2147d.json} (83%) create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-a6eb7a1f1aa2f6f5d90fbe3b8c777313681560f538c6524c54648d6308533744.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9.json rename prover/crates/lib/prover_dal/.sqlx/{query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json => query-b9aaf5fe4d0261f7b05c8601a96b3027b4c2ce405d3bcc9821440408a394d7f5.json} (73%) rename prover/crates/lib/prover_dal/.sqlx/{query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json => query-c01337f381828818b2b23a7fcc3d165108e64cc8cf1a2a5604cb180f6d8ac4fe.json} (57%) rename prover/crates/lib/prover_dal/.sqlx/{query-7effbacbdcc4bd762386351755f4f32042dfead8a37401558f5fd3b03480f2dd.json => query-c5569d55d77296b8c7180c95682423bb83b9fb0422b396cbd201f02ebce7b921.json} (93%) rename prover/crates/lib/prover_dal/.sqlx/{query-081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf.json => query-c6d02dc9cb9908a57c79729c759b9314a2ce0180c20126ad22ddaa20c7c32c2c.json} (66%) delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-caff620ae66d7fbe3caff7505173b6da86d1e693be03936730c340121167341f.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-cb453f0677b92539747e175e796599bf65cbf2fd7c27a2dcad43e41a0f43cba0.json create mode 100644 
prover/crates/lib/prover_dal/.sqlx/query-cebbd80998bf2be10c735f6c414d023d9cce8cd66ad684475ece209832babd5e.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-e2aceb9b86e74a3d119d383dcc729d1227a213b6cacb4c30a1f94c56f56cb4af.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-e65d9d8389b60f48468561984f0fb9c8674344921b98b8b26e4d85994b3d72af.json rename prover/crates/lib/prover_dal/.sqlx/{query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json => query-e6ddecc79d55bf5bd3e348a735b02757fe394fa096f09c62faa26f728a69d1ae.json} (79%) delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-eef1b56e87eff63fcf6ffb98791583a7526ae38ceb4bf80543cfd3fb60492fb9.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-f05ea1f0ee0be1ac50c35f5dbf4bcaf4e7f673e9d9827427aa4bbedb3cae8b8f.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-f294a1c32ffb957c901dcdfa942ea8e04c2c28771aa50a9e3ebb95c5e428be0e.json rename prover/crates/lib/prover_dal/.sqlx/{query-8357972a21b39644e4cbe4bedc3b6d9065bf4494daf8f7632ab2bfe055773f7b.json => query-f2ed1acf57927528cf978ad759ac92d6496147f6425c2485fc8a8f2739e65f80.json} (81%) create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-f3280a144a9aea48dae9b2914f0bf0344b237bf1914657d36f44b9d21ae966a6.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-f68afde603675b0853547c61a74cbb33d511e07d80a5acd79bc559abdbda49bc.json rename prover/crates/lib/prover_dal/.sqlx/{query-0b70c98c2edd8370ad09ac553c18dbc21cccb9a95e3db1c93da239845a5e9036.json => query-f70306e92c2a2d69e0e75eb0cf614f3cc4f9c96c2a51a157b83fff9b411beeb9.json} (80%) delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-f99c34313e2717ec74b4f274e33dae905acac53b46eeaeb059d23e48a71df3b4.json rename prover/crates/lib/prover_dal/.sqlx/{query-28f03acf565c4b50fe86f606c18a8b699386b3c5b4e02d5ce046f0f2e0ddc388.json => query-fd29394931eed5b99d2fb4fac907218d106d5852fa6f65aca22e12b3a4b7f9c2.json} (92%) diff --git a/core/lib/dal/.sqlx/query-00c0389f4cde049078885cdf05bdb7dbe0bb23c4fc87a78be2d01b77da2ecbd3.json b/core/lib/dal/.sqlx/query-00c0389f4cde049078885cdf05bdb7dbe0bb23c4fc87a78be2d01b77da2ecbd3.json deleted file mode 100644 index d83713192cb..00000000000 --- a/core/lib/dal/.sqlx/query-00c0389f4cde049078885cdf05bdb7dbe0bb23c4fc87a78be2d01b77da2ecbd3.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n )\n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_ready_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - true - ] - }, - "hash": "00c0389f4cde049078885cdf05bdb7dbe0bb23c4fc87a78be2d01b77da2ecbd3" -} diff --git 
a/core/lib/dal/.sqlx/query-0124bad591f7ed156000c515cb0772b3002289a05ab23b7dafde2af10c7d88da.json b/core/lib/dal/.sqlx/query-0124bad591f7ed156000c515cb0772b3002289a05ab23b7dafde2af10c7d88da.json new file mode 100644 index 00000000000..5cd56fee6b7 --- /dev/null +++ b/core/lib/dal/.sqlx/query-0124bad591f7ed156000c515cb0772b3002289a05ab23b7dafde2af10c7d88da.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n base_token_ratios (\n numerator, denominator, ratio_timestamp, created_at, updated_at\n )\n VALUES\n ($1, $2, $3, NOW(), NOW())\n RETURNING\n id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Numeric", + "Numeric", + "Timestamp" + ] + }, + "nullable": [ + false + ] + }, + "hash": "0124bad591f7ed156000c515cb0772b3002289a05ab23b7dafde2af10c7d88da" +} diff --git a/core/lib/dal/.sqlx/query-0429f2fa683bdff6fc1ff5069de69d57dbfda4be1f70232afffca82a895d43e0.json b/core/lib/dal/.sqlx/query-0429f2fa683bdff6fc1ff5069de69d57dbfda4be1f70232afffca82a895d43e0.json new file mode 100644 index 00000000000..5693bdf987e --- /dev/null +++ b/core/lib/dal/.sqlx/query-0429f2fa683bdff6fc1ff5069de69d57dbfda4be1f70232afffca82a895d43e0.json @@ -0,0 +1,36 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n sl AS (\n SELECT\n (\n SELECT\n ARRAY[hashed_key, value] AS kv\n FROM\n storage_logs\n WHERE\n storage_logs.miniblock_number = $1\n AND storage_logs.hashed_key >= u.start_key\n AND storage_logs.hashed_key <= u.end_key\n ORDER BY\n storage_logs.hashed_key\n LIMIT\n 1\n )\n FROM\n UNNEST($2::bytea [], $3::bytea []) AS u (start_key, end_key)\n )\n \n SELECT\n sl.kv[1] AS \"hashed_key?\",\n sl.kv[2] AS \"value?\",\n initial_writes.index\n FROM\n sl\n LEFT OUTER JOIN initial_writes ON initial_writes.hashed_key = sl.kv[1]\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hashed_key?", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "value?", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "index", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "ByteaArray", + "ByteaArray" + ] + }, + "nullable": [ + null, + null, + true + ] + }, + "hash": "0429f2fa683bdff6fc1ff5069de69d57dbfda4be1f70232afffca82a895d43e0" +} diff --git a/core/lib/dal/.sqlx/query-046590a310cc7bbfa421d0d8e20c3bdae18b88c4c223e374df9a997c271b091b.json b/core/lib/dal/.sqlx/query-046590a310cc7bbfa421d0d8e20c3bdae18b88c4c223e374df9a997c271b091b.json deleted file mode 100644 index 950d72a3e23..00000000000 --- a/core/lib/dal/.sqlx/query-046590a310cc7bbfa421d0d8e20c3bdae18b88c4c223e374df9a997c271b091b.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n soft AS (\n SELECT\n pruned_l1_batch,\n pruned_miniblock\n FROM\n pruning_log\n WHERE\n TYPE = 'Soft'\n ORDER BY\n pruned_l1_batch DESC\n LIMIT\n 1\n ),\n hard AS (\n SELECT\n pruned_l1_batch,\n pruned_miniblock\n FROM\n pruning_log\n WHERE\n TYPE = 'Hard'\n ORDER BY\n pruned_l1_batch DESC\n LIMIT\n 1\n )\n SELECT\n soft.pruned_l1_batch AS last_soft_pruned_l1_batch,\n soft.pruned_miniblock AS last_soft_pruned_miniblock,\n hard.pruned_l1_batch AS last_hard_pruned_l1_batch,\n hard.pruned_miniblock AS last_hard_pruned_miniblock\n FROM\n soft\n FULL JOIN hard ON TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_soft_pruned_l1_batch", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "last_soft_pruned_miniblock", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": 
"last_hard_pruned_l1_batch", - "type_info": "Int8" - }, - { - "ordinal": 3, - "name": "last_hard_pruned_miniblock", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - true, - true, - true, - true - ] - }, - "hash": "046590a310cc7bbfa421d0d8e20c3bdae18b88c4c223e374df9a997c271b091b" -} diff --git a/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json b/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json new file mode 100644 index 00000000000..8c41c0ab976 --- /dev/null +++ b/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW())\n ON CONFLICT DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Bytea", + "Bytea", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016" +} diff --git a/core/lib/dal/.sqlx/query-05891e039a54b70ac406a63b73b6f5521c888c5bc9050333dd18a88c098b2d12.json b/core/lib/dal/.sqlx/query-05891e039a54b70ac406a63b73b6f5521c888c5bc9050333dd18a88c098b2d12.json new file mode 100644 index 00000000000..735d04d2053 --- /dev/null +++ b/core/lib/dal/.sqlx/query-05891e039a54b70ac406a63b73b6f5521c888c5bc9050333dd18a88c098b2d12.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n new_logs AS MATERIALIZED (\n SELECT DISTINCT\n ON (hashed_key)\n hashed_key,\n miniblock_number,\n operation_number\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ORDER BY\n hashed_key,\n miniblock_number DESC,\n operation_number DESC\n )\n \n DELETE FROM storage_logs USING new_logs\n WHERE\n storage_logs.hashed_key = new_logs.hashed_key\n AND storage_logs.miniblock_number <= $2\n AND (storage_logs.miniblock_number, storage_logs.operation_number)\n < (new_logs.miniblock_number, new_logs.operation_number)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "05891e039a54b70ac406a63b73b6f5521c888c5bc9050333dd18a88c098b2d12" +} diff --git a/core/lib/dal/.sqlx/query-05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63.json b/core/lib/dal/.sqlx/query-05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63.json deleted file mode 100644 index f3c85b9b43d..00000000000 --- a/core/lib/dal/.sqlx/query-05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n WHERE\n (\n vm_run_data_blob_url IS NOT NULL\n AND proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n AND status = 'unpicked'\n )\n OR (\n status = 'picked_by_prover'\n AND prover_taken_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n RETURNING\n proof_generation_details.l1_batch_number\n ", - "describe": { - "columns": [ - { - 
"ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Interval" - ] - }, - "nullable": [ - false - ] - }, - "hash": "05c2a77d9f65d435e2df63a300850e42abbaf365a1b041d0e7a809796ef0fe63" -} diff --git a/core/lib/dal/.sqlx/query-2b1aa207a058f66265acf2c21b8ed5d8007789c0fc1eab948f6d7339dfb69147.json b/core/lib/dal/.sqlx/query-08740b96c883fce75b6238bb6bdc95254f94dddb4ed84a675c2655421758b049.json similarity index 80% rename from core/lib/dal/.sqlx/query-2b1aa207a058f66265acf2c21b8ed5d8007789c0fc1eab948f6d7339dfb69147.json rename to core/lib/dal/.sqlx/query-08740b96c883fce75b6238bb6bdc95254f94dddb4ed84a675c2655421758b049.json index 96b48892516..51392a6ed8f 100644 --- a/core/lib/dal/.sqlx/query-2b1aa207a058f66265acf2c21b8ed5d8007789c0fc1eab948f6d7339dfb69147.json +++ b/core/lib/dal/.sqlx/query-08740b96c883fce75b6238bb6bdc95254f94dddb4ed84a675c2655421758b049.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n DELETE FROM transactions\n WHERE\n miniblock_number IS NULL\n AND received_at < NOW() - $1::INTERVAL\n AND is_priority = FALSE\n AND error IS NULL\n RETURNING\n hash\n ", + "query": "\n DELETE FROM transactions\n WHERE\n miniblock_number IS NULL\n AND received_at < NOW() - $1::INTERVAL\n AND is_priority = FALSE\n AND error IS NULL\n RETURNING\n hash\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "2b1aa207a058f66265acf2c21b8ed5d8007789c0fc1eab948f6d7339dfb69147" + "hash": "08740b96c883fce75b6238bb6bdc95254f94dddb4ed84a675c2655421758b049" } diff --git a/core/lib/dal/.sqlx/query-0a0dd4a98128c2ef60d385f5c0674ddd4f2422089604768b19e0b6d948c0cf1b.json b/core/lib/dal/.sqlx/query-0a0dd4a98128c2ef60d385f5c0674ddd4f2422089604768b19e0b6d948c0cf1b.json new file mode 100644 index 00000000000..0d4d8c0dc74 --- /dev/null +++ b/core/lib/dal/.sqlx/query-0a0dd4a98128c2ef60d385f5c0674ddd4f2422089604768b19e0b6d948c0cf1b.json @@ -0,0 +1,100 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n events_select AS (\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n miniblock_number > $1\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n )\n \n SELECT\n miniblocks.hash AS \"block_hash?\",\n address AS \"address!\",\n topic1 AS \"topic1!\",\n topic2 AS \"topic2!\",\n topic3 AS \"topic3!\",\n topic4 AS \"topic4!\",\n value AS \"value!\",\n miniblock_number AS \"miniblock_number!\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n tx_hash AS \"tx_hash!\",\n tx_index_in_block AS \"tx_index_in_block!\",\n event_index_in_block AS \"event_index_in_block!\",\n event_index_in_tx AS \"event_index_in_tx!\",\n miniblocks.timestamp AS \"block_timestamp\"\n FROM\n events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "block_hash?", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "address!", + "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "topic1!", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "topic2!", + "type_info": "Bytea" + }, + { + "ordinal": 4, + "name": "topic3!", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "topic4!", + "type_info": "Bytea" + }, + { + "ordinal": 6, + "name": "value!", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "miniblock_number!", + 
"type_info": "Int8" + }, + { + "ordinal": 8, + "name": "l1_batch_number?", + "type_info": "Int8" + }, + { + "ordinal": 9, + "name": "tx_hash!", + "type_info": "Bytea" + }, + { + "ordinal": 10, + "name": "tx_index_in_block!", + "type_info": "Int4" + }, + { + "ordinal": 11, + "name": "event_index_in_block!", + "type_info": "Int4" + }, + { + "ordinal": 12, + "name": "event_index_in_tx!", + "type_info": "Int4" + }, + { + "ordinal": 13, + "name": "block_timestamp", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + false, + true, + false, + false, + false, + false, + false + ] + }, + "hash": "0a0dd4a98128c2ef60d385f5c0674ddd4f2422089604768b19e0b6d948c0cf1b" +} diff --git a/core/lib/dal/.sqlx/query-0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55.json b/core/lib/dal/.sqlx/query-0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55.json deleted file mode 100644 index 822a6967f6d..00000000000 --- a/core/lib/dal/.sqlx/query-0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n data_availability (l1_batch_number, blob_id, sent_at, created_at, updated_at)\n VALUES\n ($1, $2, $3, NOW(), NOW())\n ON CONFLICT DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text", - "Timestamp" - ] - }, - "nullable": [] - }, - "hash": "0b5d5efeac95d429cf6a5be22153897edf8c868094ad029e2e8fcf286d44fd55" -} diff --git a/core/lib/dal/.sqlx/query-dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d.json b/core/lib/dal/.sqlx/query-0bae50601f2dc3ea7ba9ab6b9f508275cef018c7060c49b9f7a387a14f6a5be8.json similarity index 80% rename from core/lib/dal/.sqlx/query-dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d.json rename to core/lib/dal/.sqlx/query-0bae50601f2dc3ea7ba9ab6b9f508275cef018c7060c49b9f7a387a14f6a5be8.json index 36e56da404e..ca0578f338f 100644 --- a/core/lib/dal/.sqlx/query-dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d.json +++ b/core/lib/dal/.sqlx/query-0bae50601f2dc3ea7ba9ab6b9f508275cef018c7060c49b9f7a387a14f6a5be8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.hash AS block_hash,\n miniblocks.number,\n miniblocks.l1_batch_number,\n miniblocks.timestamp,\n miniblocks.base_fee_per_gas,\n miniblocks.gas_limit AS \"block_gas_limit?\",\n miniblocks.logs_bloom,\n prev_miniblock.hash AS \"parent_hash?\",\n l1_batches.timestamp AS \"l1_batch_timestamp?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"refunded_gas?\",\n transactions.hash AS \"tx_hash?\"\n FROM\n miniblocks\n LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number = $1\n ORDER BY\n transactions.index_in_block ASC\n ", + "query": "\n SELECT\n miniblocks.hash AS block_hash,\n miniblocks.number,\n miniblocks.l1_batch_number,\n miniblocks.timestamp,\n miniblocks.base_fee_per_gas,\n miniblocks.gas_limit AS \"block_gas_limit?\",\n miniblocks.logs_bloom,\n prev_miniblock.hash AS \"parent_hash?\",\n l1_batches.timestamp AS \"l1_batch_timestamp?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"refunded_gas?\",\n 
transactions.hash AS \"tx_hash?\"\n FROM\n miniblocks\n LEFT JOIN\n miniblocks prev_miniblock\n ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number = $1\n ORDER BY\n transactions.index_in_block ASC\n ", "describe": { "columns": [ { @@ -84,5 +84,5 @@ false ] }, - "hash": "dcfc3c0df11b923116af194a26c122dbdbf650edfec6d9c18f96c3bd0064d18d" + "hash": "0bae50601f2dc3ea7ba9ab6b9f508275cef018c7060c49b9f7a387a14f6a5be8" } diff --git a/core/lib/dal/.sqlx/query-1074d0a2e4a4afb9a92f3822e133db7a71aca15698bafba051a8d9a91a4dbc76.json b/core/lib/dal/.sqlx/query-1074d0a2e4a4afb9a92f3822e133db7a71aca15698bafba051a8d9a91a4dbc76.json deleted file mode 100644 index 13e4cdb9431..00000000000 --- a/core/lib/dal/.sqlx/query-1074d0a2e4a4afb9a92f3822e133db7a71aca15698bafba051a8d9a91a4dbc76.json +++ /dev/null @@ -1,112 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "root_hash?", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "commit_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 6, - "name": "committed_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 7, - "name": "prove_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 8, - "name": "proven_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 9, - "name": "execute_tx_hash?", - "type_info": "Text" - }, - { - "ordinal": 10, - "name": "executed_at?", - "type_info": "Timestamp" - }, - { - "ordinal": 11, - "name": "l1_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 12, - "name": "l2_fair_gas_price", - "type_info": "Int8" - }, - { - "ordinal": 13, - "name": "fair_pubdata_price", - "type_info": "Int8" - }, - { - "ordinal": 14, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 15, - "name": "default_aa_code_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ 
- false, - false, - false, - false, - true, - false, - true, - false, - true, - false, - true, - false, - false, - true, - true, - true - ] - }, - "hash": "1074d0a2e4a4afb9a92f3822e133db7a71aca15698bafba051a8d9a91a4dbc76" -} diff --git a/core/lib/dal/.sqlx/query-150d0219cabbabcc9d0e97e3fd94032f3d900b74e62dfdec0c9f61bf2bacb595.json b/core/lib/dal/.sqlx/query-150d0219cabbabcc9d0e97e3fd94032f3d900b74e62dfdec0c9f61bf2bacb595.json new file mode 100644 index 00000000000..e3a8399ca28 --- /dev/null +++ b/core/lib/dal/.sqlx/query-150d0219cabbabcc9d0e97e3fd94032f3d900b74e62dfdec0c9f61bf2bacb595.json @@ -0,0 +1,21 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n snapshot_recovery (\n l1_batch_number,\n l1_batch_timestamp,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_timestamp,\n miniblock_hash,\n protocol_version,\n storage_logs_chunks_processed,\n updated_at,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int8", + "Int8", + "Bytea", + "Int4", + "BoolArray" + ] + }, + "nullable": [] + }, + "hash": "150d0219cabbabcc9d0e97e3fd94032f3d900b74e62dfdec0c9f61bf2bacb595" +} diff --git a/core/lib/dal/.sqlx/query-1823e1ac602ce4ba1db06543af9cb2685cda1ae7ecca83062ede7320c3b4a427.json b/core/lib/dal/.sqlx/query-1823e1ac602ce4ba1db06543af9cb2685cda1ae7ecca83062ede7320c3b4a427.json new file mode 100644 index 00000000000..1e20a9151b9 --- /dev/null +++ b/core/lib/dal/.sqlx/query-1823e1ac602ce4ba1db06543af9cb2685cda1ae7ecca83062ede7320c3b4a427.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n contracts_verification_info (address, verification_info)\n VALUES\n ($1, $2)\n ON CONFLICT (address) DO\n UPDATE\n SET\n verification_info = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bytea", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "1823e1ac602ce4ba1db06543af9cb2685cda1ae7ecca83062ede7320c3b4a427" +} diff --git a/core/lib/dal/.sqlx/query-18d507f6c9fe051da9899e2e4346e94d0b4f902c84e31fca606773cf4c9d17fc.json b/core/lib/dal/.sqlx/query-18d507f6c9fe051da9899e2e4346e94d0b4f902c84e31fca606773cf4c9d17fc.json new file mode 100644 index 00000000000..dc913c57005 --- /dev/null +++ b/core/lib/dal/.sqlx/query-18d507f6c9fe051da9899e2e4346e94d0b4f902c84e31fca606773cf4c9d17fc.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n initial_writes (hashed_key, index, l1_batch_number, created_at, updated_at)\n SELECT\n u.hashed_key,\n u.index,\n $3,\n NOW(),\n NOW()\n FROM\n UNNEST($1::bytea [], $2::bigint []) AS u (hashed_key, index)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "ByteaArray", + "Int8Array", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "18d507f6c9fe051da9899e2e4346e94d0b4f902c84e31fca606773cf4c9d17fc" +} diff --git a/core/lib/dal/.sqlx/query-1a6f4db7fb5ad9b54367c7f9d87ecbc877c8e04d47bf16b9c2e59ba652d2955f.json b/core/lib/dal/.sqlx/query-1a6f4db7fb5ad9b54367c7f9d87ecbc877c8e04d47bf16b9c2e59ba652d2955f.json new file mode 100644 index 00000000000..a9ec3c77906 --- /dev/null +++ b/core/lib/dal/.sqlx/query-1a6f4db7fb5ad9b54367c7f9d87ecbc877c8e04d47bf16b9c2e59ba652d2955f.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n vm_runner_protective_reads (\n l1_batch_number, created_at, updated_at, processing_started_at\n )\n VALUES\n ($1, NOW(), NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n updated_at = NOW(),\n 
processing_started_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "1a6f4db7fb5ad9b54367c7f9d87ecbc877c8e04d47bf16b9c2e59ba652d2955f" +} diff --git a/core/lib/dal/.sqlx/query-1bbfac481c402bcb3bb888b84146d922fa1fc9c202072fbc04cae1bbf97195aa.json b/core/lib/dal/.sqlx/query-1bbfac481c402bcb3bb888b84146d922fa1fc9c202072fbc04cae1bbf97195aa.json deleted file mode 100644 index f24a28ffdc2..00000000000 --- a/core/lib/dal/.sqlx/query-1bbfac481c402bcb3bb888b84146d922fa1fc9c202072fbc04cae1bbf97195aa.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n vm_runner_bwip (l1_batch_number, created_at, updated_at, processing_started_at)\n VALUES\n ($1, NOW(), NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n updated_at = NOW(),\n processing_started_at = NOW()\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "1bbfac481c402bcb3bb888b84146d922fa1fc9c202072fbc04cae1bbf97195aa" -} diff --git a/core/lib/dal/.sqlx/query-1ed6c99dcaa47f1525499362e316a12866726e1f3ca9cf9d89f61c4e97373361.json b/core/lib/dal/.sqlx/query-1ed6c99dcaa47f1525499362e316a12866726e1f3ca9cf9d89f61c4e97373361.json new file mode 100644 index 00000000000..86256d4d0a0 --- /dev/null +++ b/core/lib/dal/.sqlx/query-1ed6c99dcaa47f1525499362e316a12866726e1f3ca9cf9d89f61c4e97373361.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n data_availability (l1_batch_number, blob_id, sent_at, created_at, updated_at)\n VALUES\n ($1, $2, $3, NOW(), NOW())\n ON CONFLICT DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Timestamp" + ] + }, + "nullable": [] + }, + "hash": "1ed6c99dcaa47f1525499362e316a12866726e1f3ca9cf9d89f61c4e97373361" +} diff --git a/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json b/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json deleted file mode 100644 index 3817369ecc1..00000000000 --- a/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n consensus_replica_state (fake_key, global_config, genesis, state)\n VALUES\n (TRUE, $1, $2, $3)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Jsonb", - "Jsonb", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85" -} diff --git a/core/lib/dal/.sqlx/query-2169cc7dfb074566ceb0bd5754d6e7a755c4bc6c25c7e6caff5fd6142813d349.json b/core/lib/dal/.sqlx/query-2169cc7dfb074566ceb0bd5754d6e7a755c4bc6c25c7e6caff5fd6142813d349.json deleted file mode 100644 index e48fddcf617..00000000000 --- a/core/lib/dal/.sqlx/query-2169cc7dfb074566ceb0bd5754d6e7a755c4bc6c25c7e6caff5fd6142813d349.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_proof_generation_details (l1_batch_number, tee_type, status, created_at, updated_at)\n VALUES\n ($1, $2, $3, NOW(), NOW())\n ON CONFLICT (l1_batch_number, tee_type) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text", - "Text" - ] - }, - "nullable": [] - }, - "hash": "2169cc7dfb074566ceb0bd5754d6e7a755c4bc6c25c7e6caff5fd6142813d349" -} diff --git 
a/core/lib/dal/.sqlx/query-3490fe0b778a03c73111bf8cbf426b0b3185a231bbf0b8b132a1a95bc157e827.json b/core/lib/dal/.sqlx/query-21acded689c24b653d149ff169f53b54a1965e100e0949006212f97e6d11a18b.json similarity index 75% rename from core/lib/dal/.sqlx/query-3490fe0b778a03c73111bf8cbf426b0b3185a231bbf0b8b132a1a95bc157e827.json rename to core/lib/dal/.sqlx/query-21acded689c24b653d149ff169f53b54a1965e100e0949006212f97e6d11a18b.json index 3275e94936a..b613aa8d71b 100644 --- a/core/lib/dal/.sqlx/query-3490fe0b778a03c73111bf8cbf426b0b3185a231bbf0b8b132a1a95bc157e827.json +++ b/core/lib/dal/.sqlx/query-21acded689c24b653d149ff169f53b54a1965e100e0949006212f97e6d11a18b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n hashed_key,\n l1_batch_number,\n INDEX\n FROM\n initial_writes\n WHERE\n hashed_key = ANY ($1::bytea[])\n ", + "query": "\n SELECT\n hashed_key,\n l1_batch_number,\n index\n FROM\n initial_writes\n WHERE\n hashed_key = ANY($1::bytea [])\n ", "describe": { "columns": [ { @@ -30,5 +30,5 @@ false ] }, - "hash": "3490fe0b778a03c73111bf8cbf426b0b3185a231bbf0b8b132a1a95bc157e827" + "hash": "21acded689c24b653d149ff169f53b54a1965e100e0949006212f97e6d11a18b" } diff --git a/core/lib/dal/.sqlx/query-eab36591af61369e36e3dab79025ac6758a0a4e367f93a9bd48ec82c51e09755.json b/core/lib/dal/.sqlx/query-2234d7728d91cefaee792c900448aafe4b1aa2250fc535bfcdff39172551d42b.json similarity index 81% rename from core/lib/dal/.sqlx/query-eab36591af61369e36e3dab79025ac6758a0a4e367f93a9bd48ec82c51e09755.json rename to core/lib/dal/.sqlx/query-2234d7728d91cefaee792c900448aafe4b1aa2250fc535bfcdff39172551d42b.json index b9783f771a7..48fa673b2c9 100644 --- a/core/lib/dal/.sqlx/query-eab36591af61369e36e3dab79025ac6758a0a4e367f93a9bd48ec82c51e09755.json +++ b/core/lib/dal/.sqlx/query-2234d7728d91cefaee792c900448aafe4b1aa2250fc535bfcdff39172551d42b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n from_addr IS NOT DISTINCT FROM $2 -- can't just use equality as NULL != NULL\n AND is_gateway = $3\n AND id > (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NOT NULL\n AND eth_txs.from_addr IS NOT DISTINCT FROM $2\n AND is_gateway = $3\n )\n ORDER BY\n id\n LIMIT\n $1\n ", + "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n from_addr IS NOT DISTINCT FROM $2 -- can't just use equality as NULL != NULL\n AND is_gateway = $3\n AND id > (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NOT NULL\n AND eth_txs.from_addr IS NOT DISTINCT FROM $2\n AND is_gateway = $3\n )\n ORDER BY\n id\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -110,5 +110,5 @@ true ] }, - "hash": "eab36591af61369e36e3dab79025ac6758a0a4e367f93a9bd48ec82c51e09755" + "hash": "2234d7728d91cefaee792c900448aafe4b1aa2250fc535bfcdff39172551d42b" } diff --git a/core/lib/dal/.sqlx/query-c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5.json b/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json similarity index 77% rename from core/lib/dal/.sqlx/query-c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5.json rename to core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json index b797ccb4604..b8d6482ea74 100644 --- 
a/core/lib/dal/.sqlx/query-c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5.json +++ b/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE processed_events\n SET\n next_block_to_process = $3\n WHERE\n TYPE = $1\n AND chain_id = $2\n ", + "query": "\n UPDATE processed_events\n SET\n next_block_to_process = $3\n WHERE\n type = $1\n AND chain_id = $2\n ", "describe": { "columns": [], "parameters": { @@ -22,5 +22,5 @@ }, "nullable": [] }, - "hash": "c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5" + "hash": "228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a" } diff --git a/core/lib/dal/.sqlx/query-040eaa878c3473f5edc73b77e572b5ea100f59295cd693d14ee0d5ee089c7981.json b/core/lib/dal/.sqlx/query-229db646c85461858689b81d80f5ddbee82a65c00c5740e9017382c57c279b18.json similarity index 63% rename from core/lib/dal/.sqlx/query-040eaa878c3473f5edc73b77e572b5ea100f59295cd693d14ee0d5ee089c7981.json rename to core/lib/dal/.sqlx/query-229db646c85461858689b81d80f5ddbee82a65c00c5740e9017382c57c279b18.json index c0e0c777cc5..3a21023c4da 100644 --- a/core/lib/dal/.sqlx/query-040eaa878c3473f5edc73b77e572b5ea100f59295cd693d14ee0d5ee089c7981.json +++ b/core/lib/dal/.sqlx/query-229db646c85461858689b81d80f5ddbee82a65c00c5740e9017382c57c279b18.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number\n FROM\n snapshots\n WHERE\n NOT (''::TEXT = ANY (storage_logs_filepaths))\n ORDER BY\n l1_batch_number DESC\n ", + "query": "\n SELECT\n l1_batch_number\n FROM\n snapshots\n WHERE\n NOT (''::TEXT = ANY(storage_logs_filepaths))\n ORDER BY\n l1_batch_number DESC\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "040eaa878c3473f5edc73b77e572b5ea100f59295cd693d14ee0d5ee089c7981" + "hash": "229db646c85461858689b81d80f5ddbee82a65c00c5740e9017382c57c279b18" } diff --git a/core/lib/dal/.sqlx/query-24722ee4ced7f03e60b1b5ecaaa5234d536b064951a67d826ac49b7a3a095a1a.json b/core/lib/dal/.sqlx/query-24865315c100f495a6e2cefb3b0e84d2fcc5d436e52cc82ed2aba664bde3ee6b.json similarity index 71% rename from core/lib/dal/.sqlx/query-24722ee4ced7f03e60b1b5ecaaa5234d536b064951a67d826ac49b7a3a095a1a.json rename to core/lib/dal/.sqlx/query-24865315c100f495a6e2cefb3b0e84d2fcc5d436e52cc82ed2aba664bde3ee6b.json index 194f4faedb1..d343cd5a710 100644 --- a/core/lib/dal/.sqlx/query-24722ee4ced7f03e60b1b5ecaaa5234d536b064951a67d826ac49b7a3a095a1a.json +++ b/core/lib/dal/.sqlx/query-24865315c100f495a6e2cefb3b0e84d2fcc5d436e52cc82ed2aba664bde3ee6b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n hashed_key,\n INDEX\n FROM\n initial_writes\n WHERE\n l1_batch_number = $1\n ORDER BY\n INDEX\n ", + "query": "\n SELECT\n hashed_key,\n index\n FROM\n initial_writes\n WHERE\n l1_batch_number = $1\n ORDER BY\n index\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ false ] }, - "hash": "24722ee4ced7f03e60b1b5ecaaa5234d536b064951a67d826ac49b7a3a095a1a" + "hash": "24865315c100f495a6e2cefb3b0e84d2fcc5d436e52cc82ed2aba664bde3ee6b" } diff --git a/core/lib/dal/.sqlx/query-86dbcf93abdd4206d2d62b140cf5dd11537925d02f5a7f2bae23c8dc48204e3f.json b/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json similarity index 62% rename from core/lib/dal/.sqlx/query-86dbcf93abdd4206d2d62b140cf5dd11537925d02f5a7f2bae23c8dc48204e3f.json rename to 
core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json index 42cf55bd939..9d8cc36189f 100644 --- a/core/lib/dal/.sqlx/query-86dbcf93abdd4206d2d62b140cf5dd11537925d02f5a7f2bae23c8dc48204e3f.json +++ b/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $1\n AND proofs.status = $2\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $1\n AND proofs.status = $2\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -32,5 +32,5 @@ false ] }, - "hash": "86dbcf93abdd4206d2d62b140cf5dd11537925d02f5a7f2bae23c8dc48204e3f" + "hash": "2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d" } diff --git a/core/lib/dal/.sqlx/query-25fb31277591dd7d5d783bd8777f1a855e76b37b6ed36ae612b551f9a6a55633.json b/core/lib/dal/.sqlx/query-25fb31277591dd7d5d783bd8777f1a855e76b37b6ed36ae612b551f9a6a55633.json deleted file mode 100644 index ee88bcdf39b..00000000000 --- a/core/lib/dal/.sqlx/query-25fb31277591dd7d5d783bd8777f1a855e76b37b6ed36ae612b551f9a6a55633.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW())\n ON CONFLICT DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4", - "Int8", - "Bytea", - "Bytea", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "25fb31277591dd7d5d783bd8777f1a855e76b37b6ed36ae612b551f9a6a55633" -} diff --git a/core/lib/dal/.sqlx/query-1e3c88b41bc02cb6a116fa930ae85b8b60165ed757ae1044e83fdc877d95cbd8.json b/core/lib/dal/.sqlx/query-2719fa6e4f282535526ef6c3c7c73dcb740d1d2750fe17fb5330e50323b96b49.json similarity index 90% rename from core/lib/dal/.sqlx/query-1e3c88b41bc02cb6a116fa930ae85b8b60165ed757ae1044e83fdc877d95cbd8.json rename to core/lib/dal/.sqlx/query-2719fa6e4f282535526ef6c3c7c73dcb740d1d2750fe17fb5330e50323b96b49.json index 206d2f91e3b..0fcc6b45700 100644 --- a/core/lib/dal/.sqlx/query-1e3c88b41bc02cb6a116fa930ae85b8b60165ed757ae1044e83fdc877d95cbd8.json +++ b/core/lib/dal/.sqlx/query-2719fa6e4f282535526ef6c3c7c73dcb740d1d2750fe17fb5330e50323b96b49.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblock_number,\n log_index_in_miniblock,\n log_index_in_tx,\n tx_hash,\n NULL::BIGINT AS \"l1_batch_number?\",\n shard_id,\n is_service,\n tx_index_in_miniblock,\n tx_index_in_l1_batch,\n sender,\n key,\n value\n FROM\n l2_to_l1_logs\n WHERE\n tx_hash = ANY ($1)\n ORDER BY\n tx_index_in_l1_batch ASC,\n log_index_in_tx ASC\n ", + "query": "\n SELECT\n miniblock_number,\n log_index_in_miniblock,\n log_index_in_tx,\n tx_hash,\n NULL::BIGINT AS \"l1_batch_number?\",\n shard_id,\n is_service,\n tx_index_in_miniblock,\n tx_index_in_l1_batch,\n sender,\n key,\n value\n FROM\n l2_to_l1_logs\n WHERE\n tx_hash = ANY($1)\n ORDER BY\n tx_index_in_l1_batch ASC,\n log_index_in_tx ASC\n ", "describe": { "columns": [ { @@ -84,5 
+84,5 @@ false ] }, - "hash": "1e3c88b41bc02cb6a116fa930ae85b8b60165ed757ae1044e83fdc877d95cbd8" + "hash": "2719fa6e4f282535526ef6c3c7c73dcb740d1d2750fe17fb5330e50323b96b49" } diff --git a/core/lib/dal/.sqlx/query-69c885498b186f3b7cbb215112ec86783d7da0ec1d008680872f3619cf217923.json b/core/lib/dal/.sqlx/query-28c5a9f99af4d2857346e0df73c9b9086ba42bac53629793a42d67b353c30210.json similarity index 60% rename from core/lib/dal/.sqlx/query-69c885498b186f3b7cbb215112ec86783d7da0ec1d008680872f3619cf217923.json rename to core/lib/dal/.sqlx/query-28c5a9f99af4d2857346e0df73c9b9086ba42bac53629793a42d67b353c30210.json index 82575c807fb..93520d4ded3 100644 --- a/core/lib/dal/.sqlx/query-69c885498b186f3b7cbb215112ec86783d7da0ec1d008680872f3619cf217923.json +++ b/core/lib/dal/.sqlx/query-28c5a9f99af4d2857346e0df73c9b9086ba42bac53629793a42d67b353c30210.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n DELETE FROM transactions\n WHERE\n hash = ANY ($1)\n ", + "query": "\n DELETE FROM transactions\n WHERE\n hash = ANY($1)\n ", "describe": { "columns": [], "parameters": { @@ -10,5 +10,5 @@ }, "nullable": [] }, - "hash": "69c885498b186f3b7cbb215112ec86783d7da0ec1d008680872f3619cf217923" + "hash": "28c5a9f99af4d2857346e0df73c9b9086ba42bac53629793a42d67b353c30210" } diff --git a/core/lib/dal/.sqlx/query-28e03d23835e86f77a27735ac0ec801788d834bc1ea9b8194e4461c3df979f8b.json b/core/lib/dal/.sqlx/query-28e03d23835e86f77a27735ac0ec801788d834bc1ea9b8194e4461c3df979f8b.json deleted file mode 100644 index c61184d56c7..00000000000 --- a/core/lib/dal/.sqlx/query-28e03d23835e86f77a27735ac0ec801788d834bc1ea9b8194e4461c3df979f8b.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n upgrade_id,\n contract_address,\n l1_block_number,\n value,\n paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n miniblock_number,\n index_in_block,\n error,\n execution_info,\n refunded_gas,\n effective_gas_price,\n received_at,\n created_at,\n updated_at\n )\n SELECT\n data_table.hash,\n TRUE,\n data_table.initiator_address,\n data_table.gas_limit,\n data_table.max_fee_per_gas,\n data_table.gas_per_pubdata_limit,\n data_table.data,\n data_table.upgrade_id,\n data_table.contract_address,\n data_table.l1_block_number,\n data_table.value,\n '\\x0000000000000000000000000000000000000000'::bytea,\n '\\x'::bytea,\n data_table.tx_format,\n data_table.l1_tx_mint,\n data_table.l1_tx_refund_recipient,\n $19,\n data_table.index_in_block,\n NULLIF(data_table.error, ''),\n data_table.execution_info,\n data_table.refunded_gas,\n data_table.effective_gas_price,\n NOW(),\n NOW(),\n NOW()\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS hash,\n UNNEST($2::bytea[]) AS initiator_address,\n UNNEST($3::NUMERIC[]) AS gas_limit,\n UNNEST($4::NUMERIC[]) AS max_fee_per_gas,\n UNNEST($5::NUMERIC[]) AS gas_per_pubdata_limit,\n UNNEST($6::jsonb[]) AS data,\n UNNEST($7::INT[]) AS upgrade_id,\n UNNEST($8::bytea[]) AS contract_address,\n UNNEST($9::INT[]) AS l1_block_number,\n UNNEST($10::NUMERIC[]) AS value,\n UNNEST($11::INTEGER[]) AS tx_format,\n UNNEST($12::NUMERIC[]) AS l1_tx_mint,\n UNNEST($13::bytea[]) AS l1_tx_refund_recipient,\n UNNEST($14::INT[]) AS index_in_block,\n UNNEST($15::VARCHAR[]) AS error,\n UNNEST($16::jsonb[]) AS execution_info,\n UNNEST($17::BIGINT[]) AS refunded_gas,\n UNNEST($18::NUMERIC[]) AS effective_gas_price\n ) AS data_table\n ", - 
"describe": { - "columns": [], - "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray", - "NumericArray", - "NumericArray", - "NumericArray", - "JsonbArray", - "Int4Array", - "ByteaArray", - "Int4Array", - "NumericArray", - "Int4Array", - "NumericArray", - "ByteaArray", - "Int4Array", - "VarcharArray", - "JsonbArray", - "Int8Array", - "NumericArray", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "28e03d23835e86f77a27735ac0ec801788d834bc1ea9b8194e4461c3df979f8b" -} diff --git a/core/lib/dal/.sqlx/query-2a2083fd04ebd006eb0aa4e0e5f62f3339768a85aaff9a509901e9f42b09097b.json b/core/lib/dal/.sqlx/query-2a2083fd04ebd006eb0aa4e0e5f62f3339768a85aaff9a509901e9f42b09097b.json new file mode 100644 index 00000000000..a713616d582 --- /dev/null +++ b/core/lib/dal/.sqlx/query-2a2083fd04ebd006eb0aa4e0e5f62f3339768a85aaff9a509901e9f42b09097b.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND data_availability.blob_id IS NULL\n AND pubdata_input IS NOT NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "pubdata_input", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "2a2083fd04ebd006eb0aa4e0e5f62f3339768a85aaff9a509901e9f42b09097b" +} diff --git a/core/lib/dal/.sqlx/query-2d1e0f2e043c193052c9cc20f9efeb5f094160627bc09db4bda2dda9a8c11c44.json b/core/lib/dal/.sqlx/query-2d1e0f2e043c193052c9cc20f9efeb5f094160627bc09db4bda2dda9a8c11c44.json deleted file mode 100644 index 1d9c276b078..00000000000 --- a/core/lib/dal/.sqlx/query-2d1e0f2e043c193052c9cc20f9efeb5f094160627bc09db4bda2dda9a8c11c44.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n contracts_verification_info (address, verification_info)\n VALUES\n ($1, $2)\n ON CONFLICT (address) DO\n UPDATE\n SET\n verification_info = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Bytea", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "2d1e0f2e043c193052c9cc20f9efeb5f094160627bc09db4bda2dda9a8c11c44" -} diff --git a/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json b/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json new file mode 100644 index 00000000000..81ae6c590f9 --- /dev/null +++ b/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json @@ -0,0 +1,112 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN 
eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "root_hash?", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "commit_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 6, + "name": "committed_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 7, + "name": "prove_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 8, + "name": "proven_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 9, + "name": "execute_tx_hash?", + "type_info": "Text" + }, + { + "ordinal": 10, + "name": "executed_at?", + "type_info": "Timestamp" + }, + { + "ordinal": 11, + "name": "l1_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 12, + "name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 13, + "name": "fair_pubdata_price", + "type_info": "Int8" + }, + { + "ordinal": 14, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 15, + "name": "default_aa_code_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + true, + false, + true, + false, + true, + false, + true, + false, + false, + true, + true, + true + ] + }, + "hash": "2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1" +} diff --git a/core/lib/dal/.sqlx/query-2d8da95804a7a300ff2b756e6785c0665ee3ea4086ddb91428fa1b6a00760737.json b/core/lib/dal/.sqlx/query-2d8da95804a7a300ff2b756e6785c0665ee3ea4086ddb91428fa1b6a00760737.json deleted file mode 100644 index 8b8cef7dd77..00000000000 --- a/core/lib/dal/.sqlx/query-2d8da95804a7a300ff2b756e6785c0665ee3ea4086ddb91428fa1b6a00760737.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT\n ON (hashed_key) hashed_key,\n miniblock_number,\n value\n FROM\n storage_logs\n WHERE\n hashed_key = ANY ($1)\n AND miniblock_number <= $2\n AND miniblock_number <= COALESCE(\n (\n SELECT\n MAX(number)\n FROM\n miniblocks\n ),\n (\n SELECT\n miniblock_number\n FROM\n snapshot_recovery\n )\n )\n ORDER BY\n hashed_key,\n miniblock_number DESC,\n operation_number DESC\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "hashed_key", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "miniblock_number", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "value", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "ByteaArray", - "Int8" - ] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "2d8da95804a7a300ff2b756e6785c0665ee3ea4086ddb91428fa1b6a00760737" -} diff --git a/core/lib/dal/.sqlx/query-2e5b9ae1b81b0abfe7a962c93b3119a0a60dc9804175b2baf8b45939c74bd583.json b/core/lib/dal/.sqlx/query-2e5b9ae1b81b0abfe7a962c93b3119a0a60dc9804175b2baf8b45939c74bd583.json deleted file mode 100644 index 20548776830..00000000000 --- 
a/core/lib/dal/.sqlx/query-2e5b9ae1b81b0abfe7a962c93b3119a0a60dc9804175b2baf8b45939c74bd583.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n compiler_versions (VERSION, compiler, created_at, updated_at)\n SELECT\n u.version,\n $2,\n NOW(),\n NOW()\n FROM\n UNNEST($1::TEXT[]) AS u (VERSION)\n ON CONFLICT (VERSION, compiler) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "TextArray", - "Text" - ] - }, - "nullable": [] - }, - "hash": "2e5b9ae1b81b0abfe7a962c93b3119a0a60dc9804175b2baf8b45939c74bd583" -} diff --git a/core/lib/dal/.sqlx/query-5d341e334cbc2d38efc8a060325677d57040037da0e07cef2c7b7246851a3703.json b/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json similarity index 80% rename from core/lib/dal/.sqlx/query-5d341e334cbc2d38efc8a060325677d57040037da0e07cef2c7b7246851a3703.json rename to core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json index f0d8b01d57c..1d515edba81 100644 --- a/core/lib/dal/.sqlx/query-5d341e334cbc2d38efc8a060325677d57040037da0e07cef2c7b7246851a3703.json +++ b/core/lib/dal/.sqlx/query-2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE contract_verification_requests\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n id = (\n SELECT\n id\n FROM\n contract_verification_requests\n WHERE\n status = 'queued'\n OR (\n status = 'in_progress'\n AND processing_started_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n created_at\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n id,\n contract_address,\n source_code,\n contract_name,\n zk_compiler_version,\n compiler_version,\n optimization_used,\n optimizer_mode,\n constructor_arguments,\n is_system,\n force_evmla\n ", + "query": "\n UPDATE contract_verification_requests\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n id = (\n SELECT\n id\n FROM\n contract_verification_requests\n WHERE\n status = 'queued'\n OR (\n status = 'in_progress'\n AND processing_started_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n created_at\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n id,\n contract_address,\n source_code,\n contract_name,\n zk_compiler_version,\n compiler_version,\n optimization_used,\n optimizer_mode,\n constructor_arguments,\n is_system,\n force_evmla\n ", "describe": { "columns": [ { @@ -78,5 +78,5 @@ false ] }, - "hash": "5d341e334cbc2d38efc8a060325677d57040037da0e07cef2c7b7246851a3703" + "hash": "2fa2ba4a62f79d780d239409d426b602aa0cf9b0c5b1ef39b7d07d6309454fcd" } diff --git a/core/lib/dal/.sqlx/query-2506e9edfd4b41ca1e187909631ae942bab5d71daaed7017e3fa62dc5e42ab0a.json b/core/lib/dal/.sqlx/query-2fc088b319ff8f197e50bb379b77de8ba36685dde66d1b3fcf549a52c5001d06.json similarity index 80% rename from core/lib/dal/.sqlx/query-2506e9edfd4b41ca1e187909631ae942bab5d71daaed7017e3fa62dc5e42ab0a.json rename to core/lib/dal/.sqlx/query-2fc088b319ff8f197e50bb379b77de8ba36685dde66d1b3fcf549a52c5001d06.json index c4f8057011d..679d2ff917d 100644 --- a/core/lib/dal/.sqlx/query-2506e9edfd4b41ca1e187909631ae942bab5d71daaed7017e3fa62dc5e42ab0a.json +++ b/core/lib/dal/.sqlx/query-2fc088b319ff8f197e50bb379b77de8ba36685dde66d1b3fcf549a52c5001d06.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n hashed_key,\n l1_batch_number,\n 
INDEX\n FROM\n initial_writes\n ", + "query": "\n SELECT\n hashed_key,\n l1_batch_number,\n index\n FROM\n initial_writes\n ", "describe": { "columns": [ { @@ -28,5 +28,5 @@ false ] }, - "hash": "2506e9edfd4b41ca1e187909631ae942bab5d71daaed7017e3fa62dc5e42ab0a" + "hash": "2fc088b319ff8f197e50bb379b77de8ba36685dde66d1b3fcf549a52c5001d06" } diff --git a/core/lib/dal/.sqlx/query-30e5c8710b1611872da06b72ac681aff512b3a9b2587b8e59848345c07dd8f3b.json b/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json similarity index 89% rename from core/lib/dal/.sqlx/query-30e5c8710b1611872da06b72ac681aff512b3a9b2587b8e59848345c07dd8f3b.json rename to core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json index 1ddc86f29a0..6012c632651 100644 --- a/core/lib/dal/.sqlx/query-30e5c8710b1611872da06b72ac681aff512b3a9b2587b8e59848345c07dd8f3b.json +++ b/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n error = $4\n WHERE\n l1_batch_number = $2\n AND status != $5\n RETURNING\n tee_verifier_input_producer_jobs.attempts\n ", + "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n error = $4\n WHERE\n l1_batch_number = $2\n AND status != $5\n RETURNING\n tee_verifier_input_producer_jobs.attempts\n ", "describe": { "columns": [ { @@ -48,5 +48,5 @@ false ] }, - "hash": "30e5c8710b1611872da06b72ac681aff512b3a9b2587b8e59848345c07dd8f3b" + "hash": "3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79" } diff --git a/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json b/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json deleted file mode 100644 index cabe0a3dc55..00000000000 --- a/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1" -} diff --git a/core/lib/dal/.sqlx/query-32e8bdbf16fdd2b7617d7a909c3eee66178533c3c0f51e2bfa9ff201e17e4014.json b/core/lib/dal/.sqlx/query-32e8bdbf16fdd2b7617d7a909c3eee66178533c3c0f51e2bfa9ff201e17e4014.json new file mode 100644 index 00000000000..f2fdebd416c --- /dev/null +++ b/core/lib/dal/.sqlx/query-32e8bdbf16fdd2b7617d7a909c3eee66178533c3c0f51e2bfa9ff201e17e4014.json @@ -0,0 +1,44 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n table_name,\n PG_TABLE_SIZE(\n ('public.' || QUOTE_IDENT(table_name))::regclass\n ) AS table_size,\n PG_INDEXES_SIZE(\n ('public.' || QUOTE_IDENT(table_name))::regclass\n ) AS indexes_size,\n PG_RELATION_SIZE(\n ('public.' || QUOTE_IDENT(table_name))::regclass\n ) AS relation_size,\n PG_TOTAL_RELATION_SIZE(\n ('public.' 
|| QUOTE_IDENT(table_name))::regclass\n ) AS total_size\n FROM\n information_schema.tables\n WHERE\n table_schema = 'public'\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "table_name", + "type_info": "Name" + }, + { + "ordinal": 1, + "name": "table_size", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "indexes_size", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "relation_size", + "type_info": "Int8" + }, + { + "ordinal": 4, + "name": "total_size", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + null, + null, + null, + null + ] + }, + "hash": "32e8bdbf16fdd2b7617d7a909c3eee66178533c3c0f51e2bfa9ff201e17e4014" +} diff --git a/core/lib/dal/.sqlx/query-3353dd735addf184732843151dd47e1ed84929bf75bb814ee2280d241912d38f.json b/core/lib/dal/.sqlx/query-3353dd735addf184732843151dd47e1ed84929bf75bb814ee2280d241912d38f.json new file mode 100644 index 00000000000..aea0d712857 --- /dev/null +++ b/core/lib/dal/.sqlx/query-3353dd735addf184732843151dd47e1ed84929bf75bb814ee2280d241912d38f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches_consensus_committees (l1_batch_number, attesters, updated_at)\n VALUES\n ($1, $2, NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n l1_batch_number = $1,\n attesters = $2,\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "3353dd735addf184732843151dd47e1ed84929bf75bb814ee2280d241912d38f" +} diff --git a/core/lib/dal/.sqlx/query-33a78184559d0c0d13469fa799e2c5eeab06ff664141fa542229e7c9a46d4410.json b/core/lib/dal/.sqlx/query-33a78184559d0c0d13469fa799e2c5eeab06ff664141fa542229e7c9a46d4410.json deleted file mode 100644 index 6dcc1acfd35..00000000000 --- a/core/lib/dal/.sqlx/query-33a78184559d0c0d13469fa799e2c5eeab06ff664141fa542229e7c9a46d4410.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE transactions\n SET\n hash = data_table.hash,\n signature = data_table.signature,\n gas_limit = data_table.gas_limit,\n max_fee_per_gas = data_table.max_fee_per_gas,\n max_priority_fee_per_gas = data_table.max_priority_fee_per_gas,\n gas_per_pubdata_limit = data_table.gas_per_pubdata_limit,\n input = data_table.input,\n data = data_table.data,\n tx_format = data_table.tx_format,\n miniblock_number = $21,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n effective_gas_price = data_table.effective_gas_price,\n execution_info = data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n value = data_table.value,\n contract_address = data_table.contract_address,\n paymaster = data_table.paymaster,\n paymaster_input = data_table.paymaster_input,\n in_mempool = FALSE,\n updated_at = NOW()\n FROM\n (\n SELECT\n data_table_temp.*\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS initiator_address,\n UNNEST($2::INT[]) AS nonce,\n UNNEST($3::bytea[]) AS hash,\n UNNEST($4::bytea[]) AS signature,\n UNNEST($5::NUMERIC[]) AS gas_limit,\n UNNEST($6::NUMERIC[]) AS max_fee_per_gas,\n UNNEST($7::NUMERIC[]) AS max_priority_fee_per_gas,\n UNNEST($8::NUMERIC[]) AS gas_per_pubdata_limit,\n UNNEST($9::INT[]) AS tx_format,\n UNNEST($10::INTEGER[]) AS index_in_block,\n UNNEST($11::VARCHAR[]) AS error,\n UNNEST($12::NUMERIC[]) AS effective_gas_price,\n UNNEST($13::jsonb[]) AS new_execution_info,\n UNNEST($14::bytea[]) AS input,\n UNNEST($15::jsonb[]) AS data,\n UNNEST($16::BIGINT[]) AS 
refunded_gas,\n UNNEST($17::NUMERIC[]) AS value,\n UNNEST($18::bytea[]) AS contract_address,\n UNNEST($19::bytea[]) AS paymaster,\n UNNEST($20::bytea[]) AS paymaster_input\n ) AS data_table_temp\n JOIN transactions ON transactions.initiator_address = data_table_temp.initiator_address\n AND transactions.nonce = data_table_temp.nonce\n ORDER BY\n transactions.hash\n ) AS data_table\n WHERE\n transactions.initiator_address = data_table.initiator_address\n AND transactions.nonce = data_table.nonce\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "ByteaArray", - "Int4Array", - "ByteaArray", - "ByteaArray", - "NumericArray", - "NumericArray", - "NumericArray", - "NumericArray", - "Int4Array", - "Int4Array", - "VarcharArray", - "NumericArray", - "JsonbArray", - "ByteaArray", - "JsonbArray", - "Int8Array", - "NumericArray", - "ByteaArray", - "ByteaArray", - "ByteaArray", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "33a78184559d0c0d13469fa799e2c5eeab06ff664141fa542229e7c9a46d4410" -} diff --git a/core/lib/dal/.sqlx/query-3518a3e93da8115b54426d72aea0800383e6b73a8b48dc3e589a29a4367cbb5a.json b/core/lib/dal/.sqlx/query-3518a3e93da8115b54426d72aea0800383e6b73a8b48dc3e589a29a4367cbb5a.json deleted file mode 100644 index a2351b5eef8..00000000000 --- a/core/lib/dal/.sqlx/query-3518a3e93da8115b54426d72aea0800383e6b73a8b48dc3e589a29a4367cbb5a.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n call_traces (tx_hash, call_trace)\n SELECT\n u.tx_hash,\n u.call_trace\n FROM\n UNNEST($1::bytea[], $2::bytea[]) AS u (tx_hash, call_trace)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray" - ] - }, - "nullable": [] - }, - "hash": "3518a3e93da8115b54426d72aea0800383e6b73a8b48dc3e589a29a4367cbb5a" -} diff --git a/core/lib/dal/.sqlx/query-be2c8e525d6867c0d2bd254c73ef1719fd1284af1dbb60ea128550224b52da93.json b/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json similarity index 79% rename from core/lib/dal/.sqlx/query-be2c8e525d6867c0d2bd254c73ef1719fd1284af1dbb60ea128550224b52da93.json rename to core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json index 21964d27d60..7245fa3059e 100644 --- a/core/lib/dal/.sqlx/query-be2c8e525d6867c0d2bd254c73ef1719fd1284af1dbb60ea128550224b52da93.json +++ b/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n bytecode\n FROM\n (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", + "query": "\n SELECT\n bytecode\n FROM\n (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", "describe": { "columns": [ { @@ -20,5 +20,5 @@ false ] }, - "hash": "be2c8e525d6867c0d2bd254c73ef1719fd1284af1dbb60ea128550224b52da93" + "hash": "369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980" } diff --git 
a/core/lib/dal/.sqlx/query-37890022be6b5e893cf051266fa16618914d6dedb39a9a40d36484741e8b01f4.json b/core/lib/dal/.sqlx/query-37890022be6b5e893cf051266fa16618914d6dedb39a9a40d36484741e8b01f4.json deleted file mode 100644 index a39a1bdb07b..00000000000 --- a/core/lib/dal/.sqlx/query-37890022be6b5e893cf051266fa16618914d6dedb39a9a40d36484741e8b01f4.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_attestations (pubkey, attestation)\n VALUES\n ($1, $2)\n ON CONFLICT (pubkey) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Bytea", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "37890022be6b5e893cf051266fa16618914d6dedb39a9a40d36484741e8b01f4" -} diff --git a/core/lib/dal/.sqlx/query-469db79fcf8d692507317080e0d843620210aff3a9a7aa8a249db50c1c56d927.json b/core/lib/dal/.sqlx/query-37b653ba8a544224ef6fb88e073d1e4657024c66a42e1bbc261c92c9e1c79930.json similarity index 60% rename from core/lib/dal/.sqlx/query-469db79fcf8d692507317080e0d843620210aff3a9a7aa8a249db50c1c56d927.json rename to core/lib/dal/.sqlx/query-37b653ba8a544224ef6fb88e073d1e4657024c66a42e1bbc261c92c9e1c79930.json index fe91d27abb1..f4b52c688cb 100644 --- a/core/lib/dal/.sqlx/query-469db79fcf8d692507317080e0d843620210aff3a9a7aa8a249db50c1c56d927.json +++ b/core/lib/dal/.sqlx/query-37b653ba8a544224ef6fb88e073d1e4657024c66a42e1bbc261c92c9e1c79930.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE transactions\n SET\n miniblock_number = $1,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n in_mempool = FALSE,\n execution_info = execution_info || data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n effective_gas_price = data_table.effective_gas_price,\n updated_at = NOW()\n FROM\n (\n SELECT\n UNNEST($2::bytea[]) AS hash,\n UNNEST($3::INTEGER[]) AS index_in_block,\n UNNEST($4::VARCHAR[]) AS error,\n UNNEST($5::jsonb[]) AS new_execution_info,\n UNNEST($6::BIGINT[]) AS refunded_gas,\n UNNEST($7::NUMERIC[]) AS effective_gas_price\n ) AS data_table\n WHERE\n transactions.hash = data_table.hash\n ", + "query": "\n UPDATE transactions\n SET\n miniblock_number = $1,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n in_mempool = FALSE,\n execution_info = execution_info || data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n effective_gas_price = data_table.effective_gas_price,\n updated_at = NOW()\n FROM\n (\n SELECT\n UNNEST($2::bytea []) AS hash,\n UNNEST($3::integer []) AS index_in_block,\n UNNEST($4::varchar []) AS error,\n UNNEST($5::jsonb []) AS new_execution_info,\n UNNEST($6::bigint []) AS refunded_gas,\n UNNEST($7::numeric []) AS effective_gas_price\n ) AS data_table\n WHERE\n transactions.hash = data_table.hash\n ", "describe": { "columns": [], "parameters": { @@ -16,5 +16,5 @@ }, "nullable": [] }, - "hash": "469db79fcf8d692507317080e0d843620210aff3a9a7aa8a249db50c1c56d927" + "hash": "37b653ba8a544224ef6fb88e073d1e4657024c66a42e1bbc261c92c9e1c79930" } diff --git a/core/lib/dal/.sqlx/query-3b4d5009ec22f54cc7d305aa11d96ec397767a063dc21aa3add974cb9b070361.json b/core/lib/dal/.sqlx/query-3b4d5009ec22f54cc7d305aa11d96ec397767a063dc21aa3add974cb9b070361.json deleted file mode 100644 index 38890ae58f2..00000000000 --- a/core/lib/dal/.sqlx/query-3b4d5009ec22f54cc7d305aa11d96ec397767a063dc21aa3add974cb9b070361.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n factory_deps 
(bytecode_hash, bytecode, miniblock_number, created_at, updated_at)\n SELECT\n u.bytecode_hash,\n u.bytecode,\n $3,\n NOW(),\n NOW()\n FROM\n UNNEST($1::bytea[], $2::bytea[]) AS u (bytecode_hash, bytecode)\n ON CONFLICT (bytecode_hash) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "3b4d5009ec22f54cc7d305aa11d96ec397767a063dc21aa3add974cb9b070361" -} diff --git a/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json b/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json deleted file mode 100644 index 5652e186ceb..00000000000 --- a/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n protocol_patches (minor, patch, snark_wrapper_vk_hash, created_at)\n VALUES\n ($1, $2, $3, NOW())\n ON CONFLICT DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4", - "Int4", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5" -} diff --git a/core/lib/dal/.sqlx/query-26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448.json b/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json similarity index 79% rename from core/lib/dal/.sqlx/query-26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448.json rename to core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json index ee5de53d6e6..e2a808d41f8 100644 --- a/core/lib/dal/.sqlx/query-26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448.json +++ b/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n next_block_to_process\n FROM\n processed_events\n WHERE\n TYPE = $1\n AND chain_id = $2\n ", + "query": "\n SELECT\n next_block_to_process\n FROM\n processed_events\n WHERE\n type = $1\n AND chain_id = $2\n ", "describe": { "columns": [ { @@ -29,5 +29,5 @@ false ] }, - "hash": "26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448" + "hash": "3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd" } diff --git a/core/lib/dal/.sqlx/query-0fede71ed258790cf70d6d6a32dcf9654c06dfef57863281601c947830ad448a.json b/core/lib/dal/.sqlx/query-400457c97449097837d5e31d4476c32170ea4b47051a100f71a41dd6ed9bb1a9.json similarity index 76% rename from core/lib/dal/.sqlx/query-0fede71ed258790cf70d6d6a32dcf9654c06dfef57863281601c947830ad448a.json rename to core/lib/dal/.sqlx/query-400457c97449097837d5e31d4476c32170ea4b47051a100f71a41dd6ed9bb1a9.json index 6a3174958db..3e64cc5f5b2 100644 --- a/core/lib/dal/.sqlx/query-0fede71ed258790cf70d6d6a32dcf9654c06dfef57863281601c947830ad448a.json +++ b/core/lib/dal/.sqlx/query-400457c97449097837d5e31d4476c32170ea4b47051a100f71a41dd6ed9bb1a9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n eth_txs (\n raw_tx,\n nonce,\n tx_type,\n contract_address,\n predicted_gas_cost,\n created_at,\n updated_at,\n from_addr,\n blob_sidecar,\n is_gateway\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, $8)\n RETURNING\n *\n ", + "query": "\n INSERT INTO\n eth_txs (\n raw_tx,\n nonce,\n tx_type,\n contract_address,\n predicted_gas_cost,\n created_at,\n updated_at,\n 
from_addr,\n blob_sidecar,\n is_gateway\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, $8)\n RETURNING\n *\n ", "describe": { "columns": [ { @@ -115,5 +115,5 @@ true ] }, - "hash": "0fede71ed258790cf70d6d6a32dcf9654c06dfef57863281601c947830ad448a" + "hash": "400457c97449097837d5e31d4476c32170ea4b47051a100f71a41dd6ed9bb1a9" } diff --git a/core/lib/dal/.sqlx/query-403b0b1c93f973aedb41aad7700d656e2a26752fa71ae936ff2eb821b0734b61.json b/core/lib/dal/.sqlx/query-403b0b1c93f973aedb41aad7700d656e2a26752fa71ae936ff2eb821b0734b61.json new file mode 100644 index 00000000000..6cb9dec6838 --- /dev/null +++ b/core/lib/dal/.sqlx/query-403b0b1c93f973aedb41aad7700d656e2a26752fa71ae936ff2eb821b0734b61.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n proof_generation_details (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, 'unpicked', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "403b0b1c93f973aedb41aad7700d656e2a26752fa71ae936ff2eb821b0734b61" +} diff --git a/core/lib/dal/.sqlx/query-406e51d7884cdff36632ccf97912484f0a0d76a8097b61ead5f1f20301ce524d.json b/core/lib/dal/.sqlx/query-406e51d7884cdff36632ccf97912484f0a0d76a8097b61ead5f1f20301ce524d.json deleted file mode 100644 index cc3c2a344eb..00000000000 --- a/core/lib/dal/.sqlx/query-406e51d7884cdff36632ccf97912484f0a0d76a8097b61ead5f1f20301ce524d.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n snapshots (\n VERSION,\n l1_batch_number,\n storage_logs_filepaths,\n factory_deps_filepath,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, ARRAY_FILL(''::TEXT, ARRAY[$3::INTEGER]), $4, NOW(), NOW())\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4", - "Int8", - "Int4", - "Text" - ] - }, - "nullable": [] - }, - "hash": "406e51d7884cdff36632ccf97912484f0a0d76a8097b61ead5f1f20301ce524d" -} diff --git a/core/lib/dal/.sqlx/query-40c17194a2089a7d3fa6b7923c9e03def94f4bf4674def1893e2232be80f57a0.json b/core/lib/dal/.sqlx/query-40c17194a2089a7d3fa6b7923c9e03def94f4bf4674def1893e2232be80f57a0.json deleted file mode 100644 index c655704fd84..00000000000 --- a/core/lib/dal/.sqlx/query-40c17194a2089a7d3fa6b7923c9e03def94f4bf4674def1893e2232be80f57a0.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n contract_verification_requests (\n contract_address,\n source_code,\n contract_name,\n zk_compiler_version,\n compiler_version,\n optimization_used,\n optimizer_mode,\n constructor_arguments,\n is_system,\n force_evmla,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, 'queued', NOW(), NOW())\n RETURNING\n id\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Text", - "Text", - "Text", - "Text", - "Bool", - "Text", - "Bytea", - "Bool", - "Bool" - ] - }, - "nullable": [ - false - ] - }, - "hash": "40c17194a2089a7d3fa6b7923c9e03def94f4bf4674def1893e2232be80f57a0" -} diff --git a/core/lib/dal/.sqlx/query-546c729829083b7eba94fea742c162d717ffcf46fdf5d2ce5d32555353b6da6b.json b/core/lib/dal/.sqlx/query-41b2d575151bff136037d6f1d75f36d50db45560befec6418020b01c55d94a98.json similarity index 52% rename from core/lib/dal/.sqlx/query-546c729829083b7eba94fea742c162d717ffcf46fdf5d2ce5d32555353b6da6b.json rename to 
core/lib/dal/.sqlx/query-41b2d575151bff136037d6f1d75f36d50db45560befec6418020b01c55d94a98.json index 70b94f73909..f5dccac1a01 100644 --- a/core/lib/dal/.sqlx/query-546c729829083b7eba94fea742c162d717ffcf46fdf5d2ce5d32555353b6da6b.json +++ b/core/lib/dal/.sqlx/query-41b2d575151bff136037d6f1d75f36d50db45560befec6418020b01c55d94a98.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE transactions\n SET\n in_mempool = FALSE\n FROM\n UNNEST($1::bytea[]) AS s (address)\n WHERE\n transactions.in_mempool = TRUE\n AND transactions.initiator_address = s.address\n ", + "query": "\n UPDATE transactions\n SET\n in_mempool = FALSE\n FROM\n UNNEST($1::bytea []) AS s (address)\n WHERE\n transactions.in_mempool = TRUE\n AND transactions.initiator_address = s.address\n ", "describe": { "columns": [], "parameters": { @@ -10,5 +10,5 @@ }, "nullable": [] }, - "hash": "546c729829083b7eba94fea742c162d717ffcf46fdf5d2ce5d32555353b6da6b" + "hash": "41b2d575151bff136037d6f1d75f36d50db45560befec6418020b01c55d94a98" } diff --git a/core/lib/dal/.sqlx/query-8b9e5d525c026de97c0a732b1adc8dc4bd57e32dfefe1017acba9a15fc14b895.json b/core/lib/dal/.sqlx/query-442212bb5f28f234cd624f2acc27944b2acedce201da4454aadb79f3545713ae.json similarity index 61% rename from core/lib/dal/.sqlx/query-8b9e5d525c026de97c0a732b1adc8dc4bd57e32dfefe1017acba9a15fc14b895.json rename to core/lib/dal/.sqlx/query-442212bb5f28f234cd624f2acc27944b2acedce201da4454aadb79f3545713ae.json index de369bccec5..621295d4ab8 100644 --- a/core/lib/dal/.sqlx/query-8b9e5d525c026de97c0a732b1adc8dc4bd57e32dfefe1017acba9a15fc14b895.json +++ b/core/lib/dal/.sqlx/query-442212bb5f28f234cd624f2acc27944b2acedce201da4454aadb79f3545713ae.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n storage_logs.hashed_key,\n storage_logs.value,\n initial_writes.index\n FROM\n storage_logs\n INNER JOIN initial_writes ON storage_logs.hashed_key = initial_writes.hashed_key\n WHERE\n storage_logs.miniblock_number = $1\n AND storage_logs.hashed_key >= $2::bytea\n AND storage_logs.hashed_key <= $3::bytea\n ORDER BY\n storage_logs.hashed_key\n ", + "query": "\n SELECT\n storage_logs.hashed_key,\n storage_logs.value,\n initial_writes.index\n FROM\n storage_logs\n INNER JOIN initial_writes ON storage_logs.hashed_key = initial_writes.hashed_key\n WHERE\n storage_logs.miniblock_number = $1\n AND storage_logs.hashed_key >= $2::bytea\n AND storage_logs.hashed_key <= $3::bytea\n ORDER BY\n storage_logs.hashed_key\n ", "describe": { "columns": [ { @@ -32,5 +32,5 @@ false ] }, - "hash": "8b9e5d525c026de97c0a732b1adc8dc4bd57e32dfefe1017acba9a15fc14b895" + "hash": "442212bb5f28f234cd624f2acc27944b2acedce201da4454aadb79f3545713ae" } diff --git a/core/lib/dal/.sqlx/query-45a968c6d667b13bbe9d895e7734fc05eaa158a6f38a87187d7f2c2068a0112a.json b/core/lib/dal/.sqlx/query-45a968c6d667b13bbe9d895e7734fc05eaa158a6f38a87187d7f2c2068a0112a.json deleted file mode 100644 index 36da129b5b7..00000000000 --- a/core/lib/dal/.sqlx/query-45a968c6d667b13bbe9d895e7734fc05eaa158a6f38a87187d7f2c2068a0112a.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n eth_txs_history (\n eth_tx_id,\n base_fee_per_gas,\n priority_fee_per_gas,\n tx_hash,\n signed_raw_tx,\n created_at,\n updated_at,\n blob_base_fee_per_gas,\n sent_at_block,\n sent_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, NOW())\n ON CONFLICT (tx_hash) DO NOTHING\n RETURNING\n id\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int4" - } - ], 
- "parameters": { - "Left": [ - "Int4", - "Int8", - "Int8", - "Text", - "Bytea", - "Int8", - "Int4" - ] - }, - "nullable": [ - false - ] - }, - "hash": "45a968c6d667b13bbe9d895e7734fc05eaa158a6f38a87187d7f2c2068a0112a" -} diff --git a/core/lib/dal/.sqlx/query-ada54322a28012b1b761f3631c4cd6ca26aa2fa565fcf208b6985f461c1868f2.json b/core/lib/dal/.sqlx/query-497e1c08e8460be6e838a47dada8375d56609cd6b7f425361acfd0a411dc48bd.json similarity index 77% rename from core/lib/dal/.sqlx/query-ada54322a28012b1b761f3631c4cd6ca26aa2fa565fcf208b6985f461c1868f2.json rename to core/lib/dal/.sqlx/query-497e1c08e8460be6e838a47dada8375d56609cd6b7f425361acfd0a411dc48bd.json index 04fde45469f..4a47d5b560a 100644 --- a/core/lib/dal/.sqlx/query-ada54322a28012b1b761f3631c4cd6ca26aa2fa565fcf208b6985f461c1868f2.json +++ b/core/lib/dal/.sqlx/query-497e1c08e8460be6e838a47dada8375d56609cd6b7f425361acfd0a411dc48bd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE eth_txs_history\n SET\n updated_at = NOW(),\n confirmed_at = NOW()\n WHERE\n tx_hash = $1\n RETURNING\n id,\n eth_tx_id\n ", + "query": "\n UPDATE eth_txs_history\n SET\n updated_at = NOW(),\n confirmed_at = NOW()\n WHERE\n tx_hash = $1\n RETURNING\n id,\n eth_tx_id\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ false ] }, - "hash": "ada54322a28012b1b761f3631c4cd6ca26aa2fa565fcf208b6985f461c1868f2" + "hash": "497e1c08e8460be6e838a47dada8375d56609cd6b7f425361acfd0a411dc48bd" } diff --git a/core/lib/dal/.sqlx/query-03e00f422f991f8f12aad0083e1c42cfea253a182ca5df143a183cd522ecac33.json b/core/lib/dal/.sqlx/query-499571f0484f4a54541450f935853bb67bd12f2b635af783897f9d2a4f4ead2e.json similarity index 81% rename from core/lib/dal/.sqlx/query-03e00f422f991f8f12aad0083e1c42cfea253a182ca5df143a183cd522ecac33.json rename to core/lib/dal/.sqlx/query-499571f0484f4a54541450f935853bb67bd12f2b635af783897f9d2a4f4ead2e.json index 12498582ada..ddc3772a1a5 100644 --- a/core/lib/dal/.sqlx/query-03e00f422f991f8f12aad0083e1c42cfea253a182ca5df143a183cd522ecac33.json +++ b/core/lib/dal/.sqlx/query-499571f0484f4a54541450f935853bb67bd12f2b635af783897f9d2a4f4ead2e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE transactions\n SET\n l1_batch_number = NULL,\n miniblock_number = NULL,\n error = NULL,\n index_in_block = NULL,\n execution_info = '{}'\n WHERE\n miniblock_number > $1\n RETURNING\n hash\n ", + "query": "\n UPDATE transactions\n SET\n l1_batch_number = NULL,\n miniblock_number = NULL,\n error = NULL,\n index_in_block = NULL,\n execution_info = '{}'\n WHERE\n miniblock_number > $1\n RETURNING\n hash\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "03e00f422f991f8f12aad0083e1c42cfea253a182ca5df143a183cd522ecac33" + "hash": "499571f0484f4a54541450f935853bb67bd12f2b635af783897f9d2a4f4ead2e" } diff --git a/core/lib/dal/.sqlx/query-4b6fdc5021c536e622373dca8729250962832a0d95d0d82bdd56271d6a07e497.json b/core/lib/dal/.sqlx/query-4b6fdc5021c536e622373dca8729250962832a0d95d0d82bdd56271d6a07e497.json new file mode 100644 index 00000000000..e4dfb7298a7 --- /dev/null +++ b/core/lib/dal/.sqlx/query-4b6fdc5021c536e622373dca8729250962832a0d95d0d82bdd56271d6a07e497.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n miniblocks_consensus (number, certificate)\n VALUES\n ($1, $2)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "4b6fdc5021c536e622373dca8729250962832a0d95d0d82bdd56271d6a07e497" +} diff --git 
a/core/lib/dal/.sqlx/query-5017fb273e532be3a093d682d916aaa0430eeeb48c19ec3eabe78996baa9b140.json b/core/lib/dal/.sqlx/query-5017fb273e532be3a093d682d916aaa0430eeeb48c19ec3eabe78996baa9b140.json new file mode 100644 index 00000000000..42fd4087a1f --- /dev/null +++ b/core/lib/dal/.sqlx/query-5017fb273e532be3a093d682d916aaa0430eeeb48c19ec3eabe78996baa9b140.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n eth_txs_history (\n eth_tx_id,\n base_fee_per_gas,\n priority_fee_per_gas,\n tx_hash,\n signed_raw_tx,\n created_at,\n updated_at,\n blob_base_fee_per_gas,\n sent_at_block,\n sent_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW(), NOW(), $6, $7, NOW())\n ON CONFLICT (tx_hash) DO NOTHING\n RETURNING\n id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Int8", + "Text", + "Bytea", + "Int8", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "5017fb273e532be3a093d682d916aaa0430eeeb48c19ec3eabe78996baa9b140" +} diff --git a/core/lib/dal/.sqlx/query-e073cfdc7a00559994ce04eca15f35d55901fb1e6805f23413ea43e3637540a0.json b/core/lib/dal/.sqlx/query-50687903977b973d72acac96b08880eef9af91f5b4fcaebb7beb3a415d4b231d.json similarity index 85% rename from core/lib/dal/.sqlx/query-e073cfdc7a00559994ce04eca15f35d55901fb1e6805f23413ea43e3637540a0.json rename to core/lib/dal/.sqlx/query-50687903977b973d72acac96b08880eef9af91f5b4fcaebb7beb3a415d4b231d.json index 929e4de8c1b..e58a0ec8821 100644 --- a/core/lib/dal/.sqlx/query-e073cfdc7a00559994ce04eca15f35d55901fb1e6805f23413ea43e3637540a0.json +++ b/core/lib/dal/.sqlx/query-50687903977b973d72acac96b08880eef9af91f5b4fcaebb7beb3a415d4b231d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n bytecode,\n bytecode_hash\n FROM\n factory_deps\n WHERE\n bytecode_hash = ANY ($1)\n ", + "query": "\n SELECT\n bytecode,\n bytecode_hash\n FROM\n factory_deps\n WHERE\n bytecode_hash = ANY($1)\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ false ] }, - "hash": "e073cfdc7a00559994ce04eca15f35d55901fb1e6805f23413ea43e3637540a0" + "hash": "50687903977b973d72acac96b08880eef9af91f5b4fcaebb7beb3a415d4b231d" } diff --git a/core/lib/dal/.sqlx/query-5137159db7d3ff456e368e6246b07554ce738a2d7005472e7e76a64a8fbd57ad.json b/core/lib/dal/.sqlx/query-5137159db7d3ff456e368e6246b07554ce738a2d7005472e7e76a64a8fbd57ad.json deleted file mode 100644 index 07ef0aba074..00000000000 --- a/core/lib/dal/.sqlx/query-5137159db7d3ff456e368e6246b07554ce738a2d7005472e7e76a64a8fbd57ad.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n proof_generation_details (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, 'unpicked', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "5137159db7d3ff456e368e6246b07554ce738a2d7005472e7e76a64a8fbd57ad" -} diff --git a/core/lib/dal/.sqlx/query-57851c16dce02999bf386e549791b74555863b0cc8281ef2e9c830fa1d38e227.json b/core/lib/dal/.sqlx/query-57851c16dce02999bf386e549791b74555863b0cc8281ef2e9c830fa1d38e227.json new file mode 100644 index 00000000000..f08a75e5800 --- /dev/null +++ b/core/lib/dal/.sqlx/query-57851c16dce02999bf386e549791b74555863b0cc8281ef2e9c830fa1d38e227.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, 
updated_at\n )\n VALUES\n ($1, $2, $3, NOW(), NOW())\n ON CONFLICT (l1_batch_number, tee_type) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Text" + ] + }, + "nullable": [] + }, + "hash": "57851c16dce02999bf386e549791b74555863b0cc8281ef2e9c830fa1d38e227" +} diff --git a/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json b/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json new file mode 100644 index 00000000000..c2d9fe2e1ac --- /dev/null +++ b/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974" +} diff --git a/core/lib/dal/.sqlx/query-477b6d651c309ed2ab1a361b7f7816c233e16da7330c9327f4020c528f670a33.json b/core/lib/dal/.sqlx/query-6083e429948c139e36cfce2c5ed41edf3965f7ce67341d957ec680a18a0c39ea.json similarity index 57% rename from core/lib/dal/.sqlx/query-477b6d651c309ed2ab1a361b7f7816c233e16da7330c9327f4020c528f670a33.json rename to core/lib/dal/.sqlx/query-6083e429948c139e36cfce2c5ed41edf3965f7ce67341d957ec680a18a0c39ea.json index 68d2f046a17..c6760a336dc 100644 --- a/core/lib/dal/.sqlx/query-477b6d651c309ed2ab1a361b7f7816c233e16da7330c9327f4020c528f670a33.json +++ b/core/lib/dal/.sqlx/query-6083e429948c139e36cfce2c5ed41edf3965f7ce67341d957ec680a18a0c39ea.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE transactions\n SET\n l1_batch_number = $3,\n l1_batch_tx_index = data_table.l1_batch_tx_index,\n updated_at = NOW()\n FROM\n (\n SELECT\n UNNEST($1::INT[]) AS l1_batch_tx_index,\n UNNEST($2::BYTEA[]) AS hash\n ) AS data_table\n WHERE\n transactions.hash = data_table.hash\n ", + "query": "\n UPDATE transactions\n SET\n l1_batch_number = $3,\n l1_batch_tx_index = data_table.l1_batch_tx_index,\n updated_at = NOW()\n FROM\n (\n SELECT\n UNNEST($1::INT []) AS l1_batch_tx_index,\n UNNEST($2::BYTEA []) AS hash\n ) AS data_table\n WHERE\n transactions.hash = data_table.hash\n ", "describe": { "columns": [], "parameters": { @@ -12,5 +12,5 @@ }, "nullable": [] }, - "hash": "477b6d651c309ed2ab1a361b7f7816c233e16da7330c9327f4020c528f670a33" + "hash": "6083e429948c139e36cfce2c5ed41edf3965f7ce67341d957ec680a18a0c39ea" } diff --git a/core/lib/dal/.sqlx/query-6179c3c1a0b2aeb01c0527f6ca4d0651174fd63cf6a8950fa6e7c4838ac5abbf.json b/core/lib/dal/.sqlx/query-6179c3c1a0b2aeb01c0527f6ca4d0651174fd63cf6a8950fa6e7c4838ac5abbf.json deleted file mode 100644 index 443dbf36ea1..00000000000 --- a/core/lib/dal/.sqlx/query-6179c3c1a0b2aeb01c0527f6ca4d0651174fd63cf6a8950fa6e7c4838ac5abbf.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n pruning_log (\n pruned_l1_batch,\n pruned_miniblock,\n TYPE,\n created_at,\n updated_at\n )\n VALUES\n 
($1, $2, $3, NOW(), NOW())\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - { - "Custom": { - "name": "prune_type", - "kind": { - "Enum": [ - "Soft", - "Hard" - ] - } - } - } - ] - }, - "nullable": [] - }, - "hash": "6179c3c1a0b2aeb01c0527f6ca4d0651174fd63cf6a8950fa6e7c4838ac5abbf" -} diff --git a/core/lib/dal/.sqlx/query-718d29517c100ad9d258a7ee90c48449c1c4bed4d0236fcedc177c9478e72262.json b/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json similarity index 57% rename from core/lib/dal/.sqlx/query-718d29517c100ad9d258a7ee90c48449c1c4bed4d0236fcedc177c9478e72262.json rename to core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json index 4e523efe3fc..01ede1d8643 100644 --- a/core/lib/dal/.sqlx/query-718d29517c100ad9d258a7ee90c48449c1c4bed4d0236fcedc177c9478e72262.json +++ b/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_verifier_input_producer_jobs (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "query": "\n INSERT INTO\n tee_verifier_input_producer_jobs (\n l1_batch_number, status, created_at, updated_at\n )\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", "describe": { "columns": [], "parameters": { @@ -24,5 +24,5 @@ }, "nullable": [] }, - "hash": "718d29517c100ad9d258a7ee90c48449c1c4bed4d0236fcedc177c9478e72262" + "hash": "6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199" } diff --git a/core/lib/dal/.sqlx/query-66510caa7683ed90729cb545ac8528f012c2ecaebf6622ca1ae481045604e58d.json b/core/lib/dal/.sqlx/query-66510caa7683ed90729cb545ac8528f012c2ecaebf6622ca1ae481045604e58d.json deleted file mode 100644 index 76900667a9c..00000000000 --- a/core/lib/dal/.sqlx/query-66510caa7683ed90729cb545ac8528f012c2ecaebf6622ca1ae481045604e58d.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n snapshot_recovery (\n l1_batch_number,\n l1_batch_timestamp,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_timestamp,\n miniblock_hash,\n protocol_version,\n storage_logs_chunks_processed,\n updated_at,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, NOW(), NOW())\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - "Bytea", - "Int8", - "Int8", - "Bytea", - "Int4", - "BoolArray" - ] - }, - "nullable": [] - }, - "hash": "66510caa7683ed90729cb545ac8528f012c2ecaebf6622ca1ae481045604e58d" -} diff --git a/core/lib/dal/.sqlx/query-68327709a740a09b198770ec1746bd6ec3a1cd9db6187e99af40ca8fea4c6ace.json b/core/lib/dal/.sqlx/query-68327709a740a09b198770ec1746bd6ec3a1cd9db6187e99af40ca8fea4c6ace.json new file mode 100644 index 00000000000..5d34e780b68 --- /dev/null +++ b/core/lib/dal/.sqlx/query-68327709a740a09b198770ec1746bd6ec3a1cd9db6187e99af40ca8fea4c6ace.json @@ -0,0 +1,35 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT DISTINCT\n ON (hashed_key)\n hashed_key,\n miniblock_number,\n value\n FROM\n storage_logs\n WHERE\n hashed_key = ANY($1)\n AND miniblock_number <= $2\n AND miniblock_number <= COALESCE(\n (\n SELECT\n MAX(number)\n FROM\n miniblocks\n ),\n (\n SELECT\n miniblock_number\n FROM\n snapshot_recovery\n )\n )\n ORDER BY\n hashed_key,\n miniblock_number DESC,\n operation_number DESC\n ", + "describe": { + "columns": [ + { + 
"ordinal": 0, + "name": "hashed_key", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "miniblock_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "value", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "ByteaArray", + "Int8" + ] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "68327709a740a09b198770ec1746bd6ec3a1cd9db6187e99af40ca8fea4c6ace" +} diff --git a/core/lib/dal/.sqlx/query-b42fc86726ac40d0ca38640884da192a143f6a7501b7da65bb7df40a0a4ead70.json b/core/lib/dal/.sqlx/query-6da05c9ac3d0f30b856177e28d28aaad910ca46830c470f8fc1d4265f9ed8631.json similarity index 76% rename from core/lib/dal/.sqlx/query-b42fc86726ac40d0ca38640884da192a143f6a7501b7da65bb7df40a0a4ead70.json rename to core/lib/dal/.sqlx/query-6da05c9ac3d0f30b856177e28d28aaad910ca46830c470f8fc1d4265f9ed8631.json index 625fa4ab36e..b246c1e9f3a 100644 --- a/core/lib/dal/.sqlx/query-b42fc86726ac40d0ca38640884da192a143f6a7501b7da65bb7df40a0a4ead70.json +++ b/core/lib/dal/.sqlx/query-6da05c9ac3d0f30b856177e28d28aaad910ca46830c470f8fc1d4265f9ed8631.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n DELETE FROM snapshots\n WHERE\n l1_batch_number > $1\n RETURNING\n VERSION,\n l1_batch_number,\n factory_deps_filepath,\n storage_logs_filepaths\n ", + "query": "\n DELETE FROM snapshots\n WHERE\n l1_batch_number > $1\n RETURNING\n version,\n l1_batch_number,\n factory_deps_filepath,\n storage_logs_filepaths\n ", "describe": { "columns": [ { @@ -36,5 +36,5 @@ false ] }, - "hash": "b42fc86726ac40d0ca38640884da192a143f6a7501b7da65bb7df40a0a4ead70" + "hash": "6da05c9ac3d0f30b856177e28d28aaad910ca46830c470f8fc1d4265f9ed8631" } diff --git a/core/lib/dal/.sqlx/query-6e725f38a8f04ca5516ba3128569973527841e234d8b4635fb337e4b659b7f71.json b/core/lib/dal/.sqlx/query-6e725f38a8f04ca5516ba3128569973527841e234d8b4635fb337e4b659b7f71.json new file mode 100644 index 00000000000..3a773579b24 --- /dev/null +++ b/core/lib/dal/.sqlx/query-6e725f38a8f04ca5516ba3128569973527841e234d8b4635fb337e4b659b7f71.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n tee_attestations (pubkey, attestation)\n VALUES\n ($1, $2)\n ON CONFLICT (pubkey) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bytea", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "6e725f38a8f04ca5516ba3128569973527841e234d8b4635fb337e4b659b7f71" +} diff --git a/core/lib/dal/.sqlx/query-6e3a3ef443ce8aab55b10eea55f9c8ff11775885aebaf457075c6825305244e5.json b/core/lib/dal/.sqlx/query-6f4463a2f95fec4f8b511d28566babfb146e9932d4142139a49599c4bdbd659f.json similarity index 68% rename from core/lib/dal/.sqlx/query-6e3a3ef443ce8aab55b10eea55f9c8ff11775885aebaf457075c6825305244e5.json rename to core/lib/dal/.sqlx/query-6f4463a2f95fec4f8b511d28566babfb146e9932d4142139a49599c4bdbd659f.json index de474897307..ef514ba7905 100644 --- a/core/lib/dal/.sqlx/query-6e3a3ef443ce8aab55b10eea55f9c8ff11775885aebaf457075c6825305244e5.json +++ b/core/lib/dal/.sqlx/query-6f4463a2f95fec4f8b511d28566babfb146e9932d4142139a49599c4bdbd659f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n transactions.is_priority,\n transactions.initiator_address,\n transactions.gas_limit,\n transactions.gas_per_pubdata_limit,\n transactions.received_at,\n miniblocks.number AS \"miniblock_number?\",\n transactions.error,\n transactions.effective_gas_price,\n transactions.refunded_gas,\n commit_tx.tx_hash AS \"eth_commit_tx_hash?\",\n prove_tx.tx_hash AS \"eth_prove_tx_hash?\",\n 
execute_tx.tx_hash AS \"eth_execute_tx_hash?\"\n FROM\n transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n transactions.hash = $1\n AND transactions.data != '{}'::jsonb\n ", + "query": "\n SELECT\n transactions.is_priority,\n transactions.initiator_address,\n transactions.gas_limit,\n transactions.gas_per_pubdata_limit,\n transactions.received_at,\n miniblocks.number AS \"miniblock_number?\",\n transactions.error,\n transactions.effective_gas_price,\n transactions.refunded_gas,\n commit_tx.tx_hash AS \"eth_commit_tx_hash?\",\n prove_tx.tx_hash AS \"eth_prove_tx_hash?\",\n execute_tx.tx_hash AS \"eth_execute_tx_hash?\"\n FROM\n transactions\n LEFT JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN l1_batches ON l1_batches.number = miniblocks.l1_batch_number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n transactions.hash = $1\n AND transactions.data != '{}'::jsonb\n ", "describe": { "columns": [ { @@ -84,5 +84,5 @@ false ] }, - "hash": "6e3a3ef443ce8aab55b10eea55f9c8ff11775885aebaf457075c6825305244e5" + "hash": "6f4463a2f95fec4f8b511d28566babfb146e9932d4142139a49599c4bdbd659f" } diff --git a/core/lib/dal/.sqlx/query-7204d08f6fb83c83c09eb2942ecb671fa559dfdb29b315b17ae2f86533d247e9.json b/core/lib/dal/.sqlx/query-7204d08f6fb83c83c09eb2942ecb671fa559dfdb29b315b17ae2f86533d247e9.json new file mode 100644 index 00000000000..01adb17775c --- /dev/null +++ b/core/lib/dal/.sqlx/query-7204d08f6fb83c83c09eb2942ecb671fa559dfdb29b315b17ae2f86533d247e9.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n protocol_patches (minor, patch, snark_wrapper_vk_hash, created_at)\n VALUES\n ($1, $2, $3, NOW())\n ON CONFLICT DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int4", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "7204d08f6fb83c83c09eb2942ecb671fa559dfdb29b315b17ae2f86533d247e9" +} diff --git a/core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json b/core/lib/dal/.sqlx/query-7235e50f9ce4b5c4f6f8325117eaccc7108538405743fe1ad71451d0f1842561.json similarity index 67% rename from core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json rename to core/lib/dal/.sqlx/query-7235e50f9ce4b5c4f6f8325117eaccc7108538405743fe1ad71451d0f1842561.json index 84ff845b0d0..f46674b08bc 100644 --- a/core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json +++ b/core/lib/dal/.sqlx/query-7235e50f9ce4b5c4f6f8325117eaccc7108538405743fe1ad71451d0f1842561.json @@ -1,6 +1,6 @@ { 
"db_name": "PostgreSQL", - "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS tx_index_in_block,\n call_trace\n FROM\n call_traces\n INNER JOIN transactions ON tx_hash = transactions.hash\n WHERE\n transactions.miniblock_number = $1\n ORDER BY\n transactions.index_in_block\n ", + "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS tx_index_in_block,\n call_trace\n FROM\n call_traces\n INNER JOIN transactions ON tx_hash = transactions.hash\n WHERE\n transactions.miniblock_number = $1\n ORDER BY\n transactions.index_in_block\n ", "describe": { "columns": [ { @@ -30,5 +30,5 @@ false ] }, - "hash": "6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa" + "hash": "7235e50f9ce4b5c4f6f8325117eaccc7108538405743fe1ad71451d0f1842561" } diff --git a/core/lib/dal/.sqlx/query-785865beac89aff53fe5493d92cfa3245a75736d14b1ff0799f5f05cd4a247a4.json b/core/lib/dal/.sqlx/query-785865beac89aff53fe5493d92cfa3245a75736d14b1ff0799f5f05cd4a247a4.json deleted file mode 100644 index e10f32b85eb..00000000000 --- a/core/lib/dal/.sqlx/query-785865beac89aff53fe5493d92cfa3245a75736d14b1ff0799f5f05cd4a247a4.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n miniblock_number,\n index_in_block,\n error,\n execution_info,\n refunded_gas,\n effective_gas_price,\n received_at,\n created_at,\n updated_at\n )\n SELECT\n data_table.hash,\n TRUE,\n data_table.initiator_address,\n data_table.gas_limit,\n data_table.max_fee_per_gas,\n data_table.gas_per_pubdata_limit,\n data_table.data,\n data_table.priority_op_id,\n data_table.full_fee,\n data_table.layer_2_tip_fee,\n data_table.contract_address,\n data_table.l1_block_number,\n data_table.value,\n '\\x0000000000000000000000000000000000000000'::bytea,\n '\\x'::bytea,\n data_table.tx_format,\n data_table.l1_tx_mint,\n data_table.l1_tx_refund_recipient,\n $21,\n data_table.index_in_block,\n NULLIF(data_table.error, ''),\n data_table.execution_info,\n data_table.refunded_gas,\n data_table.effective_gas_price,\n NOW(),\n NOW(),\n NOW()\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS hash,\n UNNEST($2::bytea[]) AS initiator_address,\n UNNEST($3::NUMERIC[]) AS gas_limit,\n UNNEST($4::NUMERIC[]) AS max_fee_per_gas,\n UNNEST($5::NUMERIC[]) AS gas_per_pubdata_limit,\n UNNEST($6::jsonb[]) AS data,\n UNNEST($7::BIGINT[]) AS priority_op_id,\n UNNEST($8::NUMERIC[]) AS full_fee,\n UNNEST($9::NUMERIC[]) AS layer_2_tip_fee,\n UNNEST($10::bytea[]) AS contract_address,\n UNNEST($11::INT[]) AS l1_block_number,\n UNNEST($12::NUMERIC[]) AS value,\n UNNEST($13::INTEGER[]) AS tx_format,\n UNNEST($14::NUMERIC[]) AS l1_tx_mint,\n UNNEST($15::bytea[]) AS l1_tx_refund_recipient,\n UNNEST($16::INT[]) AS index_in_block,\n UNNEST($17::VARCHAR[]) AS error,\n UNNEST($18::jsonb[]) AS execution_info,\n UNNEST($19::BIGINT[]) AS refunded_gas,\n UNNEST($20::NUMERIC[]) AS effective_gas_price\n ) AS data_table\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray", - "NumericArray", - "NumericArray", - "NumericArray", - "JsonbArray", - "Int8Array", - "NumericArray", - "NumericArray", - "ByteaArray", - "Int4Array", - "NumericArray", - "Int4Array", - 
"NumericArray", - "ByteaArray", - "Int4Array", - "VarcharArray", - "JsonbArray", - "Int8Array", - "NumericArray", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "785865beac89aff53fe5493d92cfa3245a75736d14b1ff0799f5f05cd4a247a4" -} diff --git a/core/lib/dal/.sqlx/query-dac29d7eb16782713acb7aa68aaa8b12b67678e983dc2570be28fe9b1e016c28.json b/core/lib/dal/.sqlx/query-7f3ca3f1d0b3414575e71af98b810ff145ac2f388a246b9c7b32193e2d4208dd.json similarity index 62% rename from core/lib/dal/.sqlx/query-dac29d7eb16782713acb7aa68aaa8b12b67678e983dc2570be28fe9b1e016c28.json rename to core/lib/dal/.sqlx/query-7f3ca3f1d0b3414575e71af98b810ff145ac2f388a246b9c7b32193e2d4208dd.json index c0258c12311..830e43b00ed 100644 --- a/core/lib/dal/.sqlx/query-dac29d7eb16782713acb7aa68aaa8b12b67678e983dc2570be28fe9b1e016c28.json +++ b/core/lib/dal/.sqlx/query-7f3ca3f1d0b3414575e71af98b810ff145ac2f388a246b9c7b32193e2d4208dd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n DELETE FROM call_traces\n WHERE\n tx_hash = ANY ($1)\n ", + "query": "\n DELETE FROM call_traces\n WHERE\n tx_hash = ANY($1)\n ", "describe": { "columns": [], "parameters": { @@ -10,5 +10,5 @@ }, "nullable": [] }, - "hash": "dac29d7eb16782713acb7aa68aaa8b12b67678e983dc2570be28fe9b1e016c28" + "hash": "7f3ca3f1d0b3414575e71af98b810ff145ac2f388a246b9c7b32193e2d4208dd" } diff --git a/core/lib/dal/.sqlx/query-a71a87d91dcf0f624dbd64eb8828f65ff83204ebab2ea31847ae305a098823b0.json b/core/lib/dal/.sqlx/query-868bfdc5d8ee5eab395fa690891751dfd285628a75a35b152bccb3c73e9cc057.json similarity index 83% rename from core/lib/dal/.sqlx/query-a71a87d91dcf0f624dbd64eb8828f65ff83204ebab2ea31847ae305a098823b0.json rename to core/lib/dal/.sqlx/query-868bfdc5d8ee5eab395fa690891751dfd285628a75a35b152bccb3c73e9cc057.json index 272f20e5268..c2e662ef376 100644 --- a/core/lib/dal/.sqlx/query-a71a87d91dcf0f624dbd64eb8828f65ff83204ebab2ea31847ae305a098823b0.json +++ b/core/lib/dal/.sqlx/query-868bfdc5d8ee5eab395fa690891751dfd285628a75a35b152bccb3c73e9cc057.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n from_addr IS NOT DISTINCT FROM $1 -- can't just use equality as NULL != NULL\n AND confirmed_eth_tx_history_id IS NULL\n AND is_gateway = $2\n AND id <= (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NOT NULL\n AND eth_txs.from_addr IS NOT DISTINCT FROM $1\n AND is_gateway = $2\n )\n ORDER BY\n id\n ", + "query": "\n SELECT\n *\n FROM\n eth_txs\n WHERE\n from_addr IS NOT DISTINCT FROM $1 -- can't just use equality as NULL != NULL\n AND confirmed_eth_tx_history_id IS NULL\n AND is_gateway = $2\n AND id <= (\n SELECT\n COALESCE(MAX(eth_tx_id), 0)\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NOT NULL\n AND eth_txs.from_addr IS NOT DISTINCT FROM $1\n AND is_gateway = $2\n )\n ORDER BY\n id\n ", "describe": { "columns": [ { @@ -109,5 +109,5 @@ true ] }, - "hash": "a71a87d91dcf0f624dbd64eb8828f65ff83204ebab2ea31847ae305a098823b0" + "hash": "868bfdc5d8ee5eab395fa690891751dfd285628a75a35b152bccb3c73e9cc057" } diff --git a/core/lib/dal/.sqlx/query-cb0a9f6137fb6bee5d17d644714b3b22ea2cd184932fcd59f5931239c7a78003.json b/core/lib/dal/.sqlx/query-87e968b1ef6b95203b5d1e41ddf73be52d97128642d8528535975f8dea798e6c.json similarity index 85% rename from 
core/lib/dal/.sqlx/query-cb0a9f6137fb6bee5d17d644714b3b22ea2cd184932fcd59f5931239c7a78003.json rename to core/lib/dal/.sqlx/query-87e968b1ef6b95203b5d1e41ddf73be52d97128642d8528535975f8dea798e6c.json index d2da8dff92c..1211bea560b 100644 --- a/core/lib/dal/.sqlx/query-cb0a9f6137fb6bee5d17d644714b3b22ea2cd184932fcd59f5931239c7a78003.json +++ b/core/lib/dal/.sqlx/query-87e968b1ef6b95203b5d1e41ddf73be52d97128642d8528535975f8dea798e6c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n eth_txs_history.*,\n eth_txs.blob_sidecar\n FROM\n eth_txs_history\n LEFT JOIN eth_txs ON eth_tx_id = eth_txs.id\n WHERE\n eth_tx_id = $1\n ORDER BY\n eth_txs_history.created_at DESC\n ", + "query": "\n SELECT\n eth_txs_history.*,\n eth_txs.blob_sidecar\n FROM\n eth_txs_history\n LEFT JOIN eth_txs ON eth_tx_id = eth_txs.id\n WHERE\n eth_tx_id = $1\n ORDER BY\n eth_txs_history.created_at DESC\n ", "describe": { "columns": [ { @@ -90,5 +90,5 @@ true ] }, - "hash": "cb0a9f6137fb6bee5d17d644714b3b22ea2cd184932fcd59f5931239c7a78003" + "hash": "87e968b1ef6b95203b5d1e41ddf73be52d97128642d8528535975f8dea798e6c" } diff --git a/core/lib/dal/.sqlx/query-2cba440c2925631655a7f67486a5a8869da8f10738ba77e3d8e048057b0e7b12.json b/core/lib/dal/.sqlx/query-8ab1634beba74aaef952562a3bcc84b0dd496700a61569929dcc7602ec678b09.json similarity index 70% rename from core/lib/dal/.sqlx/query-2cba440c2925631655a7f67486a5a8869da8f10738ba77e3d8e048057b0e7b12.json rename to core/lib/dal/.sqlx/query-8ab1634beba74aaef952562a3bcc84b0dd496700a61569929dcc7602ec678b09.json index b01a5b41649..5869c1d37a0 100644 --- a/core/lib/dal/.sqlx/query-2cba440c2925631655a7f67486a5a8869da8f10738ba77e3d8e048057b0e7b12.json +++ b/core/lib/dal/.sqlx/query-8ab1634beba74aaef952562a3bcc84b0dd496700a61569929dcc7602ec678b09.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n factory_deps.bytecode,\n transactions.data AS \"data?\",\n transactions.contract_address AS \"contract_address?\"\n FROM\n (\n SELECT\n miniblock_number,\n tx_hash,\n topic3\n FROM\n events\n WHERE\n address = $1\n AND topic1 = $2\n AND topic4 = $3\n LIMIT\n 1\n ) deploy_event\n JOIN factory_deps ON factory_deps.bytecode_hash = deploy_event.topic3\n LEFT JOIN transactions ON transactions.hash = deploy_event.tx_hash\n WHERE\n deploy_event.miniblock_number <= (\n SELECT\n MAX(number)\n FROM\n miniblocks\n )\n ", + "query": "\n SELECT\n factory_deps.bytecode,\n transactions.data AS \"data?\",\n transactions.contract_address AS \"contract_address?\"\n FROM\n (\n SELECT\n miniblock_number,\n tx_hash,\n topic3\n FROM\n events\n WHERE\n address = $1\n AND topic1 = $2\n AND topic4 = $3\n LIMIT\n 1\n ) deploy_event\n JOIN factory_deps ON factory_deps.bytecode_hash = deploy_event.topic3\n LEFT JOIN transactions ON transactions.hash = deploy_event.tx_hash\n WHERE\n deploy_event.miniblock_number <= (\n SELECT\n MAX(number)\n FROM\n miniblocks\n )\n ", "describe": { "columns": [ { @@ -32,5 +32,5 @@ true ] }, - "hash": "2cba440c2925631655a7f67486a5a8869da8f10738ba77e3d8e048057b0e7b12" + "hash": "8ab1634beba74aaef952562a3bcc84b0dd496700a61569929dcc7602ec678b09" } diff --git a/core/lib/dal/.sqlx/query-526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3.json b/core/lib/dal/.sqlx/query-8dd9596258c4e9eeb9178b32ffefaf767e790a54957218de908c7144658b4681.json similarity index 83% rename from core/lib/dal/.sqlx/query-526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3.json rename to 
core/lib/dal/.sqlx/query-8dd9596258c4e9eeb9178b32ffefaf767e790a54957218de908c7144658b4681.json index dbdec4ac5d6..151d2ece97f 100644 --- a/core/lib/dal/.sqlx/query-526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3.json +++ b/core/lib/dal/.sqlx/query-8dd9596258c4e9eeb9178b32ffefaf767e790a54957218de908c7144658b4681.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n NULL::bytea AS \"block_hash\",\n NULL::BIGINT AS \"l1_batch_number?\",\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx,\n NULL::BIGINT AS \"block_timestamp?\"\n FROM\n events\n WHERE\n tx_hash = ANY ($1)\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", + "query": "\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n NULL::bytea AS \"block_hash\",\n NULL::bigint AS \"l1_batch_number?\",\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx,\n NULL::bigint AS \"block_timestamp?\"\n FROM\n events\n WHERE\n tx_hash = ANY($1)\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", "describe": { "columns": [ { @@ -96,5 +96,5 @@ null ] }, - "hash": "526a8e1c231e99faadd5dbbe9c49ed811faffcc108d04b59fdec5a0ab9d13fa3" + "hash": "8dd9596258c4e9eeb9178b32ffefaf767e790a54957218de908c7144658b4681" } diff --git a/core/lib/dal/.sqlx/query-41c9f45d6eb727aafad0d8c18024cee5c602d275bb812022cc8fdabf0a60e151.json b/core/lib/dal/.sqlx/query-8de8fd9aa54e2285a14daf95af18c5de2c9f8f3cf22d0f1145ae67879039e28d.json similarity index 77% rename from core/lib/dal/.sqlx/query-41c9f45d6eb727aafad0d8c18024cee5c602d275bb812022cc8fdabf0a60e151.json rename to core/lib/dal/.sqlx/query-8de8fd9aa54e2285a14daf95af18c5de2c9f8f3cf22d0f1145ae67879039e28d.json index 8c51c26131b..36ecf511a9a 100644 --- a/core/lib/dal/.sqlx/query-41c9f45d6eb727aafad0d8c18024cee5c602d275bb812022cc8fdabf0a60e151.json +++ b/core/lib/dal/.sqlx/query-8de8fd9aa54e2285a14daf95af18c5de2c9f8f3cf22d0f1145ae67879039e28d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n eth_txs_history.id,\n eth_txs_history.eth_tx_id,\n eth_txs_history.tx_hash,\n eth_txs_history.base_fee_per_gas,\n eth_txs_history.priority_fee_per_gas,\n eth_txs_history.signed_raw_tx,\n eth_txs.nonce\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NULL\n AND eth_txs.confirmed_eth_tx_history_id IS NULL\n ORDER BY\n eth_txs_history.id DESC\n ", + "query": "\n SELECT\n eth_txs_history.id,\n eth_txs_history.eth_tx_id,\n eth_txs_history.tx_hash,\n eth_txs_history.base_fee_per_gas,\n eth_txs_history.priority_fee_per_gas,\n eth_txs_history.signed_raw_tx,\n eth_txs.nonce\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NULL\n AND eth_txs.confirmed_eth_tx_history_id IS NULL\n ORDER BY\n eth_txs_history.id DESC\n ", "describe": { "columns": [ { @@ -52,5 +52,5 @@ false ] }, - "hash": "41c9f45d6eb727aafad0d8c18024cee5c602d275bb812022cc8fdabf0a60e151" + "hash": "8de8fd9aa54e2285a14daf95af18c5de2c9f8f3cf22d0f1145ae67879039e28d" } diff --git a/core/lib/dal/.sqlx/query-928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028.json b/core/lib/dal/.sqlx/query-928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028.json deleted file mode 100644 index e192763b189..00000000000 --- 
a/core/lib/dal/.sqlx/query-928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND data_availability.blob_id IS NULL\n AND pubdata_input IS NOT NULL\n ORDER BY\n number\n LIMIT\n $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "pubdata_input", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - true - ] - }, - "hash": "928139bf23bd0d57b8dbdb3283b139300ad3b80ac9e70c00864c3d9f6521b028" -} diff --git a/core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json b/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json similarity index 66% rename from core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json rename to core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json index 0b1f56ef9f3..3b8accb4fda 100644 --- a/core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json +++ b/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version,\n index_in_block\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", + "query": "\n SELECT\n protocol_version,\n index_in_block\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ true ] }, - "hash": "bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e" + "hash": "96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d" } diff --git a/core/lib/dal/.sqlx/query-99c6597a6edfb8736a9f2f5a5d77ca44a1f6cb5e46bf9e5ffe38741bf61c5cc0.json b/core/lib/dal/.sqlx/query-99c6597a6edfb8736a9f2f5a5d77ca44a1f6cb5e46bf9e5ffe38741bf61c5cc0.json new file mode 100644 index 00000000000..fd8786ca92c --- /dev/null +++ b/core/lib/dal/.sqlx/query-99c6597a6edfb8736a9f2f5a5d77ca44a1f6cb5e46bf9e5ffe38741bf61c5cc0.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n DISTINCT\n hashed_key\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hashed_key", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "99c6597a6edfb8736a9f2f5a5d77ca44a1f6cb5e46bf9e5ffe38741bf61c5cc0" +} diff --git a/core/lib/dal/.sqlx/query-9badf9f287fd5f8a11cc855b230d23f83a260adf56000748c4b19752e2948ab5.json b/core/lib/dal/.sqlx/query-9badf9f287fd5f8a11cc855b230d23f83a260adf56000748c4b19752e2948ab5.json new file mode 100644 index 00000000000..aa120d6b8f6 --- /dev/null +++ b/core/lib/dal/.sqlx/query-9badf9f287fd5f8a11cc855b230d23f83a260adf56000748c4b19752e2948ab5.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE transactions\n SET\n hash = data_table.hash,\n signature = data_table.signature,\n gas_limit = data_table.gas_limit,\n max_fee_per_gas = 
data_table.max_fee_per_gas,\n max_priority_fee_per_gas = data_table.max_priority_fee_per_gas,\n gas_per_pubdata_limit = data_table.gas_per_pubdata_limit,\n input = data_table.input,\n data = data_table.data,\n tx_format = data_table.tx_format,\n miniblock_number = $21,\n index_in_block = data_table.index_in_block,\n error = NULLIF(data_table.error, ''),\n effective_gas_price = data_table.effective_gas_price,\n execution_info = data_table.new_execution_info,\n refunded_gas = data_table.refunded_gas,\n value = data_table.value,\n contract_address = data_table.contract_address,\n paymaster = data_table.paymaster,\n paymaster_input = data_table.paymaster_input,\n in_mempool = FALSE,\n updated_at = NOW()\n FROM\n (\n SELECT\n data_table_temp.*\n FROM\n (\n SELECT\n UNNEST($1::bytea []) AS initiator_address,\n UNNEST($2::int []) AS nonce,\n UNNEST($3::bytea []) AS hash,\n UNNEST($4::bytea []) AS signature,\n UNNEST($5::numeric []) AS gas_limit,\n UNNEST($6::numeric []) AS max_fee_per_gas,\n UNNEST($7::numeric []) AS max_priority_fee_per_gas,\n UNNEST($8::numeric []) AS gas_per_pubdata_limit,\n UNNEST($9::int []) AS tx_format,\n UNNEST($10::integer []) AS index_in_block,\n UNNEST($11::varchar []) AS error,\n UNNEST($12::numeric []) AS effective_gas_price,\n UNNEST($13::jsonb []) AS new_execution_info,\n UNNEST($14::bytea []) AS input,\n UNNEST($15::jsonb []) AS data,\n UNNEST($16::bigint []) AS refunded_gas,\n UNNEST($17::numeric []) AS value,\n UNNEST($18::bytea []) AS contract_address,\n UNNEST($19::bytea []) AS paymaster,\n UNNEST($20::bytea []) AS paymaster_input\n ) AS data_table_temp\n JOIN transactions\n ON\n transactions.initiator_address\n = data_table_temp.initiator_address\n AND transactions.nonce = data_table_temp.nonce\n ORDER BY\n transactions.hash\n ) AS data_table\n WHERE\n transactions.initiator_address = data_table.initiator_address\n AND transactions.nonce = data_table.nonce\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "ByteaArray", + "Int4Array", + "ByteaArray", + "ByteaArray", + "NumericArray", + "NumericArray", + "NumericArray", + "NumericArray", + "Int4Array", + "Int4Array", + "VarcharArray", + "NumericArray", + "JsonbArray", + "ByteaArray", + "JsonbArray", + "Int8Array", + "NumericArray", + "ByteaArray", + "ByteaArray", + "ByteaArray", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "9badf9f287fd5f8a11cc855b230d23f83a260adf56000748c4b19752e2948ab5" +} diff --git a/core/lib/dal/.sqlx/query-9f7b35311f0afff1be7dc489348ec87cf78f40429222b3163f3c5235c87e007f.json b/core/lib/dal/.sqlx/query-9f7b35311f0afff1be7dc489348ec87cf78f40429222b3163f3c5235c87e007f.json new file mode 100644 index 00000000000..30d66d0890b --- /dev/null +++ b/core/lib/dal/.sqlx/query-9f7b35311f0afff1be7dc489348ec87cf78f40429222b3163f3c5235c87e007f.json @@ -0,0 +1,31 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n TRUE,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n NOW(),\n NOW()\n )\n ON CONFLICT (hash) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Numeric", + "Numeric", + 
"Numeric", + "Jsonb", + "Int8", + "Numeric", + "Numeric", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Timestamp" + ] + }, + "nullable": [] + }, + "hash": "9f7b35311f0afff1be7dc489348ec87cf78f40429222b3163f3c5235c87e007f" +} diff --git a/core/lib/dal/.sqlx/query-a329c468b25d6d5533766b8ad3d0c8fdca5c5731d490c1ac3ed56266b1d5a8a5.json b/core/lib/dal/.sqlx/query-a329c468b25d6d5533766b8ad3d0c8fdca5c5731d490c1ac3ed56266b1d5a8a5.json new file mode 100644 index 00000000000..d7947345cf6 --- /dev/null +++ b/core/lib/dal/.sqlx/query-a329c468b25d6d5533766b8ad3d0c8fdca5c5731d490c1ac3ed56266b1d5a8a5.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n upgrade_id,\n contract_address,\n l1_block_number,\n value,\n paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n miniblock_number,\n index_in_block,\n error,\n execution_info,\n refunded_gas,\n effective_gas_price,\n received_at,\n created_at,\n updated_at\n )\n SELECT\n data_table.hash,\n TRUE,\n data_table.initiator_address,\n data_table.gas_limit,\n data_table.max_fee_per_gas,\n data_table.gas_per_pubdata_limit,\n data_table.data,\n data_table.upgrade_id,\n data_table.contract_address,\n data_table.l1_block_number,\n data_table.value,\n '\\x0000000000000000000000000000000000000000'::bytea,\n '\\x'::bytea,\n data_table.tx_format,\n data_table.l1_tx_mint,\n data_table.l1_tx_refund_recipient,\n $19,\n data_table.index_in_block,\n NULLIF(data_table.error, ''),\n data_table.execution_info,\n data_table.refunded_gas,\n data_table.effective_gas_price,\n NOW(),\n NOW(),\n NOW()\n FROM\n (\n SELECT\n UNNEST($1::bytea []) AS hash,\n UNNEST($2::bytea []) AS initiator_address,\n UNNEST($3::numeric []) AS gas_limit,\n UNNEST($4::numeric []) AS max_fee_per_gas,\n UNNEST($5::numeric []) AS gas_per_pubdata_limit,\n UNNEST($6::jsonb []) AS data,\n UNNEST($7::int []) AS upgrade_id,\n UNNEST($8::bytea []) AS contract_address,\n UNNEST($9::int []) AS l1_block_number,\n UNNEST($10::numeric []) AS value,\n UNNEST($11::integer []) AS tx_format,\n UNNEST($12::numeric []) AS l1_tx_mint,\n UNNEST($13::bytea []) AS l1_tx_refund_recipient,\n UNNEST($14::int []) AS index_in_block,\n UNNEST($15::varchar []) AS error,\n UNNEST($16::jsonb []) AS execution_info,\n UNNEST($17::bigint []) AS refunded_gas,\n UNNEST($18::numeric []) AS effective_gas_price\n ) AS data_table\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray", + "NumericArray", + "NumericArray", + "NumericArray", + "JsonbArray", + "Int4Array", + "ByteaArray", + "Int4Array", + "NumericArray", + "Int4Array", + "NumericArray", + "ByteaArray", + "Int4Array", + "VarcharArray", + "JsonbArray", + "Int8Array", + "NumericArray", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "a329c468b25d6d5533766b8ad3d0c8fdca5c5731d490c1ac3ed56266b1d5a8a5" +} diff --git a/core/lib/dal/.sqlx/query-6621de90a024cc85946f17948e5c171cd0e4d38bd6e9cfec58b2d7f53a3204e1.json b/core/lib/dal/.sqlx/query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json similarity index 97% rename from core/lib/dal/.sqlx/query-6621de90a024cc85946f17948e5c171cd0e4d38bd6e9cfec58b2d7f53a3204e1.json rename to core/lib/dal/.sqlx/query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json index 8ba437fe2ce..1d27af2bbc1 100644 --- 
a/core/lib/dal/.sqlx/query-6621de90a024cc85946f17948e5c171cd0e4d38bd6e9cfec58b2d7f53a3204e1.json +++ b/core/lib/dal/.sqlx/query-a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE transactions\n SET\n in_mempool = TRUE\n FROM\n (\n SELECT\n hash\n FROM\n (\n SELECT\n hash\n FROM\n transactions\n WHERE\n miniblock_number IS NULL\n AND in_mempool = FALSE\n AND error IS NULL\n AND (\n is_priority = TRUE\n OR (\n max_fee_per_gas >= $2\n AND gas_per_pubdata_limit >= $3\n )\n )\n AND tx_format != $4\n ORDER BY\n is_priority DESC,\n priority_op_id,\n received_at\n LIMIT\n $1\n ) AS subquery1\n ORDER BY\n hash\n ) AS subquery2\n WHERE\n transactions.hash = subquery2.hash\n RETURNING\n transactions.*\n ", + "query": "\n UPDATE transactions\n SET\n in_mempool = TRUE\n FROM\n (\n SELECT\n hash\n FROM\n (\n SELECT\n hash\n FROM\n transactions\n WHERE\n miniblock_number IS NULL\n AND in_mempool = FALSE\n AND error IS NULL\n AND (\n is_priority = TRUE\n OR (\n max_fee_per_gas >= $2\n AND gas_per_pubdata_limit >= $3\n )\n )\n AND tx_format != $4\n ORDER BY\n is_priority DESC,\n priority_op_id,\n received_at\n LIMIT\n $1\n ) AS subquery1\n ORDER BY\n hash\n ) AS subquery2\n WHERE\n transactions.hash = subquery2.hash\n RETURNING\n transactions.*\n ", "describe": { "columns": [ { @@ -231,5 +231,5 @@ true ] }, - "hash": "6621de90a024cc85946f17948e5c171cd0e4d38bd6e9cfec58b2d7f53a3204e1" + "hash": "a36135b5908992324c4308f549ea77a428820fdcea9969aff3b29ca16727357b" } diff --git a/core/lib/dal/.sqlx/query-a4fcd075b68467bb119e49e6b20a69138206dfeb41f3daff4a3eef1de0bed4e4.json b/core/lib/dal/.sqlx/query-a4fcd075b68467bb119e49e6b20a69138206dfeb41f3daff4a3eef1de0bed4e4.json deleted file mode 100644 index 39b0c391ef5..00000000000 --- a/core/lib/dal/.sqlx/query-a4fcd075b68467bb119e49e6b20a69138206dfeb41f3daff4a3eef1de0bed4e4.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n initial_writes (hashed_key, INDEX, l1_batch_number, created_at, updated_at)\n SELECT\n u.hashed_key,\n u.index,\n $3,\n NOW(),\n NOW()\n FROM\n UNNEST($1::bytea[], $2::BIGINT[]) AS u (hashed_key, INDEX)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "ByteaArray", - "Int8Array", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "a4fcd075b68467bb119e49e6b20a69138206dfeb41f3daff4a3eef1de0bed4e4" -} diff --git a/core/lib/dal/.sqlx/query-a65364d10a20420211022dc8234d0a586ce73d09ee28887cb1ad1c0392250629.json b/core/lib/dal/.sqlx/query-a65364d10a20420211022dc8234d0a586ce73d09ee28887cb1ad1c0392250629.json new file mode 100644 index 00000000000..58195689f5d --- /dev/null +++ b/core/lib/dal/.sqlx/query-a65364d10a20420211022dc8234d0a586ce73d09ee28887cb1ad1c0392250629.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n miniblock_number,\n index_in_block,\n error,\n effective_gas_price,\n refunded_gas,\n received_at,\n created_at,\n updated_at\n )\n SELECT\n data_table.hash,\n FALSE,\n data_table.initiator_address,\n data_table.nonce,\n data_table.signature,\n data_table.gas_limit,\n data_table.max_fee_per_gas,\n data_table.max_priority_fee_per_gas,\n data_table.gas_per_pubdata_limit,\n data_table.input,\n 
data_table.data,\n data_table.tx_format,\n data_table.contract_address,\n data_table.value,\n data_table.paymaster,\n data_table.paymaster_input,\n data_table.new_execution_info,\n $21,\n data_table.index_in_block,\n NULLIF(data_table.error, ''),\n data_table.effective_gas_price,\n data_table.refunded_gas,\n NOW(),\n NOW(),\n NOW()\n FROM\n (\n SELECT\n UNNEST($1::bytea []) AS hash,\n UNNEST($2::bytea []) AS initiator_address,\n UNNEST($3::int []) AS nonce,\n UNNEST($4::bytea []) AS signature,\n UNNEST($5::numeric []) AS gas_limit,\n UNNEST($6::numeric []) AS max_fee_per_gas,\n UNNEST($7::numeric []) AS max_priority_fee_per_gas,\n UNNEST($8::numeric []) AS gas_per_pubdata_limit,\n UNNEST($9::bytea []) AS input,\n UNNEST($10::jsonb []) AS data,\n UNNEST($11::int []) AS tx_format,\n UNNEST($12::bytea []) AS contract_address,\n UNNEST($13::numeric []) AS value,\n UNNEST($14::bytea []) AS paymaster,\n UNNEST($15::bytea []) AS paymaster_input,\n UNNEST($16::jsonb []) AS new_execution_info,\n UNNEST($17::integer []) AS index_in_block,\n UNNEST($18::varchar []) AS error,\n UNNEST($19::numeric []) AS effective_gas_price,\n UNNEST($20::bigint []) AS refunded_gas\n ) AS data_table\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray", + "Int4Array", + "ByteaArray", + "NumericArray", + "NumericArray", + "NumericArray", + "NumericArray", + "ByteaArray", + "JsonbArray", + "Int4Array", + "ByteaArray", + "NumericArray", + "ByteaArray", + "ByteaArray", + "JsonbArray", + "Int4Array", + "VarcharArray", + "NumericArray", + "Int8Array", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "a65364d10a20420211022dc8234d0a586ce73d09ee28887cb1ad1c0392250629" +} diff --git a/core/lib/dal/.sqlx/query-a8fcb4d43b702d561a573a30790372281e8502002ff9fec82294ea6b972447d8.json b/core/lib/dal/.sqlx/query-a8fcb4d43b702d561a573a30790372281e8502002ff9fec82294ea6b972447d8.json new file mode 100644 index 00000000000..33187f131df --- /dev/null +++ b/core/lib/dal/.sqlx/query-a8fcb4d43b702d561a573a30790372281e8502002ff9fec82294ea6b972447d8.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n factory_deps (bytecode_hash, bytecode, miniblock_number, created_at, updated_at)\n SELECT\n u.bytecode_hash,\n u.bytecode,\n $3,\n NOW(),\n NOW()\n FROM\n UNNEST($1::bytea [], $2::bytea []) AS u (bytecode_hash, bytecode)\n ON CONFLICT (bytecode_hash) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "a8fcb4d43b702d561a573a30790372281e8502002ff9fec82294ea6b972447d8" +} diff --git a/core/lib/dal/.sqlx/query-e3f4af790fca9149f4edff070170294ec464d184ad732692faa61d2ef99c84e9.json b/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json similarity index 53% rename from core/lib/dal/.sqlx/query-e3f4af790fca9149f4edff070170294ec464d184ad732692faa61d2ef99c84e9.json rename to core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json index abe74036f4c..b17b5828211 100644 --- a/core/lib/dal/.sqlx/query-e3f4af790fca9149f4edff070170294ec464d184ad732692faa61d2ef99c84e9.json +++ b/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = $1,\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $2\n AND l1_batch_number = (\n SELECT\n 
proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN tee_verifier_input_producer_jobs AS inputs ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $3\n AND (\n proofs.status = $4\n OR (\n proofs.status = $1\n AND proofs.prover_taken_at < NOW() - $5::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $6\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", + "query": "\n UPDATE tee_proof_generation_details\n SET\n status = $1,\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $2\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $3\n AND (\n proofs.status = $4\n OR (\n proofs.status = $1\n AND proofs.prover_taken_at < NOW() - $5::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $6\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", "describe": { "columns": [ { @@ -36,5 +36,5 @@ false ] }, - "hash": "e3f4af790fca9149f4edff070170294ec464d184ad732692faa61d2ef99c84e9" + "hash": "aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0" } diff --git a/core/lib/dal/.sqlx/query-acfd5a60fda5782bddb8cba033a5d3a0eed793f7b64ad3ed035f20e896371721.json b/core/lib/dal/.sqlx/query-acfd5a60fda5782bddb8cba033a5d3a0eed793f7b64ad3ed035f20e896371721.json deleted file mode 100644 index 9232ef6d009..00000000000 --- a/core/lib/dal/.sqlx/query-acfd5a60fda5782bddb8cba033a5d3a0eed793f7b64ad3ed035f20e896371721.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n TRUE,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n NOW(),\n NOW()\n )\n ON CONFLICT (hash) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Numeric", - "Numeric", - "Numeric", - "Jsonb", - "Int8", - "Numeric", - "Numeric", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Timestamp" - ] - }, - "nullable": [] - }, - "hash": "acfd5a60fda5782bddb8cba033a5d3a0eed793f7b64ad3ed035f20e896371721" -} diff --git a/core/lib/dal/.sqlx/query-ad8cbc084ef8bc5995031c807bbbd2225e57ae8d42042ac107b35a36c522f0d8.json b/core/lib/dal/.sqlx/query-ad8cbc084ef8bc5995031c807bbbd2225e57ae8d42042ac107b35a36c522f0d8.json new file mode 100644 index 00000000000..b07701218c3 --- /dev/null +++ b/core/lib/dal/.sqlx/query-ad8cbc084ef8bc5995031c807bbbd2225e57ae8d42042ac107b35a36c522f0d8.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n miniblock_number,\n 
index_in_block,\n error,\n execution_info,\n refunded_gas,\n effective_gas_price,\n received_at,\n created_at,\n updated_at\n )\n SELECT\n data_table.hash,\n TRUE,\n data_table.initiator_address,\n data_table.gas_limit,\n data_table.max_fee_per_gas,\n data_table.gas_per_pubdata_limit,\n data_table.data,\n data_table.priority_op_id,\n data_table.full_fee,\n data_table.layer_2_tip_fee,\n data_table.contract_address,\n data_table.l1_block_number,\n data_table.value,\n '\\x0000000000000000000000000000000000000000'::bytea,\n '\\x'::bytea,\n data_table.tx_format,\n data_table.l1_tx_mint,\n data_table.l1_tx_refund_recipient,\n $21,\n data_table.index_in_block,\n NULLIF(data_table.error, ''),\n data_table.execution_info,\n data_table.refunded_gas,\n data_table.effective_gas_price,\n NOW(),\n NOW(),\n NOW()\n FROM\n (\n SELECT\n UNNEST($1::bytea []) AS hash,\n UNNEST($2::bytea []) AS initiator_address,\n UNNEST($3::numeric []) AS gas_limit,\n UNNEST($4::numeric []) AS max_fee_per_gas,\n UNNEST($5::numeric []) AS gas_per_pubdata_limit,\n UNNEST($6::jsonb []) AS data,\n UNNEST($7::bigint []) AS priority_op_id,\n UNNEST($8::numeric []) AS full_fee,\n UNNEST($9::numeric []) AS layer_2_tip_fee,\n UNNEST($10::bytea []) AS contract_address,\n UNNEST($11::int []) AS l1_block_number,\n UNNEST($12::numeric []) AS value,\n UNNEST($13::integer []) AS tx_format,\n UNNEST($14::numeric []) AS l1_tx_mint,\n UNNEST($15::bytea []) AS l1_tx_refund_recipient,\n UNNEST($16::int []) AS index_in_block,\n UNNEST($17::varchar []) AS error,\n UNNEST($18::jsonb []) AS execution_info,\n UNNEST($19::bigint []) AS refunded_gas,\n UNNEST($20::numeric []) AS effective_gas_price\n ) AS data_table\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray", + "NumericArray", + "NumericArray", + "NumericArray", + "JsonbArray", + "Int8Array", + "NumericArray", + "NumericArray", + "ByteaArray", + "Int4Array", + "NumericArray", + "Int4Array", + "NumericArray", + "ByteaArray", + "Int4Array", + "VarcharArray", + "JsonbArray", + "Int8Array", + "NumericArray", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "ad8cbc084ef8bc5995031c807bbbd2225e57ae8d42042ac107b35a36c522f0d8" +} diff --git a/core/lib/dal/.sqlx/query-a1f4334b6bc1642c1bc0ff4ffa34470914b52e2d714e9deb903d62a5d66f0e04.json b/core/lib/dal/.sqlx/query-afe66b4684f2ab1187b49f64f16f21c9bacd8badc624de7664b82dba97d6c9b9.json similarity index 86% rename from core/lib/dal/.sqlx/query-a1f4334b6bc1642c1bc0ff4ffa34470914b52e2d714e9deb903d62a5d66f0e04.json rename to core/lib/dal/.sqlx/query-afe66b4684f2ab1187b49f64f16f21c9bacd8badc624de7664b82dba97d6c9b9.json index 82612fb5327..6bb3c4e9731 100644 --- a/core/lib/dal/.sqlx/query-a1f4334b6bc1642c1bc0ff4ffa34470914b52e2d714e9deb903d62a5d66f0e04.json +++ b/core/lib/dal/.sqlx/query-afe66b4684f2ab1187b49f64f16f21c9bacd8badc624de7664b82dba97d6c9b9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_address,\n l2_address,\n NAME,\n symbol,\n decimals\n FROM\n tokens\n ORDER BY\n symbol\n ", + "query": "\n SELECT\n l1_address,\n l2_address,\n name,\n symbol,\n decimals\n FROM\n tokens\n ORDER BY\n symbol\n ", "describe": { "columns": [ { @@ -40,5 +40,5 @@ false ] }, - "hash": "a1f4334b6bc1642c1bc0ff4ffa34470914b52e2d714e9deb903d62a5d66f0e04" + "hash": "afe66b4684f2ab1187b49f64f16f21c9bacd8badc624de7664b82dba97d6c9b9" } diff --git a/core/lib/dal/.sqlx/query-b1486215ce220d6d2faa7690a5cdd13a572f665e0a5c0edba8076438ba9f044c.json 
b/core/lib/dal/.sqlx/query-b1486215ce220d6d2faa7690a5cdd13a572f665e0a5c0edba8076438ba9f044c.json new file mode 100644 index 00000000000..aab080c3a51 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b1486215ce220d6d2faa7690a5cdd13a572f665e0a5c0edba8076438ba9f044c.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "b1486215ce220d6d2faa7690a5cdd13a572f665e0a5c0edba8076438ba9f044c" +} diff --git a/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json b/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json deleted file mode 100644 index 356fd8e9d99..00000000000 --- a/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches_consensus_committees (l1_batch_number, attesters, updated_at)\n VALUES\n ($1, $2, NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n l1_batch_number = $1,\n attesters = $2,\n updated_at = NOW()\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7" -} diff --git a/core/lib/dal/.sqlx/query-ef70506e90e8add3b95940a7333f8222bd9fbe8ce82d8963f7da03fe6fcf9225.json b/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json similarity index 79% rename from core/lib/dal/.sqlx/query-ef70506e90e8add3b95940a7333f8222bd9fbe8ce82d8963f7da03fe6fcf9225.json rename to core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json index cf102b828aa..f440a265593 100644 --- a/core/lib/dal/.sqlx/query-ef70506e90e8add3b95940a7333f8222bd9fbe8ce82d8963f7da03fe6fcf9225.json +++ b/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n 
miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -126,5 +126,5 @@ false ] }, - "hash": "ef70506e90e8add3b95940a7333f8222bd9fbe8ce82d8963f7da03fe6fcf9225" + "hash": "b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad" } diff --git a/core/lib/dal/.sqlx/query-b47a2961dc2f1347864682c572dd1f5e03924dbf6dd1e824d4aca837e604b7e9.json b/core/lib/dal/.sqlx/query-b47a2961dc2f1347864682c572dd1f5e03924dbf6dd1e824d4aca837e604b7e9.json deleted file mode 100644 index 63acfcb30dc..00000000000 --- a/core/lib/dal/.sqlx/query-b47a2961dc2f1347864682c572dd1f5e03924dbf6dd1e824d4aca837e604b7e9.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n table_name,\n PG_TABLE_SIZE(('public.' || QUOTE_IDENT(table_name))::regclass) AS table_size,\n PG_INDEXES_SIZE(('public.' || QUOTE_IDENT(table_name))::regclass) AS indexes_size,\n PG_RELATION_SIZE(('public.' || QUOTE_IDENT(table_name))::regclass) AS relation_size,\n PG_TOTAL_RELATION_SIZE(('public.' 
|| QUOTE_IDENT(table_name))::regclass) AS total_size\n FROM\n information_schema.tables\n WHERE\n table_schema = 'public'\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "table_name", - "type_info": "Name" - }, - { - "ordinal": 1, - "name": "table_size", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "indexes_size", - "type_info": "Int8" - }, - { - "ordinal": 3, - "name": "relation_size", - "type_info": "Int8" - }, - { - "ordinal": 4, - "name": "total_size", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - true, - null, - null, - null, - null - ] - }, - "hash": "b47a2961dc2f1347864682c572dd1f5e03924dbf6dd1e824d4aca837e604b7e9" -} diff --git a/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json b/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json new file mode 100644 index 00000000000..120fac1021f --- /dev/null +++ b/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c" +} diff --git a/core/lib/dal/.sqlx/query-139c7ea1d6ccfb7d0f56d00cde196f9dd5372b5ef41eaa39af58eed5af777df1.json b/core/lib/dal/.sqlx/query-bba037e1fcffc4415afe3016ff266d19f7ba92c40566e1d098c435da41e95274.json similarity index 52% rename from core/lib/dal/.sqlx/query-139c7ea1d6ccfb7d0f56d00cde196f9dd5372b5ef41eaa39af58eed5af777df1.json rename to core/lib/dal/.sqlx/query-bba037e1fcffc4415afe3016ff266d19f7ba92c40566e1d098c435da41e95274.json index 305bb493651..a72b621dae5 100644 --- a/core/lib/dal/.sqlx/query-139c7ea1d6ccfb7d0f56d00cde196f9dd5372b5ef41eaa39af58eed5af777df1.json +++ b/core/lib/dal/.sqlx/query-bba037e1fcffc4415afe3016ff266d19f7ba92c40566e1d098c435da41e95274.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n events AS (\n SELECT DISTINCT\n ON (events.tx_hash) *\n FROM\n events\n WHERE\n events.address = $1\n AND events.topic1 = $2\n AND events.tx_hash = ANY ($3)\n ORDER BY\n events.tx_hash,\n events.event_index_in_tx DESC\n )\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block,\n transactions.l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error,\n transactions.effective_gas_price,\n transactions.initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas,\n transactions.gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n events.topic4 AS \"contract_address?\",\n miniblocks.timestamp AS \"block_timestamp?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n 
LEFT JOIN events ON events.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY ($3)\n AND transactions.data != '{}'::jsonb\n ", + "query": "\n WITH\n events AS (\n SELECT DISTINCT\n ON (events.tx_hash) *\n FROM\n events\n WHERE\n events.address = $1\n AND events.topic1 = $2\n AND events.tx_hash = ANY($3)\n ORDER BY\n events.tx_hash,\n events.event_index_in_tx DESC\n )\n \n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block,\n transactions.l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error,\n transactions.effective_gas_price,\n transactions.initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas,\n transactions.gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n events.topic4 AS \"contract_address?\",\n miniblocks.timestamp AS \"block_timestamp?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN events ON events.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY($3)\n AND transactions.data != '{}'::jsonb\n ", "describe": { "columns": [ { @@ -110,5 +110,5 @@ false ] }, - "hash": "139c7ea1d6ccfb7d0f56d00cde196f9dd5372b5ef41eaa39af58eed5af777df1" + "hash": "bba037e1fcffc4415afe3016ff266d19f7ba92c40566e1d098c435da41e95274" } diff --git a/core/lib/dal/.sqlx/query-bf481c2b498420f80765b837059cab02b0656d863dbfce4b5dad7bc72e52b05d.json b/core/lib/dal/.sqlx/query-bf481c2b498420f80765b837059cab02b0656d863dbfce4b5dad7bc72e52b05d.json deleted file mode 100644 index c353a35ec5e..00000000000 --- a/core/lib/dal/.sqlx/query-bf481c2b498420f80765b837059cab02b0656d863dbfce4b5dad7bc72e52b05d.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n miniblock_number,\n index_in_block,\n error,\n effective_gas_price,\n refunded_gas,\n received_at,\n created_at,\n updated_at\n )\n SELECT\n data_table.hash,\n FALSE,\n data_table.initiator_address,\n data_table.nonce,\n data_table.signature,\n data_table.gas_limit,\n data_table.max_fee_per_gas,\n data_table.max_priority_fee_per_gas,\n data_table.gas_per_pubdata_limit,\n data_table.input,\n data_table.data,\n data_table.tx_format,\n data_table.contract_address,\n data_table.value,\n data_table.paymaster,\n data_table.paymaster_input,\n data_table.new_execution_info,\n $21,\n data_table.index_in_block,\n NULLIF(data_table.error, ''),\n data_table.effective_gas_price,\n data_table.refunded_gas,\n NOW(),\n NOW(),\n NOW()\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS hash,\n UNNEST($2::bytea[]) AS initiator_address,\n UNNEST($3::INT[]) AS nonce,\n UNNEST($4::bytea[]) AS signature,\n UNNEST($5::NUMERIC[]) AS gas_limit,\n UNNEST($6::NUMERIC[]) AS max_fee_per_gas,\n UNNEST($7::NUMERIC[]) AS max_priority_fee_per_gas,\n UNNEST($8::NUMERIC[]) AS gas_per_pubdata_limit,\n UNNEST($9::bytea[]) AS input,\n UNNEST($10::jsonb[]) AS data,\n UNNEST($11::INT[]) AS tx_format,\n UNNEST($12::bytea[]) AS contract_address,\n UNNEST($13::NUMERIC[]) AS value,\n UNNEST($14::bytea[]) AS paymaster,\n UNNEST($15::bytea[]) AS paymaster_input,\n 
UNNEST($16::jsonb[]) AS new_execution_info,\n UNNEST($17::INTEGER[]) AS index_in_block,\n UNNEST($18::VARCHAR[]) AS error,\n UNNEST($19::NUMERIC[]) AS effective_gas_price,\n UNNEST($20::BIGINT[]) AS refunded_gas\n ) AS data_table\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "ByteaArray", - "ByteaArray", - "Int4Array", - "ByteaArray", - "NumericArray", - "NumericArray", - "NumericArray", - "NumericArray", - "ByteaArray", - "JsonbArray", - "Int4Array", - "ByteaArray", - "NumericArray", - "ByteaArray", - "ByteaArray", - "JsonbArray", - "Int4Array", - "VarcharArray", - "NumericArray", - "Int8Array", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "bf481c2b498420f80765b837059cab02b0656d863dbfce4b5dad7bc72e52b05d" -} diff --git a/core/lib/dal/.sqlx/query-bfc84bcf0985446b337467dd1da709dbee508ad6d1cae43e477cf1bef8cb4aa9.json b/core/lib/dal/.sqlx/query-bfc84bcf0985446b337467dd1da709dbee508ad6d1cae43e477cf1bef8cb4aa9.json deleted file mode 100644 index 8079d52a703..00000000000 --- a/core/lib/dal/.sqlx/query-bfc84bcf0985446b337467dd1da709dbee508ad6d1cae43e477cf1bef8cb4aa9.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT\n hashed_key\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "hashed_key", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "bfc84bcf0985446b337467dd1da709dbee508ad6d1cae43e477cf1bef8cb4aa9" -} diff --git a/core/lib/dal/.sqlx/query-c02f404ce9b0f92b8052ef6f3eaabda70cb9c56ae3e30dc0a8257e43d6714155.json b/core/lib/dal/.sqlx/query-c013947660883612b83bfd207c26c5e9dc50b533524bf856740215aacb8ff5bd.json similarity index 83% rename from core/lib/dal/.sqlx/query-c02f404ce9b0f92b8052ef6f3eaabda70cb9c56ae3e30dc0a8257e43d6714155.json rename to core/lib/dal/.sqlx/query-c013947660883612b83bfd207c26c5e9dc50b533524bf856740215aacb8ff5bd.json index 53a0aa152f9..b8728de2dac 100644 --- a/core/lib/dal/.sqlx/query-c02f404ce9b0f92b8052ef6f3eaabda70cb9c56ae3e30dc0a8257e43d6714155.json +++ b/core/lib/dal/.sqlx/query-c013947660883612b83bfd207c26c5e9dc50b533524bf856740215aacb8ff5bd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n eth_txs_history.*,\n eth_txs.blob_sidecar\n FROM\n eth_txs_history\n LEFT JOIN eth_txs ON eth_tx_id = eth_txs.id\n WHERE\n eth_tx_id = $1\n ORDER BY\n eth_txs_history.created_at DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n eth_txs_history.*,\n eth_txs.blob_sidecar\n FROM\n eth_txs_history\n LEFT JOIN eth_txs ON eth_tx_id = eth_txs.id\n WHERE\n eth_tx_id = $1\n ORDER BY\n eth_txs_history.created_at DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -90,5 +90,5 @@ true ] }, - "hash": "c02f404ce9b0f92b8052ef6f3eaabda70cb9c56ae3e30dc0a8257e43d6714155" + "hash": "c013947660883612b83bfd207c26c5e9dc50b533524bf856740215aacb8ff5bd" } diff --git a/core/lib/dal/.sqlx/query-afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66.json b/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json similarity index 52% rename from core/lib/dal/.sqlx/query-afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66.json rename to core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json index bb0d73ee6c8..61832d25fd2 100644 --- a/core/lib/dal/.sqlx/query-afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66.json +++ 
b/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n processed_events (\n TYPE,\n chain_id,\n next_block_to_process\n )\n VALUES\n ($1, $2, $3)\n ", + "query": "\n INSERT INTO\n processed_events (\n type,\n chain_id,\n next_block_to_process\n )\n VALUES\n ($1, $2, $3)\n ", "describe": { "columns": [], "parameters": { @@ -22,5 +22,5 @@ }, "nullable": [] }, - "hash": "afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66" + "hash": "c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1" } diff --git a/core/lib/dal/.sqlx/query-c2fe6a5476e69c9588eec73baba9d0e2d571533d4d5f683919987b6f8cbb00e0.json b/core/lib/dal/.sqlx/query-c2fe6a5476e69c9588eec73baba9d0e2d571533d4d5f683919987b6f8cbb00e0.json deleted file mode 100644 index bdabc52d137..00000000000 --- a/core/lib/dal/.sqlx/query-c2fe6a5476e69c9588eec73baba9d0e2d571533d4d5f683919987b6f8cbb00e0.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks_consensus (number, certificate)\n VALUES\n ($1, $2)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "c2fe6a5476e69c9588eec73baba9d0e2d571533d4d5f683919987b6f8cbb00e0" -} diff --git a/core/lib/dal/.sqlx/query-c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479.json b/core/lib/dal/.sqlx/query-c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479.json deleted file mode 100644 index 1c15bde02fd..00000000000 --- a/core/lib/dal/.sqlx/query-c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479.json +++ /dev/null @@ -1,100 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n events_select AS (\n SELECT\n address,\n topic1,\n topic2,\n topic3,\n topic4,\n value,\n miniblock_number,\n tx_hash,\n tx_index_in_block,\n event_index_in_block,\n event_index_in_tx\n FROM\n events\n WHERE\n miniblock_number > $1\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n )\n SELECT\n miniblocks.hash AS \"block_hash?\",\n address AS \"address!\",\n topic1 AS \"topic1!\",\n topic2 AS \"topic2!\",\n topic3 AS \"topic3!\",\n topic4 AS \"topic4!\",\n value AS \"value!\",\n miniblock_number AS \"miniblock_number!\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n tx_hash AS \"tx_hash!\",\n tx_index_in_block AS \"tx_index_in_block!\",\n event_index_in_block AS \"event_index_in_block!\",\n event_index_in_tx AS \"event_index_in_tx!\",\n miniblocks.timestamp AS \"block_timestamp\"\n FROM\n events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY\n miniblock_number ASC,\n event_index_in_block ASC\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "block_hash?", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "address!", - "type_info": "Bytea" - }, - { - "ordinal": 2, - "name": "topic1!", - "type_info": "Bytea" - }, - { - "ordinal": 3, - "name": "topic2!", - "type_info": "Bytea" - }, - { - "ordinal": 4, - "name": "topic3!", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "topic4!", - "type_info": "Bytea" - }, - { - "ordinal": 6, - "name": "value!", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "miniblock_number!", - "type_info": "Int8" - }, - { - "ordinal": 8, - "name": "l1_batch_number?", - "type_info": "Int8" - }, - { - "ordinal": 9, - "name": "tx_hash!", - "type_info": "Bytea" - }, - { - "ordinal": 10, - "name": 
"tx_index_in_block!", - "type_info": "Int4" - }, - { - "ordinal": 11, - "name": "event_index_in_block!", - "type_info": "Int4" - }, - { - "ordinal": 12, - "name": "event_index_in_tx!", - "type_info": "Int4" - }, - { - "ordinal": 13, - "name": "block_timestamp", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false, - false, - true, - false, - false, - false, - false, - false - ] - }, - "hash": "c3af06cc232adb93f16456da07733acd2066a5e238088b39b982b10770f51479" -} diff --git a/core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json b/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json similarity index 78% rename from core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json rename to core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json index 3297d411d8a..162c722add9 100644 --- a/core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json +++ b/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -48,5 +48,5 @@ false ] }, - "hash": "85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc" + "hash": "c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7" } diff --git a/core/lib/dal/.sqlx/query-c5aef75dbeb520c965a0996abed9713f437db492e2075ca69e11e2ef5728ccaa.json b/core/lib/dal/.sqlx/query-c5aef75dbeb520c965a0996abed9713f437db492e2075ca69e11e2ef5728ccaa.json deleted file mode 100644 index 6dd2f6cc7a9..00000000000 --- a/core/lib/dal/.sqlx/query-c5aef75dbeb520c965a0996abed9713f437db492e2075ca69e11e2ef5728ccaa.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n base_token_ratios (numerator, denominator, ratio_timestamp, created_at, updated_at)\n VALUES\n ($1, $2, $3, NOW(), NOW())\n RETURNING\n id\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Numeric", - "Numeric", - "Timestamp" - ] - }, - "nullable": [ - false - ] - }, - "hash": "c5aef75dbeb520c965a0996abed9713f437db492e2075ca69e11e2ef5728ccaa" -} diff --git a/core/lib/dal/.sqlx/query-c5dfe49f8042f773ced96f8363819da280e72546298b615b88662fa4a19f2bdf.json b/core/lib/dal/.sqlx/query-c5dfe49f8042f773ced96f8363819da280e72546298b615b88662fa4a19f2bdf.json new file mode 100644 index 00000000000..532d1d929b4 --- /dev/null +++ 
b/core/lib/dal/.sqlx/query-c5dfe49f8042f773ced96f8363819da280e72546298b615b88662fa4a19f2bdf.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n vm_runner_bwip (l1_batch_number, created_at, updated_at, processing_started_at)\n VALUES\n ($1, NOW(), NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n updated_at = NOW(),\n processing_started_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "c5dfe49f8042f773ced96f8363819da280e72546298b615b88662fa4a19f2bdf" +} diff --git a/core/lib/dal/.sqlx/query-c69d19005dd28bd0abfc84e29fd9442f610a6c57280615eb8c982a4afb847aff.json b/core/lib/dal/.sqlx/query-c69d19005dd28bd0abfc84e29fd9442f610a6c57280615eb8c982a4afb847aff.json new file mode 100644 index 00000000000..b12a7fb2490 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c69d19005dd28bd0abfc84e29fd9442f610a6c57280615eb8c982a4afb847aff.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n snapshots (\n version,\n l1_batch_number,\n storage_logs_filepaths,\n factory_deps_filepath,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, ARRAY_FILL(''::TEXT, ARRAY[$3::INTEGER]), $4, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int8", + "Int4", + "Text" + ] + }, + "nullable": [] + }, + "hash": "c69d19005dd28bd0abfc84e29fd9442f610a6c57280615eb8c982a4afb847aff" +} diff --git a/core/lib/dal/.sqlx/query-c8dd57fb86247e67896485c6295d5cfa6c8498e8a58b8ae6fe3a1584401d3b4b.json b/core/lib/dal/.sqlx/query-c8dd57fb86247e67896485c6295d5cfa6c8498e8a58b8ae6fe3a1584401d3b4b.json new file mode 100644 index 00000000000..f442976281d --- /dev/null +++ b/core/lib/dal/.sqlx/query-c8dd57fb86247e67896485c6295d5cfa6c8498e8a58b8ae6fe3a1584401d3b4b.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n consensus_replica_state (fake_key, global_config, genesis, state)\n VALUES\n (TRUE, $1, $2, $3)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Jsonb", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "c8dd57fb86247e67896485c6295d5cfa6c8498e8a58b8ae6fe3a1584401d3b4b" +} diff --git a/core/lib/dal/.sqlx/query-6849be0788a509e4e68acc2da4bfadbcfc343374fad173df0cc8db38419a9726.json b/core/lib/dal/.sqlx/query-c9742c7f58c5c0f676cee0d0d3a9309c1b45d0134a6c6e15533443353db3bee6.json similarity index 63% rename from core/lib/dal/.sqlx/query-6849be0788a509e4e68acc2da4bfadbcfc343374fad173df0cc8db38419a9726.json rename to core/lib/dal/.sqlx/query-c9742c7f58c5c0f676cee0d0d3a9309c1b45d0134a6c6e15533443353db3bee6.json index 72dbb2a4178..cabf48d4df7 100644 --- a/core/lib/dal/.sqlx/query-6849be0788a509e4e68acc2da4bfadbcfc343374fad173df0cc8db38419a9726.json +++ b/core/lib/dal/.sqlx/query-c9742c7f58c5c0f676cee0d0d3a9309c1b45d0134a6c6e15533443353db3bee6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n DELETE FROM tokens\n WHERE\n l2_address = ANY ($1)\n ", + "query": "\n DELETE FROM tokens\n WHERE\n l2_address = ANY($1)\n ", "describe": { "columns": [], "parameters": { @@ -10,5 +10,5 @@ }, "nullable": [] }, - "hash": "6849be0788a509e4e68acc2da4bfadbcfc343374fad173df0cc8db38419a9726" + "hash": "c9742c7f58c5c0f676cee0d0d3a9309c1b45d0134a6c6e15533443353db3bee6" } diff --git a/core/lib/dal/.sqlx/query-c9a842d04e8b225e43f07f76541dc766262b5bdc58be0444e164b1bd9feed02d.json b/core/lib/dal/.sqlx/query-c9a842d04e8b225e43f07f76541dc766262b5bdc58be0444e164b1bd9feed02d.json new file mode 100644 index 
00000000000..fb28539ccdf --- /dev/null +++ b/core/lib/dal/.sqlx/query-c9a842d04e8b225e43f07f76541dc766262b5bdc58be0444e164b1bd9feed02d.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n soft AS (\n SELECT\n pruned_l1_batch,\n pruned_miniblock\n FROM\n pruning_log\n WHERE\n type = 'Soft'\n ORDER BY\n pruned_l1_batch DESC\n LIMIT\n 1\n ),\n \n hard AS (\n SELECT\n pruned_l1_batch,\n pruned_miniblock\n FROM\n pruning_log\n WHERE\n type = 'Hard'\n ORDER BY\n pruned_l1_batch DESC\n LIMIT\n 1\n )\n \n SELECT\n soft.pruned_l1_batch AS last_soft_pruned_l1_batch,\n soft.pruned_miniblock AS last_soft_pruned_miniblock,\n hard.pruned_l1_batch AS last_hard_pruned_l1_batch,\n hard.pruned_miniblock AS last_hard_pruned_miniblock\n FROM\n soft\n FULL JOIN hard ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_soft_pruned_l1_batch", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "last_soft_pruned_miniblock", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "last_hard_pruned_l1_batch", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "last_hard_pruned_miniblock", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true, + true, + true + ] + }, + "hash": "c9a842d04e8b225e43f07f76541dc766262b5bdc58be0444e164b1bd9feed02d" +} diff --git a/core/lib/dal/.sqlx/query-ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05.json b/core/lib/dal/.sqlx/query-ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05.json new file mode 100644 index 00000000000..c234cbe4235 --- /dev/null +++ b/core/lib/dal/.sqlx/query-ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n FALSE,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n JSONB_BUILD_OBJECT(\n 'gas_used',\n $16::BIGINT,\n 'storage_writes',\n $17::INT,\n 'contracts_used',\n $18::INT\n ),\n $19,\n NOW(),\n NOW()\n )\n ON CONFLICT (initiator_address, nonce) DO\n UPDATE\n SET\n hash = $1,\n signature = $4,\n gas_limit = $5,\n max_fee_per_gas = $6,\n max_priority_fee_per_gas = $7,\n gas_per_pubdata_limit = $8,\n input = $9,\n data = $10,\n tx_format = $11,\n contract_address = $12,\n value = $13,\n paymaster = $14,\n paymaster_input = $15,\n execution_info\n = JSONB_BUILD_OBJECT(\n 'gas_used',\n $16::BIGINT,\n 'storage_writes',\n $17::INT,\n 'contracts_used',\n $18::INT\n ),\n in_mempool = FALSE,\n received_at = $19,\n created_at = NOW(),\n updated_at = NOW(),\n error = NULL\n WHERE\n transactions.is_priority = FALSE\n AND transactions.miniblock_number IS NULL\n RETURNING\n (\n SELECT\n hash\n FROM\n transactions\n WHERE\n transactions.initiator_address = $2\n AND transactions.nonce = $3\n ) IS NOT NULL AS \"is_replaced!\"\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "is_replaced!", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Int8", + "Bytea", + "Numeric", + "Numeric", + "Numeric", + "Numeric", + "Bytea", + "Jsonb", + "Int4", + "Bytea", + "Numeric", + "Bytea", + "Bytea", + "Int8", + "Int4", + "Int4", + 
"Timestamp" + ] + }, + "nullable": [ + null + ] + }, + "hash": "ca428423f278feea2942fd2c78fc5223c9d5e2e42d89bb456d24c601edc06a05" +} diff --git a/core/lib/dal/.sqlx/query-cf8cff1e6d277088519ef7dfbdb1885d320c146cd8fad77c107ef12fa38e6c98.json b/core/lib/dal/.sqlx/query-cf8cff1e6d277088519ef7dfbdb1885d320c146cd8fad77c107ef12fa38e6c98.json deleted file mode 100644 index 1bd791a3f82..00000000000 --- a/core/lib/dal/.sqlx/query-cf8cff1e6d277088519ef7dfbdb1885d320c146cd8fad77c107ef12fa38e6c98.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n nonce,\n signature,\n gas_limit,\n max_fee_per_gas,\n max_priority_fee_per_gas,\n gas_per_pubdata_limit,\n input,\n data,\n tx_format,\n contract_address,\n value,\n paymaster,\n paymaster_input,\n execution_info,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n FALSE,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n JSONB_BUILD_OBJECT('gas_used', $16::BIGINT, 'storage_writes', $17::INT, 'contracts_used', $18::INT),\n $19,\n NOW(),\n NOW()\n )\n ON CONFLICT (initiator_address, nonce) DO\n UPDATE\n SET\n hash = $1,\n signature = $4,\n gas_limit = $5,\n max_fee_per_gas = $6,\n max_priority_fee_per_gas = $7,\n gas_per_pubdata_limit = $8,\n input = $9,\n data = $10,\n tx_format = $11,\n contract_address = $12,\n value = $13,\n paymaster = $14,\n paymaster_input = $15,\n execution_info = JSONB_BUILD_OBJECT('gas_used', $16::BIGINT, 'storage_writes', $17::INT, 'contracts_used', $18::INT),\n in_mempool = FALSE,\n received_at = $19,\n created_at = NOW(),\n updated_at = NOW(),\n error = NULL\n WHERE\n transactions.is_priority = FALSE\n AND transactions.miniblock_number IS NULL\n RETURNING\n (\n SELECT\n hash\n FROM\n transactions\n WHERE\n transactions.initiator_address = $2\n AND transactions.nonce = $3\n ) IS NOT NULL AS \"is_replaced!\"\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "is_replaced!", - "type_info": "Bool" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Int8", - "Bytea", - "Numeric", - "Numeric", - "Numeric", - "Numeric", - "Bytea", - "Jsonb", - "Int4", - "Bytea", - "Numeric", - "Bytea", - "Bytea", - "Int8", - "Int4", - "Int4", - "Timestamp" - ] - }, - "nullable": [ - null - ] - }, - "hash": "cf8cff1e6d277088519ef7dfbdb1885d320c146cd8fad77c107ef12fa38e6c98" -} diff --git a/core/lib/dal/.sqlx/query-0385576f1fb3836fc04a6cde3e92c03e1de8292eb0ea1e026ba1b32a3745c261.json b/core/lib/dal/.sqlx/query-cff500ffe0b6586dd96eb2d3620dd6542fd54ceac41dd41b995a8409c5ab046c.json similarity index 55% rename from core/lib/dal/.sqlx/query-0385576f1fb3836fc04a6cde3e92c03e1de8292eb0ea1e026ba1b32a3745c261.json rename to core/lib/dal/.sqlx/query-cff500ffe0b6586dd96eb2d3620dd6542fd54ceac41dd41b995a8409c5ab046c.json index a98cbb18034..ec8bfeafc78 100644 --- a/core/lib/dal/.sqlx/query-0385576f1fb3836fc04a6cde3e92c03e1de8292eb0ea1e026ba1b32a3745c261.json +++ b/core/lib/dal/.sqlx/query-cff500ffe0b6586dd96eb2d3620dd6542fd54ceac41dd41b995a8409c5ab046c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n storage_logs.hashed_key AS \"hashed_key!\",\n storage_logs.value AS \"value!\",\n storage_logs.miniblock_number AS \"miniblock_number!\",\n initial_writes.l1_batch_number AS \"l1_batch_number!\",\n initial_writes.index\n FROM\n (\n SELECT\n hashed_key,\n MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op\n FROM\n storage_logs\n WHERE\n miniblock_number <= $1\n 
AND hashed_key >= $3\n AND hashed_key <= $4\n GROUP BY\n hashed_key\n ORDER BY\n hashed_key\n ) AS keys\n INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key\n AND storage_logs.miniblock_number = keys.op[1]\n AND storage_logs.operation_number = keys.op[2]\n INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key\n WHERE\n initial_writes.l1_batch_number <= $2\n ", + "query": "\n SELECT\n storage_logs.hashed_key AS \"hashed_key!\",\n storage_logs.value AS \"value!\",\n storage_logs.miniblock_number AS \"miniblock_number!\",\n initial_writes.l1_batch_number AS \"l1_batch_number!\",\n initial_writes.index\n FROM\n (\n SELECT\n hashed_key,\n MAX(ARRAY[miniblock_number, operation_number]::INT []) AS op\n FROM\n storage_logs\n WHERE\n miniblock_number <= $1\n AND hashed_key >= $3\n AND hashed_key <= $4\n GROUP BY\n hashed_key\n ORDER BY\n hashed_key\n ) AS keys\n INNER JOIN storage_logs\n ON\n keys.hashed_key = storage_logs.hashed_key\n AND storage_logs.miniblock_number = keys.op[1]\n AND storage_logs.operation_number = keys.op[2]\n INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key\n WHERE\n initial_writes.l1_batch_number <= $2\n ", "describe": { "columns": [ { @@ -45,5 +45,5 @@ false ] }, - "hash": "0385576f1fb3836fc04a6cde3e92c03e1de8292eb0ea1e026ba1b32a3745c261" + "hash": "cff500ffe0b6586dd96eb2d3620dd6542fd54ceac41dd41b995a8409c5ab046c" } diff --git a/core/lib/dal/.sqlx/query-d0db89030ed6979ecba6786aef1ac9ff144a09e68db07e4ca1c1de105765711f.json b/core/lib/dal/.sqlx/query-d0db89030ed6979ecba6786aef1ac9ff144a09e68db07e4ca1c1de105765711f.json new file mode 100644 index 00000000000..a4f55f53f8c --- /dev/null +++ b/core/lib/dal/.sqlx/query-d0db89030ed6979ecba6786aef1ac9ff144a09e68db07e4ca1c1de105765711f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n compiler_versions (version, compiler, created_at, updated_at)\n SELECT\n u.version,\n $2,\n NOW(),\n NOW()\n FROM\n UNNEST($1::TEXT []) AS u (version)\n ON CONFLICT (version, compiler) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "TextArray", + "Text" + ] + }, + "nullable": [] + }, + "hash": "d0db89030ed6979ecba6786aef1ac9ff144a09e68db07e4ca1c1de105765711f" +} diff --git a/core/lib/dal/.sqlx/query-d1490262c7a2a583928a611ae69eb0539849f7fd590712103db7d45d119caca2.json b/core/lib/dal/.sqlx/query-d1490262c7a2a583928a611ae69eb0539849f7fd590712103db7d45d119caca2.json new file mode 100644 index 00000000000..e3b05aff72a --- /dev/null +++ b/core/lib/dal/.sqlx/query-d1490262c7a2a583928a611ae69eb0539849f7fd590712103db7d45d119caca2.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n call_traces (tx_hash, call_trace)\n SELECT\n u.tx_hash,\n u.call_trace\n FROM\n UNNEST($1::bytea [], $2::bytea []) AS u (tx_hash, call_trace)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "ByteaArray", + "ByteaArray" + ] + }, + "nullable": [] + }, + "hash": "d1490262c7a2a583928a611ae69eb0539849f7fd590712103db7d45d119caca2" +} diff --git a/core/lib/dal/.sqlx/query-d3abe74360732659a1a35a176679411ba30ac67080552279d821d66b1b804db3.json b/core/lib/dal/.sqlx/query-d3abe74360732659a1a35a176679411ba30ac67080552279d821d66b1b804db3.json deleted file mode 100644 index 2b5eeec2e63..00000000000 --- a/core/lib/dal/.sqlx/query-d3abe74360732659a1a35a176679411ba30ac67080552279d821d66b1b804db3.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n vm_runner_protective_reads 
(l1_batch_number, created_at, updated_at, processing_started_at)\n VALUES\n ($1, NOW(), NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n updated_at = NOW(),\n processing_started_at = NOW()\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "d3abe74360732659a1a35a176679411ba30ac67080552279d821d66b1b804db3" -} diff --git a/core/lib/dal/.sqlx/query-ba2343a38e37d104786f9276d91f67d2ef1428c61ae84003c9b52b03204d1f0a.json b/core/lib/dal/.sqlx/query-d43ae07cf1539826363573d121392687cfe890fe89c748f836bad6eb0cc0fcf5.json similarity index 70% rename from core/lib/dal/.sqlx/query-ba2343a38e37d104786f9276d91f67d2ef1428c61ae84003c9b52b03204d1f0a.json rename to core/lib/dal/.sqlx/query-d43ae07cf1539826363573d121392687cfe890fe89c748f836bad6eb0cc0fcf5.json index ff608265118..650b9f8d439 100644 --- a/core/lib/dal/.sqlx/query-ba2343a38e37d104786f9276d91f67d2ef1428c61ae84003c9b52b03204d1f0a.json +++ b/core/lib/dal/.sqlx/query-d43ae07cf1539826363573d121392687cfe890fe89c748f836bad6eb0cc0fcf5.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n DELETE FROM transactions\n WHERE\n in_mempool = TRUE\n AND initiator_address = ANY ($1)\n ", + "query": "\n DELETE FROM transactions\n WHERE\n in_mempool = TRUE\n AND initiator_address = ANY($1)\n ", "describe": { "columns": [], "parameters": { @@ -10,5 +10,5 @@ }, "nullable": [] }, - "hash": "ba2343a38e37d104786f9276d91f67d2ef1428c61ae84003c9b52b03204d1f0a" + "hash": "d43ae07cf1539826363573d121392687cfe890fe89c748f836bad6eb0cc0fcf5" } diff --git a/core/lib/dal/.sqlx/query-d47574939bdfb157a621f86a3ea3a61bf579a0dbeb01c545fa61b10a0047297b.json b/core/lib/dal/.sqlx/query-d47574939bdfb157a621f86a3ea3a61bf579a0dbeb01c545fa61b10a0047297b.json new file mode 100644 index 00000000000..0915ef2b195 --- /dev/null +++ b/core/lib/dal/.sqlx/query-d47574939bdfb157a621f86a3ea3a61bf579a0dbeb01c545fa61b10a0047297b.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n pruning_log (\n pruned_l1_batch,\n pruned_miniblock,\n type,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, $3, NOW(), NOW())\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + { + "Custom": { + "name": "prune_type", + "kind": { + "Enum": [ + "Soft", + "Hard" + ] + } + } + } + ] + }, + "nullable": [] + }, + "hash": "d47574939bdfb157a621f86a3ea3a61bf579a0dbeb01c545fa61b10a0047297b" +} diff --git a/core/lib/dal/.sqlx/query-0fef49a649d20c9fd263c1dfa40daa9b94d398c635c37746736e98f1f18fcca7.json b/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json similarity index 89% rename from core/lib/dal/.sqlx/query-0fef49a649d20c9fd263c1dfa40daa9b94d398c635c37746736e98f1f18fcca7.json rename to core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json index adfd1919db5..fa1a5d6741a 100644 --- a/core/lib/dal/.sqlx/query-0fef49a649d20c9fd263c1dfa40daa9b94d398c635c37746736e98f1f18fcca7.json +++ b/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n status = $2\n OR (\n status = $1\n AND processing_started_at < NOW() - $4::INTERVAL\n )\n OR (\n status = $3\n AND 
attempts < $5\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_verifier_input_producer_jobs.l1_batch_number\n ", + "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n status = $2\n OR (\n status = $1\n AND processing_started_at < NOW() - $4::INTERVAL\n )\n OR (\n status = $3\n AND attempts < $5\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_verifier_input_producer_jobs.l1_batch_number\n ", "describe": { "columns": [ { @@ -61,5 +61,5 @@ false ] }, - "hash": "0fef49a649d20c9fd263c1dfa40daa9b94d398c635c37746736e98f1f18fcca7" + "hash": "d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860" } diff --git a/core/lib/dal/.sqlx/query-ddfb7ba0592f1e6714a4496cee18e7265828e3ae78e391ef235b700f0adfe5e4.json b/core/lib/dal/.sqlx/query-ddfb7ba0592f1e6714a4496cee18e7265828e3ae78e391ef235b700f0adfe5e4.json new file mode 100644 index 00000000000..472c0cb2695 --- /dev/null +++ b/core/lib/dal/.sqlx/query-ddfb7ba0592f1e6714a4496cee18e7265828e3ae78e391ef235b700f0adfe5e4.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE proof_generation_details\n SET\n status = 'picked_by_prover',\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n proof_generation_details\n LEFT JOIN l1_batches ON l1_batch_number = l1_batches.number\n WHERE\n (\n vm_run_data_blob_url IS NOT NULL\n AND proof_gen_data_blob_url IS NOT NULL\n AND l1_batches.hash IS NOT NULL\n AND l1_batches.aux_data_hash IS NOT NULL\n AND l1_batches.meta_parameters_hash IS NOT NULL\n AND status = 'unpicked'\n )\n OR (\n status = 'picked_by_prover'\n AND prover_taken_at < NOW() - $1::INTERVAL\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n )\n RETURNING\n proof_generation_details.l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Interval" + ] + }, + "nullable": [ + false + ] + }, + "hash": "ddfb7ba0592f1e6714a4496cee18e7265828e3ae78e391ef235b700f0adfe5e4" +} diff --git a/core/lib/dal/.sqlx/query-5f7034d22251a893249208c5ff8fa5c8bf46bc0cea4ac2b25ecde236c30ae32d.json b/core/lib/dal/.sqlx/query-de784f549ceda62e37459519b52e7be93b12319c041751bc4138fa64258ecd5c.json similarity index 87% rename from core/lib/dal/.sqlx/query-5f7034d22251a893249208c5ff8fa5c8bf46bc0cea4ac2b25ecde236c30ae32d.json rename to core/lib/dal/.sqlx/query-de784f549ceda62e37459519b52e7be93b12319c041751bc4138fa64258ecd5c.json index 0ae6e3bff4b..9cc6312f70e 100644 --- a/core/lib/dal/.sqlx/query-5f7034d22251a893249208c5ff8fa5c8bf46bc0cea4ac2b25ecde236c30ae32d.json +++ b/core/lib/dal/.sqlx/query-de784f549ceda62e37459519b52e7be93b12319c041751bc4138fa64258ecd5c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_address,\n l2_address,\n NAME,\n symbol,\n decimals\n FROM\n tokens\n WHERE\n well_known = TRUE\n ORDER BY\n symbol\n ", + "query": "\n SELECT\n l1_address,\n l2_address,\n name,\n symbol,\n decimals\n FROM\n tokens\n WHERE\n well_known = TRUE\n ORDER BY\n symbol\n ", "describe": { "columns": [ { @@ -40,5 +40,5 @@ false ] }, - "hash": "5f7034d22251a893249208c5ff8fa5c8bf46bc0cea4ac2b25ecde236c30ae32d" + "hash": 
"de784f549ceda62e37459519b52e7be93b12319c041751bc4138fa64258ecd5c" } diff --git a/core/lib/dal/.sqlx/query-e7d0b7c132b80195dae7cbf50355eb148aa6d1dbd69bf3fe48522101a6ea0bcb.json b/core/lib/dal/.sqlx/query-e7d0b7c132b80195dae7cbf50355eb148aa6d1dbd69bf3fe48522101a6ea0bcb.json deleted file mode 100644 index 576484cd420..00000000000 --- a/core/lib/dal/.sqlx/query-e7d0b7c132b80195dae7cbf50355eb148aa6d1dbd69bf3fe48522101a6ea0bcb.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n )\n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_ready_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - true - ] - }, - "hash": "e7d0b7c132b80195dae7cbf50355eb148aa6d1dbd69bf3fe48522101a6ea0bcb" -} diff --git a/core/lib/dal/.sqlx/query-ead71ae66fe4685132c03a973612fe98364aa684180dd6fbf540bb0b68d96a64.json b/core/lib/dal/.sqlx/query-ead71ae66fe4685132c03a973612fe98364aa684180dd6fbf540bb0b68d96a64.json deleted file mode 100644 index 02cd6733e81..00000000000 --- a/core/lib/dal/.sqlx/query-ead71ae66fe4685132c03a973612fe98364aa684180dd6fbf540bb0b68d96a64.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n new_logs AS MATERIALIZED (\n SELECT DISTINCT\n ON (hashed_key) hashed_key,\n miniblock_number,\n operation_number\n FROM\n storage_logs\n WHERE\n miniblock_number BETWEEN $1 AND $2\n ORDER BY\n hashed_key,\n miniblock_number DESC,\n operation_number DESC\n )\n DELETE FROM storage_logs USING new_logs\n WHERE\n storage_logs.hashed_key = new_logs.hashed_key\n AND storage_logs.miniblock_number <= $2\n AND (storage_logs.miniblock_number, storage_logs.operation_number) < (new_logs.miniblock_number, new_logs.operation_number)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "ead71ae66fe4685132c03a973612fe98364aa684180dd6fbf540bb0b68d96a64" -} diff --git a/core/lib/dal/.sqlx/query-0f1856e55a370280a078d048f09e2d457914c737660b37e9f66b576bbc9a7904.json b/core/lib/dal/.sqlx/query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json similarity index 91% rename from core/lib/dal/.sqlx/query-0f1856e55a370280a078d048f09e2d457914c737660b37e9f66b576bbc9a7904.json rename to core/lib/dal/.sqlx/query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json index 498e839a63d..2419082dcc2 100644 --- a/core/lib/dal/.sqlx/query-0f1856e55a370280a078d048f09e2d457914c737660b37e9f66b576bbc9a7904.json +++ b/core/lib/dal/.sqlx/query-eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n transactions.*\n FROM\n transactions\n INNER JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ORDER BY\n miniblock_number,\n index_in_block\n ", + "query": "\n SELECT\n transactions.*\n FROM\n transactions\n INNER JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ORDER BY\n miniblock_number,\n index_in_block\n ", 
"describe": { "columns": [ { @@ -229,5 +229,5 @@ true ] }, - "hash": "0f1856e55a370280a078d048f09e2d457914c737660b37e9f66b576bbc9a7904" + "hash": "eb27e1b82b8ecbb9711c417888564a8e245ecee4866264d38146938595b07f37" } diff --git a/core/lib/dal/.sqlx/query-cea9fe027a6a0ada827f23b48ac32432295b2f7ee40bf13522a6edbd236f1970.json b/core/lib/dal/.sqlx/query-ec2942fecedb7ee47279bf0418eb020b1036f002312271eec76ec3d40f77eb71.json similarity index 86% rename from core/lib/dal/.sqlx/query-cea9fe027a6a0ada827f23b48ac32432295b2f7ee40bf13522a6edbd236f1970.json rename to core/lib/dal/.sqlx/query-ec2942fecedb7ee47279bf0418eb020b1036f002312271eec76ec3d40f77eb71.json index b1eae968a89..be93834b26c 100644 --- a/core/lib/dal/.sqlx/query-cea9fe027a6a0ada827f23b48ac32432295b2f7ee40bf13522a6edbd236f1970.json +++ b/core/lib/dal/.sqlx/query-ec2942fecedb7ee47279bf0418eb020b1036f002312271eec76ec3d40f77eb71.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n u.hashed_key AS \"hashed_key!\",\n (\n SELECT\n value\n FROM\n storage_logs\n WHERE\n hashed_key = u.hashed_key\n AND miniblock_number <= $2\n ORDER BY\n miniblock_number DESC,\n operation_number DESC\n LIMIT\n 1\n ) AS \"value?\"\n FROM\n UNNEST($1::bytea[]) AS u (hashed_key)\n ", + "query": "\n SELECT\n u.hashed_key AS \"hashed_key!\",\n (\n SELECT\n value\n FROM\n storage_logs\n WHERE\n hashed_key = u.hashed_key\n AND miniblock_number <= $2\n ORDER BY\n miniblock_number DESC,\n operation_number DESC\n LIMIT\n 1\n ) AS \"value?\"\n FROM\n UNNEST($1::bytea []) AS u (hashed_key)\n ", "describe": { "columns": [ { @@ -25,5 +25,5 @@ null ] }, - "hash": "cea9fe027a6a0ada827f23b48ac32432295b2f7ee40bf13522a6edbd236f1970" + "hash": "ec2942fecedb7ee47279bf0418eb020b1036f002312271eec76ec3d40f77eb71" } diff --git a/core/lib/dal/.sqlx/query-4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd.json b/core/lib/dal/.sqlx/query-ee50258050e7a08be67335cddf258ed14ea4ed363db1501e7268909bc18f2043.json similarity index 78% rename from core/lib/dal/.sqlx/query-4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd.json rename to core/lib/dal/.sqlx/query-ee50258050e7a08be67335cddf258ed14ea4ed363db1501e7268909bc18f2043.json index 4ea4aea2ea6..733a7d36eab 100644 --- a/core/lib/dal/.sqlx/query-4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd.json +++ b/core/lib/dal/.sqlx/query-ee50258050e7a08be67335cddf258ed14ea4ed363db1501e7268909bc18f2043.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.hash AS \"block_hash\",\n miniblocks.number AS \"block_number\",\n prev_miniblock.hash AS \"parent_hash?\",\n miniblocks.timestamp AS \"block_timestamp\",\n miniblocks.base_fee_per_gas AS \"base_fee_per_gas\",\n miniblocks.gas_limit AS \"block_gas_limit?\",\n miniblocks.logs_bloom AS \"block_logs_bloom?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"transaction_refunded_gas?\"\n FROM\n miniblocks\n LEFT JOIN miniblocks prev_miniblock ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number > $1\n ORDER BY\n miniblocks.number ASC,\n transactions.index_in_block ASC\n ", + "query": "\n SELECT\n miniblocks.hash AS \"block_hash\",\n miniblocks.number AS \"block_number\",\n prev_miniblock.hash AS \"parent_hash?\",\n miniblocks.timestamp AS \"block_timestamp\",\n miniblocks.base_fee_per_gas AS \"base_fee_per_gas\",\n miniblocks.gas_limit AS \"block_gas_limit?\",\n miniblocks.logs_bloom 
AS \"block_logs_bloom?\",\n transactions.gas_limit AS \"transaction_gas_limit?\",\n transactions.refunded_gas AS \"transaction_refunded_gas?\"\n FROM\n miniblocks\n LEFT JOIN\n miniblocks prev_miniblock\n ON prev_miniblock.number = miniblocks.number - 1\n LEFT JOIN transactions ON transactions.miniblock_number = miniblocks.number\n WHERE\n miniblocks.number > $1\n ORDER BY\n miniblocks.number ASC,\n transactions.index_in_block ASC\n ", "describe": { "columns": [ { @@ -66,5 +66,5 @@ false ] }, - "hash": "4cfdfb32d808e33779ea4566e9cf9bb44a0952d475c3e6f207443b72ebddb0cd" + "hash": "ee50258050e7a08be67335cddf258ed14ea4ed363db1501e7268909bc18f2043" } diff --git a/core/lib/dal/.sqlx/query-f012d0922265269746396dac8f25ff66f2c3b2b83d45360818a8782e56aa3d66.json b/core/lib/dal/.sqlx/query-f012d0922265269746396dac8f25ff66f2c3b2b83d45360818a8782e56aa3d66.json deleted file mode 100644 index 9815b5d3895..00000000000 --- a/core/lib/dal/.sqlx/query-f012d0922265269746396dac8f25ff66f2c3b2b83d45360818a8782e56aa3d66.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n sl AS (\n SELECT\n (\n SELECT\n ARRAY[hashed_key, value] AS kv\n FROM\n storage_logs\n WHERE\n storage_logs.miniblock_number = $1\n AND storage_logs.hashed_key >= u.start_key\n AND storage_logs.hashed_key <= u.end_key\n ORDER BY\n storage_logs.hashed_key\n LIMIT\n 1\n )\n FROM\n UNNEST($2::bytea[], $3::bytea[]) AS u (start_key, end_key)\n )\n SELECT\n sl.kv[1] AS \"hashed_key?\",\n sl.kv[2] AS \"value?\",\n initial_writes.index\n FROM\n sl\n LEFT OUTER JOIN initial_writes ON initial_writes.hashed_key = sl.kv[1]\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "hashed_key?", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "value?", - "type_info": "Bytea" - }, - { - "ordinal": 2, - "name": "index", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "ByteaArray", - "ByteaArray" - ] - }, - "nullable": [ - null, - null, - true - ] - }, - "hash": "f012d0922265269746396dac8f25ff66f2c3b2b83d45360818a8782e56aa3d66" -} diff --git a/core/lib/dal/.sqlx/query-f3a049c7eb0d8903737f02fa154b50ebc862a133734382bad8fdfa3a93d8b743.json b/core/lib/dal/.sqlx/query-f3a049c7eb0d8903737f02fa154b50ebc862a133734382bad8fdfa3a93d8b743.json deleted file mode 100644 index 6869b4f50b1..00000000000 --- a/core/lib/dal/.sqlx/query-f3a049c7eb0d8903737f02fa154b50ebc862a133734382bad8fdfa3a93d8b743.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n upgrade_id,\n contract_address,\n l1_block_number,\n value,\n paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n TRUE,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n NOW(),\n NOW()\n )\n ON CONFLICT (hash) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Numeric", - "Numeric", - "Numeric", - "Jsonb", - "Int4", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Bytea", - "Int4", - "Numeric", - "Bytea", - "Timestamp" - ] - }, - "nullable": [] - }, - "hash": "f3a049c7eb0d8903737f02fa154b50ebc862a133734382bad8fdfa3a93d8b743" -} diff --git a/core/lib/dal/.sqlx/query-f90a87a0c8a3ad95d722fbcf1b05371292d0bfbb23eaa2db163ef6fd1e1374d9.json 
b/core/lib/dal/.sqlx/query-f90a87a0c8a3ad95d722fbcf1b05371292d0bfbb23eaa2db163ef6fd1e1374d9.json new file mode 100644 index 00000000000..5235d03dc3a --- /dev/null +++ b/core/lib/dal/.sqlx/query-f90a87a0c8a3ad95d722fbcf1b05371292d0bfbb23eaa2db163ef6fd1e1374d9.json @@ -0,0 +1,31 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n contract_verification_requests (\n contract_address,\n source_code,\n contract_name,\n zk_compiler_version,\n compiler_version,\n optimization_used,\n optimizer_mode,\n constructor_arguments,\n is_system,\n force_evmla,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, 'queued', NOW(), NOW())\n RETURNING\n id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Text", + "Text", + "Text", + "Text", + "Bool", + "Text", + "Bytea", + "Bool", + "Bool" + ] + }, + "nullable": [ + false + ] + }, + "hash": "f90a87a0c8a3ad95d722fbcf1b05371292d0bfbb23eaa2db163ef6fd1e1374d9" +} diff --git a/core/lib/dal/.sqlx/query-fce6fff384875df689dc422153a2745b90c68ebec5706b106ef69cb61333466e.json b/core/lib/dal/.sqlx/query-fce6fff384875df689dc422153a2745b90c68ebec5706b106ef69cb61333466e.json new file mode 100644 index 00000000000..8c4129fec07 --- /dev/null +++ b/core/lib/dal/.sqlx/query-fce6fff384875df689dc422153a2745b90c68ebec5706b106ef69cb61333466e.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n upgrade_id,\n contract_address,\n l1_block_number,\n value,\n paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n received_at,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n TRUE,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n NOW(),\n NOW()\n )\n ON CONFLICT (hash) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Numeric", + "Numeric", + "Numeric", + "Jsonb", + "Int4", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Bytea", + "Int4", + "Numeric", + "Bytea", + "Timestamp" + ] + }, + "nullable": [] + }, + "hash": "fce6fff384875df689dc422153a2745b90c68ebec5706b106ef69cb61333466e" +} diff --git a/core/lib/dal/.sqlx/query-0458bb8eb595e470b3283847d6c89fd9567bb72dd8b1c604503050b8dab91521.json b/core/lib/dal/.sqlx/query-fd4b42f90ffe24f76a35a005460cc3cec1dbc3e9af406bb900c3ec52590978bc.json similarity index 73% rename from core/lib/dal/.sqlx/query-0458bb8eb595e470b3283847d6c89fd9567bb72dd8b1c604503050b8dab91521.json rename to core/lib/dal/.sqlx/query-fd4b42f90ffe24f76a35a005460cc3cec1dbc3e9af406bb900c3ec52590978bc.json index c1400f955b9..271924b8b6c 100644 --- a/core/lib/dal/.sqlx/query-0458bb8eb595e470b3283847d6c89fd9567bb72dd8b1c604503050b8dab91521.json +++ b/core/lib/dal/.sqlx/query-fd4b42f90ffe24f76a35a005460cc3cec1dbc3e9af406bb900c3ec52590978bc.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n PG_LAST_WAL_RECEIVE_LSN() = PG_LAST_WAL_REPLAY_LSN() AS synced,\n EXTRACT(\n seconds\n FROM\n NOW() - PG_LAST_XACT_REPLAY_TIMESTAMP()\n )::INT AS LAG\n ", + "query": "\n SELECT\n PG_LAST_WAL_RECEIVE_LSN() = PG_LAST_WAL_REPLAY_LSN() AS synced,\n EXTRACT(\n seconds\n FROM\n NOW() - PG_LAST_XACT_REPLAY_TIMESTAMP()\n )::INT AS lag\n ", "describe": { "columns": [ { @@ -22,5 +22,5 @@ null ] }, - "hash": 
"0458bb8eb595e470b3283847d6c89fd9567bb72dd8b1c604503050b8dab91521" + "hash": "fd4b42f90ffe24f76a35a005460cc3cec1dbc3e9af406bb900c3ec52590978bc" } diff --git a/core/lib/dal/.sqlx/query-99d9ee2a0d0450acefa0d9b6c031e30606fddf6631c859ab03819ec476bcf005.json b/core/lib/dal/.sqlx/query-fd8aaef58e1b9bf4f389fb2943adea2b254fed4b5437ee2968707dffe6e6869d.json similarity index 75% rename from core/lib/dal/.sqlx/query-99d9ee2a0d0450acefa0d9b6c031e30606fddf6631c859ab03819ec476bcf005.json rename to core/lib/dal/.sqlx/query-fd8aaef58e1b9bf4f389fb2943adea2b254fed4b5437ee2968707dffe6e6869d.json index ab00c7b26ce..198ab2d802b 100644 --- a/core/lib/dal/.sqlx/query-99d9ee2a0d0450acefa0d9b6c031e30606fddf6631c859ab03819ec476bcf005.json +++ b/core/lib/dal/.sqlx/query-fd8aaef58e1b9bf4f389fb2943adea2b254fed4b5437ee2968707dffe6e6869d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n hashed_key\n FROM\n initial_writes\n WHERE\n hashed_key = ANY ($1)\n ", + "query": "\n SELECT\n hashed_key\n FROM\n initial_writes\n WHERE\n hashed_key = ANY($1)\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "99d9ee2a0d0450acefa0d9b6c031e30606fddf6631c859ab03819ec476bcf005" + "hash": "fd8aaef58e1b9bf4f389fb2943adea2b254fed4b5437ee2968707dffe6e6869d" } diff --git a/core/lib/dal/.sqlx/query-9f29aa31d4698031e9f3fe2eb273724dcce382936af0d4c386143399995cd325.json b/core/lib/dal/.sqlx/query-febd2b039a686631e3c943882864f6cb289fdec08f4db49ee40ba67371df1927.json similarity index 59% rename from core/lib/dal/.sqlx/query-9f29aa31d4698031e9f3fe2eb273724dcce382936af0d4c386143399995cd325.json rename to core/lib/dal/.sqlx/query-febd2b039a686631e3c943882864f6cb289fdec08f4db49ee40ba67371df1927.json index 2e1bf7c3e61..ff702280a63 100644 --- a/core/lib/dal/.sqlx/query-9f29aa31d4698031e9f3fe2eb273724dcce382936af0d4c386143399995cd325.json +++ b/core/lib/dal/.sqlx/query-febd2b039a686631e3c943882864f6cb289fdec08f4db49ee40ba67371df1927.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n storage_logs.address AS \"address!\",\n storage_logs.key AS \"key!\",\n storage_logs.value AS \"value!\",\n storage_logs.miniblock_number AS \"miniblock_number!\",\n initial_writes.l1_batch_number AS \"l1_batch_number!\",\n initial_writes.index\n FROM\n (\n SELECT\n hashed_key,\n MAX(ARRAY[miniblock_number, operation_number]::INT[]) AS op\n FROM\n storage_logs\n WHERE\n miniblock_number <= $1\n AND hashed_key >= $3\n AND hashed_key <= $4\n GROUP BY\n hashed_key\n ORDER BY\n hashed_key\n ) AS keys\n INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key\n AND storage_logs.miniblock_number = keys.op[1]\n AND storage_logs.operation_number = keys.op[2]\n INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key\n WHERE\n initial_writes.l1_batch_number <= $2\n ", + "query": "\n SELECT\n storage_logs.address AS \"address!\",\n storage_logs.key AS \"key!\",\n storage_logs.value AS \"value!\",\n storage_logs.miniblock_number AS \"miniblock_number!\",\n initial_writes.l1_batch_number AS \"l1_batch_number!\",\n initial_writes.index\n FROM\n (\n SELECT\n hashed_key,\n MAX(ARRAY[miniblock_number, operation_number]::INT []) AS op\n FROM\n storage_logs\n WHERE\n miniblock_number <= $1\n AND hashed_key >= $3\n AND hashed_key <= $4\n GROUP BY\n hashed_key\n ORDER BY\n hashed_key\n ) AS keys\n INNER JOIN storage_logs\n ON\n keys.hashed_key = storage_logs.hashed_key\n AND storage_logs.miniblock_number = keys.op[1]\n AND storage_logs.operation_number = keys.op[2]\n INNER JOIN initial_writes ON 
keys.hashed_key = initial_writes.hashed_key\n WHERE\n initial_writes.l1_batch_number <= $2\n ", "describe": { "columns": [ { @@ -51,5 +51,5 @@ false ] }, - "hash": "9f29aa31d4698031e9f3fe2eb273724dcce382936af0d4c386143399995cd325" + "hash": "febd2b039a686631e3c943882864f6cb289fdec08f4db49ee40ba67371df1927" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-41af30620f8a1f20b8a6c46be162601d35fd2881ac1fd070f0f1a8add4bc388d.json b/prover/crates/lib/prover_dal/.sqlx/query-095c459e05aa4c7fad0d1fa74b2d52314178f8ab2aea7efb69ae18fbf40e8994.json similarity index 80% rename from prover/crates/lib/prover_dal/.sqlx/query-41af30620f8a1f20b8a6c46be162601d35fd2881ac1fd070f0f1a8add4bc388d.json rename to prover/crates/lib/prover_dal/.sqlx/query-095c459e05aa4c7fad0d1fa74b2d52314178f8ab2aea7efb69ae18fbf40e8994.json index b65633a904e..435f6cd5a3b 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-41af30620f8a1f20b8a6c46be162601d35fd2881ac1fd070f0f1a8add4bc388d.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-095c459e05aa4c7fad0d1fa74b2d52314178f8ab2aea7efb69ae18fbf40e8994.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n recursion_tip_witness_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n recursion_tip_witness_jobs_fri.l1_batch_number,\n recursion_tip_witness_jobs_fri.number_of_final_node_jobs\n ", + "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n recursion_tip_witness_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n recursion_tip_witness_jobs_fri.l1_batch_number,\n recursion_tip_witness_jobs_fri.number_of_final_node_jobs\n ", "describe": { "columns": [ { @@ -26,5 +26,5 @@ false ] }, - "hash": "41af30620f8a1f20b8a6c46be162601d35fd2881ac1fd070f0f1a8add4bc388d" + "hash": "095c459e05aa4c7fad0d1fa74b2d52314178f8ab2aea7efb69ae18fbf40e8994" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-0a1ec4690d6b4a67d6ad16badcbf113a19feb73c4cf9876855523499998b99c0.json b/prover/crates/lib/prover_dal/.sqlx/query-0a1ec4690d6b4a67d6ad16badcbf113a19feb73c4cf9876855523499998b99c0.json deleted file mode 100644 index b05230e1be4..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-0a1ec4690d6b4a67d6ad16badcbf113a19feb73c4cf9876855523499998b99c0.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n leaf_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n closed_form_inputs_blob_url,\n number_of_basic_circuits,\n protocol_version,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, $4, $5, 'waiting_for_proofs', NOW(), NOW(), $6)\n ON CONFLICT (l1_batch_number, circuit_id) DO\n UPDATE\n SET\n updated_at = NOW()\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Text", - "Int4", - "Int4", - "Int4" - ] - }, - "nullable": [] - }, - "hash": 
"0a1ec4690d6b4a67d6ad16badcbf113a19feb73c4cf9876855523499998b99c0" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-1080b95755b3047747a9fabc7c7c43ffc5f519d35c90c60f89579f0689906df5.json b/prover/crates/lib/prover_dal/.sqlx/query-1080b95755b3047747a9fabc7c7c43ffc5f519d35c90c60f89579f0689906df5.json new file mode 100644 index 00000000000..4b1cb2fe98a --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-1080b95755b3047747a9fabc7c7c43ffc5f519d35c90c60f89579f0689906df5.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n proof_compression_jobs_fri (\n l1_batch_number,\n fri_proof_blob_url,\n status,\n created_at,\n updated_at,\n protocol_version,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, NOW(), NOW(), $4, $5)\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Text", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "1080b95755b3047747a9fabc7c7c43ffc5f519d35c90c60f89579f0689906df5" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-eb2a85cb60c680a71203769db7baf89bbd72934e1405e320e746158e6d395d96.json b/prover/crates/lib/prover_dal/.sqlx/query-128b9be7a480a1ef132e5d477a43e4b3d55a0fee71105df880ff4f599844d06f.json similarity index 81% rename from prover/crates/lib/prover_dal/.sqlx/query-eb2a85cb60c680a71203769db7baf89bbd72934e1405e320e746158e6d395d96.json rename to prover/crates/lib/prover_dal/.sqlx/query-128b9be7a480a1ef132e5d477a43e4b3d55a0fee71105df880ff4f599844d06f.json index 27680c0bb46..e4e02b7e0df 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-eb2a85cb60c680a71203769db7baf89bbd72934e1405e320e746158e6d395d96.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-128b9be7a480a1ef132e5d477a43e4b3d55a0fee71105df880ff4f599844d06f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE proof_compression_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE proof_compression_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "eb2a85cb60c680a71203769db7baf89bbd72934e1405e320e746158e6d395d96" + "hash": "128b9be7a480a1ef132e5d477a43e4b3d55a0fee71105df880ff4f599844d06f" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-29ff260b02f7b955f9fe0b657b87def3a97275b66ad33d214054dc9048ddf584.json b/prover/crates/lib/prover_dal/.sqlx/query-29ff260b02f7b955f9fe0b657b87def3a97275b66ad33d214054dc9048ddf584.json deleted file mode 100644 index 85e66ed7824..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-29ff260b02f7b955f9fe0b657b87def3a97275b66ad33d214054dc9048ddf584.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n proof_compression_jobs_fri (\n l1_batch_number,\n fri_proof_blob_url,\n status,\n created_at,\n updated_at,\n protocol_version,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, NOW(), NOW(), $4, $5)\n ON CONFLICT (l1_batch_number) DO 
NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text", - "Text", - "Int4", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "29ff260b02f7b955f9fe0b657b87def3a97275b66ad33d214054dc9048ddf584" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-d0be28042b50199075cb0eca26f6b93bfd5d96fdc68732fe38c79ccd44b84def.json b/prover/crates/lib/prover_dal/.sqlx/query-2d11a834e177596113b5ffd634067fadffc1d3091e8c050835d4f178a328fcc8.json similarity index 82% rename from prover/crates/lib/prover_dal/.sqlx/query-d0be28042b50199075cb0eca26f6b93bfd5d96fdc68732fe38c79ccd44b84def.json rename to prover/crates/lib/prover_dal/.sqlx/query-2d11a834e177596113b5ffd634067fadffc1d3091e8c050835d4f178a328fcc8.json index 3943480b896..c997a53e4d9 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-d0be28042b50199075cb0eca26f6b93bfd5d96fdc68732fe38c79ccd44b84def.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-2d11a834e177596113b5ffd634067fadffc1d3091e8c050835d4f178a328fcc8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n id IN (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n (\n status IN ('in_progress', 'in_gpu_proof')\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n id IN (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n (\n status IN ('in_progress', 'in_gpu_proof')\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -49,5 +49,5 @@ true ] }, - "hash": "d0be28042b50199075cb0eca26f6b93bfd5d96fdc68732fe38c79ccd44b84def" + "hash": "2d11a834e177596113b5ffd634067fadffc1d3091e8c050835d4f178a328fcc8" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-2f194183f0140eee0dd801b2087c0a4dbcc7b955132056b5ff10f00cf5844874.json b/prover/crates/lib/prover_dal/.sqlx/query-2f194183f0140eee0dd801b2087c0a4dbcc7b955132056b5ff10f00cf5844874.json new file mode 100644 index 00000000000..e60e0f543c4 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-2f194183f0140eee0dd801b2087c0a4dbcc7b955132056b5ff10f00cf5844874.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n gpu_prover_queue_fri (\n instance_host,\n instance_port,\n instance_status,\n specialized_prover_group_id,\n zone,\n created_at,\n updated_at,\n protocol_version,\n protocol_version_patch\n )\n VALUES\n (CAST($1::TEXT AS INET), $2, 'available', $3, $4, NOW(), NOW(), $5, $6)\n ON CONFLICT (instance_host, instance_port, zone) DO\n UPDATE\n SET\n instance_status = 'available',\n specialized_prover_group_id = $3,\n zone = $4,\n updated_at = NOW(),\n protocol_version = $5,\n protocol_version_patch = $6\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int4", + "Int2", + "Text", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "2f194183f0140eee0dd801b2087c0a4dbcc7b955132056b5ff10f00cf5844874" +} diff --git 
a/prover/crates/lib/prover_dal/.sqlx/query-102b79726652d9150c802350bdca80c233a9fd3e892b5a867a5517c2e04497a8.json b/prover/crates/lib/prover_dal/.sqlx/query-30128c0642524295ab05c37c60db2fd05e27e9df6d760173bba37446988d1b8e.json similarity index 84% rename from prover/crates/lib/prover_dal/.sqlx/query-102b79726652d9150c802350bdca80c233a9fd3e892b5a867a5517c2e04497a8.json rename to prover/crates/lib/prover_dal/.sqlx/query-30128c0642524295ab05c37c60db2fd05e27e9df6d760173bba37446988d1b8e.json index f912d06de81..10ca762aee2 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-102b79726652d9150c802350bdca80c233a9fd3e892b5a867a5517c2e04497a8.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-30128c0642524295ab05c37c60db2fd05e27e9df6d760173bba37446988d1b8e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE proof_compression_jobs_fri\n SET\n status = 'queued',\n error = 'Manually requeued',\n attempts = 2,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE proof_compression_jobs_fri\n SET\n status = 'queued',\n error = 'Manually requeued',\n attempts = 2,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -37,5 +37,5 @@ true ] }, - "hash": "102b79726652d9150c802350bdca80c233a9fd3e892b5a867a5517c2e04497a8" + "hash": "30128c0642524295ab05c37c60db2fd05e27e9df6d760173bba37446988d1b8e" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-9895b2ded08be3e81a5357decf76b4d3d6a762761e45af2a73fe96da804e627e.json b/prover/crates/lib/prover_dal/.sqlx/query-332f1363f19160f9a7708635fd1691e08d28ca6796c0bbba95016d1899cf84fe.json similarity index 84% rename from prover/crates/lib/prover_dal/.sqlx/query-9895b2ded08be3e81a5357decf76b4d3d6a762761e45af2a73fe96da804e627e.json rename to prover/crates/lib/prover_dal/.sqlx/query-332f1363f19160f9a7708635fd1691e08d28ca6796c0bbba95016d1899cf84fe.json index 90ea9994206..a63ebc8f0e2 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-9895b2ded08be3e81a5357decf76b4d3d6a762761e45af2a73fe96da804e627e.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-332f1363f19160f9a7708635fd1691e08d28ca6796c0bbba95016d1899cf84fe.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'queued',\n error = 'Manually requeued',\n attempts = 2,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'queued',\n error = 'Manually requeued',\n attempts = 2,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -49,5 +49,5 @@ true ] }, - "hash": "9895b2ded08be3e81a5357decf76b4d3d6a762761e45af2a73fe96da804e627e" + "hash": "332f1363f19160f9a7708635fd1691e08d28ca6796c0bbba95016d1899cf84fe" } diff --git 
a/prover/crates/lib/prover_dal/.sqlx/query-340a84063719f9b837a61cbc63368ba43ba128a00a194dce65015bd9eeb1b42f.json b/prover/crates/lib/prover_dal/.sqlx/query-340a84063719f9b837a61cbc63368ba43ba128a00a194dce65015bd9eeb1b42f.json new file mode 100644 index 00000000000..a49edd4737f --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-340a84063719f9b837a61cbc63368ba43ba128a00a194dce65015bd9eeb1b42f.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n scheduler_witness_jobs_fri (\n l1_batch_number,\n scheduler_partial_input_blob_url,\n protocol_version,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, 'waiting_for_proofs', NOW(), NOW(), $4)\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "340a84063719f9b837a61cbc63368ba43ba128a00a194dce65015bd9eeb1b42f" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-37fa629a87113f13c89ce5c1a8fffd15b636d553794900596528e8fb03ca5aed.json b/prover/crates/lib/prover_dal/.sqlx/query-37fa629a87113f13c89ce5c1a8fffd15b636d553794900596528e8fb03ca5aed.json new file mode 100644 index 00000000000..3a8a78db9f0 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-37fa629a87113f13c89ce5c1a8fffd15b636d553794900596528e8fb03ca5aed.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n recursion_tip_witness_jobs_fri (\n l1_batch_number,\n status,\n number_of_final_node_jobs,\n protocol_version,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, 'waiting_for_proofs', $2, $3, NOW(), NOW(), $4)\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "37fa629a87113f13c89ce5c1a8fffd15b636d553794900596528e8fb03ca5aed" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d.json b/prover/crates/lib/prover_dal/.sqlx/query-3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d.json deleted file mode 100644 index d0dd5f6976b..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 2\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "circuit_id", - "type_info": "Int2" - }, - { - "ordinal": 2, - "name": "depth", - 
"type_info": "Int4" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "3941da180ee62a7c5d4e392ff4fe2d3a6ebb3657862b91e3ece34119f098fc2d" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json b/prover/crates/lib/prover_dal/.sqlx/query-3b3193bfac70b5fe69bf3bb7ba5a234c19578572973094b21ddbb3876da6bb95.json similarity index 79% rename from prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json rename to prover/crates/lib/prover_dal/.sqlx/query-3b3193bfac70b5fe69bf3bb7ba5a234c19578572973094b21ddbb3876da6bb95.json index 140b8f12675..962979344b4 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-3b3193bfac70b5fe69bf3bb7ba5a234c19578572973094b21ddbb3876da6bb95.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC,\n aggregation_round ASC,\n circuit_id ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC,\n aggregation_round ASC,\n circuit_id ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", "describe": { "columns": [ { @@ -56,5 +56,5 @@ false ] }, - "hash": "7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2" + "hash": "3b3193bfac70b5fe69bf3bb7ba5a234c19578572973094b21ddbb3876da6bb95" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3e0a1ebc684810c09ff83784bdd0ad195b0dd2a8ce56b1a9eb531103130b5e3e.json b/prover/crates/lib/prover_dal/.sqlx/query-3e0a1ebc684810c09ff83784bdd0ad195b0dd2a8ce56b1a9eb531103130b5e3e.json deleted file mode 100644 index 7646c87b847..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-3e0a1ebc684810c09ff83784bdd0ad195b0dd2a8ce56b1a9eb531103130b5e3e.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n processing_started_at = NOW(),\n updated_at = NOW(),\n picked_by = $5\n WHERE\n id = (\n SELECT\n pj.id\n FROM\n (\n SELECT\n *\n FROM\n UNNEST($1::SMALLINT[], $2::SMALLINT[])\n ) AS tuple (circuit_id, ROUND)\n JOIN LATERAL (\n SELECT\n *\n FROM\n prover_jobs_fri AS pj\n WHERE\n pj.status = 'queued'\n AND pj.protocol_version = $3\n AND pj.protocol_version_patch = $4\n AND pj.circuit_id = 
tuple.circuit_id\n AND pj.aggregation_round = tuple.round\n ORDER BY\n pj.l1_batch_number ASC,\n pj.id ASC\n LIMIT\n 1\n ) AS pj ON TRUE\n ORDER BY\n pj.l1_batch_number ASC,\n pj.aggregation_round DESC,\n pj.id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "id", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "circuit_id", - "type_info": "Int2" - }, - { - "ordinal": 3, - "name": "aggregation_round", - "type_info": "Int2" - }, - { - "ordinal": 4, - "name": "sequence_number", - "type_info": "Int4" - }, - { - "ordinal": 5, - "name": "depth", - "type_info": "Int4" - }, - { - "ordinal": 6, - "name": "is_node_final_proof", - "type_info": "Bool" - } - ], - "parameters": { - "Left": [ - "Int2Array", - "Int2Array", - "Int4", - "Int4", - "Text" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - false - ] - }, - "hash": "3e0a1ebc684810c09ff83784bdd0ad195b0dd2a8ce56b1a9eb531103130b5e3e" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-216d0c263539739b53975a96a10332b826708800a2f72f09bd7aea08cf724e1a.json b/prover/crates/lib/prover_dal/.sqlx/query-3fead9a82ea277785a9ee5c075a2ad4da8f523aa84cd047216d8e3cf69c92106.json similarity index 82% rename from prover/crates/lib/prover_dal/.sqlx/query-216d0c263539739b53975a96a10332b826708800a2f72f09bd7aea08cf724e1a.json rename to prover/crates/lib/prover_dal/.sqlx/query-3fead9a82ea277785a9ee5c075a2ad4da8f523aa84cd047216d8e3cf69c92106.json index ec503eabee0..6a534c361e2 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-216d0c263539739b53975a96a10332b826708800a2f72f09bd7aea08cf724e1a.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-3fead9a82ea277785a9ee5c075a2ad4da8f523aa84cd047216d8e3cf69c92106.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "216d0c263539739b53975a96a10332b826708800a2f72f09bd7aea08cf724e1a" + "hash": "3fead9a82ea277785a9ee5c075a2ad4da8f523aa84cd047216d8e3cf69c92106" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3ffc042b13c185ca6963fcb2d4d10c3b232bbaaa12747f72e3b8c5ad64702a79.json b/prover/crates/lib/prover_dal/.sqlx/query-3ffc042b13c185ca6963fcb2d4d10c3b232bbaaa12747f72e3b8c5ad64702a79.json new file mode 100644 index 00000000000..a39cda265f0 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-3ffc042b13c185ca6963fcb2d4d10c3b232bbaaa12747f72e3b8c5ad64702a79.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + 
"query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj\n ON\n prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 2\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "circuit_id", + "type_info": "Int2" + }, + { + "ordinal": 2, + "name": "depth", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "3ffc042b13c185ca6963fcb2d4d10c3b232bbaaa12747f72e3b8c5ad64702a79" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-bcc5d3d35652f49b41d4ee673b171570fc88c17822bebd5b92e3b2f726d9af3a.json b/prover/crates/lib/prover_dal/.sqlx/query-4580503b825352de7691478a5de103782558380c48caf8734cacee350e3841dd.json similarity index 82% rename from prover/crates/lib/prover_dal/.sqlx/query-bcc5d3d35652f49b41d4ee673b171570fc88c17822bebd5b92e3b2f726d9af3a.json rename to prover/crates/lib/prover_dal/.sqlx/query-4580503b825352de7691478a5de103782558380c48caf8734cacee350e3841dd.json index ab1c2dd6552..ee786a1cabe 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-bcc5d3d35652f49b41d4ee673b171570fc88c17822bebd5b92e3b2f726d9af3a.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-4580503b825352de7691478a5de103782558380c48caf8734cacee350e3841dd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "bcc5d3d35652f49b41d4ee673b171570fc88c17822bebd5b92e3b2f726d9af3a" + "hash": "4580503b825352de7691478a5de103782558380c48caf8734cacee350e3841dd" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-48b57a279bfff34d44d1f5a6501e40978966fb2ad8b342907580dd17c0a52779.json b/prover/crates/lib/prover_dal/.sqlx/query-48b57a279bfff34d44d1f5a6501e40978966fb2ad8b342907580dd17c0a52779.json new file mode 100644 index 00000000000..6e6a6b4ac1a --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-48b57a279bfff34d44d1f5a6501e40978966fb2ad8b342907580dd17c0a52779.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n witness_inputs_fri (\n l1_batch_number,\n 
witness_inputs_blob_url,\n protocol_version,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, 'queued', NOW(), NOW(), $4)\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "48b57a279bfff34d44d1f5a6501e40978966fb2ad8b342907580dd17c0a52779" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-d5bb897092bce2788fe02f31c9de6dde4142e09330557cc627fee2db278ace50.json b/prover/crates/lib/prover_dal/.sqlx/query-54cc92f95c07effcb08fa0b174c742fde93ffb610bff173f04f1b80e30754ad5.json similarity index 82% rename from prover/crates/lib/prover_dal/.sqlx/query-d5bb897092bce2788fe02f31c9de6dde4142e09330557cc627fee2db278ace50.json rename to prover/crates/lib/prover_dal/.sqlx/query-54cc92f95c07effcb08fa0b174c742fde93ffb610bff173f04f1b80e30754ad5.json index 9df8f1c849c..5a5d61aa436 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-d5bb897092bce2788fe02f31c9de6dde4142e09330557cc627fee2db278ace50.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-54cc92f95c07effcb08fa0b174c742fde93ffb610bff173f04f1b80e30754ad5.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -49,5 +49,5 @@ true ] }, - "hash": "d5bb897092bce2788fe02f31c9de6dde4142e09330557cc627fee2db278ace50" + "hash": "54cc92f95c07effcb08fa0b174c742fde93ffb610bff173f04f1b80e30754ad5" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-2b12c5d469e6220cc8ddc997c666e4aa4f797bcc6e05ec2f2e435a7e940d8cf9.json b/prover/crates/lib/prover_dal/.sqlx/query-6b7cf7ae3c66c46f4ecec2b0710f9f8bea0033f6727c2582f7f141330a47f440.json similarity index 82% rename from prover/crates/lib/prover_dal/.sqlx/query-2b12c5d469e6220cc8ddc997c666e4aa4f797bcc6e05ec2f2e435a7e940d8cf9.json rename to prover/crates/lib/prover_dal/.sqlx/query-6b7cf7ae3c66c46f4ecec2b0710f9f8bea0033f6727c2582f7f141330a47f440.json index 14b64e8122e..88afcf436c2 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-2b12c5d469e6220cc8ddc997c666e4aa4f797bcc6e05ec2f2e435a7e940d8cf9.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-6b7cf7ae3c66c46f4ecec2b0710f9f8bea0033f6727c2582f7f141330a47f440.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n 
WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n id,\n status,\n attempts,\n circuit_id,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -49,5 +49,5 @@ true ] }, - "hash": "2b12c5d469e6220cc8ddc997c666e4aa4f797bcc6e05ec2f2e435a7e940d8cf9" + "hash": "6b7cf7ae3c66c46f4ecec2b0710f9f8bea0033f6727c2582f7f141330a47f440" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-d272c91f1209c277189a31c59ee191a43dc8eafc33ee067bd41e20f25f7625f0.json b/prover/crates/lib/prover_dal/.sqlx/query-6c37c8a0a921408e3b812adf77d835d9a865ecbd87d38008a649e8943fdf8a43.json similarity index 92% rename from prover/crates/lib/prover_dal/.sqlx/query-d272c91f1209c277189a31c59ee191a43dc8eafc33ee067bd41e20f25f7625f0.json rename to prover/crates/lib/prover_dal/.sqlx/query-6c37c8a0a921408e3b812adf77d835d9a865ecbd87d38008a649e8943fdf8a43.json index 2c94853eacf..d2d8d265e69 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-d272c91f1209c277189a31c59ee191a43dc8eafc33ee067bd41e20f25f7625f0.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-6c37c8a0a921408e3b812adf77d835d9a865ecbd87d38008a649e8943fdf8a43.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n leaf_aggregation_witness_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n leaf_aggregation_witness_jobs_fri.*\n ", + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n leaf_aggregation_witness_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n leaf_aggregation_witness_jobs_fri.*\n ", "describe": { "columns": [ { @@ -104,5 +104,5 @@ false ] }, - "hash": "d272c91f1209c277189a31c59ee191a43dc8eafc33ee067bd41e20f25f7625f0" + "hash": "6c37c8a0a921408e3b812adf77d835d9a865ecbd87d38008a649e8943fdf8a43" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-2dc6b7bf08cced8791354fc47e319d03f894f40d2ec528382b5643c3d51ec8e7.json b/prover/crates/lib/prover_dal/.sqlx/query-705e3880df382c3c25f41630d83f2186aa86751cb3b4de26a60af1cf987ca636.json similarity index 92% rename from prover/crates/lib/prover_dal/.sqlx/query-2dc6b7bf08cced8791354fc47e319d03f894f40d2ec528382b5643c3d51ec8e7.json rename to prover/crates/lib/prover_dal/.sqlx/query-705e3880df382c3c25f41630d83f2186aa86751cb3b4de26a60af1cf987ca636.json index 0ad6413e1ec..700a7e8bfd6 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-2dc6b7bf08cced8791354fc47e319d03f894f40d2ec528382b5643c3d51ec8e7.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-705e3880df382c3c25f41630d83f2186aa86751cb3b4de26a60af1cf987ca636.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE gpu_prover_queue_fri\n SET\n instance_status = 'reserved',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n id IN (\n SELECT\n id\n FROM\n gpu_prover_queue_fri\n WHERE\n specialized_prover_group_id = $2\n AND zone = $3\n AND 
protocol_version = $4\n AND protocol_version_patch = $5\n AND (\n instance_status = 'available'\n OR (\n instance_status = 'reserved'\n AND processing_started_at < NOW() - $1::INTERVAL\n )\n )\n ORDER BY\n updated_at ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n gpu_prover_queue_fri.*\n ", + "query": "\n UPDATE gpu_prover_queue_fri\n SET\n instance_status = 'reserved',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n id IN (\n SELECT\n id\n FROM\n gpu_prover_queue_fri\n WHERE\n specialized_prover_group_id = $2\n AND zone = $3\n AND protocol_version = $4\n AND protocol_version_patch = $5\n AND (\n instance_status = 'available'\n OR (\n instance_status = 'reserved'\n AND processing_started_at < NOW() - $1::INTERVAL\n )\n )\n ORDER BY\n updated_at ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n gpu_prover_queue_fri.*\n ", "describe": { "columns": [ { @@ -82,5 +82,5 @@ false ] }, - "hash": "2dc6b7bf08cced8791354fc47e319d03f894f40d2ec528382b5643c3d51ec8e7" + "hash": "705e3880df382c3c25f41630d83f2186aa86751cb3b4de26a60af1cf987ca636" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-7238cfe04ba59967fe5589665ad2c0214469edff6fc74965f1ec1366f8b46b8e.json b/prover/crates/lib/prover_dal/.sqlx/query-7238cfe04ba59967fe5589665ad2c0214469edff6fc74965f1ec1366f8b46b8e.json new file mode 100644 index 00000000000..227ff7dbe26 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-7238cfe04ba59967fe5589665ad2c0214469edff6fc74965f1ec1366f8b46b8e.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n node_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n depth,\n aggregations_url,\n number_of_dependent_jobs,\n protocol_version,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, 'waiting_for_proofs', NOW(), NOW(), $7)\n ON CONFLICT (l1_batch_number, circuit_id, depth) DO\n UPDATE\n SET\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Int4", + "Text", + "Int4", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "7238cfe04ba59967fe5589665ad2c0214469edff6fc74965f1ec1366f8b46b8e" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json b/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json deleted file mode 100644 index f8b141a8dac..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n recursion_tip_witness_jobs_fri (l1_batch_number, status, number_of_final_node_jobs, created_at, updated_at)\n VALUES\n ($1, 'waiting_for_proofs', 1, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [] - }, - "hash": "73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-8720d411e0c9640afd61e927a89c0b6c018e6a4d279acd24a4ea7d81b5cc5123.json b/prover/crates/lib/prover_dal/.sqlx/query-7493571411ee2cb4f7b09bcfc4eb86d3521faa0fbbfc7ca810c818ca9720b121.json similarity index 77% rename from prover/crates/lib/prover_dal/.sqlx/query-8720d411e0c9640afd61e927a89c0b6c018e6a4d279acd24a4ea7d81b5cc5123.json rename to 
prover/crates/lib/prover_dal/.sqlx/query-7493571411ee2cb4f7b09bcfc4eb86d3521faa0fbbfc7ca810c818ca9720b121.json index 12146fb75a9..9d10e7f7fd4 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-8720d411e0c9640afd61e927a89c0b6c018e6a4d279acd24a4ea7d81b5cc5123.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-7493571411ee2cb4f7b09bcfc4eb86d3521faa0fbbfc7ca810c818ca9720b121.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n aggregation_round DESC,\n l1_batch_number ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n aggregation_round DESC,\n l1_batch_number ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", "describe": { "columns": [ { @@ -56,5 +56,5 @@ false ] }, - "hash": "8720d411e0c9640afd61e927a89c0b6c018e6a4d279acd24a4ea7d81b5cc5123" + "hash": "7493571411ee2cb4f7b09bcfc4eb86d3521faa0fbbfc7ca810c818ca9720b121" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-749d18c0fdae16ce0ed4e3c23e543d01cff938d3aed44c9bb1d864bfdf69e39a.json b/prover/crates/lib/prover_dal/.sqlx/query-749d18c0fdae16ce0ed4e3c23e543d01cff938d3aed44c9bb1d864bfdf69e39a.json new file mode 100644 index 00000000000..d01c5481fa6 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-749d18c0fdae16ce0ed4e3c23e543d01cff938d3aed44c9bb1d864bfdf69e39a.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n prover_jobs_fri (\n l1_batch_number,\n circuit_id,\n circuit_blob_url,\n aggregation_round,\n sequence_number,\n depth,\n is_node_final_proof,\n protocol_version,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, 'queued', NOW(), NOW(), $9)\n ON CONFLICT (\n l1_batch_number, aggregation_round, circuit_id, depth, sequence_number\n ) DO\n UPDATE\n SET\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text", + "Int2", + "Int4", + "Int4", + "Bool", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "749d18c0fdae16ce0ed4e3c23e543d01cff938d3aed44c9bb1d864bfdf69e39a" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-764693ceeb45f8478a20242b592d419667f11d80036cda021ecbf23b0b5f7f42.json b/prover/crates/lib/prover_dal/.sqlx/query-764693ceeb45f8478a20242b592d419667f11d80036cda021ecbf23b0b5f7f42.json deleted file mode 100644 index aac0fcd420c..00000000000 --- 
a/prover/crates/lib/prover_dal/.sqlx/query-764693ceeb45f8478a20242b592d419667f11d80036cda021ecbf23b0b5f7f42.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n node_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n depth,\n aggregations_url,\n number_of_dependent_jobs,\n protocol_version,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, 'waiting_for_proofs', NOW(), NOW(), $7)\n ON CONFLICT (l1_batch_number, circuit_id, depth) DO\n UPDATE\n SET\n updated_at = NOW()\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Int4", - "Text", - "Int4", - "Int4", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "764693ceeb45f8478a20242b592d419667f11d80036cda021ecbf23b0b5f7f42" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-776ef00deb25b0453c1eb38c5eaa81aef0d77e0a4b02307f59e289c0e61717c5.json b/prover/crates/lib/prover_dal/.sqlx/query-776ef00deb25b0453c1eb38c5eaa81aef0d77e0a4b02307f59e289c0e61717c5.json deleted file mode 100644 index c39b660fa2e..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-776ef00deb25b0453c1eb38c5eaa81aef0d77e0a4b02307f59e289c0e61717c5.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n gpu_prover_queue_fri (\n instance_host,\n instance_port,\n instance_status,\n specialized_prover_group_id,\n zone,\n created_at,\n updated_at,\n protocol_version,\n protocol_version_patch\n )\n VALUES\n (CAST($1::TEXT AS INET), $2, 'available', $3, $4, NOW(), NOW(), $5, $6)\n ON CONFLICT (instance_host, instance_port, zone) DO\n UPDATE\n SET\n instance_status = 'available',\n specialized_prover_group_id = $3,\n zone = $4,\n updated_at = NOW(),\n protocol_version = $5,\n protocol_version_patch = $6\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int4", - "Int2", - "Text", - "Int4", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "776ef00deb25b0453c1eb38c5eaa81aef0d77e0a4b02307f59e289c0e61717c5" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-7ab760de174d37c04373cf48489846983cb3f693c02159ba41c1a875aed7e03d.json b/prover/crates/lib/prover_dal/.sqlx/query-7ab760de174d37c04373cf48489846983cb3f693c02159ba41c1a875aed7e03d.json new file mode 100644 index 00000000000..6e0c60eeab0 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-7ab760de174d37c04373cf48489846983cb3f693c02159ba41c1a875aed7e03d.json @@ -0,0 +1,62 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n processing_started_at = NOW(),\n updated_at = NOW(),\n picked_by = $5\n WHERE\n id = (\n SELECT\n pj.id\n FROM\n (\n SELECT\n *\n FROM\n UNNEST($1::SMALLINT [], $2::SMALLINT [])\n ) AS tuple (circuit_id, round)\n JOIN LATERAL (\n SELECT\n *\n FROM\n prover_jobs_fri AS pj\n WHERE\n pj.status = 'queued'\n AND pj.protocol_version = $3\n AND pj.protocol_version_patch = $4\n AND pj.circuit_id = tuple.circuit_id\n AND pj.aggregation_round = tuple.round\n ORDER BY\n pj.l1_batch_number ASC,\n pj.id ASC\n LIMIT\n 1\n ) AS pj ON TRUE\n ORDER BY\n pj.l1_batch_number ASC,\n pj.aggregation_round DESC,\n pj.id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", + "describe": { + 
"columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "circuit_id", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "aggregation_round", + "type_info": "Int2" + }, + { + "ordinal": 4, + "name": "sequence_number", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "depth", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "is_node_final_proof", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Int2Array", + "Int2Array", + "Int4", + "Int4", + "Text" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "7ab760de174d37c04373cf48489846983cb3f693c02159ba41c1a875aed7e03d" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json b/prover/crates/lib/prover_dal/.sqlx/query-866bffdc527c079d128c1d21595ca5aec09b39b9d0367b0c29167f8a6a166d18.json similarity index 83% rename from prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json rename to prover/crates/lib/prover_dal/.sqlx/query-866bffdc527c079d128c1d21595ca5aec09b39b9d0367b0c29167f8a6a166d18.json index c97fe7f4042..e5a8f575d01 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-866bffdc527c079d128c1d21595ca5aec09b39b9d0367b0c29167f8a6a166d18.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da" + "hash": "866bffdc527c079d128c1d21595ca5aec09b39b9d0367b0c29167f8a6a166d18" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json b/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json deleted file mode 100644 index d8bd3223905..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n prover_fri_protocol_versions (id, snark_wrapper_vk_hash, created_at, protocol_version_patch)\n VALUES\n ($1, $2, NOW(), $3)\n ON CONFLICT (id, protocol_version_patch) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4", - "Bytea", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-28397b5a0b7af832d2a4d3d7011a68a48db6a64afcd41bbe0e17d98fa38fdb19.json 
b/prover/crates/lib/prover_dal/.sqlx/query-8c5aba6ce584c1671f2d65fb47701426d60c56b526af5ed2e0c602f881c3bbf0.json similarity index 52% rename from prover/crates/lib/prover_dal/.sqlx/query-28397b5a0b7af832d2a4d3d7011a68a48db6a64afcd41bbe0e17d98fa38fdb19.json rename to prover/crates/lib/prover_dal/.sqlx/query-8c5aba6ce584c1671f2d65fb47701426d60c56b526af5ed2e0c602f881c3bbf0.json index d6e77daaa92..fc0594f6443 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-28397b5a0b7af832d2a4d3d7011a68a48db6a64afcd41bbe0e17d98fa38fdb19.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-8c5aba6ce584c1671f2d65fb47701426d60c56b526af5ed2e0c602f881c3bbf0.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n l1_batch_number IN (\n SELECT\n prover_jobs_fri.l1_batch_number\n FROM\n prover_jobs_fri\n JOIN scheduler_witness_jobs_fri swj ON prover_jobs_fri.l1_batch_number = swj.l1_batch_number\n WHERE\n swj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = $1\n )\n RETURNING\n l1_batch_number;\n ", + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n l1_batch_number IN (\n SELECT\n prover_jobs_fri.l1_batch_number\n FROM\n prover_jobs_fri\n JOIN\n scheduler_witness_jobs_fri swj\n ON prover_jobs_fri.l1_batch_number = swj.l1_batch_number\n WHERE\n swj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = $1\n )\n RETURNING\n l1_batch_number;\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "28397b5a0b7af832d2a4d3d7011a68a48db6a64afcd41bbe0e17d98fa38fdb19" + "hash": "8c5aba6ce584c1671f2d65fb47701426d60c56b526af5ed2e0c602f881c3bbf0" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-926cddf712322b476064a6efb2a8594776d64b8b5a4094fef979e35ab76d5bcd.json b/prover/crates/lib/prover_dal/.sqlx/query-926cddf712322b476064a6efb2a8594776d64b8b5a4094fef979e35ab76d5bcd.json new file mode 100644 index 00000000000..a2558f49e1b --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-926cddf712322b476064a6efb2a8594776d64b8b5a4094fef979e35ab76d5bcd.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "926cddf712322b476064a6efb2a8594776d64b8b5a4094fef979e35ab76d5bcd" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-929419ad8dcc70e8ce986f17075cd21d4645563f93afd4428734196c2b212276.json b/prover/crates/lib/prover_dal/.sqlx/query-929419ad8dcc70e8ce986f17075cd21d4645563f93afd4428734196c2b212276.json deleted file mode 100644 index cf5fe8117b1..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-929419ad8dcc70e8ce986f17075cd21d4645563f93afd4428734196c2b212276.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n witness_inputs_fri (\n l1_batch_number,\n witness_inputs_blob_url,\n protocol_version,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, 'queued', NOW(), NOW(), $4)\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text", - "Int4", - "Int4" - ] - }, - "nullable": [] - }, - 
"hash": "929419ad8dcc70e8ce986f17075cd21d4645563f93afd4428734196c2b212276" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-9b713312b539b4eefa58346f0070767a2cd4488e670674cd9149f7a332c0198d.json b/prover/crates/lib/prover_dal/.sqlx/query-9b713312b539b4eefa58346f0070767a2cd4488e670674cd9149f7a332c0198d.json deleted file mode 100644 index 2609a2ee0cf..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-9b713312b539b4eefa58346f0070767a2cd4488e670674cd9149f7a332c0198d.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n l1_batch_number IN (\n SELECT\n prover_jobs_fri.l1_batch_number\n FROM\n prover_jobs_fri\n JOIN recursion_tip_witness_jobs_fri rtwj ON prover_jobs_fri.l1_batch_number = rtwj.l1_batch_number\n WHERE\n rtwj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = $1\n AND prover_jobs_fri.is_node_final_proof = TRUE\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n rtwj.number_of_final_node_jobs\n HAVING\n COUNT(*) = rtwj.number_of_final_node_jobs\n )\n RETURNING\n l1_batch_number;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int2" - ] - }, - "nullable": [ - false - ] - }, - "hash": "9b713312b539b4eefa58346f0070767a2cd4488e670674cd9149f7a332c0198d" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed.json b/prover/crates/lib/prover_dal/.sqlx/query-9f5f6d6da7dbb7292b7fe60283993e804b9c2af8c98ae64ccace8c5035b3c499.json similarity index 80% rename from prover/crates/lib/prover_dal/.sqlx/query-e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed.json rename to prover/crates/lib/prover_dal/.sqlx/query-9f5f6d6da7dbb7292b7fe60283993e804b9c2af8c98ae64ccace8c5035b3c499.json index cf9ff8396ef..fe84883ccd0 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-9f5f6d6da7dbb7292b7fe60283993e804b9c2af8c98ae64ccace8c5035b3c499.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $2\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n witness_inputs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $3\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n witness_inputs_fri.l1_batch_number\n ", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $2\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n witness_inputs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $3\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n witness_inputs_fri.l1_batch_number\n ", "describe": { "columns": [ { @@ -20,5 +20,5 @@ false ] }, - "hash": "e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed" + "hash": "9f5f6d6da7dbb7292b7fe60283993e804b9c2af8c98ae64ccace8c5035b3c499" } diff --git 
a/prover/crates/lib/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json b/prover/crates/lib/prover_dal/.sqlx/query-a4407ac701423bc91505af3e7250ac1e4ed2a372922e55728de083bffb533e11.json similarity index 60% rename from prover/crates/lib/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json rename to prover/crates/lib/prover_dal/.sqlx/query-a4407ac701423bc91505af3e7250ac1e4ed2a372922e55728de083bffb533e11.json index 160eb31bf95..7a06e6557d6 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-a4407ac701423bc91505af3e7250ac1e4ed2a372922e55728de083bffb533e11.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version,\n protocol_version_patch,\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 'in_progress'\n ) AS in_progress\n FROM\n proof_compression_jobs_fri\n WHERE\n protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", + "query": "\n SELECT\n protocol_version,\n protocol_version_patch,\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 'in_progress'\n ) AS in_progress\n FROM\n proof_compression_jobs_fri\n WHERE\n protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", "describe": { "columns": [ { @@ -34,5 +34,5 @@ null ] }, - "hash": "e9c9b69852fa68f463f17b6d63ab99cf505662036f2dd7a9f1807c4c1bad7c7b" + "hash": "a4407ac701423bc91505af3e7250ac1e4ed2a372922e55728de083bffb533e11" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json b/prover/crates/lib/prover_dal/.sqlx/query-a6848b0c4cb256ca1d79d83dc5cb8c88cdb52125daedbade044934761fe2147d.json similarity index 83% rename from prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json rename to prover/crates/lib/prover_dal/.sqlx/query-a6848b0c4cb256ca1d79d83dc5cb8c88cdb52125daedbade044934761fe2147d.json index bf8db798e7d..270868d8981 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-a6848b0c4cb256ca1d79d83dc5cb8c88cdb52125daedbade044934761fe2147d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b" + "hash": "a6848b0c4cb256ca1d79d83dc5cb8c88cdb52125daedbade044934761fe2147d" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-a6eb7a1f1aa2f6f5d90fbe3b8c777313681560f538c6524c54648d6308533744.json 
b/prover/crates/lib/prover_dal/.sqlx/query-a6eb7a1f1aa2f6f5d90fbe3b8c777313681560f538c6524c54648d6308533744.json new file mode 100644 index 00000000000..319b00b4c61 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-a6eb7a1f1aa2f6f5d90fbe3b8c777313681560f538c6524c54648d6308533744.json @@ -0,0 +1,32 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj\n ON\n prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 1\n AND prover_jobs_fri.depth = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "circuit_id", + "type_info": "Int2" + }, + { + "ordinal": 2, + "name": "depth", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false + ] + }, + "hash": "a6eb7a1f1aa2f6f5d90fbe3b8c777313681560f538c6524c54648d6308533744" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json b/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json deleted file mode 100644 index 93532150f7f..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [] - }, - "hash": "a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9.json b/prover/crates/lib/prover_dal/.sqlx/query-abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9.json deleted file mode 100644 index fae5c1041a5..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id, depth) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth\n FROM\n prover_jobs_fri\n JOIN node_aggregation_witness_jobs_fri nawj ON prover_jobs_fri.l1_batch_number = nawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = nawj.circuit_id\n AND prover_jobs_fri.depth = nawj.depth\n WHERE\n nawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round 
= 1\n AND prover_jobs_fri.depth = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.depth,\n nawj.number_of_dependent_jobs\n HAVING\n COUNT(*) = nawj.number_of_dependent_jobs\n )\n RETURNING\n l1_batch_number,\n circuit_id,\n depth;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "circuit_id", - "type_info": "Int2" - }, - { - "ordinal": 2, - "name": "depth", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "abc93d27a8673b23e18d050e84c43c868c63c853edb5c4f41e48a3cc6378eca9" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json b/prover/crates/lib/prover_dal/.sqlx/query-b9aaf5fe4d0261f7b05c8601a96b3027b4c2ce405d3bcc9821440408a394d7f5.json similarity index 73% rename from prover/crates/lib/prover_dal/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json rename to prover/crates/lib/prover_dal/.sqlx/query-b9aaf5fe4d0261f7b05c8601a96b3027b4c2ce405d3bcc9821440408a394d7f5.json index 7c0264b5646..a7b33f51cb1 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-b9aaf5fe4d0261f7b05c8601a96b3027b4c2ce405d3bcc9821440408a394d7f5.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'successful',\n updated_at = NOW(),\n time_taken = $1,\n proof_blob_url = $2\n WHERE\n id = $3\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'successful',\n updated_at = NOW(),\n time_taken = $1,\n proof_blob_url = $2\n WHERE\n id = $3\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", "describe": { "columns": [ { @@ -56,5 +56,5 @@ false ] }, - "hash": "ec04b89218111a5dc8d5ade506ac3465e2211ef3013386feb12d4cc04e0eade9" + "hash": "b9aaf5fe4d0261f7b05c8601a96b3027b4c2ce405d3bcc9821440408a394d7f5" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json b/prover/crates/lib/prover_dal/.sqlx/query-c01337f381828818b2b23a7fcc3d165108e64cc8cf1a2a5604cb180f6d8ac4fe.json similarity index 57% rename from prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json rename to prover/crates/lib/prover_dal/.sqlx/query-c01337f381828818b2b23a7fcc3d165108e64cc8cf1a2a5604cb180f6d8ac4fe.json index 05163dcfa2e..6c583ea1994 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-c01337f381828818b2b23a7fcc3d165108e64cc8cf1a2a5604cb180f6d8ac4fe.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version AS \"protocol_version!\",\n protocol_version_patch AS \"protocol_version_patch!\",\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 
'in_progress'\n ) AS in_progress\n FROM\n prover_jobs_fri\n WHERE\n status IN ('queued', 'in_progress')\n AND protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", + "query": "\n SELECT\n protocol_version AS \"protocol_version!\",\n protocol_version_patch AS \"protocol_version_patch!\",\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 'in_progress'\n ) AS in_progress\n FROM\n prover_jobs_fri\n WHERE\n status IN ('queued', 'in_progress')\n AND protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", "describe": { "columns": [ { @@ -34,5 +34,5 @@ null ] }, - "hash": "29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8" + "hash": "c01337f381828818b2b23a7fcc3d165108e64cc8cf1a2a5604cb180f6d8ac4fe" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-7effbacbdcc4bd762386351755f4f32042dfead8a37401558f5fd3b03480f2dd.json b/prover/crates/lib/prover_dal/.sqlx/query-c5569d55d77296b8c7180c95682423bb83b9fb0422b396cbd201f02ebce7b921.json similarity index 93% rename from prover/crates/lib/prover_dal/.sqlx/query-7effbacbdcc4bd762386351755f4f32042dfead8a37401558f5fd3b03480f2dd.json rename to prover/crates/lib/prover_dal/.sqlx/query-c5569d55d77296b8c7180c95682423bb83b9fb0422b396cbd201f02ebce7b921.json index 3064489830d..f25ad78910d 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-7effbacbdcc4bd762386351755f4f32042dfead8a37401558f5fd3b03480f2dd.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-c5569d55d77296b8c7180c95682423bb83b9fb0422b396cbd201f02ebce7b921.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n node_aggregation_witness_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC,\n depth ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n node_aggregation_witness_jobs_fri.*\n ", + "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n node_aggregation_witness_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC,\n depth ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n node_aggregation_witness_jobs_fri.*\n ", "describe": { "columns": [ { @@ -110,5 +110,5 @@ false ] }, - "hash": "7effbacbdcc4bd762386351755f4f32042dfead8a37401558f5fd3b03480f2dd" + "hash": "c5569d55d77296b8c7180c95682423bb83b9fb0422b396cbd201f02ebce7b921" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf.json b/prover/crates/lib/prover_dal/.sqlx/query-c6d02dc9cb9908a57c79729c759b9314a2ce0180c20126ad22ddaa20c7c32c2c.json similarity index 66% rename from prover/crates/lib/prover_dal/.sqlx/query-081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf.json rename to prover/crates/lib/prover_dal/.sqlx/query-c6d02dc9cb9908a57c79729c759b9314a2ce0180c20126ad22ddaa20c7c32c2c.json index 918fb2817d2..8462680ad82 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf.json +++ 
b/prover/crates/lib/prover_dal/.sqlx/query-c6d02dc9cb9908a57c79729c759b9314a2ce0180c20126ad22ddaa20c7c32c2c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n comp.l1_batch_number,\n (comp.updated_at - wit.created_at) AS time_taken,\n wit.created_at\n FROM\n proof_compression_jobs_fri AS comp\n JOIN witness_inputs_fri AS wit ON comp.l1_batch_number = wit.l1_batch_number\n WHERE\n wit.created_at > $1\n ORDER BY\n time_taken DESC;\n ", + "query": "\n SELECT\n comp.l1_batch_number,\n (comp.updated_at - wit.created_at) AS time_taken,\n wit.created_at\n FROM\n proof_compression_jobs_fri AS comp\n JOIN witness_inputs_fri AS wit ON comp.l1_batch_number = wit.l1_batch_number\n WHERE\n wit.created_at > $1\n ORDER BY\n time_taken DESC;\n ", "describe": { "columns": [ { @@ -30,5 +30,5 @@ false ] }, - "hash": "081e2b928f0816c41d6645c1dedbb3402044d201e85e114ff4582394c32bd2bf" + "hash": "c6d02dc9cb9908a57c79729c759b9314a2ce0180c20126ad22ddaa20c7c32c2c" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json b/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json deleted file mode 100644 index cadc931fa1c..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, status, created_at, updated_at)\n VALUES\n ($1, $2, 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id, depth) DO\n UPDATE\n SET\n status = $3\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Text" - ] - }, - "nullable": [] - }, - "hash": "c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-caff620ae66d7fbe3caff7505173b6da86d1e693be03936730c340121167341f.json b/prover/crates/lib/prover_dal/.sqlx/query-caff620ae66d7fbe3caff7505173b6da86d1e693be03936730c340121167341f.json deleted file mode 100644 index 403e34bb91c..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-caff620ae66d7fbe3caff7505173b6da86d1e693be03936730c340121167341f.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n scheduler_witness_jobs_fri (\n l1_batch_number,\n scheduler_partial_input_blob_url,\n protocol_version,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, 'waiting_for_proofs', NOW(), NOW(), $4)\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n updated_at = NOW()\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text", - "Int4", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "caff620ae66d7fbe3caff7505173b6da86d1e693be03936730c340121167341f" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-cb453f0677b92539747e175e796599bf65cbf2fd7c27a2dcad43e41a0f43cba0.json b/prover/crates/lib/prover_dal/.sqlx/query-cb453f0677b92539747e175e796599bf65cbf2fd7c27a2dcad43e41a0f43cba0.json new file mode 100644 index 00000000000..5c5a9e288b2 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-cb453f0677b92539747e175e796599bf65cbf2fd7c27a2dcad43e41a0f43cba0.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id) IN (\n SELECT\n 
prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id\n FROM\n prover_jobs_fri\n JOIN leaf_aggregation_witness_jobs_fri lawj\n ON\n prover_jobs_fri.l1_batch_number = lawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = lawj.circuit_id\n WHERE\n lawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n lawj.number_of_basic_circuits\n HAVING\n COUNT(*) = lawj.number_of_basic_circuits\n )\n RETURNING\n l1_batch_number,\n circuit_id;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "circuit_id", + "type_info": "Int2" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false + ] + }, + "hash": "cb453f0677b92539747e175e796599bf65cbf2fd7c27a2dcad43e41a0f43cba0" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-cebbd80998bf2be10c735f6c414d023d9cce8cd66ad684475ece209832babd5e.json b/prover/crates/lib/prover_dal/.sqlx/query-cebbd80998bf2be10c735f6c414d023d9cce8cd66ad684475ece209832babd5e.json new file mode 100644 index 00000000000..d56c7f3de74 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-cebbd80998bf2be10c735f6c414d023d9cce8cd66ad684475ece209832babd5e.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n node_aggregation_witness_jobs_fri (\n l1_batch_number, circuit_id, status, created_at, updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id, depth) DO\n UPDATE\n SET\n status = $3\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text" + ] + }, + "nullable": [] + }, + "hash": "cebbd80998bf2be10c735f6c414d023d9cce8cd66ad684475ece209832babd5e" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-e2aceb9b86e74a3d119d383dcc729d1227a213b6cacb4c30a1f94c56f56cb4af.json b/prover/crates/lib/prover_dal/.sqlx/query-e2aceb9b86e74a3d119d383dcc729d1227a213b6cacb4c30a1f94c56f56cb4af.json new file mode 100644 index 00000000000..0fc0f557b12 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-e2aceb9b86e74a3d119d383dcc729d1227a213b6cacb4c30a1f94c56f56cb4af.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n scheduler_witness_jobs_fri (\n l1_batch_number,\n scheduler_partial_input_blob_url,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, '', 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "e2aceb9b86e74a3d119d383dcc729d1227a213b6cacb4c30a1f94c56f56cb4af" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-e65d9d8389b60f48468561984f0fb9c8674344921b98b8b26e4d85994b3d72af.json b/prover/crates/lib/prover_dal/.sqlx/query-e65d9d8389b60f48468561984f0fb9c8674344921b98b8b26e4d85994b3d72af.json new file mode 100644 index 00000000000..65a473ddeee --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-e65d9d8389b60f48468561984f0fb9c8674344921b98b8b26e4d85994b3d72af.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n l1_batch_number IN (\n SELECT\n prover_jobs_fri.l1_batch_number\n FROM\n prover_jobs_fri\n JOIN\n recursion_tip_witness_jobs_fri rtwj\n ON prover_jobs_fri.l1_batch_number = 
rtwj.l1_batch_number\n WHERE\n rtwj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = $1\n AND prover_jobs_fri.is_node_final_proof = TRUE\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n rtwj.number_of_final_node_jobs\n HAVING\n COUNT(*) = rtwj.number_of_final_node_jobs\n )\n RETURNING\n l1_batch_number;\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int2" + ] + }, + "nullable": [ + false + ] + }, + "hash": "e65d9d8389b60f48468561984f0fb9c8674344921b98b8b26e4d85994b3d72af" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json b/prover/crates/lib/prover_dal/.sqlx/query-e6ddecc79d55bf5bd3e348a735b02757fe394fa096f09c62faa26f728a69d1ae.json similarity index 79% rename from prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json rename to prover/crates/lib/prover_dal/.sqlx/query-e6ddecc79d55bf5bd3e348a735b02757fe394fa096f09c62faa26f728a69d1ae.json index d7eb6a32b42..a3da3036ab6 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-e6ddecc79d55bf5bd3e348a735b02757fe394fa096f09c62faa26f728a69d1ae.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86" + "hash": "e6ddecc79d55bf5bd3e348a735b02757fe394fa096f09c62faa26f728a69d1ae" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json b/prover/crates/lib/prover_dal/.sqlx/query-e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json deleted file mode 100644 index af6210ae91e..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'queued'\n WHERE\n (l1_batch_number, circuit_id) IN (\n SELECT\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id\n FROM\n prover_jobs_fri\n JOIN leaf_aggregation_witness_jobs_fri lawj ON prover_jobs_fri.l1_batch_number = lawj.l1_batch_number\n AND prover_jobs_fri.circuit_id = lawj.circuit_id\n WHERE\n lawj.status = 'waiting_for_proofs'\n AND prover_jobs_fri.status = 'successful'\n AND prover_jobs_fri.aggregation_round = 0\n GROUP BY\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n lawj.number_of_basic_circuits\n HAVING\n COUNT(*) = lawj.number_of_basic_circuits\n )\n RETURNING\n l1_batch_number,\n circuit_id;\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - 
"name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "circuit_id", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false - ] - }, - "hash": "e743af4c18ec91eb46db5a19556fba74ec2cfc3c89c7e4e2ea475c3ce4092849" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json b/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json deleted file mode 100644 index 4ee9278fe42..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n scheduler_witness_jobs_fri (\n l1_batch_number,\n scheduler_partial_input_blob_url,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, '', 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Text" - ] - }, - "nullable": [] - }, - "hash": "e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json b/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json deleted file mode 100644 index f8e92b1ad66..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n leaf_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n number_of_basic_circuits,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id) DO\n UPDATE\n SET\n status = $3\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Text" - ] - }, - "nullable": [] - }, - "hash": "eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-eef1b56e87eff63fcf6ffb98791583a7526ae38ceb4bf80543cfd3fb60492fb9.json b/prover/crates/lib/prover_dal/.sqlx/query-eef1b56e87eff63fcf6ffb98791583a7526ae38ceb4bf80543cfd3fb60492fb9.json deleted file mode 100644 index fe481b4e54d..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-eef1b56e87eff63fcf6ffb98791583a7526ae38ceb4bf80543cfd3fb60492fb9.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n recursion_tip_witness_jobs_fri (\n l1_batch_number,\n status,\n number_of_final_node_jobs,\n protocol_version,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, 'waiting_for_proofs', $2, $3, NOW(), NOW(), $4)\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n updated_at = NOW()\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "eef1b56e87eff63fcf6ffb98791583a7526ae38ceb4bf80543cfd3fb60492fb9" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-f05ea1f0ee0be1ac50c35f5dbf4bcaf4e7f673e9d9827427aa4bbedb3cae8b8f.json b/prover/crates/lib/prover_dal/.sqlx/query-f05ea1f0ee0be1ac50c35f5dbf4bcaf4e7f673e9d9827427aa4bbedb3cae8b8f.json new file mode 100644 index 00000000000..d47714f9575 --- /dev/null +++ 
b/prover/crates/lib/prover_dal/.sqlx/query-f05ea1f0ee0be1ac50c35f5dbf4bcaf4e7f673e9d9827427aa4bbedb3cae8b8f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n recursion_tip_witness_jobs_fri (\n l1_batch_number, status, number_of_final_node_jobs, created_at, updated_at\n )\n VALUES\n ($1, 'waiting_for_proofs', 1, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "f05ea1f0ee0be1ac50c35f5dbf4bcaf4e7f673e9d9827427aa4bbedb3cae8b8f" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-f294a1c32ffb957c901dcdfa942ea8e04c2c28771aa50a9e3ebb95c5e428be0e.json b/prover/crates/lib/prover_dal/.sqlx/query-f294a1c32ffb957c901dcdfa942ea8e04c2c28771aa50a9e3ebb95c5e428be0e.json new file mode 100644 index 00000000000..76008be6c33 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-f294a1c32ffb957c901dcdfa942ea8e04c2c28771aa50a9e3ebb95c5e428be0e.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n leaf_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n closed_form_inputs_blob_url,\n number_of_basic_circuits,\n protocol_version,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, $4, $5, 'waiting_for_proofs', NOW(), NOW(), $6)\n ON CONFLICT (l1_batch_number, circuit_id) DO\n UPDATE\n SET\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text", + "Int4", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "f294a1c32ffb957c901dcdfa942ea8e04c2c28771aa50a9e3ebb95c5e428be0e" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-8357972a21b39644e4cbe4bedc3b6d9065bf4494daf8f7632ab2bfe055773f7b.json b/prover/crates/lib/prover_dal/.sqlx/query-f2ed1acf57927528cf978ad759ac92d6496147f6425c2485fc8a8f2739e65f80.json similarity index 81% rename from prover/crates/lib/prover_dal/.sqlx/query-8357972a21b39644e4cbe4bedc3b6d9065bf4494daf8f7632ab2bfe055773f7b.json rename to prover/crates/lib/prover_dal/.sqlx/query-f2ed1acf57927528cf978ad759ac92d6496147f6425c2485fc8a8f2739e65f80.json index 54fba3bbeac..a261f190acf 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-8357972a21b39644e4cbe4bedc3b6d9065bf4494daf8f7632ab2bfe055773f7b.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-f2ed1acf57927528cf978ad759ac92d6496147f6425c2485fc8a8f2739e65f80.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n (\n status = 'in_progress'\n AND processing_started_at <= NOW() - $1::INTERVAL\n AND attempts < $2\n )\n OR (\n status = 'failed'\n AND attempts < $2\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", "describe": { "columns": [ { @@ -43,5 +43,5 @@ true ] }, - "hash": "8357972a21b39644e4cbe4bedc3b6d9065bf4494daf8f7632ab2bfe055773f7b" + "hash": "f2ed1acf57927528cf978ad759ac92d6496147f6425c2485fc8a8f2739e65f80" } diff --git 
a/prover/crates/lib/prover_dal/.sqlx/query-f3280a144a9aea48dae9b2914f0bf0344b237bf1914657d36f44b9d21ae966a6.json b/prover/crates/lib/prover_dal/.sqlx/query-f3280a144a9aea48dae9b2914f0bf0344b237bf1914657d36f44b9d21ae966a6.json new file mode 100644 index 00000000000..1a5c178a900 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-f3280a144a9aea48dae9b2914f0bf0344b237bf1914657d36f44b9d21ae966a6.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n leaf_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n number_of_basic_circuits,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id) DO\n UPDATE\n SET\n status = $3\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text" + ] + }, + "nullable": [] + }, + "hash": "f3280a144a9aea48dae9b2914f0bf0344b237bf1914657d36f44b9d21ae966a6" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-f68afde603675b0853547c61a74cbb33d511e07d80a5acd79bc559abdbda49bc.json b/prover/crates/lib/prover_dal/.sqlx/query-f68afde603675b0853547c61a74cbb33d511e07d80a5acd79bc559abdbda49bc.json new file mode 100644 index 00000000000..3820754145d --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-f68afde603675b0853547c61a74cbb33d511e07d80a5acd79bc559abdbda49bc.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n prover_fri_protocol_versions (\n id, snark_wrapper_vk_hash, created_at, protocol_version_patch\n )\n VALUES\n ($1, $2, NOW(), $3)\n ON CONFLICT (id, protocol_version_patch) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Bytea", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "f68afde603675b0853547c61a74cbb33d511e07d80a5acd79bc559abdbda49bc" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-0b70c98c2edd8370ad09ac553c18dbc21cccb9a95e3db1c93da239845a5e9036.json b/prover/crates/lib/prover_dal/.sqlx/query-f70306e92c2a2d69e0e75eb0cf614f3cc4f9c96c2a51a157b83fff9b411beeb9.json similarity index 80% rename from prover/crates/lib/prover_dal/.sqlx/query-0b70c98c2edd8370ad09ac553c18dbc21cccb9a95e3db1c93da239845a5e9036.json rename to prover/crates/lib/prover_dal/.sqlx/query-f70306e92c2a2d69e0e75eb0cf614f3cc4f9c96c2a51a157b83fff9b411beeb9.json index 8b49fa11e63..344068b04e9 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-0b70c98c2edd8370ad09ac553c18dbc21cccb9a95e3db1c93da239845a5e9036.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-f70306e92c2a2d69e0e75eb0cf614f3cc4f9c96c2a51a157b83fff9b411beeb9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE proof_compression_jobs_fri\n SET\n status = $1,\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n proof_compression_jobs_fri\n WHERE\n status = $2\n AND protocol_version = $4\n AND protocol_version_patch = $5\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n proof_compression_jobs_fri.l1_batch_number\n ", + "query": "\n UPDATE proof_compression_jobs_fri\n SET\n status = $1,\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n proof_compression_jobs_fri\n WHERE\n status = $2\n AND protocol_version = $4\n AND protocol_version_patch = $5\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR 
UPDATE\n SKIP LOCKED\n )\n RETURNING\n proof_compression_jobs_fri.l1_batch_number\n ", "describe": { "columns": [ { @@ -22,5 +22,5 @@ false ] }, - "hash": "0b70c98c2edd8370ad09ac553c18dbc21cccb9a95e3db1c93da239845a5e9036" + "hash": "f70306e92c2a2d69e0e75eb0cf614f3cc4f9c96c2a51a157b83fff9b411beeb9" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-f99c34313e2717ec74b4f274e33dae905acac53b46eeaeb059d23e48a71df3b4.json b/prover/crates/lib/prover_dal/.sqlx/query-f99c34313e2717ec74b4f274e33dae905acac53b46eeaeb059d23e48a71df3b4.json deleted file mode 100644 index c0c2637fe5a..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-f99c34313e2717ec74b4f274e33dae905acac53b46eeaeb059d23e48a71df3b4.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n prover_jobs_fri (\n l1_batch_number,\n circuit_id,\n circuit_blob_url,\n aggregation_round,\n sequence_number,\n depth,\n is_node_final_proof,\n protocol_version,\n status,\n created_at,\n updated_at,\n protocol_version_patch\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, 'queued', NOW(), NOW(), $9)\n ON CONFLICT (l1_batch_number, aggregation_round, circuit_id, depth, sequence_number) DO\n UPDATE\n SET\n updated_at = NOW()\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int2", - "Text", - "Int2", - "Int4", - "Int4", - "Bool", - "Int4", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "f99c34313e2717ec74b4f274e33dae905acac53b46eeaeb059d23e48a71df3b4" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-28f03acf565c4b50fe86f606c18a8b699386b3c5b4e02d5ce046f0f2e0ddc388.json b/prover/crates/lib/prover_dal/.sqlx/query-fd29394931eed5b99d2fb4fac907218d106d5852fa6f65aca22e12b3a4b7f9c2.json similarity index 92% rename from prover/crates/lib/prover_dal/.sqlx/query-28f03acf565c4b50fe86f606c18a8b699386b3c5b4e02d5ce046f0f2e0ddc388.json rename to prover/crates/lib/prover_dal/.sqlx/query-fd29394931eed5b99d2fb4fac907218d106d5852fa6f65aca22e12b3a4b7f9c2.json index 89e159989ae..b3e3d945b32 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-28f03acf565c4b50fe86f606c18a8b699386b3c5b4e02d5ce046f0f2e0ddc388.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-fd29394931eed5b99d2fb4fac907218d106d5852fa6f65aca22e12b3a4b7f9c2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $2\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n scheduler_witness_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $3\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n scheduler_witness_jobs_fri.*\n ", + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $2\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n scheduler_witness_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $3\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n scheduler_witness_jobs_fri.*\n ", "describe": { "columns": [ { @@ -86,5 +86,5 @@ false ] }, - "hash": "28f03acf565c4b50fe86f606c18a8b699386b3c5b4e02d5ce046f0f2e0ddc388" + "hash": "fd29394931eed5b99d2fb4fac907218d106d5852fa6f65aca22e12b3a4b7f9c2" } From a9c0b3b93d348859e2448c25237d239d02bc0dc9 Mon 
Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Tue, 8 Oct 2024 13:16:16 -0300 Subject: [PATCH 19/36] Update contracts --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index ece4eb73efd..a92ae777d59 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit ece4eb73efd31ce73ead0936a2fa43b0f777d41a +Subproject commit a92ae777d590b5dcaefd458d4a2406072130e096 From a6f496b38d6ad0eebb826f06fbdc491b0b1b6d4c Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Tue, 8 Oct 2024 13:17:04 -0300 Subject: [PATCH 20/36] Format --- eigenda-integration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eigenda-integration.md b/eigenda-integration.md index 8666392b900..85dac512d59 100644 --- a/eigenda-integration.md +++ b/eigenda-integration.md @@ -18,7 +18,7 @@ da_client: eigenda-proxy: image: ghcr.io/layr-labs/eigenda-proxy ports: - - "4242:4242" + - '4242:4242' command: ./eigenda-proxy --addr 0.0.0.0 --port 4242 --memstore.enabled --eigenda-max-blob-length "2MiB" ``` From 9eb946b0f43a46b25c890818da403f9e4695a151 Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Tue, 8 Oct 2024 13:40:30 -0300 Subject: [PATCH 21/36] Update contracts --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index a92ae777d59..80ab7a821dd 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit a92ae777d590b5dcaefd458d4a2406072130e096 +Subproject commit 80ab7a821ddab19a04867d3437ed1064f70b53a2 From deafa460715334a77edf9fe8aa76fa90029342c4 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 8 Oct 2024 21:08:04 +0300 Subject: [PATCH 22/36] =?UTF-8?q?feat(vm):=20EVM=20emulator=20support=20?= =?UTF-8?q?=E2=80=93=20base=20(#2979)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Modifies the Era codebase to support the EVM emulator. Intentionally avoids changing the `contracts` submodule yet; as a consequence, there are no EVM emulation tests. ## Why ❔ Stepping stone for EVM equivalence. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
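
For readers skimming the diff below: the core of this change is an optional EVM emulator entry carried alongside the bootloader and default account code (see the `evm_emulator: None` and `evm_emulator_hash` hunks further down). The following is a minimal, self-contained sketch of that shape using simplified placeholder types — it is not the upstream `zksync_contracts` definition, just an illustration of how the new field composes with the existing pair:

```rust
// Sketch only: simplified stand-ins for the types touched by this patch,
// showing how the optional EVM emulator slots in next to the bootloader
// and the default account code.

#[derive(Clone)]
struct SystemContractCode {
    code: Vec<u8>,  // contract bytecode (upstream stores it as 32-byte words)
    hash: [u8; 32], // bytecode hash
}

#[derive(Clone)]
struct BaseSystemContracts {
    bootloader: SystemContractCode,
    default_aa: SystemContractCode,
    // New in this patch: only populated when EVM emulation is enabled.
    evm_emulator: Option<SystemContractCode>,
}

impl BaseSystemContracts {
    // Mirrors the `evm_emulator: None` initializers added throughout the diff.
    fn without_emulator(bootloader: SystemContractCode, default_aa: SystemContractCode) -> Self {
        Self {
            bootloader,
            default_aa,
            evm_emulator: None,
        }
    }
}

fn main() {
    let dummy = SystemContractCode { code: vec![0u8; 32], hash: [0u8; 32] };
    let contracts = BaseSystemContracts::without_emulator(dummy.clone(), dummy);
    println!("EVM emulator enabled: {}", contracts.evm_emulator.is_some());
}
```

Keeping the field an `Option` lets every existing call site opt out with `None` until emulator bytecode is actually wired in, which is why most hunks in this patch only add `evm_emulator: None` / `evm_emulator_hash: None` defaults rather than concrete values.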
--------- Co-authored-by: IAvecilla Co-authored-by: Javier Chatruc --- core/bin/genesis_generator/src/main.rs | 1 + .../system-constants-generator/src/utils.rs | 3 + core/lib/config/src/configs/chain.rs | 3 + core/lib/config/src/configs/genesis.rs | 2 + core/lib/config/src/testonly.rs | 2 + core/lib/constants/src/contracts.rs | 5 + core/lib/contracts/src/lib.rs | 22 +- ...2dba1d37493d4c1db4b957cfec476a791b32.json} | 26 +- ...bef013ad12b66bdca7251be2af21e98fe870.json} | 26 +- ...dfcb3522a0772ac3d2476652df4216d823e04.json | 31 ++ ...7ca0e4b83f50724a0b18256aafab69909a98.json} | 28 +- ...12784694d2f8fe9a67159ad4c7abc2279ca6.json} | 16 +- ...7f64d0c9620506bb41890548181bccca9ee5.json} | 12 +- ...fe9b944b2dd80eb56965a5874ce3168e8c5e.json} | 14 +- ...9749bd5fc78b09578589c26d3017cc6bd192.json} | 30 +- ...70e7a5fe02b60d5d23e4d153190138112c5b.json} | 12 +- ...9fabba23fa52a17a54146931476681edbd24.json} | 26 +- ...76eb8a6a508aea04d93342df50dd9745c361.json} | 26 +- ...e0c8a39a49d1cea78ef771d4c64fbbc16756.json} | 18 +- ...5e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json} | 10 +- ...b2d1aa6b398c6981c8d4f35e499f42b01731.json} | 26 +- ...e82c5aa84c85b9486e81261d17901a786917.json} | 5 +- ...65162bce330edd9b16587e8f9fdab17a8456.json} | 12 +- ...9f1fd7606fdf3e6d4c882cea76eb579c24a93.json | 30 -- ...fc5d8943e65a30508898d90a098432050bc7.json} | 26 +- ...21ca4cc94c38a7d18023ef1e89de484e60d8.json} | 18 +- ...14f15fd7a5fa3d7f7bc56906817c70b04950.json} | 5 +- ...b243bb067514b67daaf084353e5ada15b23a.json} | 10 +- .../20240911161714_evm-simulator.down.sql | 3 + .../20240911161714_evm-simulator.up.sql | 4 + core/lib/dal/src/blocks_dal.rs | 59 +++ core/lib/dal/src/blocks_web3_dal.rs | 4 +- core/lib/dal/src/consensus/mod.rs | 2 +- core/lib/dal/src/factory_deps_dal.rs | 18 + core/lib/dal/src/models/storage_block.rs | 16 + .../src/models/storage_protocol_version.rs | 10 + core/lib/dal/src/models/storage_sync.rs | 7 + .../lib/dal/src/models/storage_transaction.rs | 26 +- core/lib/dal/src/protocol_versions_dal.rs | 12 +- .../lib/dal/src/protocol_versions_web3_dal.rs | 1 + core/lib/dal/src/sync_dal.rs | 1 + core/lib/dal/src/tests/mod.rs | 2 + core/lib/dal/src/transactions_web3_dal.rs | 60 ++- core/lib/env_config/src/chain.rs | 1 + core/lib/env_config/src/genesis.rs | 1 + .../src/multicall3/mod.rs | 2 + core/lib/multivm/Cargo.toml | 2 +- .../src/glue/types/vm/vm_block_result.rs | 6 + .../types/vm/vm_partial_execution_result.rs | 3 + .../glue/types/vm/vm_tx_execution_result.rs | 5 + .../vm_1_4_1/implementation/execution.rs | 1 + .../vm_1_4_2/implementation/execution.rs | 1 + .../implementation/execution.rs | 1 + .../vm_fast/tests/get_used_contracts.rs | 20 +- .../versions/vm_fast/tests/require_eip712.rs | 4 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 1 + .../vm_latest/implementation/execution.rs | 8 + .../versions/vm_latest/implementation/tx.rs | 7 +- .../vm_latest/old_vm/oracles/decommitter.rs | 76 ++-- .../versions/vm_latest/tests/evm_emulator.rs | 76 ++++ .../vm_latest/tests/get_used_contracts.rs | 19 +- .../vm_latest/tests/l1_tx_execution.rs | 2 +- .../src/versions/vm_latest/tests/mod.rs | 1 + .../versions/vm_latest/tests/nonce_holder.rs | 24 +- .../src/versions/vm_latest/tests/refunds.rs | 2 +- .../vm_latest/tests/require_eip712.rs | 4 +- .../vm_latest/tracers/default_tracers.rs | 18 +- .../vm_latest/tracers/evm_deploy_tracer.rs | 105 +++++ .../src/versions/vm_latest/tracers/mod.rs | 2 + .../types/internals/transaction_data.rs | 19 +- .../vm_latest/types/internals/vm_state.rs | 20 +- .../src/versions/vm_latest/utils/mod.rs | 
53 ++- .../vm_latest/utils/transaction_encoding.rs | 4 +- core/lib/multivm/src/versions/vm_latest/vm.rs | 18 +- .../implementation/execution.rs | 1 + .../implementation/execution.rs | 1 + core/lib/protobuf_config/src/chain.rs | 1 + core/lib/protobuf_config/src/genesis.rs | 7 + core/lib/protobuf_config/src/lib.rs | 11 +- .../src/proto/config/genesis.proto | 1 + core/lib/prover_interface/src/inputs.rs | 2 + core/lib/tee_verifier/src/lib.rs | 1 + core/lib/types/src/api/mod.rs | 16 +- core/lib/types/src/commitment/mod.rs | 11 +- core/lib/types/src/commitment/tests/mod.rs | 5 + .../post_boojum_1_5_0_test_with_evm.json | 359 ++++++++++++++++++ core/lib/types/src/l2/mod.rs | 4 +- core/lib/types/src/lib.rs | 13 +- core/lib/types/src/protocol_upgrade.rs | 32 +- core/lib/types/src/storage/mod.rs | 11 +- core/lib/types/src/system_contracts.rs | 51 ++- core/lib/types/src/transaction_request.rs | 56 ++- core/lib/types/src/tx/execute.rs | 26 +- core/lib/vm_executor/src/oneshot/block.rs | 24 +- core/lib/vm_executor/src/oneshot/contracts.rs | 16 +- core/lib/vm_executor/src/oneshot/mock.rs | 1 + core/lib/vm_executor/src/storage.rs | 6 +- .../lib/vm_interface/src/storage/in_memory.rs | 2 +- .../src/types/outputs/execution_result.rs | 6 + .../src/types/outputs/finished_l1batch.rs | 1 + .../src/execution_sandbox/execute.rs | 8 +- .../api_server/src/execution_sandbox/mod.rs | 19 +- .../api_server/src/execution_sandbox/tests.rs | 49 ++- .../src/tx_sender/gas_estimation.rs | 13 +- core/node/api_server/src/tx_sender/mod.rs | 10 +- .../api_server/src/tx_sender/tests/call.rs | 2 +- .../src/tx_sender/tests/gas_estimation.rs | 18 +- .../api_server/src/tx_sender/tests/mod.rs | 5 + .../api_server/src/tx_sender/tests/send_tx.rs | 21 +- .../api_server/src/web3/namespaces/debug.rs | 6 +- .../node/api_server/src/web3/namespaces/en.rs | 4 + .../api_server/src/web3/namespaces/eth.rs | 22 +- .../api_server/src/web3/namespaces/zks.rs | 38 +- core/node/api_server/src/web3/state.rs | 13 +- core/node/api_server/src/web3/tests/vm.rs | 84 +++- core/node/commitment_generator/src/lib.rs | 1 + core/node/consensus/src/batch.rs | 2 +- core/node/consensus/src/storage/testonly.rs | 2 +- core/node/eth_sender/src/eth_tx_aggregator.rs | 56 ++- core/node/eth_sender/src/tester.rs | 10 +- core/node/eth_sender/src/tests.rs | 154 ++++++-- core/node/eth_sender/src/zksync_functions.rs | 4 + core/node/eth_watch/src/tests.rs | 18 +- core/node/genesis/src/lib.rs | 12 +- core/node/genesis/src/utils.rs | 3 +- core/node/node_sync/src/external_io.rs | 15 + core/node/node_sync/src/genesis.rs | 23 +- core/node/node_sync/src/tests.rs | 8 + core/node/proof_data_handler/src/tests.rs | 1 + core/node/state_keeper/src/executor/mod.rs | 2 +- .../state_keeper/src/executor/tests/tester.rs | 2 +- core/node/state_keeper/src/io/persistence.rs | 3 +- core/node/state_keeper/src/io/tests/mod.rs | 6 +- core/node/state_keeper/src/io/tests/tester.rs | 2 +- core/node/state_keeper/src/keeper.rs | 9 +- .../state_keeper/src/seal_criteria/mod.rs | 3 + core/node/state_keeper/src/testonly/mod.rs | 1 + .../src/testonly/test_batch_executor.rs | 2 + core/node/state_keeper/src/tests/mod.rs | 1 + .../src/updates/l1_batch_updates.rs | 3 + .../src/updates/l2_block_updates.rs | 38 +- core/node/state_keeper/src/updates/mod.rs | 8 +- core/node/test_utils/src/lib.rs | 3 + core/node/vm_runner/src/impls/bwip.rs | 18 + core/node/vm_runner/src/tests/mod.rs | 1 + .../vm_runner/src/tests/output_handler.rs | 1 + core/tests/test_account/src/lib.rs | 7 +- .../contracts/mock-evm/mock-evm.sol | 92 +++++ 
etc/env/file_based/genesis.yaml | 2 + prover/Cargo.lock | 1 + .../src/rounds/basic_circuits/utils.rs | 7 +- .../forge_interface/deploy_ecosystem/input.rs | 2 + 152 files changed, 2189 insertions(+), 487 deletions(-) rename core/lib/dal/.sqlx/{query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json => query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json} (73%) rename core/lib/dal/.sqlx/{query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json => query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json} (74%) create mode 100644 core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json rename core/lib/dal/.sqlx/{query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json => query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json} (83%) rename core/lib/dal/.sqlx/{query-778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d.json => query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json} (81%) rename core/lib/dal/.sqlx/{query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json => query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json} (74%) rename core/lib/dal/.sqlx/{query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json => query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json} (82%) rename core/lib/dal/.sqlx/{query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json => query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json} (80%) rename core/lib/dal/.sqlx/{query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json => query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json} (62%) rename core/lib/dal/.sqlx/{query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json => query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json} (80%) rename core/lib/dal/.sqlx/{query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json => query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json} (69%) rename core/lib/dal/.sqlx/{query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json => query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json} (81%) rename core/lib/dal/.sqlx/{query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json => query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json} (73%) rename core/lib/dal/.sqlx/{query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json => query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json} (80%) rename core/lib/dal/.sqlx/{query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json => query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json} (54%) rename core/lib/dal/.sqlx/{query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json => query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json} (52%) delete mode 100644 core/lib/dal/.sqlx/query-c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93.json rename core/lib/dal/.sqlx/{query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json => query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json} (78%) rename 
core/lib/dal/.sqlx/{query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json => query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json} (83%) rename core/lib/dal/.sqlx/{query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json => query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json} (52%) rename core/lib/dal/.sqlx/{query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json => query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json} (71%) create mode 100644 core/lib/dal/migrations/20240911161714_evm-simulator.down.sql create mode 100644 core/lib/dal/migrations/20240911161714_evm-simulator.up.sql create mode 100644 core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs create mode 100644 core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs create mode 100644 core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json create mode 100644 etc/contracts-test-data/contracts/mock-evm/mock-evm.sol diff --git a/core/bin/genesis_generator/src/main.rs b/core/bin/genesis_generator/src/main.rs index 4f8200b3af7..2a96cdc6c6c 100644 --- a/core/bin/genesis_generator/src/main.rs +++ b/core/bin/genesis_generator/src/main.rs @@ -87,6 +87,7 @@ async fn generate_new_config( genesis_commitment: None, bootloader_hash: Some(base_system_contracts.bootloader), default_aa_hash: Some(base_system_contracts.default_aa), + evm_emulator_hash: base_system_contracts.evm_emulator, ..genesis_config }; diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 43ac9841c40..8d36f734467 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -71,12 +71,14 @@ pub static GAS_TEST_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = Lazy::new(|| { let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); let hash = hash_bytecode(&bytecode); + BaseSystemContracts { default_aa: SystemContractCode { code: bytes_to_be_words(bytecode), hash, }, bootloader, + evm_emulator: None, } }); @@ -221,6 +223,7 @@ pub(super) fn execute_internal_transfer_test() -> u32 { let base_system_smart_contracts = BaseSystemContracts { bootloader, default_aa, + evm_emulator: None, }; let system_env = SystemEnv { diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 7e33f6964bb..c117064dbc4 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -138,6 +138,8 @@ pub struct StateKeeperConfig { pub bootloader_hash: Option<H256>, #[deprecated(note = "Use GenesisConfig::default_aa_hash instead")] pub default_aa_hash: Option<H256>, + #[deprecated(note = "Use GenesisConfig::evm_emulator_hash instead")] + pub evm_emulator_hash: Option<H256>, #[deprecated(note = "Use GenesisConfig::l1_batch_commit_data_generator_mode instead")] #[serde(default)] pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, @@ -178,6 +180,7 @@ impl StateKeeperConfig { protective_reads_persistence_enabled: true, bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, l1_batch_commit_data_generator_mode: L1BatchCommitmentMode::Rollup, } } diff --git a/core/lib/config/src/configs/genesis.rs b/core/lib/config/src/configs/genesis.rs index 6c4bacc3a6e..9e1ffbd87cb 100644 --- a/core/lib/config/src/configs/genesis.rs +++ b/core/lib/config/src/configs/genesis.rs @@ -17,6 +17,7 @@ pub struct GenesisConfig { pub genesis_commitment: Option<H256>,
pub bootloader_hash: Option<H256>, pub default_aa_hash: Option<H256>, + pub evm_emulator_hash: Option<H256>, pub l1_chain_id: L1ChainId, pub sl_chain_id: Option<SLChainId>, pub l2_chain_id: L2ChainId, @@ -49,6 +50,7 @@ impl GenesisConfig { genesis_commitment: Some(H256::repeat_byte(0x17)), bootloader_hash: Default::default(), default_aa_hash: Default::default(), + evm_emulator_hash: Default::default(), l1_chain_id: L1ChainId(9), sl_chain_id: None, protocol_version: Some(ProtocolSemanticVersion { diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 1d90034410b..a6ff30e04a9 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -192,6 +192,7 @@ impl Distribution for EncodeDist { fee_account_addr: None, bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, l1_batch_commit_data_generator_mode: Default::default(), } } @@ -732,6 +733,7 @@ impl Distribution for EncodeDist { genesis_commitment: Some(rng.gen()), bootloader_hash: Some(rng.gen()), default_aa_hash: Some(rng.gen()), + evm_emulator_hash: Some(rng.gen()), fee_account: rng.gen(), l1_chain_id: L1ChainId(self.sample(rng)), sl_chain_id: None, diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs index 73b4a0ffaaa..fe37ef6c69f 100644 --- a/core/lib/constants/src/contracts.rs +++ b/core/lib/constants/src/contracts.rs @@ -130,6 +130,11 @@ pub const CODE_ORACLE_ADDRESS: Address = H160([ 0x00, 0x00, 0x80, 0x12, ]); +pub const EVM_GAS_MANAGER_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x80, 0x13, +]); + /// Note, that the `Create2Factory` is explicitly deployed on a non-system-contract address. pub const CREATE2_FACTORY_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index a72b5c95d1b..fb28693887a 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -293,6 +293,7 @@ fn read_zbin_bytecode_from_path(bytecode_path: PathBuf) -> Vec<u8> { fs::read(&bytecode_path) .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {:?}: {}", bytecode_path, err)) } + /// Hash of code and code which consists of 32 bytes words #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SystemContractCode { @@ -304,18 +305,23 @@ pub struct BaseSystemContracts { pub bootloader: SystemContractCode, pub default_aa: SystemContractCode, + /// Never filled in constructors for now. The only way to get the EVM emulator enabled is to call [`Self::with_evm_emulator()`]. + pub evm_emulator: Option<SystemContractCode>, } #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq)] pub struct BaseSystemContractsHashes { pub bootloader: H256, pub default_aa: H256, + pub evm_emulator: Option<H256>, } impl PartialEq for BaseSystemContracts { fn eq(&self, other: &Self) -> bool { self.bootloader.hash == other.bootloader.hash && self.default_aa.hash == other.default_aa.hash + && self.evm_emulator.as_ref().map(|contract| contract.hash) + == other.evm_emulator.as_ref().map(|contract| contract.hash) } } @@ -339,14 +345,27 @@ impl BaseSystemContracts { BaseSystemContracts { bootloader, default_aa, + evm_emulator: None, } } - // BaseSystemContracts with proved bootloader - for handling transactions. + + /// BaseSystemContracts with proved bootloader - for handling transactions.
pub fn load_from_disk() -> Self { let bootloader_bytecode = read_proved_batch_bootloader_bytecode(); BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + /// Loads the latest EVM emulator for these base system contracts. Logically, it only makes sense to do for the latest protocol version. + pub fn with_latest_evm_emulator(mut self) -> Self { + let bytecode = read_sys_contract_bytecode("", "EvmInterpreter", ContractLanguage::Yul); + let hash = hash_bytecode(&bytecode); + self.evm_emulator = Some(SystemContractCode { + code: bytes_to_be_words(bytecode), + hash, + }); + self + } + /// BaseSystemContracts with playground bootloader - used for handling eth_calls. pub fn playground() -> Self { let bootloader_bytecode = read_playground_batch_bootloader_bytecode(); @@ -475,6 +494,7 @@ impl BaseSystemContracts { BaseSystemContractsHashes { bootloader: self.bootloader.hash, default_aa: self.default_aa.hash, + evm_emulator: self.evm_emulator.as_ref().map(|contract| contract.hash), } } } diff --git a/core/lib/dal/.sqlx/query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json b/core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json similarity index 73% rename from core/lib/dal/.sqlx/query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json rename to core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json index 9e212249490..c93e6aef3e7 100644 --- a/core/lib/dal/.sqlx/query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json +++ b/core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments 
ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -161,6 +166,7 @@ true, true, true, + true, false, true, true, @@ -169,5 +175,5 @@ true ] }, - "hash": "ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa" + "hash": "05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32" } diff --git a/core/lib/dal/.sqlx/query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json b/core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json similarity index 74% rename from core/lib/dal/.sqlx/query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json rename to core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json index 8bf22e1b6fb..a3d356f4bea 100644 --- a/core/lib/dal/.sqlx/query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json +++ b/core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n 
l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -165,11 +170,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada" + "hash": "16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870" } diff --git a/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json b/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json new file mode 100644 index 00000000000..35c606bf22b --- /dev/null +++ b/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json @@ -0,0 +1,31 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n NOW(),\n NOW()\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int4", + "Int4", + "Bytea", + "Numeric", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Int4", + "Int8", + "Int8", + "Int8", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04" +} diff --git a/core/lib/dal/.sqlx/query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json b/core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json similarity index 83% rename from 
core/lib/dal/.sqlx/query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json rename to core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json index 93d522f5fb7..752e171f58c 100644 --- a/core/lib/dal/.sqlx/query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json +++ b/core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -90,28 +90,28 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, - "name": "protocol_version", - "type_info": "Int4" + "name": "meta_parameters_hash", + "type_info": "Bytea" }, { "ordinal": 21, - "name": "compressed_state_diffs", - "type_info": "Bytea" + "name": "protocol_version", + "type_info": "Int4" }, { "ordinal": 22, @@ -120,16 +120,21 @@ }, { "ordinal": 23, - "name": "events_queue_commitment", + "name": "compressed_state_diffs", "type_info": "Bytea" }, { "ordinal": 24, - "name": "bootloader_initial_content_commitment", + "name": "events_queue_commitment", "type_info": "Bytea" }, { "ordinal": 25, + "name": "bootloader_initial_content_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -165,8 +170,9 @@ false, true, true, + true, true ] }, - "hash": "5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb" + "hash": "4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98" } diff --git a/core/lib/dal/.sqlx/query-778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d.json b/core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json similarity index 81% rename from 
core/lib/dal/.sqlx/query-778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d.json rename to core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json index aa7d4c65a39..6f77a656072 100644 --- a/core/lib/dal/.sqlx/query-778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d.json +++ b/core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", "describe": { "columns": [ { @@ -50,21 +50,26 @@ }, { "ordinal": 9, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 10, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 11, "name": "hash", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 12, "name": "protocol_version!", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 13, "name": "fee_account_address!", "type_info": "Bytea" } @@ -85,11 +90,12 @@ true, true, true, + true, false, false, true, false ] }, - "hash": "778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d" + "hash": "51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6" } diff --git a/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json b/core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json similarity index 74% rename from core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json rename to core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json index f440a265593..b2f195c4e5c 100644 --- a/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json +++ b/core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS 
\"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -90,11 +90,16 @@ }, { "ordinal": 17, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 18, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 18, + "ordinal": 19, "name": "fee_account_address", "type_info": "Bytea" } @@ -123,8 +128,9 @@ true, true, true, + true, false ] }, - "hash": "b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad" + "hash": "7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5" } diff --git a/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json b/core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json similarity index 82% rename from core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json rename to core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json index 4a73fde57e2..28fbea09998 
100644 --- a/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json +++ b/core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -55,16 +55,21 @@ }, { "ordinal": 10, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 11, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 11, + "ordinal": 12, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 12, + "ordinal": 13, "name": "pubdata_input", "type_info": "Bytea" } @@ -86,9 +91,10 @@ true, true, true, + true, false, true ] }, - "hash": "454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd" + "hash": "7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e" } diff --git a/core/lib/dal/.sqlx/query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json b/core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json similarity index 80% rename from core/lib/dal/.sqlx/query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json rename to core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json index a9eae0bd01d..8f41bf3b491 100644 --- a/core/lib/dal/.sqlx/query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json +++ b/core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON 
commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -90,28 +90,28 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, - "name": "protocol_version", - "type_info": "Int4" + "name": "meta_parameters_hash", + "type_info": "Bytea" }, { "ordinal": 21, - "name": "system_logs", - "type_info": "ByteaArray" + "name": "protocol_version", + "type_info": "Int4" }, { "ordinal": 22, @@ -120,16 +120,21 @@ }, { "ordinal": 23, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -161,12 +166,13 @@ true, true, true, - false, true, true, + false, + true, true, true ] }, - "hash": "60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7" + "hash": "860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192" } diff --git a/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json b/core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json similarity index 62% rename from core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json rename to core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json index 162c722add9..d944b6abf9e 100644 --- a/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json +++ b/core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_versions.evm_emulator_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -25,11 +25,16 @@ }, { "ordinal": 4, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 5, "name": "patch", "type_info": "Int4" }, { - "ordinal": 5, + "ordinal": 6, "name": "snark_wrapper_vk_hash", "type_info": "Bytea" } @@ -44,9 +49,10 @@ false, false, false, + true, false, false ] }, - "hash": "c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7" + "hash": "89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b" } diff --git 
a/core/lib/dal/.sqlx/query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json b/core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json similarity index 80% rename from core/lib/dal/.sqlx/query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json rename to core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json index a96d94a5c55..9eb67bb8299 100644 --- a/core/lib/dal/.sqlx/query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json +++ b/core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -160,11 +165,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e" + "hash": "9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24" } diff 
--git a/core/lib/dal/.sqlx/query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json b/core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json similarity index 69% rename from core/lib/dal/.sqlx/query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json rename to core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json index 671b56760d6..55d56cc4ab0 100644 --- a/core/lib/dal/.sqlx/query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json +++ b/core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": 
"aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -166,11 +171,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3" + "hash": "9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361" } diff --git a/core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json b/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json similarity index 81% rename from core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json rename to core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json index 26a3458bff9..c8c438295e4 100644 --- a/core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json +++ b/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -65,26 +65,31 @@ }, { "ordinal": 12, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 13, + "ordinal": 14, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 14, + "ordinal": 15, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 16, "name": "gas_limit", "type_info": "Int8" }, { - "ordinal": 16, + "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" } @@ -106,11 +111,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded" + "hash": "a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756" } diff --git 
a/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json b/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json similarity index 73% rename from core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json rename to core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json index 81ae6c590f9..28ffcc5ae46 100644 --- a/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json +++ b/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", + "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", "describe": { "columns": [ { @@ -82,6 +82,11 @@ "ordinal": 15, "name": "default_aa_code_hash", "type_info": "Bytea" + }, + { + "ordinal": 16, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" } ], "parameters": { @@ -105,8 +110,9 @@ false, true, true, + true, true ] }, - "hash": 
"2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1" + "hash": "a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6" } diff --git a/core/lib/dal/.sqlx/query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json b/core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json similarity index 80% rename from core/lib/dal/.sqlx/query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json rename to core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json index 0b1daaa10e5..6588ee2f11e 100644 --- a/core/lib/dal/.sqlx/query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json +++ b/core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -164,11 +169,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d" + "hash": 
"b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731" } diff --git a/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json b/core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json similarity index 54% rename from core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json rename to core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json index 8c41c0ab976..9d9fa72595d 100644 --- a/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json +++ b/core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW())\n ON CONFLICT DO NOTHING\n ", + "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, NOW())\n ON CONFLICT DO NOTHING\n ", "describe": { "columns": [], "parameters": { @@ -9,10 +9,11 @@ "Int8", "Bytea", "Bytea", + "Bytea", "Bytea" ] }, "nullable": [] }, - "hash": "048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016" + "hash": "b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917" } diff --git a/core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json b/core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json similarity index 52% rename from core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json rename to core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json index eba36994fb3..2689716c38a 100644 --- a/core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json +++ b/core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n bootloader_code_hash,\n default_account_code_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", + "query": "\n SELECT\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", "describe": { "columns": [ { @@ -12,6 +12,11 @@ "ordinal": 1, "name": "default_account_code_hash", "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" } ], "parameters": { @@ -21,8 +26,9 @@ }, "nullable": [ false, - false + false, + true ] }, - "hash": "5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6" + "hash": "bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456" } diff --git a/core/lib/dal/.sqlx/query-c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93.json b/core/lib/dal/.sqlx/query-c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93.json deleted file mode 100644 index 09e34a7e33a..00000000000 --- a/core/lib/dal/.sqlx/query-c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n 
fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - "Bytea", - "Int4", - "Int4", - "Bytea", - "Numeric", - "Int8", - "Int8", - "Int8", - "Bytea", - "Bytea", - "Int4", - "Int8", - "Int8", - "Int8", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93" -} diff --git a/core/lib/dal/.sqlx/query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json b/core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json similarity index 78% rename from core/lib/dal/.sqlx/query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json rename to core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json index 10e2a76618f..032cf987fc0 100644 --- a/core/lib/dal/.sqlx/query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json +++ b/core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": 
"protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -162,11 +167,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862" + "hash": "da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7" } diff --git a/core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json b/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json similarity index 83% rename from core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json rename to core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json index 74a6187e644..700352c1a8b 100644 --- a/core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json +++ b/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -65,26 +65,31 @@ }, { "ordinal": 12, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 13, + "ordinal": 14, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 14, + "ordinal": 15, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 16, "name": "gas_limit", "type_info": "Int8" }, { - "ordinal": 16, + "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" } @@ -108,11 +113,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d" + "hash": "f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8" } diff --git a/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json b/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json similarity index 52% rename from core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json rename to core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json index d2c999a70d4..4fe32531a3f 100644 --- 
a/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json +++ b/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", + "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n $21,\n NOW(),\n NOW()\n )\n ", "describe": { "columns": [], "parameters": { @@ -19,6 +19,7 @@ "Jsonb", "Bytea", "Bytea", + "Bytea", "Int4", "ByteaArray", "Int8Array", @@ -29,5 +30,5 @@ }, "nullable": [] }, - "hash": "9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e" + "hash": "f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950" } diff --git a/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json b/core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json similarity index 71% rename from core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json rename to core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json index 5e9051587bb..1b50a750dac 100644 --- a/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json +++ b/core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n id AS \"minor!\",\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", + "query": "\n SELECT\n id AS \"minor!\",\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash,\n upgrade_tx_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", "describe": { "columns": [ { @@ -25,6 +25,11 @@ }, { "ordinal": 4, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 5, "name": "upgrade_tx_hash", "type_info": "Bytea" } @@ -39,8 +44,9 @@ false, false, false, + true, true ] }, - "hash": "5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355" + "hash": "f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a" } diff --git a/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql 
b/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql new file mode 100644 index 00000000000..74ac4e60383 --- /dev/null +++ b/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE protocol_versions DROP COLUMN IF EXISTS evm_emulator_code_hash; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS evm_emulator_code_hash; +ALTER TABLE miniblocks DROP COLUMN IF EXISTS evm_emulator_code_hash; diff --git a/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql b/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql new file mode 100644 index 00000000000..43ae361e7ee --- /dev/null +++ b/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql @@ -0,0 +1,4 @@ +ALTER TABLE protocol_versions ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; +-- We need this column in `miniblocks` as well in order to store data for the pending L1 batch +ALTER TABLE miniblocks ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 5b351511a06..59cc557f36e 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -325,6 +325,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -366,6 +367,7 @@ impl BlocksDal<'_, '_> { used_contract_hashes, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, system_logs, pubdata_input @@ -610,6 +612,7 @@ impl BlocksDal<'_, '_> { used_contract_hashes, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, system_logs, storage_refunds, @@ -641,6 +644,7 @@ impl BlocksDal<'_, '_> { $18, $19, $20, + $21, NOW(), NOW() ) @@ -659,6 +663,11 @@ impl BlocksDal<'_, '_> { used_contract_hashes, header.base_system_contracts_hashes.bootloader.as_bytes(), header.base_system_contracts_hashes.default_aa.as_bytes(), + header + .base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), header.protocol_version.map(|v| v as i32), &system_logs, &storage_refunds, @@ -703,6 +712,7 @@ impl BlocksDal<'_, '_> { gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, @@ -730,6 +740,7 @@ impl BlocksDal<'_, '_> { $15, $16, $17, + $18, NOW(), NOW() ) @@ -752,6 +763,11 @@ impl BlocksDal<'_, '_> { .base_system_contracts_hashes .default_aa .as_bytes(), + l2_block_header + .base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), l2_block_header.protocol_version.map(|v| v as i32), i64::from(l2_block_header.virtual_blocks), l2_block_header.batch_fee_input.fair_pubdata_price() as i64, @@ -780,6 +796,7 @@ impl BlocksDal<'_, '_> { gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, @@ -820,6 +837,7 @@ impl BlocksDal<'_, '_> { gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, @@ -1038,6 +1056,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1224,6 +1243,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, 
default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1304,6 +1324,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1377,6 +1398,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1504,6 +1526,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1568,6 +1591,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1646,6 +1670,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -2695,6 +2720,40 @@ mod tests { .is_err()); } + #[tokio::test] + async fn persisting_evm_emulator_hash() { + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let mut l2_block_header = create_l2_block_header(1); + l2_block_header.base_system_contracts_hashes.evm_emulator = Some(H256::repeat_byte(0x23)); + conn.blocks_dal() + .insert_l2_block(&l2_block_header) + .await + .unwrap(); + + let mut fetched_block_header = conn + .blocks_dal() + .get_last_sealed_l2_block_header() + .await + .unwrap() + .expect("no block"); + // Batch fee input isn't restored exactly + fetched_block_header.batch_fee_input = l2_block_header.batch_fee_input; + + assert_eq!(fetched_block_header, l2_block_header); + // ...and a sanity check just to be sure + assert!(fetched_block_header + .base_system_contracts_hashes + .evm_emulator + .is_some()); + } + #[tokio::test] async fn loading_l1_batch_header() { let pool = ConnectionPool::::test_pool().await; diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 904e167d1a6..c1a1e6765b6 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -673,6 +673,7 @@ impl BlocksWeb3Dal<'_, '_> { miniblocks.fair_pubdata_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, miniblocks.protocol_version, miniblocks.fee_account_address FROM @@ -744,7 +745,8 @@ impl BlocksWeb3Dal<'_, '_> { mb.l2_fair_gas_price, mb.fair_pubdata_price, l1_batches.bootloader_code_hash, - l1_batches.default_aa_code_hash + l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash FROM l1_batches INNER JOIN mb ON TRUE diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index f01655d56a9..876dfe14bed 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -244,7 +244,7 @@ impl ProtoRepr for proto::TransactionV25 { }, T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), }; - tx.try_into() + Transaction::from_abi(tx, true) } fn build(tx: &Self::Type) -> Self { diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs index 
36dfaa1a466..857e2973ae3 100644 --- a/core/lib/dal/src/factory_deps_dal.rs +++ b/core/lib/dal/src/factory_deps_dal.rs @@ -94,6 +94,7 @@ impl FactoryDepsDal<'_, '_> { &mut self, bootloader_hash: H256, default_aa_hash: H256, + evm_emulator_hash: Option, ) -> anyhow::Result { let bootloader_bytecode = self .get_sealed_factory_dep(bootloader_hash) @@ -115,9 +116,26 @@ impl FactoryDepsDal<'_, '_> { code: bytes_to_be_words(default_aa_bytecode), hash: default_aa_hash, }; + + let evm_emulator_code = if let Some(evm_emulator_hash) = evm_emulator_hash { + let evm_emulator_bytecode = self + .get_sealed_factory_dep(evm_emulator_hash) + .await + .context("failed loading EVM emulator code")? + .with_context(|| format!("EVM emulator code with hash {evm_emulator_hash:?} should be present in the database"))?; + + Some(SystemContractCode { + code: bytes_to_be_words(evm_emulator_bytecode), + hash: evm_emulator_hash, + }) + } else { + None + }; + Ok(BaseSystemContracts { bootloader: bootloader_code, default_aa: default_aa_code, + evm_emulator: evm_emulator_code, }) } diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 34e14387ca6..7e9a9eca9d4 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -44,6 +44,7 @@ pub(crate) struct StorageL1BatchHeader { pub used_contract_hashes: serde_json::Value, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub protocol_version: Option, // `system_logs` are introduced as part of boojum and will be absent in all batches generated prior to boojum. @@ -82,6 +83,7 @@ impl StorageL1BatchHeader { base_system_contracts_hashes: convert_base_system_contracts_hashes( self.bootloader_code_hash, self.default_aa_code_hash, + self.evm_emulator_code_hash, ), system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), protocol_version: self @@ -103,6 +105,7 @@ fn convert_l2_to_l1_logs(raw_logs: Vec>) -> Vec { fn convert_base_system_contracts_hashes( bootloader_code_hash: Option>, default_aa_code_hash: Option>, + evm_emulator_code_hash: Option>, ) -> BaseSystemContractsHashes { BaseSystemContractsHashes { bootloader: bootloader_code_hash @@ -111,6 +114,7 @@ fn convert_base_system_contracts_hashes( default_aa: default_aa_code_hash .map(|hash| H256::from_slice(&hash)) .expect("should not be none"), + evm_emulator: evm_emulator_code_hash.map(|hash| H256::from_slice(&hash)), } } @@ -134,6 +138,7 @@ pub(crate) struct StorageL1Batch { pub zkporter_is_available: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub l2_to_l1_messages: Vec>, pub l2_l1_merkle_root: Option>, @@ -177,6 +182,7 @@ impl StorageL1Batch { base_system_contracts_hashes: convert_base_system_contracts_hashes( self.bootloader_code_hash, self.default_aa_code_hash, + self.evm_emulator_code_hash, ), system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), protocol_version: self @@ -240,6 +246,10 @@ impl TryFrom for L1BatchMetadata { .default_aa_code_hash .ok_or(L1BatchMetadataError::Incomplete("default_aa_code_hash"))?, ), + evm_emulator_code_hash: batch + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), protocol_version: batch .protocol_version .map(|v| (v as u16).try_into().unwrap()), @@ -275,6 +285,7 @@ pub(crate) struct StorageBlockDetails { pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub 
evm_emulator_code_hash: Option>, pub fee_account_address: Vec, pub protocol_version: Option, } @@ -320,6 +331,7 @@ impl From for api::BlockDetails { base_system_contracts_hashes: convert_base_system_contracts_hashes( details.bootloader_code_hash, details.default_aa_code_hash, + details.evm_emulator_code_hash, ), }; api::BlockDetails { @@ -352,6 +364,7 @@ pub(crate) struct StorageL1BatchDetails { pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, } impl From for api::L1BatchDetails { @@ -395,6 +408,7 @@ impl From for api::L1BatchDetails { base_system_contracts_hashes: convert_base_system_contracts_hashes( details.bootloader_code_hash, details.default_aa_code_hash, + details.evm_emulator_code_hash, ), }; api::L1BatchDetails { @@ -418,6 +432,7 @@ pub(crate) struct StorageL2BlockHeader { // L2 gas price assumed in the corresponding batch pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub protocol_version: Option, pub fair_pubdata_price: Option, @@ -471,6 +486,7 @@ impl From for L2BlockHeader { base_system_contracts_hashes: convert_base_system_contracts_hashes( row.bootloader_code_hash, row.default_aa_code_hash, + row.evm_emulator_code_hash, ), gas_per_pubdata_limit: row.gas_per_pubdata_limit as u64, protocol_version, diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs index e53bf7b9d0a..a833236a7b6 100644 --- a/core/lib/dal/src/models/storage_protocol_version.rs +++ b/core/lib/dal/src/models/storage_protocol_version.rs @@ -16,6 +16,7 @@ pub struct StorageProtocolVersion { pub snark_wrapper_vk_hash: Vec, pub bootloader_code_hash: Vec, pub default_account_code_hash: Vec, + pub evm_emulator_code_hash: Option>, } pub(crate) fn protocol_version_from_storage( @@ -34,6 +35,10 @@ pub(crate) fn protocol_version_from_storage( base_system_contracts_hashes: BaseSystemContractsHashes { bootloader: H256::from_slice(&storage_version.bootloader_code_hash), default_aa: H256::from_slice(&storage_version.default_account_code_hash), + evm_emulator: storage_version + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), }, tx, } @@ -45,6 +50,7 @@ pub struct StorageApiProtocolVersion { pub timestamp: i64, pub bootloader_code_hash: Vec, pub default_account_code_hash: Vec, + pub evm_emulator_code_hash: Option>, pub upgrade_tx_hash: Option>, } @@ -60,6 +66,10 @@ impl From for api::ProtocolVersion { storage_protocol_version.timestamp as u64, H256::from_slice(&storage_protocol_version.bootloader_code_hash), H256::from_slice(&storage_protocol_version.default_account_code_hash), + storage_protocol_version + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), l2_system_upgrade_tx_hash, ) } diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 688a6f99790..cf7b76d8163 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -22,6 +22,7 @@ pub(crate) struct StorageSyncBlock { pub fair_pubdata_price: Option, pub bootloader_code_hash: Option>, pub default_aa_code_hash: Option>, + pub evm_emulator_code_hash: Option>, pub fee_account_address: Vec, pub protocol_version: i32, pub virtual_blocks: i64, @@ -75,6 +76,12 @@ impl TryFrom for SyncBlock { .decode_column("bootloader_code_hash")?, default_aa: parse_h256_opt(block.default_aa_code_hash.as_deref()) .decode_column("default_aa_code_hash")?, + 
evm_emulator: block + .evm_emulator_code_hash + .as_deref() + .map(parse_h256) + .transpose() + .decode_column("evm_emulator_code_hash")?, }, fee_account_address: parse_h160(&block.fee_account_address) .decode_column("fee_account_address")?, diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index bb219ee1d61..78daaebb335 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -352,6 +352,16 @@ impl From for TransactionReceipt { .index_in_block .map_or_else(Default::default, U64::from); + // For better compatibility with various clients, we never return `None` recipient address. + let to = storage_receipt + .transfer_to + .or(storage_receipt.execute_contract_address) + .and_then(|addr| { + serde_json::from_value::>(addr) + .expect("invalid address value in the database") + }) + .unwrap_or_else(Address::zero); + let block_hash = H256::from_slice(&storage_receipt.block_hash); TransactionReceipt { transaction_hash: H256::from_slice(&storage_receipt.tx_hash), @@ -361,15 +371,7 @@ impl From for TransactionReceipt { l1_batch_tx_index: storage_receipt.l1_batch_tx_index.map(U64::from), l1_batch_number: storage_receipt.l1_batch_number.map(U64::from), from: H160::from_slice(&storage_receipt.initiator_address), - to: storage_receipt - .transfer_to - .or(storage_receipt.execute_contract_address) - .map(|addr| { - serde_json::from_value::
(addr) - .expect("invalid address value in the database") - }) - // For better compatibility with various clients, we never return null. - .or_else(|| Some(Address::default())), + to: Some(to), cumulative_gas_used: Default::default(), // TODO: Should be actually calculated (SMA-1183). gas_used: { let refunded_gas: U256 = storage_receipt.refunded_gas.into(); @@ -508,6 +510,10 @@ impl StorageApiTransaction { .signature .and_then(|signature| PackedEthSignature::deserialize_packed(&signature).ok()); + let to = serde_json::from_value(self.execute_contract_address) + .ok() + .unwrap_or_default(); + // For legacy and EIP-2930 transactions it is gas price willing to be paid by the sender in wei. // For other transactions it should be the effective gas price if transaction is included in block, // otherwise this value should be set equal to the max fee per gas. @@ -528,7 +534,7 @@ impl StorageApiTransaction { block_number: self.block_number.map(|number| U64::from(number as u64)), transaction_index: self.index_in_block.map(|idx| U64::from(idx as u64)), from: Some(Address::from_slice(&self.initiator_address)), - to: Some(serde_json::from_value(self.execute_contract_address).unwrap()), + to, value: bigdecimal_to_u256(self.value), gas_price: Some(bigdecimal_to_u256(gas_price)), gas: bigdecimal_to_u256(self.gas_limit.unwrap_or_else(BigDecimal::zero)), diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 72ae811ce76..3b500e07a08 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -45,17 +45,22 @@ impl ProtocolVersionsDal<'_, '_> { timestamp, bootloader_code_hash, default_account_code_hash, + evm_emulator_code_hash, upgrade_tx_hash, created_at ) VALUES - ($1, $2, $3, $4, $5, NOW()) + ($1, $2, $3, $4, $5, $6, NOW()) ON CONFLICT DO NOTHING "#, version.minor as i32, timestamp as i64, base_system_contracts_hashes.bootloader.as_bytes(), base_system_contracts_hashes.default_aa.as_bytes(), + base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), tx_hash.as_ref().map(H256::as_bytes), ) .instrument("save_protocol_version#minor") @@ -193,7 +198,8 @@ impl ProtocolVersionsDal<'_, '_> { r#" SELECT bootloader_code_hash, - default_account_code_hash + default_account_code_hash, + evm_emulator_code_hash FROM protocol_versions WHERE @@ -212,6 +218,7 @@ impl ProtocolVersionsDal<'_, '_> { .get_base_system_contracts( H256::from_slice(&row.bootloader_code_hash), H256::from_slice(&row.default_account_code_hash), + row.evm_emulator_code_hash.as_deref().map(H256::from_slice), ) .await?; Some(contracts) @@ -232,6 +239,7 @@ impl ProtocolVersionsDal<'_, '_> { protocol_versions.timestamp, protocol_versions.bootloader_code_hash, protocol_versions.default_account_code_hash, + protocol_versions.evm_emulator_code_hash, protocol_patches.patch, protocol_patches.snark_wrapper_vk_hash FROM diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs index a3a7a162c3d..adc3957f872 100644 --- a/core/lib/dal/src/protocol_versions_web3_dal.rs +++ b/core/lib/dal/src/protocol_versions_web3_dal.rs @@ -21,6 +21,7 @@ impl ProtocolVersionsWeb3Dal<'_, '_> { timestamp, bootloader_code_hash, default_account_code_hash, + evm_emulator_code_hash, upgrade_tx_hash FROM protocol_versions diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index ec6ee0f9281..ab5684007d0 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -50,6 +50,7 @@ 
impl SyncDal<'_, '_> { miniblocks.fair_pubdata_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, + miniblocks.evm_emulator_code_hash, miniblocks.virtual_blocks, miniblocks.hash, miniblocks.protocol_version AS "protocol_version!", diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index dc672fa1f80..bf85008f7b5 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -54,6 +54,7 @@ pub(crate) fn create_l2_block_header(number: u32) -> L2BlockHeader { logs_bloom: Default::default(), } } + pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { L1BatchHeader::new( L1BatchNumber(number), @@ -61,6 +62,7 @@ pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { BaseSystemContractsHashes { bootloader: H256::repeat_byte(1), default_aa: H256::repeat_byte(42), + evm_emulator: Some(H256::repeat_byte(43)), }, ProtocolVersionId::latest(), ) diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index dcf5f25f104..c2209bb9c93 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -607,6 +607,39 @@ mod tests { ); } + #[tokio::test] + async fn getting_evm_deployment_tx() { + let connection_pool = ConnectionPool::::test_pool().await; + let mut conn = connection_pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + let mut tx = mock_l2_transaction(); + tx.execute.contract_address = None; + let tx_hash = tx.hash(); + prepare_transactions(&mut conn, vec![tx.clone()]).await; + + let fetched_tx = conn + .transactions_dal() + .get_tx_by_hash(tx_hash) + .await + .unwrap() + .expect("no transaction"); + let mut fetched_tx = L2Tx::try_from(fetched_tx).unwrap(); + assert_eq!(fetched_tx.execute.contract_address, None); + fetched_tx.raw_bytes = tx.raw_bytes.clone(); + assert_eq!(fetched_tx, tx); + + let web3_tx = conn + .transactions_web3_dal() + .get_transaction_by_position(L2BlockNumber(1), 0, L2ChainId::from(270)) + .await; + let web3_tx = web3_tx.unwrap().expect("no transaction"); + assert_eq!(web3_tx.hash, tx_hash); + assert_eq!(web3_tx.to, None); + } + #[tokio::test] async fn getting_receipts() { let connection_pool = ConnectionPool::::test_pool().await; @@ -621,7 +654,7 @@ mod tests { let tx2 = mock_l2_transaction(); let tx2_hash = tx2.hash(); - prepare_transactions(&mut conn, vec![tx1.clone(), tx2.clone()]).await; + prepare_transactions(&mut conn, vec![tx1, tx2]).await; let mut receipts = conn .transactions_web3_dal() @@ -636,6 +669,31 @@ mod tests { assert_eq!(receipts[1].transaction_hash, tx2_hash); } + #[tokio::test] + async fn getting_receipt_for_evm_deployment_tx() { + let connection_pool = ConnectionPool::::test_pool().await; + let mut conn = connection_pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let mut tx = mock_l2_transaction(); + let tx_hash = tx.hash(); + tx.execute.contract_address = None; + prepare_transactions(&mut conn, vec![tx]).await; + + let receipts = conn + .transactions_web3_dal() + .get_transaction_receipts(&[tx_hash]) + .await + .unwrap(); + assert_eq!(receipts.len(), 1); + let receipt = receipts.into_iter().next().unwrap(); + assert_eq!(receipt.transaction_hash, tx_hash); + assert_eq!(receipt.to, Some(Address::zero())); + } + #[tokio::test] async fn getting_l2_block_transactions() { let 
connection_pool = ConnectionPool::::test_pool().await; diff --git a/core/lib/env_config/src/chain.rs b/core/lib/env_config/src/chain.rs index a25c593bd88..a125f331496 100644 --- a/core/lib/env_config/src/chain.rs +++ b/core/lib/env_config/src/chain.rs @@ -102,6 +102,7 @@ mod tests { default_aa_hash: Some(hash( "0x0100055b041eb28aff6e3a6e0f37c31fd053fc9ef142683b05e5f0aee6934066", )), + evm_emulator_hash: None, l1_batch_commit_data_generator_mode, max_circuits_per_batch: 24100, protective_reads_persistence_enabled: true, diff --git a/core/lib/env_config/src/genesis.rs b/core/lib/env_config/src/genesis.rs index bf30fd4cc33..55c79eceb50 100644 --- a/core/lib/env_config/src/genesis.rs +++ b/core/lib/env_config/src/genesis.rs @@ -68,6 +68,7 @@ impl FromEnv for GenesisConfig { genesis_commitment: contracts_config.genesis_batch_commitment, bootloader_hash: state_keeper.bootloader_hash, default_aa_hash: state_keeper.default_aa_hash, + evm_emulator_hash: state_keeper.evm_emulator_hash, // TODO(EVM-676): for now, the settlement layer is always the same as the L1 network l1_chain_id: L1ChainId(network_config.network.chain_id().0), sl_chain_id: Some(network_config.network.chain_id()), diff --git a/core/lib/l1_contract_interface/src/multicall3/mod.rs b/core/lib/l1_contract_interface/src/multicall3/mod.rs index 7d922668f94..52df37e0430 100644 --- a/core/lib/l1_contract_interface/src/multicall3/mod.rs +++ b/core/lib/l1_contract_interface/src/multicall3/mod.rs @@ -7,6 +7,7 @@ use zksync_types::{ }; /// Multicall3 contract aggregate method input vector struct. +#[derive(Debug)] pub struct Multicall3Call { pub target: Address, pub allow_failure: bool, @@ -21,6 +22,7 @@ impl Tokenizable for Multicall3Call { self.calldata.into_token(), ]) } + fn from_token(token: Token) -> Result { let Token::Tuple(mut result_token) = token else { return Err(error(&[token], "Multicall3Call")); diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 7d604157d1a..ab418d24cd1 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -37,10 +37,10 @@ once_cell.workspace = true thiserror.workspace = true tracing.workspace = true vise.workspace = true +ethabi.workspace = true [dev-dependencies] assert_matches.workspace = true pretty_assertions.workspace = true zksync_test_account.workspace = true -ethabi.workspace = true zksync_eth_signer.workspace = true diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index ce928e652d7..50bb19938fe 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -47,6 +47,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -103,6 +104,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -158,6 +160,7 @@ impl GlueFrom for crate::interface: circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -227,6 +230,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } @@ 
-259,6 +263,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } @@ -307,6 +312,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs index 3cb61b461a4..4c4cffcc687 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs @@ -22,6 +22,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } @@ -48,6 +49,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } @@ -74,6 +76,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs index 2dc680ba77d..8978d4348ed 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs @@ -66,12 +66,14 @@ impl GlueFrom VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, } } @@ -100,12 +102,14 @@ impl logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, TxRevertReason::Halt(halt) => VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, } } @@ -129,6 +133,7 @@ impl GlueFrom { unreachable!("Halt is the only revert reason for VM 5") diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs index 2160c4b56a0..cc199fef941 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs @@ -99,6 +99,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs index d42d1880933..f6e49cd8b14 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs @@ -96,6 +96,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs index 79669eddd56..b8b939f8673 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs @@ -93,6 +93,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 62fa82f52f2..0447304f69f 
100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -31,7 +31,7 @@ fn test_get_used_contracts() { .with_execution_mode(TxExecutionMode::VerifyExecute) .build(); - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); + assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); // create and push and execute some not-empty factory deps transaction with success status // to check that `get_decommitted_hashes()` updates @@ -50,7 +50,7 @@ fn test_get_used_contracts() { // Note: `Default_AA` will be in the list of used contracts if L2 tx is used assert_eq!( vm.vm.decommitted_hashes().collect::>(), - known_bytecodes_without_aa_code(&vm.vm) + known_bytecodes_without_base_system_contracts(&vm.vm) ); // create push and execute some non-empty factory deps transaction that fails @@ -83,20 +83,26 @@ fn test_get_used_contracts() { for factory_dep in tx2.execute.factory_deps { let hash = hash_bytecode(&factory_dep); let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm).contains(&hash_to_u256)); + assert!(known_bytecodes_without_base_system_contracts(&vm.vm).contains(&hash_to_u256)); assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); } } -fn known_bytecodes_without_aa_code(vm: &Vm) -> HashSet { - let mut known_bytecodes_without_aa_code = vm +fn known_bytecodes_without_base_system_contracts(vm: &Vm) -> HashSet { + let mut known_bytecodes_without_base_system_contracts = vm .world .bytecode_cache .keys() .cloned() .collect::>(); - known_bytecodes_without_aa_code.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); - known_bytecodes_without_aa_code + known_bytecodes_without_base_system_contracts + .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); + if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { + let was_removed = + known_bytecodes_without_base_system_contracts.remove(&h256_to_u256(evm_emulator.hash)); + assert!(was_removed); + } + known_bytecodes_without_base_system_contracts } /// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index 88fe2dab5c9..b4448683cf7 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -107,7 +107,7 @@ fn test_require_eip712() { let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); l2_tx.set_input(aa_tx, hash); // Pretend that operator is malicious and sets the initiator to the AA account. 
l2_tx.common_data.initiator_address = account_abstraction.address; @@ -157,7 +157,7 @@ fn test_require_eip712() { let (aa_txn_request, aa_hash) = TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); l2_tx.set_input(encoded_tx, aa_hash); let transaction: Transaction = l2_tx.into(); diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 10be6d88b04..0c20af57e03 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -639,6 +639,7 @@ impl VmInterface for Vm { total_log_queries: 0, }, refunds: result.refunds, + new_known_factory_deps: None, } } diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index b8242fa7ca8..e70f05f85ef 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -14,6 +14,7 @@ use crate::{ circuits_capacity::circuit_statistic_from_cycles, dispatcher::TracerDispatcher, DefaultExecutionTracer, PubdataTracer, RefundsTracer, }, + utils::extract_bytecodes_marked_as_known, vm::Vm, }, HistoryMode, @@ -55,6 +56,10 @@ impl Vm { .then_some(RefundsTracer::new(self.batch_env.clone(), self.subversion)); let mut tx_tracer: DefaultExecutionTracer = DefaultExecutionTracer::new( self.system_env.default_validation_computational_gas_limit, + self.system_env + .base_system_smart_contracts + .evm_emulator + .is_some(), execution_mode, mem::take(dispatcher), self.storage.clone(), @@ -95,6 +100,8 @@ impl Vm { circuit_statistic_from_cycles(tx_tracer.circuits_tracer.statistics), ); let result = tx_tracer.result_tracer.into_result(); + let factory_deps_marked_as_known = extract_bytecodes_marked_as_known(&logs.events); + let new_known_factory_deps = self.decommit_bytecodes(&factory_deps_marked_as_known); *dispatcher = tx_tracer.dispatcher; let result = VmExecutionResultAndLogs { @@ -102,6 +109,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: Some(new_known_factory_deps), }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs index 98d71efa00f..6dd73866adf 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs @@ -59,7 +59,12 @@ impl Vm { tx: Transaction, with_compression: bool, ) { - let tx: TransactionData = tx.into(); + let use_evm_emulator = self + .system_env + .base_system_smart_contracts + .evm_emulator + .is_some(); + let tx = TransactionData::new(tx, use_evm_emulator); let overhead = tx.overhead_gas(); self.push_raw_transaction(tx, overhead, 0, with_compression); } diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs index 0315aa38327..d91fbfdb24d 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs @@ -5,9 +5,7 @@ use zk_evm_1_5_0::{ aux_structures::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, - zkevm_opcode_defs::{ - ContractCodeSha256, VersionedHashDef, 
VersionedHashHeader, VersionedHashNormalizedPreimage, - }, + zkevm_opcode_defs::{VersionedHashHeader, VersionedHashNormalizedPreimage}, }; use zksync_types::{H256, U256}; use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; @@ -166,8 +164,8 @@ impl DecommittmentProcess _monotonic_cycle_counter: u32, mut partial_query: DecommittmentQuery, ) -> anyhow::Result { - let (stored_hash, length) = stored_hash_from_query(&partial_query); - partial_query.decommitted_length = length; + let versioned_hash = VersionedCodeHash::from_query(&partial_query); + let stored_hash = versioned_hash.to_stored_hash(); if let Some(memory_page) = self .decommitted_code_hashes @@ -178,10 +176,10 @@ impl DecommittmentProcess { partial_query.is_fresh = false; partial_query.memory_page = MemoryPage(memory_page); + partial_query.decommitted_length = versioned_hash.get_preimage_length() as u16; Ok(partial_query) } else { - partial_query.is_fresh = true; if self .decommitted_code_hashes .inner() @@ -190,7 +188,9 @@ impl DecommittmentProcess { self.decommitted_code_hashes .insert(stored_hash, None, partial_query.timestamp); - } + }; + partial_query.is_fresh = true; + partial_query.decommitted_length = versioned_hash.get_preimage_length() as u16; Ok(partial_query) } @@ -204,11 +204,10 @@ impl DecommittmentProcess memory: &mut M, ) -> anyhow::Result>> { assert!(partial_query.is_fresh); - self.decommitment_requests.push((), partial_query.timestamp); - let stored_hash = stored_hash_from_query(&partial_query).0; - + let versioned_hash = VersionedCodeHash::from_query(&partial_query); + let stored_hash = versioned_hash.to_stored_hash(); // We are fetching a fresh bytecode that we didn't read before. let values = self.get_bytecode(stored_hash, partial_query.timestamp); let page_to_use = partial_query.memory_page; @@ -251,28 +250,49 @@ impl DecommittmentProcess } } -fn concat_header_and_preimage( - header: VersionedHashHeader, - normalized_preimage: VersionedHashNormalizedPreimage, -) -> [u8; 32] { - let mut buffer = [0u8; 32]; +#[derive(Debug)] +// TODO: consider moving this to the zk-evm crate +enum VersionedCodeHash { + ZkEVM(VersionedHashHeader, VersionedHashNormalizedPreimage), + Evm(VersionedHashHeader, VersionedHashNormalizedPreimage), +} - buffer[0..4].copy_from_slice(&header.0); - buffer[4..32].copy_from_slice(&normalized_preimage.0); +impl VersionedCodeHash { + fn from_query(query: &DecommittmentQuery) -> Self { + match query.header.0[0] { + 1 => Self::ZkEVM(query.header, query.normalized_preimage), + 2 => Self::Evm(query.header, query.normalized_preimage), + _ => panic!("Unsupported hash version"), + } + } - buffer -} + /// Returns the hash in the format it is stored in the DB. + fn to_stored_hash(&self) -> U256 { + let (header, preimage) = match self { + Self::ZkEVM(header, preimage) => (header, preimage), + Self::Evm(header, preimage) => (header, preimage), + }; -/// For a given decommitment query, returns a pair of the stored hash as U256 and the length of the preimage in 32-byte words. -fn stored_hash_from_query(partial_query: &DecommittmentQuery) -> (U256, u16) { - let full_hash = - concat_header_and_preimage(partial_query.header, partial_query.normalized_preimage); + let mut hash = [0u8; 32]; + hash[0..4].copy_from_slice(&header.0); + hash[4..32].copy_from_slice(&preimage.0); - let versioned_hash = - ContractCodeSha256::try_deserialize(full_hash).expect("Invalid ContractCodeSha256 hash"); + // Hash[1] is used in both of the versions to denote whether the bytecode is being constructed. 
+ // We ignore this param. + hash[1] = 0; - let stored_hash = H256(ContractCodeSha256::serialize_to_stored(versioned_hash).unwrap()); - let length = versioned_hash.code_length_in_words; + h256_to_u256(H256(hash)) + } - (h256_to_u256(stored_hash), length) + fn get_preimage_length(&self) -> u32 { + // In zkEVM the hash[2..3] denotes the length of the preimage in words, while + // in EVM the hash[2..3] denotes the length of the preimage in bytes. + match self { + Self::ZkEVM(header, _) => { + let length_in_words = header.0[2] as u32 * 256 + header.0[3] as u32; + length_in_words * 32 + } + Self::Evm(header, _) => header.0[2] as u32 * 256 + header.0[3] as u32, + } + } } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs new file mode 100644 index 00000000000..ca8157b170d --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs @@ -0,0 +1,76 @@ +use ethabi::Token; +use zksync_contracts::read_bytecode; +use zksync_system_constants::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_types::{get_code_key, get_known_code_key, Execute, H256}; +use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256}; +use zksync_vm_interface::VmInterfaceExt; + +use crate::{ + interface::{storage::InMemoryStorage, TxExecutionMode}, + versions::testonly::default_system_env, + vm_latest::{tests::tester::VmTesterBuilder, utils::hash_evm_bytecode, HistoryEnabled}, +}; + +const MOCK_DEPLOYER_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockContractDeployer.json"; +const MOCK_KNOWN_CODE_STORAGE_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockKnownCodeStorage.json"; + +#[test] +fn tracing_evm_contract_deployment() { + let mock_deployer = read_bytecode(MOCK_DEPLOYER_PATH); + let mock_deployer_hash = hash_bytecode(&mock_deployer); + let mock_known_code_storage = read_bytecode(MOCK_KNOWN_CODE_STORAGE_PATH); + let mock_known_code_storage_hash = hash_bytecode(&mock_known_code_storage); + + // Override + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + storage.set_value(get_code_key(&CONTRACT_DEPLOYER_ADDRESS), mock_deployer_hash); + storage.set_value( + get_known_code_key(&mock_deployer_hash), + H256::from_low_u64_be(1), + ); + storage.set_value( + get_code_key(&KNOWN_CODES_STORAGE_ADDRESS), + mock_known_code_storage_hash, + ); + storage.set_value( + get_known_code_key(&mock_known_code_storage_hash), + H256::from_low_u64_be(1), + ); + storage.store_factory_dep(mock_deployer_hash, mock_deployer); + storage.store_factory_dep(mock_known_code_storage_hash, mock_known_code_storage); + + let mut system_env = default_system_env(); + // The EVM emulator will not be accessed, so we set it to a dummy value. 
+ system_env.base_system_smart_contracts.evm_emulator = + Some(system_env.base_system_smart_contracts.default_aa.clone()); + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let account = &mut vm.rich_accounts[0]; + + let args = [Token::Bytes((0..=u8::MAX).collect())]; + let evm_bytecode = ethabi::encode(&args); + let expected_bytecode_hash = hash_evm_bytecode(&evm_bytecode); + let execute = Execute::for_deploy(expected_bytecode_hash, vec![0; 32], &args); + let deploy_tx = account.get_l2_tx_for_execute(execute, None); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // Check that the surrogate EVM bytecode was added to the decommitter. + let known_bytecodes = vm.vm.state.decommittment_processor.known_bytecodes.inner(); + let known_evm_bytecode = + be_words_to_bytes(&known_bytecodes[&h256_to_u256(expected_bytecode_hash)]); + assert_eq!(known_evm_bytecode, evm_bytecode); + + let new_known_factory_deps = vm_result.new_known_factory_deps.unwrap(); + assert_eq!(new_known_factory_deps.len(), 2); // the deployed EraVM contract + EVM contract + assert_eq!( + new_known_factory_deps[&expected_bytecode_hash], + evm_bytecode + ); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index ef19717a627..d7cadc54b44 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -41,7 +41,7 @@ fn test_get_used_contracts() { .with_execution_mode(TxExecutionMode::VerifyExecute) .build(); - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); + assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); // create and push and execute some not-empty factory deps transaction with success status // to check that `get_used_contracts()` updates @@ -63,7 +63,7 @@ fn test_get_used_contracts() { .get_used_contracts() .into_iter() .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) + known_bytecodes_without_base_system_contracts(&vm.vm) .keys() .cloned() .collect::>() @@ -99,7 +99,7 @@ fn test_get_used_contracts() { for factory_dep in tx2.execute.factory_deps { let hash = hash_bytecode(&factory_dep); let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) + assert!(known_bytecodes_without_base_system_contracts(&vm.vm) .keys() .contains(&hash_to_u256)); assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); @@ -147,19 +147,24 @@ fn test_contract_is_used_right_after_prepare_to_decommit() { assert_eq!(vm.vm.get_used_contracts(), vec![bytecode_hash]); } -fn known_bytecodes_without_aa_code( +fn known_bytecodes_without_base_system_contracts( vm: &Vm, ) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm + let mut known_bytecodes_without_base_system_contracts = vm .state .decommittment_processor .known_bytecodes .inner() .clone(); - known_bytecodes_without_aa_code + known_bytecodes_without_base_system_contracts .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) .unwrap(); - known_bytecodes_without_aa_code + if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { + known_bytecodes_without_base_system_contracts + .remove(&h256_to_u256(evm_emulator.hash)) + 
.unwrap(); + } + known_bytecodes_without_base_system_contracts } /// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 0fc12848227..4bb32cdf7ae 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -52,7 +52,7 @@ fn test_l1_tx_execution() { let contract_code = read_test_contract(); let account = &mut vm.rich_accounts[0]; let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); + let tx_data = TransactionData::new(deploy_tx.tx.clone(), false); let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { shard_id: 0, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index 1203d61b80b..112be637fe0 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -8,6 +8,7 @@ mod call_tracer; mod circuits; mod code_oracle; mod constants; +mod evm_emulator; mod gas_limit; mod get_used_contracts; mod is_write_initial; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 91d78c69a93..6be49367d39 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -61,19 +61,17 @@ fn test_nonce_holder() { // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. 
vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: Some(account.address), - calldata: vec![12], - value: Default::default(), - factory_deps: vec![], - }, - None, - Nonce(nonce), - ) - .into(); - + let tx = account.get_l2_tx_for_execute_with_nonce( + Execute { + contract_address: Some(account.address), + calldata: vec![12], + value: Default::default(), + factory_deps: vec![], + }, + None, + Nonce(nonce), + ); + let mut transaction_data = TransactionData::new(tx, false); transaction_data.signature = vec![test_mode.into()]; vm.vm.push_raw_transaction(transaction_data, 0, 0, true); let result = vm.vm.execute(VmExecutionMode::OneTx); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index cc0085f2025..c00192aa8f1 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -62,7 +62,7 @@ fn test_predetermined_refunded_gas() { .with_rich_accounts(vec![account.clone()]) .build(); - let tx: TransactionData = tx.into(); + let tx = TransactionData::new(tx, false); // Overhead let overhead = tx.overhead_gas(); vm.vm diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index a6dc7118005..1f38c6f947e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -98,7 +98,7 @@ fn test_require_eip712() { let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); l2_tx.set_input(aa_tx, hash); // Pretend that operator is malicious and sets the initiator to the AA account. l2_tx.common_data.initiator_address = account_abstraction.address; @@ -148,7 +148,7 @@ fn test_require_eip712() { let (aa_txn_request, aa_hash) = TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); l2_tx.set_input(encoded_tx, aa_hash); let transaction: Transaction = l2_tx.into(); diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs index 6a908c2a73e..2ae5e81a328 100755 --- a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs @@ -13,7 +13,7 @@ use zk_evm_1_5_0::{ zkevm_opcode_defs::{decoding::EncodingModeProduction, Opcode, RetOpcode}, }; -use super::PubdataTracer; +use super::{EvmDeployTracer, PubdataTracer}; use crate::{ glue::GlueInto, interface::{ @@ -38,7 +38,7 @@ use crate::{ }; /// Default tracer for the VM. It manages the other tracers execution and stop the vm when needed. -pub(crate) struct DefaultExecutionTracer { +pub struct DefaultExecutionTracer { tx_has_been_processed: bool, execution_mode: VmExecutionMode, @@ -63,14 +63,18 @@ pub(crate) struct DefaultExecutionTracer { // It only takes into account circuits that are generated for actual execution. 
It doesn't // take into account e.g circuits produced by the initial bootloader memory commitment. pub(crate) circuits_tracer: CircuitsTracer, + // This tracer is responsible for handling EVM deployments and providing the data to the code decommitter. + pub(crate) evm_deploy_tracer: Option>, subversion: MultiVMSubversion, storage: StoragePtr, _phantom: PhantomData, } impl DefaultExecutionTracer { + #[allow(clippy::too_many_arguments)] pub(crate) fn new( computational_gas_limit: u32, + use_evm_emulator: bool, execution_mode: VmExecutionMode, dispatcher: TracerDispatcher, storage: StoragePtr, @@ -92,6 +96,7 @@ impl DefaultExecutionTracer { pubdata_tracer, ret_from_the_bootloader: None, circuits_tracer: CircuitsTracer::new(), + evm_deploy_tracer: use_evm_emulator.then(EvmDeployTracer::new), storage, _phantom: PhantomData, } @@ -172,6 +177,9 @@ macro_rules! dispatch_tracers { tracer.$function($( $params ),*); } $self.circuits_tracer.$function($( $params ),*); + if let Some(tracer) = &mut $self.evm_deploy_tracer { + tracer.$function($( $params ),*); + } }; } @@ -289,6 +297,12 @@ impl DefaultExecutionTracer { .finish_cycle(state, bootloader_state) .stricter(&result); + if let Some(evm_deploy_tracer) = &mut self.evm_deploy_tracer { + result = evm_deploy_tracer + .finish_cycle(state, bootloader_state) + .stricter(&result); + } + result.stricter(&self.should_stop_execution()) } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs new file mode 100644 index 00000000000..d91ee13a920 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs @@ -0,0 +1,105 @@ +use std::{marker::PhantomData, mem}; + +use zk_evm_1_5_0::{ + aux_structures::Timestamp, + tracing::{AfterExecutionData, VmLocalStateData}, + zkevm_opcode_defs::{ + FarCallOpcode, FatPointer, Opcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, + }, +}; +use zksync_types::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_vm_interface::storage::StoragePtr; + +use super::{traits::VmTracer, utils::read_pointer}; +use crate::{ + interface::{storage::WriteStorage, tracer::TracerExecutionStatus}, + tracers::dynamic::vm_1_5_0::DynTracer, + vm_latest::{ + utils::hash_evm_bytecode, BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState, + }, +}; + +/// Tracer responsible for collecting information about EVM deploys and providing those +/// to the code decommitter. 
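+///
+/// A rough sketch of the detection step (illustrative only; `calldata` stands for the bytes read
+/// from the far call's fat pointer, and `ethabi` is the crate already used below):
+///
+/// ```ignore
+/// let tracked = ethabi::short_signature("publishEVMBytecode", &[ethabi::ParamType::Bytes]);
+/// // Only far calls from the contract deployer to the known-code storage whose calldata
+/// // starts with this 4-byte selector are recorded.
+/// let is_tracked_call = calldata.len() >= 4 && calldata[..4] == tracked;
+/// ```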
+#[derive(Debug)] +pub(crate) struct EvmDeployTracer { + tracked_signature: [u8; 4], + pending_bytecodes: Vec>, + _phantom: PhantomData, +} + +impl EvmDeployTracer { + pub(crate) fn new() -> Self { + let tracked_signature = + ethabi::short_signature("publishEVMBytecode", &[ethabi::ParamType::Bytes]); + + Self { + tracked_signature, + pending_bytecodes: vec![], + _phantom: PhantomData, + } + } +} + +impl DynTracer> for EvmDeployTracer { + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &SimpleMemory, + _storage: StoragePtr, + ) { + if !matches!( + data.opcode.variant.opcode, + Opcode::FarCall(FarCallOpcode::Normal) + ) { + return; + }; + + let current = state.vm_local_state.callstack.current; + let from = current.msg_sender; + let to = current.this_address; + if from != CONTRACT_DEPLOYER_ADDRESS || to != KNOWN_CODES_STORAGE_ADDRESS { + return; + } + + let calldata_ptr = + state.vm_local_state.registers[usize::from(CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER)]; + let data = read_pointer(memory, FatPointer::from_u256(calldata_ptr.value)); + if data.len() < 4 { + return; + } + let (signature, data) = data.split_at(4); + if signature != self.tracked_signature { + return; + } + + match ethabi::decode(&[ethabi::ParamType::Bytes], data) { + Ok(decoded) => { + let published_bytecode = decoded.into_iter().next().unwrap().into_bytes().unwrap(); + self.pending_bytecodes.push(published_bytecode); + } + Err(err) => tracing::error!("Unable to decode `publishEVMBytecode` call: {err}"), + } + } +} + +impl VmTracer for EvmDeployTracer { + fn finish_cycle( + &mut self, + state: &mut ZkSyncVmState, + _bootloader_state: &mut BootloaderState, + ) -> TracerExecutionStatus { + for published_bytecode in mem::take(&mut self.pending_bytecodes) { + let hash = hash_evm_bytecode(&published_bytecode); + let as_words = bytes_to_be_words(published_bytecode); + + state.decommittment_processor.populate( + vec![(h256_to_u256(hash), as_words)], + Timestamp(state.local_state.timestamp), + ); + } + TracerExecutionStatus::Continue + } +} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs index fe916e19e8c..82721a32264 100755 --- a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs @@ -1,11 +1,13 @@ pub(crate) use circuits_tracer::CircuitsTracer; pub(crate) use default_tracers::DefaultExecutionTracer; +pub(crate) use evm_deploy_tracer::EvmDeployTracer; pub(crate) use pubdata_tracer::PubdataTracer; pub(crate) use refunds::RefundsTracer; pub(crate) use result_tracer::ResultTracer; pub(crate) mod circuits_tracer; pub(crate) mod default_tracers; +pub(crate) mod evm_deploy_tracer; pub(crate) mod pubdata_tracer; pub(crate) mod refunds; pub(crate) mod result_tracer; diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index 2ec86eb3cea..90948f2f89f 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -46,8 +46,8 @@ pub(crate) struct TransactionData { pub(crate) raw_bytes: Option>, } -impl From for TransactionData { - fn from(execute_tx: Transaction) -> Self { +impl TransactionData { + pub(crate) fn new(execute_tx: Transaction, use_evm_emulator: bool) -> Self { match execute_tx.common_data { 
ExecuteTransactionCommon::L2(common_data) => { let nonce = U256::from_big_endian(&common_data.nonce.to_be_bytes()); @@ -62,6 +62,19 @@ impl From for TransactionData { U256::zero() }; + let should_deploy_contract = if execute_tx.execute.contract_address.is_none() { + // Transactions with no `contract_address` should be filtered out by the API server, + // so this is more of a sanity check. + assert!( + use_evm_emulator, + "`execute.contract_address` not set for transaction {:?} with EVM emulation disabled", + common_data.hash() + ); + U256([1, 0, 0, 0]) + } else { + U256::zero() + }; + // Ethereum transactions do not sign gas per pubdata limit, and so for them we need to use // some default value. We use the maximum possible value that is allowed by the bootloader // (i.e. we can not use u64::MAX, because the bootloader requires gas per pubdata for such @@ -85,7 +98,7 @@ impl From for TransactionData { value: execute_tx.execute.value, reserved: [ should_check_chain_id, - U256::zero(), + should_deploy_contract, U256::zero(), U256::zero(), ], diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs index 6f9522572ad..cb4b13eecdf 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs @@ -98,6 +98,13 @@ pub(crate) fn new_vm_state( Timestamp(0), ); + if let Some(evm_emulator) = &system_env.base_system_smart_contracts.evm_emulator { + decommittment_processor.populate( + vec![(h256_to_u256(evm_emulator.hash), evm_emulator.code.clone())], + Timestamp(0), + ); + } + memory.populate( vec![( BOOTLOADER_CODE_PAGE, @@ -117,6 +124,13 @@ pub(crate) fn new_vm_state( Timestamp(0), ); + // By convention, default AA is used as a fallback if the EVM emulator is not available. + let evm_emulator_code_hash = system_env + .base_system_smart_contracts + .evm_emulator + .as_ref() + .unwrap_or(&system_env.base_system_smart_contracts.default_aa) + .hash; let mut vm = VmState::empty_state( storage_oracle, memory, @@ -128,11 +142,7 @@ pub(crate) fn new_vm_state( default_aa_code_hash: h256_to_u256( system_env.base_system_smart_contracts.default_aa.hash, ), - // For now, the default account hash is used as the code hash for the EVM simulator. - // In the 1.5.0 version, it is not possible to instantiate EVM bytecode. - evm_simulator_code_hash: h256_to_u256( - system_env.base_system_smart_contracts.default_aa.hash, - ), + evm_simulator_code_hash: h256_to_u256(evm_emulator_code_hash), zkporter_is_available: system_env.zk_porter_available, }, ); diff --git a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs index 0fb803de5d4..e07d3eda7c4 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs @@ -1,6 +1,57 @@ -/// Utility functions for the VM. +//! Utility functions for the VM. 
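+//!
+//! EVM bytecodes get a "versioned" SHA-256 hash here: byte 0 is the blob version byte, byte 1 is
+//! zero, and bytes 2..4 store the preimage length in bytes (big-endian), whereas EraVM bytecode
+//! hashes store the length in 32-byte words in the same position. A minimal sketch of reading the
+//! length back out (illustrative only; `H256` from `zksync_types` is assumed):
+//!
+//! ```ignore
+//! fn evm_preimage_len_in_bytes(hash: zksync_types::H256) -> u32 {
+//!     u32::from(hash.0[2]) * 256 + u32::from(hash.0[3])
+//! }
+//! ```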
+ +use once_cell::sync::Lazy; +use zk_evm_1_5_0::{ + aux_structures::MemoryPage, + sha2, + zkevm_opcode_defs::{BlobSha256Format, VersionedHashLen32}, +}; +use zksync_types::{H256, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_vm_interface::VmEvent; + pub mod fee; pub mod l2_blocks; pub(crate) mod logs; pub mod overhead; pub mod transaction_encoding; + +pub(crate) fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { + use sha2::{Digest, Sha256}; + let mut hasher = Sha256::new(); + let len = bytecode.len() as u16; + hasher.update(bytecode); + let result = hasher.finalize(); + + let mut output = [0u8; 32]; + output[..].copy_from_slice(result.as_slice()); + output[0] = BlobSha256Format::VERSION_BYTE; + output[1] = 0; + output[2..4].copy_from_slice(&len.to_be_bytes()); + + H256(output) +} + +pub const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 2) +} + +/// Extracts all bytecodes marked as known on the system contracts. +pub fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec { + static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { + ethabi::long_signature( + "MarkedAsKnown", + &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], + ) + }); + + all_generated_events + .iter() + .filter(|event| { + // Filter events from the deployer contract that match the expected signature. + event.address == KNOWN_CODES_STORAGE_ADDRESS + && event.indexed_topics.len() == 3 + && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE + }) + .map(|event| event.indexed_topics[1]) + .collect() +} diff --git a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs index 86c49a3eb15..ed532f89dbc 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs @@ -10,7 +10,9 @@ pub trait TransactionVmExt { impl TransactionVmExt for Transaction { fn bootloader_encoding_size(&self) -> usize { - let transaction_data: TransactionData = self.clone().into(); + // Since we want to just measure the encoding size, `use_evm_emulator` arg doesn't matter here, + // so we use a more lenient option. 
+ let transaction_data = TransactionData::new(self.clone(), true); transaction_data.into_tokens().len() } } diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index 8ccd600a79e..f4cc1580e93 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -1,10 +1,12 @@ +use std::collections::HashMap; + use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, vm::VmVersion, Transaction, H256, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{be_words_to_bytes, h256_to_u256, u256_to_h256}; use crate::{ glue::GlueInto, @@ -79,6 +81,20 @@ impl Vm { self.state.local_state.callstack.current.ergs_remaining } + pub(crate) fn decommit_bytecodes(&self, hashes: &[H256]) -> HashMap> { + let bytecodes = hashes.iter().map(|&hash| { + let bytecode_words = self + .state + .decommittment_processor + .known_bytecodes + .inner() + .get(&h256_to_u256(hash)) + .unwrap_or_else(|| panic!("Bytecode with hash {hash:?} not found")); + (hash, be_words_to_bytes(bytecode_words)) + }); + bytecodes.collect() + } + // visible for testing pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { let (raw_events, l1_messages) = self.state.event_sink.flatten(); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs index 8196760a621..9462a89be2a 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs @@ -90,6 +90,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs index c48d48edd3b..b1ad4d257b7 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs @@ -88,6 +88,7 @@ impl Vm { .refund_tracer .map(|r| r.get_refunds()) .unwrap_or_default(), + new_known_factory_deps: None, }; tx_tracer.dispatcher.save_results(&mut result); diff --git a/core/lib/protobuf_config/src/chain.rs b/core/lib/protobuf_config/src/chain.rs index f91bf07e43f..2f8ac8df07e 100644 --- a/core/lib/protobuf_config/src/chain.rs +++ b/core/lib/protobuf_config/src/chain.rs @@ -86,6 +86,7 @@ impl ProtoRepr for proto::StateKeeper { // needed during the initialization from files bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, fee_account_addr: None, l1_batch_commit_data_generator_mode: Default::default(), }) diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs index 59896aa244d..7ecc768100f 100644 --- a/core/lib/protobuf_config/src/genesis.rs +++ b/core/lib/protobuf_config/src/genesis.rs @@ -75,6 +75,12 @@ impl ProtoRepr for proto::Genesis { .and_then(|x| parse_h256(x)) .context("default_aa_hash")?, ), + evm_emulator_hash: self + .evm_emulator_hash + .as_deref() + .map(parse_h256) + .transpose() + .context("evm_emulator_hash")?, l1_chain_id: required(&self.l1_chain_id) .map(|x| L1ChainId(*x)) .context("l1_chain_id")?, @@ -105,6 +111,7 @@ impl ProtoRepr for proto::Genesis { genesis_protocol_semantic_version: 
this.protocol_version.map(|x| x.to_string()), default_aa_hash: this.default_aa_hash.map(|x| format!("{:?}", x)), bootloader_hash: this.bootloader_hash.map(|x| format!("{:?}", x)), + evm_emulator_hash: this.evm_emulator_hash.map(|x| format!("{:?}", x)), fee_account: Some(format!("{:?}", this.fee_account)), l1_chain_id: Some(this.l1_chain_id.0), l2_chain_id: Some(this.l2_chain_id.as_u64()), diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index a0f4d45214f..c89199359aa 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -12,11 +12,14 @@ mod commitment_generator; mod consensus; mod contract_verifier; mod contracts; +mod da_client; mod da_dispatcher; mod database; mod en; mod eth; mod experimental; +mod external_price_api_client; +mod external_proof_integration_api; mod general; mod genesis; mod house_keeper; @@ -25,15 +28,11 @@ mod observability; mod proof_data_handler; pub mod proto; mod prover; +mod prover_job_monitor; mod pruning; mod secrets; -mod snapshots_creator; - -mod da_client; -mod external_price_api_client; -mod external_proof_integration_api; -mod prover_job_monitor; mod snapshot_recovery; +mod snapshots_creator; #[cfg(test)] mod tests; mod utils; diff --git a/core/lib/protobuf_config/src/proto/config/genesis.proto b/core/lib/protobuf_config/src/proto/config/genesis.proto index 08cbb954fcb..e3a9a45366f 100644 --- a/core/lib/protobuf_config/src/proto/config/genesis.proto +++ b/core/lib/protobuf_config/src/proto/config/genesis.proto @@ -28,5 +28,6 @@ message Genesis { optional Prover prover = 10; optional L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 29; // optional, default to rollup optional string genesis_protocol_semantic_version = 12; // optional; + optional string evm_emulator_hash = 13; // optional; h256 reserved 11; reserved "shared_bridge"; } diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 22a20223c8b..8fe192a5f51 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -144,6 +144,8 @@ pub struct VMRunWitnessInputData { pub protocol_version: ProtocolVersionId, pub bootloader_code: Vec<[u8; 32]>, pub default_account_code_hash: U256, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub evm_emulator_code_hash: Option, pub storage_refunds: Vec, pub pubdata_costs: Vec, pub witness_block_state: WitnessStorageState, diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 68b25416d66..86b563f823e 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -306,6 +306,7 @@ mod tests { code: vec![U256([1; 4])], hash: H256([1; 32]), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 1e5a1b3fe65..103b6de1fb3 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -643,7 +643,7 @@ pub struct ProtocolVersion { /// Verifier configuration #[deprecated] pub verification_keys_hashes: Option, - /// Hashes of base system contracts (bootloader and default account) + /// Hashes of base system contracts (bootloader, default account and evm simulator) #[deprecated] pub base_system_contracts: Option, /// Bootloader code hash @@ -652,6 +652,9 @@ pub struct ProtocolVersion { /// Default account code hash #[serde(rename = "defaultAccountCodeHash")] pub 
default_account_code_hash: Option, + /// EVM emulator code hash + #[serde(rename = "evmSimulatorCodeHash")] + pub evm_emulator_code_hash: Option, /// L2 Upgrade transaction hash #[deprecated] pub l2_system_upgrade_tx_hash: Option, @@ -667,6 +670,7 @@ impl ProtocolVersion { timestamp: u64, bootloader_code_hash: H256, default_account_code_hash: H256, + evm_emulator_code_hash: Option, l2_system_upgrade_tx_hash: Option, ) -> Self { Self { @@ -677,9 +681,11 @@ impl ProtocolVersion { base_system_contracts: Some(BaseSystemContractsHashes { bootloader: bootloader_code_hash, default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, }), bootloader_code_hash: Some(bootloader_code_hash), default_account_code_hash: Some(default_account_code_hash), + evm_emulator_code_hash, l2_system_upgrade_tx_hash, l2_system_upgrade_tx_hash_new: l2_system_upgrade_tx_hash, } @@ -695,6 +701,13 @@ impl ProtocolVersion { .or_else(|| self.base_system_contracts.map(|hashes| hashes.default_aa)) } + pub fn evm_emulator_code_hash(&self) -> Option { + self.evm_emulator_code_hash.or_else(|| { + self.base_system_contracts + .and_then(|hashes| hashes.evm_emulator) + }) + } + pub fn minor_version(&self) -> Option { self.minor_version.or(self.version_id) } @@ -917,6 +930,7 @@ mod tests { base_system_contracts: Some(Default::default()), bootloader_code_hash: Some(Default::default()), default_account_code_hash: Some(Default::default()), + evm_emulator_code_hash: Some(Default::default()), l2_system_upgrade_tx_hash: Default::default(), l2_system_upgrade_tx_hash_new: Default::default(), }; diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index 63d1bad486f..759ee8947ba 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -467,6 +467,7 @@ pub struct L1BatchMetaParameters { pub zkporter_is_available: bool, pub bootloader_code_hash: H256, pub default_aa_code_hash: H256, + pub evm_emulator_code_hash: Option, pub protocol_version: Option, } @@ -482,10 +483,11 @@ impl L1BatchMetaParameters { .protocol_version .map_or(false, |ver| ver.is_post_1_5_0()) { - // EVM simulator hash for now is the same as the default AA hash. 
- result.extend(self.default_aa_code_hash.as_bytes()); + let evm_emulator_code_hash = self + .evm_emulator_code_hash + .unwrap_or(self.default_aa_code_hash); + result.extend(evm_emulator_code_hash.as_bytes()); } - result } @@ -551,6 +553,7 @@ impl L1BatchCommitment { zkporter_is_available: ZKPORTER_IS_AVAILABLE, bootloader_code_hash: input.common().bootloader_code_hash, default_aa_code_hash: input.common().default_aa_code_hash, + evm_emulator_code_hash: input.common().evm_emulator_code_hash, protocol_version: Some(input.common().protocol_version), }; @@ -653,6 +656,7 @@ pub struct CommitmentCommonInput { pub rollup_root_hash: H256, pub bootloader_code_hash: H256, pub default_aa_code_hash: H256, + pub evm_emulator_code_hash: Option, pub protocol_version: ProtocolVersionId, } @@ -693,6 +697,7 @@ impl CommitmentInput { rollup_root_hash, bootloader_code_hash: base_system_contracts_hashes.bootloader, default_aa_code_hash: base_system_contracts_hashes.default_aa, + evm_emulator_code_hash: base_system_contracts_hashes.evm_emulator, protocol_version, }; if protocol_version.is_pre_boojum() { diff --git a/core/lib/types/src/commitment/tests/mod.rs b/core/lib/types/src/commitment/tests/mod.rs index 34e308cfd0a..33fb0142b04 100644 --- a/core/lib/types/src/commitment/tests/mod.rs +++ b/core/lib/types/src/commitment/tests/mod.rs @@ -50,3 +50,8 @@ fn post_boojum_1_4_2() { fn post_boojum_1_5_0() { run_test("post_boojum_1_5_0_test"); } + +#[test] +fn post_boojum_1_5_0_with_evm() { + run_test("post_boojum_1_5_0_test_with_evm"); +} diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json new file mode 100644 index 00000000000..4e8c0e0814a --- /dev/null +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json @@ -0,0 +1,359 @@ +{ + "input": { + "PostBoojum": { + "common": { + "l2_to_l1_logs": [ + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 0, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x7814f203b8e02f6a676b8f7faefcf732d8b4368bab25239ea4525010aa85d5ee", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + ], + "rollup_last_leaf_index": 89, + "rollup_root_hash": "0xe47f013d1ecd4ce53b6872f6b762670b393815e7ddacdf2b0886af9c7f3a555b", + "bootloader_code_hash": "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d", + "default_aa_code_hash": "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9", + "evm_emulator_code_hash": "0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91", + "protocol_version": "Version23" + }, + "system_logs": [ + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 0, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000004", + "value": "0x55618db5ff24aee4d236921b6f4272101161137115a3b4c4a65f8677b124c01c" + }, + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 1, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000003", + "value": "0x00000000000000000000000065c22f8000000000000000000000000065c22f81" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000005", + "value": 
"0x155c82febe94e07df0065c153e8ed403b5351fd64d657c8dffbfbee8ec3d2ba3" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000006", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x85a7fb853512ba6575c99ee121dd560559523a4587a2cd7e83cd359cd9ea2aed" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000002", + "value": "0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000007", + "value": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000008" + } + ], + "state_diffs": [ + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x1", + "derived_key": [ + 113, 233, 23, 33, 249, 145, 133, 118, 215, 96, 240, 47, 3, 202, 196, + 124, 111, 64, 3, 49, 96, 49, 132, 142, 60, 29, 153, 230, 232, 58, + 71, 67 + ], + "enumeration_index": 49, + "initial_value": "0x18776f28c303800", + "final_value": "0x708da482cab20760" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x294a00337abeee2b3cd948ffeed92231e2a3acc2eb11210400e0aa9557f23e26", + "derived_key": [ + 45, 90, 105, 
98, 204, 206, 229, 212, 173, 180, 138, 54, 187, 191, + 68, 58, 83, 23, 33, 72, 67, 129, 18, 89, 55, 243, 0, 26, 197, 255, + 135, 91 + ], + "enumeration_index": 50, + "initial_value": "0xf5559e28fd66c0", + "final_value": "0xf5a19b324caf80" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", + "derived_key": [ + 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, 166, 75, 35, 133, + 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, 64, 49, 220, + 193, 72, 27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x6f05e193353286a0" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x7", + "derived_key": [ + 18, 59, 175, 197, 134, 247, 119, 100, 72, 140, 210, 76, 106, 119, + 84, 110, 90, 15, 232, 189, 251, 79, 162, 3, 207, 175, 252, 54, 204, + 228, 221, 91 + ], + "enumeration_index": 53, + "initial_value": "0x100000000000000000000000065c22e3e", + "final_value": "0x200000000000000000000000065c22f80" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x9", + "derived_key": [ + 142, 125, 208, 106, 197, 183, 59, 71, 59, 230, 188, 90, 81, 3, 15, + 76, 116, 55, 101, 124, 183, 178, 155, 243, 118, 197, 100, 184, 209, + 103, 90, 94 + ], + "enumeration_index": 54, + "initial_value": "0x200000000000000000000000065c22e3f", + "final_value": "0x400000000000000000000000065c22f81" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xd", + "derived_key": [ + 235, 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, + 40, 14, 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, + 247, 152, 97 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xebbe609cd3ccd11f273eb94374d6d3a2f7856c5f1039dc4877c6a334188ac7c1" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xe", + "derived_key": [ + 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, 58, + 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x708e7fcf68ebab6c87322686cac4bcdb5f2bd4c71f337b18d147fd9a6c44ad13" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10c", + "derived_key": [ + 121, 9, 53, 136, 208, 232, 71, 239, 167, 58, 16, 206, 32, 228, 121, + 159, 177, 228, 102, 66, 214, 86, 23, 199, 229, 33, 63, 160, 73, 137, + 217, 45 + ], + "enumeration_index": 57, + "initial_value": "0x200000000000000000000000065c22e3f", + "final_value": "0x400000000000000000000000065c22f81" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xad67d757c34507f157cacfa2e3153e9f260a2244f30428821be7be64587ac55f", + "derived_key": [ + 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, 44, 164, 124, 169, + 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, 57, 36, 22, + 48, 203, 70 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x55618db5ff24aee4d236921b6f4272101161137115a3b4c4a65f8677b124c01c" + } + ], + "aux_commitments": { + "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", + "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" + }, + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + 
"0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + }, + "pass_through_data": { + "shared_states": [ + { + "last_leaf_index": 89, + "root_hash": "0xe47f013d1ecd4ce53b6872f6b762670b393815e7ddacdf2b0886af9c7f3a555b" + }, + { + "last_leaf_index": 0, + "root_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "meta_parameters": { + "zkporter_is_available": false, + "bootloader_code_hash": "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d", + "default_aa_code_hash": "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9", + "evm_emulator_code_hash": "0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91", + "protocol_version": "Version23" + }, + "auxiliary_output": { + "PostBoojum": { + "common": { + "l2_l1_logs_merkle_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff", + "protocol_version": "Version23" + }, + "system_logs_linear_hash": "0x602dacc0a26e3347f0679924c4ae151ff5200e7dd80902fe0fc11c806c4d3ffb", + "state_diffs_compressed": [ + 1, 0, 1, 72, 4, 0, 4, 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, + 166, 75, 35, 133, 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, + 64, 49, 220, 193, 72, 27, 65, 111, 5, 225, 147, 53, 50, 134, 160, 235, + 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, 40, 14, + 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, 247, 152, 97, + 0, 235, 190, 96, 156, 211, 204, 209, 31, 39, 62, 185, 67, 116, 214, 211, + 162, 247, 133, 108, 95, 16, 57, 220, 72, 119, 198, 163, 52, 24, 138, + 199, 193, 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, + 58, 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216, 0, 112, 142, 127, 207, 104, 235, 171, 108, 135, 50, 38, 134, + 202, 196, 188, 219, 95, 43, 212, 199, 31, 51, 123, 24, 209, 71, 253, + 154, 108, 68, 173, 19, 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, + 44, 164, 124, 169, 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, + 57, 36, 22, 48, 203, 70, 0, 85, 97, 141, 181, 255, 36, 174, 228, 210, + 54, 146, 27, 111, 66, 114, 16, 17, 97, 19, 113, 21, 163, 180, 196, 166, + 95, 134, 119, 177, 36, 192, 28, 0, 0, 0, 49, 65, 111, 6, 45, 144, 62, + 129, 207, 96, 0, 0, 0, 50, 49, 75, 253, 9, 79, 72, 192, 0, 0, 0, 53, + 137, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 54, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 57, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66 + ], + "state_diffs_hash": 
"0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0", + "aux_commitments": { + "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", + "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" + }, + "blob_linear_hashes": [ + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000007", + "0x0000000000000000000000000000000000000000000000000000000000000008", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + }, + "hashes": { + "pass_through_data": "0x6a3ffc0f55d4abce9498b8bcb01a3018bc2b83d96acb27e23772fe9347954725", + "aux_output": "0xadc63d9c45f85598f3e3c232970315d1f6ac96222e379e16ced7a204524a4061", + "meta_parameters": "0x02531e5cc22688523a4ac9317e5097743771f6914015cf1152491cf22084bd58", + "commitment": "0x4fdd8c5b231dfc9fc81aba744a90fbec78627f529ac29f9fc758a7b9e62fa321" + } +} diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 036d2a7a036..48e813e571d 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -216,7 +216,9 @@ impl L2Tx { let raw = req.get_signed_bytes(&sig).context("get_signed_bytes")?; let (req, hash) = TransactionRequest::from_bytes_unverified(&raw).context("from_bytes_unverified()")?; - let mut tx = L2Tx::from_request_unverified(req).context("from_request_unverified()")?; + // Since we allow users 
to specify `None` recipient, EVM emulation is implicitly enabled. + let mut tx = + L2Tx::from_request_unverified(req, true).context("from_request_unverified()")?; tx.set_input(raw, hash); Ok(tx) } diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 67661eb14ad..a50fc8a655b 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -331,9 +331,14 @@ impl TryFrom for abi::Transaction { } } -impl TryFrom for Transaction { - type Error = anyhow::Error; - fn try_from(tx: abi::Transaction) -> anyhow::Result { +impl Transaction { + /// Converts a transaction from its ABI representation. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables L2 transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. + pub fn from_abi(tx: abi::Transaction, allow_no_target: bool) -> anyhow::Result { Ok(match tx { abi::Transaction::L1 { tx, @@ -405,7 +410,7 @@ impl TryFrom for Transaction { abi::Transaction::L2(raw) => { let (req, hash) = transaction_request::TransactionRequest::from_bytes_unverified(&raw)?; - let mut tx = L2Tx::from_request_unverified(req)?; + let mut tx = L2Tx::from_request_unverified(req, allow_no_target)?; tx.set_input(raw, hash); tx.into() } diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs index 1afb108a053..48f26dfd5c7 100644 --- a/core/lib/types/src/protocol_upgrade.rs +++ b/core/lib/types/src/protocol_upgrade.rs @@ -62,6 +62,8 @@ pub struct ProtocolUpgrade { pub bootloader_code_hash: Option, /// New default account code hash. pub default_account_code_hash: Option, + /// New EVM emulator code hash + pub evm_emulator_code_hash: Option, /// New verifier params. pub verifier_params: Option, /// New verifier address. @@ -118,17 +120,21 @@ impl ProtocolUpgrade { bootloader_code_hash: (bootloader_hash != H256::zero()).then_some(bootloader_hash), default_account_code_hash: (default_account_hash != H256::zero()) .then_some(default_account_hash), + evm_emulator_code_hash: None, // EVM emulator upgrades are not supported yet verifier_params: (upgrade.verifier_params != abi::VerifierParams::default()) .then_some(upgrade.verifier_params.into()), verifier_address: (upgrade.verifier != Address::zero()).then_some(upgrade.verifier), timestamp: upgrade.upgrade_timestamp.try_into().unwrap(), tx: (upgrade.l2_protocol_upgrade_tx.tx_type != U256::zero()) .then(|| { - Transaction::try_from(abi::Transaction::L1 { - tx: upgrade.l2_protocol_upgrade_tx, - factory_deps: upgrade.factory_deps, - eth_block: 0, - }) + Transaction::from_abi( + abi::Transaction::L1 { + tx: upgrade.l2_protocol_upgrade_tx, + factory_deps: upgrade.factory_deps, + eth_block: 0, + }, + true, + ) .context("Transaction::try_from()")? 
.try_into() .map_err(|err| anyhow::format_err!("try_into::(): {err}")) @@ -149,11 +155,14 @@ pub fn decode_set_chain_id_event( .unwrap_or_else(|_| panic!("Version is not supported, packed version: {full_version_id}")); Ok(( protocol_version, - Transaction::try_from(abi::Transaction::L1 { - tx: tx.into(), - eth_block: 0, - factory_deps: vec![], - }) + Transaction::from_abi( + abi::Transaction::L1 { + tx: tx.into(), + eth_block: 0, + factory_deps: vec![], + }, + true, + ) .unwrap() .try_into() .unwrap(), @@ -298,6 +307,9 @@ impl ProtocolVersion { default_aa: upgrade .default_account_code_hash .unwrap_or(self.base_system_contracts_hashes.default_aa), + evm_emulator: upgrade + .evm_emulator_code_hash + .or(self.base_system_contracts_hashes.evm_emulator), }, tx: upgrade.tx, } diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs index a30a57bffa5..9ef037dc29b 100644 --- a/core/lib/types/src/storage/mod.rs +++ b/core/lib/types/src/storage/mod.rs @@ -5,7 +5,7 @@ pub use log::*; use serde::{Deserialize, Serialize}; use zksync_basic_types::{web3::keccak256, L2ChainId}; pub use zksync_system_constants::*; -use zksync_utils::address_to_h256; +use zksync_utils::{address_to_h256, u256_to_h256}; use crate::{AccountTreeId, Address, H160, H256, U256}; @@ -78,6 +78,10 @@ pub fn get_code_key(account: &Address) -> StorageKey { StorageKey::new(account_code_storage, address_to_h256(account)) } +pub fn get_evm_code_hash_key(account: &Address) -> StorageKey { + get_deployer_key(get_address_mapping_key(account, u256_to_h256(1.into()))) +} + pub fn get_known_code_key(hash: &H256) -> StorageKey { let known_codes_storage = AccountTreeId::new(KNOWN_CODES_STORAGE_ADDRESS); StorageKey::new(known_codes_storage, *hash) @@ -88,6 +92,11 @@ pub fn get_system_context_key(key: H256) -> StorageKey { StorageKey::new(system_context, key) } +pub fn get_deployer_key(key: H256) -> StorageKey { + let deployer_contract = AccountTreeId::new(CONTRACT_DEPLOYER_ADDRESS); + StorageKey::new(deployer_contract, key) +} + pub fn get_is_account_key(account: &Address) -> StorageKey { let deployer = AccountTreeId::new(CONTRACT_DEPLOYER_ADDRESS); diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs index a28c45b8fea..4329680991c 100644 --- a/core/lib/types/src/system_contracts.rs +++ b/core/lib/types/src/system_contracts.rs @@ -1,11 +1,11 @@ use std::path::PathBuf; -use once_cell::sync::Lazy; use zksync_basic_types::{AccountTreeId, Address, U256}; use zksync_contracts::{read_sys_contract_bytecode, ContractLanguage, SystemContractsRepo}; use zksync_system_constants::{ BOOTLOADER_UTILITIES_ADDRESS, CODE_ORACLE_ADDRESS, COMPRESSOR_ADDRESS, CREATE2_FACTORY_ADDRESS, - EVENT_WRITER_ADDRESS, P256VERIFY_PRECOMPILE_ADDRESS, PUBDATA_CHUNK_PUBLISHER_ADDRESS, + EVENT_WRITER_ADDRESS, EVM_GAS_MANAGER_ADDRESS, P256VERIFY_PRECOMPILE_ADDRESS, + PUBDATA_CHUNK_PUBLISHER_ADDRESS, }; use crate::{ @@ -25,7 +25,7 @@ use crate::{ pub const TX_NONCE_INCREMENT: U256 = U256([1, 0, 0, 0]); // 1 pub const DEPLOYMENT_NONCE_INCREMENT: U256 = U256([0, 0, 1, 0]); // 2^128 -static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 25] = [ +static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 26] = [ ( "", "AccountCodeStorage", @@ -147,6 +147,12 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 25] = [ COMPLEX_UPGRADER_ADDRESS, ContractLanguage::Sol, ), + ( + "", + "EvmGasManager", + EVM_GAS_MANAGER_ADDRESS, + ContractLanguage::Sol, + ), // For now, only zero 
address and the bootloader address have empty bytecode at the init // In the future, we might want to set all of the system contracts this way. ("", "EmptyContract", Address::zero(), ContractLanguage::Sol), @@ -170,29 +176,40 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 25] = [ ), ]; -static SYSTEM_CONTRACTS: Lazy> = Lazy::new(|| { +/// Gets default set of system contracts, based on Cargo workspace location. +pub fn get_system_smart_contracts(use_evm_emulator: bool) -> Vec { SYSTEM_CONTRACT_LIST .iter() - .map(|(path, name, address, contract_lang)| DeployedContract { - account_id: AccountTreeId::new(*address), - bytecode: read_sys_contract_bytecode(path, name, contract_lang.clone()), + .filter_map(|(path, name, address, contract_lang)| { + if *name == "EvmGasManager" && !use_evm_emulator { + None + } else { + Some(DeployedContract { + account_id: AccountTreeId::new(*address), + bytecode: read_sys_contract_bytecode(path, name, contract_lang.clone()), + }) + } }) - .collect::>() -}); - -/// Gets default set of system contracts, based on Cargo workspace location. -pub fn get_system_smart_contracts() -> Vec { - SYSTEM_CONTRACTS.clone() + .collect() } /// Loads system contracts from a given directory. -pub fn get_system_smart_contracts_from_dir(path: PathBuf) -> Vec { +pub fn get_system_smart_contracts_from_dir( + path: PathBuf, + use_evm_emulator: bool, +) -> Vec { let repo = SystemContractsRepo { root: path }; SYSTEM_CONTRACT_LIST .iter() - .map(|(path, name, address, contract_lang)| DeployedContract { - account_id: AccountTreeId::new(*address), - bytecode: repo.read_sys_contract_bytecode(path, name, contract_lang.clone()), + .filter_map(|(path, name, address, contract_lang)| { + if *name == "EvmGasManager" && !use_evm_emulator { + None + } else { + Some(DeployedContract { + account_id: AccountTreeId::new(*address), + bytecode: repo.read_sys_contract_bytecode(path, name, contract_lang.clone()), + }) + } }) .collect::>() } diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 5f26b1d6a6a..a8713f301ba 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -809,6 +809,7 @@ impl TransactionRequest { impl L2Tx { pub(crate) fn from_request_unverified( mut value: TransactionRequest, + allow_no_target: bool, ) -> Result { let fee = value.get_fee_data_checked()?; let nonce = value.get_nonce_checked()?; @@ -817,8 +818,7 @@ impl L2Tx { let meta = value.eip712_meta.take().unwrap_or_default(); validate_factory_deps(&meta.factory_deps)?; - // TODO: Remove this check when evm equivalence gets enabled - if value.to.is_none() { + if value.to.is_none() && !allow_no_target { return Err(SerializationTransactionError::ToAddressIsNull); } @@ -848,11 +848,18 @@ impl L2Tx { Ok(tx) } + /// Converts a request into a transaction. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. 
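+    ///
+    /// A minimal usage sketch (the `request` value is assumed to come from elsewhere, e.g.
+    /// `TransactionRequest::from_bytes`): with `allow_no_target = true`, a request whose `to`
+    /// field is `None` passes validation instead of failing with `ToAddressIsNull`.
+    ///
+    /// ```ignore
+    /// let tx = L2Tx::from_request(request, MAX_ENCODED_TX_SIZE, true)?;
+    /// ```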
pub fn from_request( - value: TransactionRequest, + request: TransactionRequest, max_tx_size: usize, + allow_no_target: bool, ) -> Result { - let tx = Self::from_request_unverified(value)?; + let tx = Self::from_request_unverified(request, allow_no_target)?; tx.check_encoded_size(max_tx_size)?; Ok(tx) } @@ -916,11 +923,19 @@ impl From for TransactionRequest { } } -impl TryFrom for L1Tx { - type Error = SerializationTransactionError; - fn try_from(tx: CallRequest) -> Result { +impl L1Tx { + /// Converts a request into a transaction. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. + pub fn from_request( + request: CallRequest, + allow_no_target: bool, + ) -> Result { // L1 transactions have no limitations on the transaction size. - let tx: L2Tx = L2Tx::from_request(tx.into(), MAX_ENCODED_TX_SIZE)?; + let tx: L2Tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE, allow_no_target)?; // Note, that while the user has theoretically provided the fee for ETH on L1, // the payment to the operator as well as refunds happen on L2 and so all the ETH @@ -1316,7 +1331,7 @@ mod tests { ..Default::default() }; let execute_tx1: Result = - L2Tx::from_request(tx1, usize::MAX); + L2Tx::from_request(tx1, usize::MAX, true); assert!(execute_tx1.is_ok()); let tx2 = TransactionRequest { @@ -1327,7 +1342,7 @@ mod tests { ..Default::default() }; let execute_tx2: Result = - L2Tx::from_request(tx2, usize::MAX); + L2Tx::from_request(tx2, usize::MAX, true); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::TooBigNonce @@ -1344,7 +1359,7 @@ mod tests { ..Default::default() }; let execute_tx1: Result = - L2Tx::from_request(tx1, usize::MAX); + L2Tx::from_request(tx1, usize::MAX, true); assert_eq!( execute_tx1.unwrap_err(), SerializationTransactionError::MaxFeePerGasNotU64 @@ -1358,7 +1373,7 @@ mod tests { ..Default::default() }; let execute_tx2: Result = - L2Tx::from_request(tx2, usize::MAX); + L2Tx::from_request(tx2, usize::MAX, true); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::MaxPriorityFeePerGasNotU64 @@ -1376,7 +1391,7 @@ mod tests { }; let execute_tx3: Result = - L2Tx::from_request(tx3, usize::MAX); + L2Tx::from_request(tx3, usize::MAX, true); assert_eq!( execute_tx3.unwrap_err(), SerializationTransactionError::MaxFeePerPubdataByteNotU64 @@ -1432,7 +1447,7 @@ mod tests { let request = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)).unwrap(); assert_matches!( - L2Tx::from_request(request.0, random_tx_max_size), + L2Tx::from_request(request.0, random_tx_max_size, true), Err(SerializationTransactionError::OversizedData(_, _)) ) } @@ -1458,7 +1473,7 @@ mod tests { }; let try_to_l2_tx: Result = - L2Tx::from_request(call_request.into(), random_tx_max_size); + L2Tx::from_request(call_request.into(), random_tx_max_size, true); assert_matches!( try_to_l2_tx, @@ -1483,15 +1498,20 @@ mod tests { access_list: None, eip712_meta: None, }; - let l2_tx = L2Tx::from_request(call_request_with_nonce.clone().into(), MAX_ENCODED_TX_SIZE) - .unwrap(); + let l2_tx = L2Tx::from_request( + call_request_with_nonce.clone().into(), + MAX_ENCODED_TX_SIZE, + true, + ) + .unwrap(); assert_eq!(l2_tx.nonce(), Nonce(123u32)); let mut call_request_without_nonce = call_request_with_nonce; call_request_without_nonce.nonce = None; let l2_tx = - L2Tx::from_request(call_request_without_nonce.into(), MAX_ENCODED_TX_SIZE).unwrap(); + 
L2Tx::from_request(call_request_without_nonce.into(), MAX_ENCODED_TX_SIZE, true) + .unwrap(); assert_eq!(l2_tx.nonce(), Nonce(0u32)); } diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index c133261bc23..0edece9e46b 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -1,6 +1,7 @@ use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; -use zksync_utils::ZeroPrefixHexSerde; +use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_utils::{bytecode::hash_bytecode, ZeroPrefixHexSerde}; use crate::{ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; @@ -89,8 +90,7 @@ impl Execute { &self.calldata } - /// Prepares calldata to invoke deployer contract. - /// This method encodes parameters for the `create` method. + /// Prepares calldata to invoke deployer contract. This method encodes parameters for the `create` method. pub fn encode_deploy_params_create( salt: H256, contract_hash: H256, @@ -116,4 +116,24 @@ impl Execute { FUNCTION_SIGNATURE.iter().copied().chain(params).collect() } + + /// Creates an instance for deploying the specified bytecode without additional dependencies. If necessary, + /// additional deps can be added to `Self.factory_deps` after this call. + pub fn for_deploy( + salt: H256, + contract_bytecode: Vec, + constructor_input: &[ethabi::Token], + ) -> Self { + let bytecode_hash = hash_bytecode(&contract_bytecode); + Self { + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), + calldata: Self::encode_deploy_params_create( + salt, + bytecode_hash, + ethabi::encode(constructor_input), + ), + value: 0.into(), + factory_deps: vec![contract_bytecode], + } + } } diff --git a/core/lib/vm_executor/src/oneshot/block.rs b/core/lib/vm_executor/src/oneshot/block.rs index 8ba77305ad7..c820ea794fe 100644 --- a/core/lib/vm_executor/src/oneshot/block.rs +++ b/core/lib/vm_executor/src/oneshot/block.rs @@ -133,26 +133,33 @@ impl BlockInfo { let protocol_version = l2_block_header .protocol_version .unwrap_or(ProtocolVersionId::last_potentially_undefined()); - + // We cannot use the EVM emulator mentioned in the block as is because of batch vs playground settings etc. + // Instead, we just check whether EVM emulation in general is enabled for a block, and store this binary flag for further use. + let use_evm_emulator = l2_block_header + .base_system_contracts_hashes + .evm_emulator + .is_some(); Ok(ResolvedBlockInfo { state_l2_block_number, state_l2_block_hash: l2_block_header.hash, vm_l1_batch_number, l1_batch_timestamp, protocol_version, + use_evm_emulator, is_pending: self.is_pending_l2_block(), }) } } /// Resolved [`BlockInfo`] containing additional data from VM state. 
-#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ResolvedBlockInfo { state_l2_block_number: L2BlockNumber, state_l2_block_hash: H256, vm_l1_batch_number: L1BatchNumber, l1_batch_timestamp: u64, protocol_version: ProtocolVersionId, + use_evm_emulator: bool, is_pending: bool, } @@ -161,6 +168,14 @@ impl ResolvedBlockInfo { pub fn state_l2_block_number(&self) -> L2BlockNumber { self.state_l2_block_number } + + pub fn protocol_version(&self) -> ProtocolVersionId { + self.protocol_version + } + + pub fn use_evm_emulator(&self) -> bool { + self.use_evm_emulator + } } impl OneshotEnvParameters { @@ -213,7 +228,10 @@ impl OneshotEnvParameters { version: resolved_block_info.protocol_version, base_system_smart_contracts: self .base_system_contracts - .get_by_protocol_version(resolved_block_info.protocol_version) + .get_by_protocol_version( + resolved_block_info.protocol_version, + resolved_block_info.use_evm_emulator, + ) .clone(), bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, execution_mode, diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs index 3b3a65fe30b..0e1fb9b2762 100644 --- a/core/lib/vm_executor/src/oneshot/contracts.rs +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -26,8 +26,12 @@ pub(super) struct MultiVMBaseSystemContracts { impl MultiVMBaseSystemContracts { /// Gets contracts for a certain version. - pub fn get_by_protocol_version(&self, version: ProtocolVersionId) -> &BaseSystemContracts { - match version { + pub fn get_by_protocol_version( + &self, + version: ProtocolVersionId, + use_evm_emulator: bool, + ) -> BaseSystemContracts { + let base = match version { ProtocolVersionId::Version0 | ProtocolVersionId::Version1 | ProtocolVersionId::Version2 @@ -54,6 +58,14 @@ impl MultiVMBaseSystemContracts { ProtocolVersionId::Version24 | ProtocolVersionId::Version25 => { &self.vm_1_5_0_increased_memory } + }; + let base = base.clone(); + + if version.is_post_1_5_0() && use_evm_emulator { + // EVM emulator is not versioned now; the latest version is always checked out + base.with_latest_evm_emulator() + } else { + base } } diff --git a/core/lib/vm_executor/src/oneshot/mock.rs b/core/lib/vm_executor/src/oneshot/mock.rs index 8f3a12603c1..a7363c633c6 100644 --- a/core/lib/vm_executor/src/oneshot/mock.rs +++ b/core/lib/vm_executor/src/oneshot/mock.rs @@ -68,6 +68,7 @@ impl MockOneshotExecutor { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, } }, ) diff --git a/core/lib/vm_executor/src/storage.rs b/core/lib/vm_executor/src/storage.rs index a2369820a5b..fa0e530c190 100644 --- a/core/lib/vm_executor/src/storage.rs +++ b/core/lib/vm_executor/src/storage.rs @@ -309,7 +309,11 @@ impl L1BatchParamsProvider { let contract_hashes = first_l2_block_in_batch.header.base_system_contracts_hashes; let base_system_contracts = storage .factory_deps_dal() - .get_base_system_contracts(contract_hashes.bootloader, contract_hashes.default_aa) + .get_base_system_contracts( + contract_hashes.bootloader, + contract_hashes.default_aa, + contract_hashes.evm_emulator, + ) .await .context("failed getting base system contracts")?; diff --git a/core/lib/vm_interface/src/storage/in_memory.rs b/core/lib/vm_interface/src/storage/in_memory.rs index 6a8b5643345..d83f675cd54 100644 --- a/core/lib/vm_interface/src/storage/in_memory.rs +++ b/core/lib/vm_interface/src/storage/in_memory.rs @@ -36,7 +36,7 @@ impl InMemoryStorage { Self::with_custom_system_contracts_and_chain_id( chain_id, 
bytecode_hasher, - get_system_smart_contracts(), + get_system_smart_contracts(false), ) } diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 3e53aad85f1..018ea075db5 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; use zksync_system_constants::{ BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, @@ -118,6 +120,10 @@ pub struct VmExecutionResultAndLogs { pub logs: VmExecutionLogs, pub statistics: VmExecutionStatistics, pub refunds: Refunds, + /// Bytecodes decommitted during VM execution. `None` if not computed by the VM. + // FIXME: currently, this is only filled up by `vm_latest`; probably makes sense to narrow down + // to *dynamic* factory deps, so that `HashMap::new()` is a valid value for VMs not supporting EVM emulation. + pub new_known_factory_deps: Option>>, } #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs index 27241c2c0fa..8f7c1d4fb0d 100644 --- a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs +++ b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs @@ -26,6 +26,7 @@ impl FinishedL1Batch { logs: VmExecutionLogs::default(), statistics: VmExecutionStatistics::default(), refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: vec![], diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index d974f2e9aa1..14ac37e5936 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -175,7 +175,7 @@ impl SandboxExecutor { let initialization_stage = SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].start(); let resolve_started_at = Instant::now(); let resolve_time = resolve_started_at.elapsed(); - let resolved_block_info = block_args.inner.resolve(&mut connection).await?; + let resolved_block_info = &block_args.resolved; // We don't want to emit too many logs. if resolve_time > Duration::from_millis(10) { tracing::debug!("Resolved block numbers (took {resolve_time:?})"); @@ -185,7 +185,7 @@ impl SandboxExecutor { SandboxAction::Execution { fee_input, tx } => { self.options .eth_call - .to_execute_env(&mut connection, &resolved_block_info, *fee_input, tx) + .to_execute_env(&mut connection, resolved_block_info, *fee_input, tx) .await? } &SandboxAction::Call { @@ -197,7 +197,7 @@ impl SandboxExecutor { .eth_call .to_call_env( &mut connection, - &resolved_block_info, + resolved_block_info, fee_input, enforced_base_fee, ) @@ -210,7 +210,7 @@ impl SandboxExecutor { } => { self.options .estimate_gas - .to_env(&mut connection, &resolved_block_info, fee_input, base_fee) + .to_env(&mut connection, resolved_block_info, fee_input, base_fee) .await? 
} }; diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index 36f10b8e9b0..b560d161ab5 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -10,7 +10,7 @@ use zksync_multivm::utils::get_eth_call_gas_limit; use zksync_types::{ api, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, ProtocolVersionId, U256, }; -use zksync_vm_executor::oneshot::BlockInfo; +use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo}; use self::vm_metrics::SandboxStage; pub(super) use self::{ @@ -285,21 +285,32 @@ pub enum BlockArgsError { } /// Information about a block provided to VM. -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] pub(crate) struct BlockArgs { inner: BlockInfo, + resolved: ResolvedBlockInfo, block_id: api::BlockId, } impl BlockArgs { pub async fn pending(connection: &mut Connection<'_, Core>) -> anyhow::Result { let inner = BlockInfo::pending(connection).await?; + let resolved = inner.resolve(connection).await?; Ok(Self { inner, + resolved, block_id: api::BlockId::Number(api::BlockNumber::Pending), }) } + pub fn protocol_version(&self) -> ProtocolVersionId { + self.resolved.protocol_version() + } + + pub fn use_evm_emulator(&self) -> bool { + self.resolved.use_evm_emulator() + } + /// Loads block information from DB. pub async fn new( connection: &mut Connection<'_, Core>, @@ -326,8 +337,10 @@ impl BlockArgs { return Err(BlockArgsError::Missing); }; + let inner = BlockInfo::for_existing_block(connection, block_number).await?; Ok(Self { - inner: BlockInfo::for_existing_block(connection, block_number).await?, + inner, + resolved: inner.resolve(connection).await?, block_id, }) } diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 306018e1543..75788d48058 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -7,7 +7,7 @@ use test_casing::test_casing; use zksync_dal::ConnectionPool; use zksync_multivm::{interface::ExecutionResult, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_node_test_utils::{create_l1_batch, create_l2_block, prepare_recovery_snapshot}; use zksync_state::PostgresStorageCaches; use zksync_types::{ api::state_override::{OverrideAccount, StateOverride}, @@ -93,17 +93,6 @@ async fn creating_block_args_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); - assert_eq!( - pending_block_args.block_id, - api::BlockId::Number(api::BlockNumber::Pending) - ); - assert_eq!( - pending_block_args.resolved_block_number(), - snapshot_recovery.l2_block_number + 1 - ); - assert!(pending_block_args.is_pending()); - let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) .await .unwrap(); @@ -122,6 +111,35 @@ async fn creating_block_args_after_snapshot_recovery() { .unwrap_err(); assert_matches!(err, BlockArgsError::Missing); + // Ensure there is a batch in the storage. 
+ let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); + storage + .blocks_dal() + .insert_l2_block(&l2_block) + .await + .unwrap(); + storage + .blocks_dal() + .insert_mock_l1_batch(&create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1)) + .await + .unwrap(); + storage + .blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(snapshot_recovery.l1_batch_number + 1) + .await + .unwrap(); + + let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); + assert_eq!( + pending_block_args.block_id, + api::BlockId::Number(api::BlockNumber::Pending) + ); + assert_eq!( + pending_block_args.resolved_block_number(), + snapshot_recovery.l2_block_number + 2 + ); + assert!(pending_block_args.is_pending()); + let pruned_blocks = [ api::BlockNumber::Earliest, 0.into(), @@ -147,13 +165,6 @@ async fn creating_block_args_after_snapshot_recovery() { assert_matches!(err, BlockArgsError::Missing); } - let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); - storage - .blocks_dal() - .insert_l2_block(&l2_block) - .await - .unwrap(); - let latest_block_args = BlockArgs::new(&mut storage, latest_block, &start_info) .await .unwrap(); diff --git a/core/node/api_server/src/tx_sender/gas_estimation.rs b/core/node/api_server/src/tx_sender/gas_estimation.rs index f5e42875a3d..44e568ce418 100644 --- a/core/node/api_server/src/tx_sender/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/gas_estimation.rs @@ -44,13 +44,14 @@ impl TxSender { pub async fn get_txs_fee_in_wei( &self, tx: Transaction, + block_args: BlockArgs, estimated_fee_scale_factor: f64, acceptable_overestimation: u64, state_override: Option, kind: BinarySearchKind, ) -> Result { let estimation_started_at = Instant::now(); - let mut estimator = GasEstimator::new(self, tx, state_override).await?; + let mut estimator = GasEstimator::new(self, tx, block_args, state_override).await?; estimator.adjust_transaction_fee(); let initial_estimate = estimator.initialize().await?; @@ -309,16 +310,10 @@ impl<'a> GasEstimator<'a> { pub(super) async fn new( sender: &'a TxSender, mut transaction: Transaction, + block_args: BlockArgs, state_override: Option, ) -> Result { - let mut connection = sender.acquire_replica_connection().await?; - let block_args = BlockArgs::pending(&mut connection).await?; - let protocol_version = connection - .blocks_dal() - .pending_protocol_version() - .await - .context("failed getting pending protocol version")?; - drop(connection); + let protocol_version = block_args.protocol_version(); let max_gas_limit = get_max_batch_gas_limit(protocol_version.into()); let fee_input = adjust_pubdata_price_for_tx( diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index ad8e38ef3cc..2dbc0d5a0dd 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -280,13 +280,11 @@ impl TxSender { pub async fn submit_tx( &self, tx: L2Tx, + block_args: BlockArgs, ) -> Result<(L2TxSubmissionResult, VmExecutionResultAndLogs), SubmitTxError> { let tx_hash = tx.hash(); let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::Validate); - let mut connection = self.acquire_replica_connection().await?; - let protocol_version = connection.blocks_dal().pending_protocol_version().await?; - drop(connection); - self.validate_tx(&tx, protocol_version).await?; + self.validate_tx(&tx, block_args.protocol_version()).await?; stage_latency.observe(); let stage_latency = 
SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DryRun); @@ -305,9 +303,7 @@ impl TxSender { tx: tx.clone(), }; let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; - let mut connection = self.acquire_replica_connection().await?; - let block_args = BlockArgs::pending(&mut connection).await?; - + let connection = self.acquire_replica_connection().await?; let execution_output = self .0 .executor diff --git a/core/node/api_server/src/tx_sender/tests/call.rs b/core/node/api_server/src/tx_sender/tests/call.rs index bdddb8e3895..e43f55b2b9a 100644 --- a/core/node/api_server/src/tx_sender/tests/call.rs +++ b/core/node/api_server/src/tx_sender/tests/call.rs @@ -56,7 +56,7 @@ async fn test_call( mut call: CallRequest, ) -> Result, SubmitTxError> { call.gas = call.gas.max(Some(10_000_000.into())); - let call = L2Tx::from_request(call.into(), usize::MAX).unwrap(); + let call = L2Tx::from_request(call.into(), usize::MAX, true).unwrap(); let mut storage = tx_sender .0 diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs index 086313a8562..3fd5fcb5188 100644 --- a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -25,6 +25,7 @@ const DEFAULT_MULTIPLIER: f64 = 64.0 / 63.0; async fn initial_gas_estimation_is_somewhat_accurate() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let transfer_value = U256::from(1_000_000_000); @@ -35,7 +36,7 @@ async fn initial_gas_estimation_is_somewhat_accurate() { let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); let tx = alice.create_transfer(transfer_value); - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) .await .unwrap(); estimator.adjust_transaction_fee(); @@ -131,7 +132,8 @@ async fn test_initial_estimate( ) -> VmExecutionResultAndLogs { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) .await .unwrap(); estimator.adjust_transaction_fee(); @@ -153,7 +155,8 @@ async fn test_initial_estimate( async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) .await .unwrap(); estimator.adjust_transaction_fee(); @@ -292,6 +295,7 @@ async fn out_of_gas_during_initial_estimate() { async fn insufficient_funds_error_for_transfer() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let tx = alice.create_transfer(1_000_000_000.into()); 
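
(Illustrative sketch, not diff content.) The reworked `TxSender` API expects callers to resolve `BlockArgs` for the pending block once and pass it explicitly to both gas estimation and submission; the patch adds a `pending_block_args` helper to the tests for exactly this. A minimal sketch of the resulting call pattern, assuming a configured `TxSender`, an open replica `Connection`, and a signed `L2Tx`; the function name and exact module placement are assumptions made for the example:

    use zksync_dal::{Connection, Core};
    use zksync_types::{api::state_override::StateOverride, l2::L2Tx};

    use crate::{
        execution_sandbox::BlockArgs,
        tx_sender::{BinarySearchKind, SubmitTxError, TxSender},
    };

    async fn estimate_then_submit(
        tx_sender: &TxSender,
        connection: &mut Connection<'_, Core>,
        tx: L2Tx,
        state_override: Option<StateOverride>,
        kind: BinarySearchKind,
    ) -> Result<(), SubmitTxError> {
        // Resolve the pending block once; `BlockArgs` is `Clone` after this patch,
        // so the same resolved info can be reused for estimation and submission.
        let block_args = BlockArgs::pending(connection).await?;

        // Estimation reads the protocol version from `block_args` instead of
        // querying the DB inside `GasEstimator::new`.
        let _fee = tx_sender
            .get_txs_fee_in_wei(
                tx.clone().into(),
                block_args.clone(),
                1.0,   // estimated_fee_scale_factor
                1_000, // acceptable_overestimation
                state_override,
                kind,
            )
            .await?;

        // Submission validates the tx against the same resolved protocol version.
        let (_submission_result, _vm_result) = tx_sender.submit_tx(tx, block_args).await?;
        Ok(())
    }

Resolving the block once keeps estimation and submission consistent (same protocol version and EVM-emulator availability) and drops the extra replica-DB lookups the removed in-method code performed.
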
@@ -300,6 +304,7 @@ async fn insufficient_funds_error_for_transfer() { let err = tx_sender .get_txs_fee_in_wei( tx.clone().into(), + block_args, fee_scale_factor, 1_000, None, @@ -317,11 +322,13 @@ async fn test_estimating_gas( ) { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let fee_scale_factor = 1.0; let fee = tx_sender .get_txs_fee_in_wei( tx.clone().into(), + block_args.clone(), fee_scale_factor, acceptable_overestimation, Some(state_override.clone()), @@ -339,6 +346,7 @@ async fn test_estimating_gas( let fee = tx_sender .get_txs_fee_in_wei( tx.into(), + block_args, fee_scale_factor, acceptable_overestimation, Some(state_override.clone()), @@ -419,6 +427,7 @@ async fn estimating_gas_for_reverting_tx() { let tx = alice.create_counter_tx(1.into(), true); let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let fee_scale_factor = 1.0; let acceptable_overestimation = 0; @@ -426,6 +435,7 @@ async fn estimating_gas_for_reverting_tx() { let err = tx_sender .get_txs_fee_in_wei( tx.clone().into(), + block_args.clone(), fee_scale_factor, acceptable_overestimation, Some(state_override.clone()), @@ -447,6 +457,7 @@ async fn estimating_gas_for_infinite_loop_tx() { let tx = alice.create_infinite_loop_tx(); let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let fee_scale_factor = 1.0; let acceptable_overestimation = 0; @@ -454,6 +465,7 @@ async fn estimating_gas_for_infinite_loop_tx() { let err = tx_sender .get_txs_fee_in_wei( tx.clone().into(), + block_args.clone(), fee_scale_factor, acceptable_overestimation, Some(state_override.clone()), diff --git a/core/node/api_server/src/tx_sender/tests/mod.rs b/core/node/api_server/src/tx_sender/tests/mod.rs index 3d48e320abc..cacd616202d 100644 --- a/core/node/api_server/src/tx_sender/tests/mod.rs +++ b/core/node/api_server/src/tx_sender/tests/mod.rs @@ -159,3 +159,8 @@ async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { .await .0 } + +async fn pending_block_args(tx_sender: &TxSender) -> BlockArgs { + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + BlockArgs::pending(&mut storage).await.unwrap() +} diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs b/core/node/api_server/src/tx_sender/tests/send_tx.rs index 678b88dab94..fdd63254cf0 100644 --- a/core/node/api_server/src/tx_sender/tests/send_tx.rs +++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs @@ -42,8 +42,9 @@ async fn submitting_tx_requires_one_connection() { }); let tx_executor = SandboxExecutor::mock(tx_executor).await; let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let block_args = pending_block_args(&tx_sender).await; - let submission_result = tx_sender.submit_tx(tx).await.unwrap(); + let submission_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!(submission_result.0, L2TxSubmissionResult::Added); let mut storage = pool.connection().await.unwrap(); @@ -178,6 +179,7 @@ async fn fee_validation_errors() { async fn sending_transfer() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = 
K256PrivateKey::random(); // Manually set sufficient balance for the tx initiator. @@ -189,7 +191,7 @@ async fn sending_transfer() { drop(storage); let transfer = alice.create_transfer(1_000_000_000.into()); - let (sub_result, vm_result) = tx_sender.submit_tx(transfer).await.unwrap(); + let (sub_result, vm_result) = tx_sender.submit_tx(transfer, block_args).await.unwrap(); assert_matches!(sub_result, L2TxSubmissionResult::Added); assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); } @@ -198,11 +200,12 @@ async fn sending_transfer() { async fn sending_transfer_with_insufficient_balance() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let transfer_value = 1_000_000_000.into(); let transfer = alice.create_transfer(transfer_value); - let err = tx_sender.submit_tx(transfer).await.unwrap_err(); + let err = tx_sender.submit_tx(transfer, block_args).await.unwrap_err(); assert_matches!( err, SubmitTxError::NotEnoughBalanceForFeeValue(balance, _, value) if balance.is_zero() @@ -214,6 +217,7 @@ async fn sending_transfer_with_insufficient_balance() { async fn sending_transfer_with_incorrect_signature() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let transfer_value = 1_000_000_000.into(); @@ -226,7 +230,7 @@ async fn sending_transfer_with_incorrect_signature() { let mut transfer = alice.create_transfer(transfer_value); transfer.execute.value = transfer_value / 2; // This should invalidate tx signature - let err = tx_sender.submit_tx(transfer).await.unwrap_err(); + let err = tx_sender.submit_tx(transfer, block_args).await.unwrap_err(); assert_matches!(err, SubmitTxError::ValidationFailed(_)); } @@ -235,6 +239,7 @@ async fn sending_transfer_with_incorrect_signature() { async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParams) { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); @@ -246,7 +251,7 @@ async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParam drop(storage); let tx = alice.create_load_test_tx(tx_params); - let (sub_result, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + let (sub_result, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!(sub_result, L2TxSubmissionResult::Added); assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); } @@ -255,6 +260,7 @@ async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParam async fn sending_reverting_transaction() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); @@ -266,7 +272,7 @@ async fn sending_reverting_transaction() { drop(storage); let tx = alice.create_counter_tx(1.into(), true); - let (_, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!( 
vm_result.result, ExecutionResult::Revert { output } if output.to_string().contains("This method always reverts") @@ -277,6 +283,7 @@ async fn sending_reverting_transaction() { async fn sending_transaction_out_of_gas() { let pool = ConnectionPool::::constrained_test_pool(1).await; let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; let alice = K256PrivateKey::random(); let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); @@ -288,6 +295,6 @@ async fn sending_transaction_out_of_gas() { drop(storage); let tx = alice.create_infinite_loop_tx(); - let (_, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); assert_matches!(vm_result.result, ExecutionResult::Revert { .. }); } diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 71560e4ddb8..7e99808dbc7 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -259,7 +259,11 @@ impl DebugNamespace { }; let call_overrides = request.get_call_overrides()?; - let call = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; + let call = L2Tx::from_request( + request.into(), + MAX_ENCODED_TX_SIZE, + false, // Even with EVM emulation enabled, calls must specify `to` field + )?; let vm_permit = self .state diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index a412c064fac..721ca985ceb 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -177,6 +177,10 @@ impl EnNamespace { genesis_commitment: Some(genesis_batch.metadata.commitment), bootloader_hash: Some(genesis_batch.header.base_system_contracts_hashes.bootloader), default_aa_hash: Some(genesis_batch.header.base_system_contracts_hashes.default_aa), + evm_emulator_hash: genesis_batch + .header + .base_system_contracts_hashes + .evm_emulator, l1_chain_id: self.state.api_config.l1_chain_id, sl_chain_id: Some(self.state.api_config.l1_chain_id.into()), l2_chain_id: self.state.api_config.l2_chain_id, diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 1d60d839e4e..44362dd098e 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -19,6 +19,7 @@ use zksync_web3_decl::{ }; use crate::{ + execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, utils::open_readonly_transaction, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, state::RpcState, TypedFilter}, @@ -77,7 +78,11 @@ impl EthNamespace { drop(connection); let call_overrides = request.get_call_overrides()?; - let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?; + let tx = L2Tx::from_request( + request.into(), + self.state.api_config.max_tx_size, + false, // Even with EVM emulation enabled, calls must specify `to` field + )?; // It is assumed that the previous checks has already enforced that the `max_fee_per_gas` is at most u64. 
let call_result: Vec = self @@ -108,10 +113,13 @@ impl EthNamespace { let is_eip712 = request_with_gas_per_pubdata_overridden .eip712_meta .is_some(); - + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); let mut tx: L2Tx = L2Tx::from_request( request_with_gas_per_pubdata_overridden.into(), self.state.api_config.max_tx_size, + block_args.use_evm_emulator(), )?; // The user may not include the proper transaction type during the estimation of @@ -137,6 +145,7 @@ impl EthNamespace { .tx_sender .get_txs_fee_in_wei( tx.into(), + block_args, scale_factor, acceptable_overestimation as u64, state_override, @@ -619,10 +628,15 @@ impl EthNamespace { } pub async fn send_raw_transaction_impl(&self, tx_bytes: Bytes) -> Result { - let (mut tx, hash) = self.state.parse_transaction_bytes(&tx_bytes.0)?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let (mut tx, hash) = self + .state + .parse_transaction_bytes(&tx_bytes.0, &block_args)?; tx.set_input(tx_bytes.0, hash); - let submit_result = self.state.tx_sender.submit_tx(tx).await; + let submit_result = self.state.tx_sender.submit_tx(tx, block_args).await; submit_result.map(|_| hash).map_err(|err| { tracing::debug!("Send raw transaction error: {err}"); API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index 2192f11eb14..bcfd7daf346 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, convert::TryInto}; +use std::collections::HashMap; use anyhow::Context as _; use zksync_dal::{Connection, Core, CoreDal, DalError}; @@ -30,6 +30,7 @@ use zksync_web3_decl::{ }; use crate::{ + execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, utils::open_readonly_transaction, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, RpcState}, @@ -63,16 +64,21 @@ impl ZksNamespace { eip712_meta.gas_per_pubdata = U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE); } + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); let mut tx = L2Tx::from_request( request_with_gas_per_pubdata_overridden.into(), self.state.api_config.max_tx_size, + block_args.use_evm_emulator(), )?; // When we're estimating fee, we are trying to deduce values related to fee, so we should // not consider provided ones. 
tx.common_data.fee.max_priority_fee_per_gas = 0u64.into(); tx.common_data.fee.gas_per_pubdata_limit = U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE); - self.estimate_fee(tx.into(), state_override).await + self.estimate_fee(tx.into(), block_args, state_override) + .await } pub async fn estimate_l1_to_l2_gas_impl( @@ -89,17 +95,25 @@ impl ZksNamespace { } } - let tx: L1Tx = request_with_gas_per_pubdata_overridden - .try_into() - .map_err(Web3Error::SerializationError)?; - - let fee = self.estimate_fee(tx.into(), state_override).await?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let tx = L1Tx::from_request( + request_with_gas_per_pubdata_overridden, + block_args.use_evm_emulator(), + ) + .map_err(Web3Error::SerializationError)?; + + let fee = self + .estimate_fee(tx.into(), block_args, state_override) + .await?; Ok(fee.gas_limit) } async fn estimate_fee( &self, tx: Transaction, + block_args: BlockArgs, state_override: Option, ) -> Result { let scale_factor = self.state.api_config.estimate_gas_scale_factor; @@ -112,6 +126,7 @@ impl ZksNamespace { .tx_sender .get_txs_fee_in_wei( tx, + block_args, scale_factor, acceptable_overestimation as u64, state_override, @@ -583,10 +598,15 @@ impl ZksNamespace { &self, tx_bytes: Bytes, ) -> Result<(H256, VmExecutionResultAndLogs), Web3Error> { - let (mut tx, hash) = self.state.parse_transaction_bytes(&tx_bytes.0)?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let (mut tx, hash) = self + .state + .parse_transaction_bytes(&tx_bytes.0, &block_args)?; tx.set_input(tx_bytes.0, hash); - let submit_result = self.state.tx_sender.submit_tx(tx).await; + let submit_result = self.state.tx_sender.submit_tx(tx, block_args).await; submit_result.map(|result| (hash, result.1)).map_err(|err| { tracing::debug!("Send raw transaction error: {err}"); API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index 723661ab908..18c206eaf58 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -245,12 +245,19 @@ pub(crate) struct RpcState { } impl RpcState { - pub fn parse_transaction_bytes(&self, bytes: &[u8]) -> Result<(L2Tx, H256), Web3Error> { + pub fn parse_transaction_bytes( + &self, + bytes: &[u8], + block_args: &BlockArgs, + ) -> Result<(L2Tx, H256), Web3Error> { let chain_id = self.api_config.l2_chain_id; let (tx_request, hash) = api::TransactionRequest::from_bytes(bytes, chain_id)?; - Ok(( - L2Tx::from_request(tx_request, self.api_config.max_tx_size)?, + L2Tx::from_request( + tx_request, + self.api_config.max_tx_size, + block_args.use_evm_emulator(), + )?, hash, )) } diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index d8086c6c6ad..e29ea246213 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -257,12 +257,12 @@ struct SendRawTransactionTest { } impl SendRawTransactionTest { - fn transaction_bytes_and_hash() -> (Vec, H256) { + fn transaction_bytes_and_hash(include_to: bool) -> (Vec, H256) { let private_key = Self::private_key(); let tx_request = api::TransactionRequest { chain_id: Some(L2ChainId::default().as_u64()), from: Some(private_key.address()), - to: Some(Address::repeat_byte(2)), + to: include_to.then(|| 
Address::repeat_byte(2)), value: 123_456.into(), gas: (get_intrinsic_constants().l2_tx_intrinsic_gas * 2).into(), gas_price: StateKeeperConfig::for_tests().minimal_l2_gas_price.into(), @@ -313,7 +313,7 @@ impl HttpTest for SendRawTransactionTest { L2BlockNumber(1) }; tx_executor.set_tx_responses(move |tx, env| { - assert_eq!(tx.hash(), Self::transaction_bytes_and_hash().1); + assert_eq!(tx.hash(), Self::transaction_bytes_and_hash(true).1); assert_eq!(env.l1_batch.first_l2_block.number, pending_block.0); ExecutionResult::Success { output: vec![] } }); @@ -334,7 +334,7 @@ impl HttpTest for SendRawTransactionTest { .await?; } - let (tx_bytes, tx_hash) = Self::transaction_bytes_and_hash(); + let (tx_bytes, tx_hash) = Self::transaction_bytes_and_hash(true); let send_result = client.send_raw_transaction(tx_bytes.into()).await?; assert_eq!(send_result, tx_hash); Ok(()) @@ -357,6 +357,50 @@ async fn send_raw_transaction_after_snapshot_recovery() { .await; } +fn assert_null_to_address_error(error: &ClientError) { + if let ClientError::Call(error) = error { + assert_eq!(error.code(), 3); + assert!(error.message().contains("toAddressIsNull"), "{error:?}"); + assert!(error.data().is_none(), "{error:?}"); + } else { + panic!("Unexpected error: {error:?}"); + } +} + +#[derive(Debug)] +struct SendRawTransactionWithoutToAddressTest; + +#[async_trait] +impl HttpTest for SendRawTransactionWithoutToAddressTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut storage = pool.connection().await?; + storage + .storage_logs_dal() + .append_storage_logs( + L2BlockNumber(0), + &[SendRawTransactionTest::balance_storage_log()], + ) + .await?; + + let (tx_bytes, _) = SendRawTransactionTest::transaction_bytes_and_hash(false); + let err = client + .send_raw_transaction(tx_bytes.into()) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) + } +} + +#[tokio::test] +async fn send_raw_transaction_fails_without_to_address() { + test_http_server(SendRawTransactionWithoutToAddressTest).await; +} + #[derive(Debug)] struct SendTransactionWithDetailedOutputTest; @@ -405,7 +449,7 @@ impl SendTransactionWithDetailedOutputTest { impl HttpTest for SendTransactionWithDetailedOutputTest { fn transaction_executor(&self) -> MockOneshotExecutor { let mut tx_executor = MockOneshotExecutor::default(); - let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(); + let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(true); let vm_execution_logs = VmExecutionLogs { storage_logs: self.storage_logs(), events: self.vm_events(), @@ -423,6 +467,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { logs: vm_execution_logs.clone(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, } }); tx_executor @@ -443,7 +488,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { ) .await?; - let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(); + let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(true); let send_result = client .send_raw_transaction_with_detailed_output(tx_bytes.into()) .await?; @@ -835,3 +880,30 @@ async fn estimate_gas_with_state_override() { let inner = EstimateGasTest::new(false); test_http_server(EstimateGasWithStateOverrideTest { inner }).await; } + +#[derive(Debug)] +struct EstimateGasWithoutToAddessTest; + +#[async_trait] +impl HttpTest for EstimateGasWithoutToAddessTest { + async fn test( + &self, + 
client: &DynClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut l2_transaction = create_l2_transaction(10, 100); + l2_transaction.execute.contract_address = None; + l2_transaction.common_data.signature = vec![]; // Remove invalidated signature so that it doesn't trip estimation logic + let err = client + .estimate_gas(l2_transaction.clone().into(), None, None) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) + } +} + +#[tokio::test] +async fn estimate_gas_fails_without_to_address() { + test_http_server(EstimateGasWithoutToAddessTest).await; +} diff --git a/core/node/commitment_generator/src/lib.rs b/core/node/commitment_generator/src/lib.rs index 6cb14cfda53..cf6971b041c 100644 --- a/core/node/commitment_generator/src/lib.rs +++ b/core/node/commitment_generator/src/lib.rs @@ -176,6 +176,7 @@ impl CommitmentGenerator { rollup_root_hash: tree_data.hash, bootloader_code_hash: header.base_system_contracts_hashes.bootloader, default_aa_code_hash: header.base_system_contracts_hashes.default_aa, + evm_emulator_code_hash: header.base_system_contracts_hashes.evm_emulator, protocol_version, }; let touched_slots = connection diff --git a/core/node/consensus/src/batch.rs b/core/node/consensus/src/batch.rs index 08246c4e5c0..af38f446c1b 100644 --- a/core/node/consensus/src/batch.rs +++ b/core/node/consensus/src/batch.rs @@ -261,7 +261,7 @@ impl L1BatchWithWitness { // TODO: make consensus payload contain `abi::Transaction` instead. // TODO: currently the payload doesn't contain the block number, which is // annoying. Consider adding it to payload. - let t2: Transaction = abi::Transaction::try_from(t.clone())?.try_into()?; + let t2 = Transaction::from_abi(abi::Transaction::try_from(t.clone())?, true)?; anyhow::ensure!(t == &t2); hasher.push_tx_hash(t.hash()); } diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index 65c464d98b9..5817e766c6b 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -57,7 +57,7 @@ pub(crate) fn mock_genesis_params(protocol_version: ProtocolVersionId) -> Genesi GenesisParams::from_genesis_config( cfg, BaseSystemContracts::load_from_disk(), - get_system_smart_contracts(), + get_system_smart_contracts(false), ) .unwrap() } diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 6e9e71d74ea..a08d16f456a 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -144,19 +144,19 @@ impl EthTxAggregator { } pub(super) async fn get_multicall_data(&mut self) -> Result { - let calldata = self.generate_calldata_for_multicall(); + let (calldata, evm_emulator_hash_requested) = self.generate_calldata_for_multicall(); let args = CallFunctionArgs::new(&self.functions.aggregate3.name, calldata).for_contract( self.l1_multicall3_address, &self.functions.multicall_contract, ); let aggregate3_result: Token = args.call((*self.eth_client).as_ref()).await?; - self.parse_multicall_data(aggregate3_result) + self.parse_multicall_data(aggregate3_result, evm_emulator_hash_requested) } // Multicall's aggregate function accepts 1 argument - arrays of different contract calls. // The role of the method below is to tokenize input for multicall, which is actually a vector of tokens. // Each token describes a specific contract call. 
- pub(super) fn generate_calldata_for_multicall(&self) -> Vec { + pub(super) fn generate_calldata_for_multicall(&self) -> (Vec, bool) { const ALLOW_FAILURE: bool = false; // First zksync contract call @@ -215,14 +215,31 @@ impl EthTxAggregator { calldata: get_protocol_version_input, }; - // Convert structs into tokens and return vector with them - vec![ + let mut token_vec = vec![ get_bootloader_hash_call.into_token(), get_default_aa_hash_call.into_token(), get_verifier_params_call.into_token(), get_verifier_call.into_token(), get_protocol_version_call.into_token(), - ] + ]; + + let mut evm_emulator_hash_requested = false; + let get_l2_evm_emulator_hash_input = self + .functions + .get_evm_emulator_bytecode_hash + .as_ref() + .and_then(|f| f.encode_input(&[]).ok()); + if let Some(input) = get_l2_evm_emulator_hash_input { + let call = Multicall3Call { + target: self.state_transition_chain_contract, + allow_failure: ALLOW_FAILURE, + calldata: input, + }; + token_vec.insert(2, call.into_token()); + evm_emulator_hash_requested = true; + } + + (token_vec, evm_emulator_hash_requested) } // The role of the method below is to de-tokenize multicall call's result, which is actually a token. @@ -230,6 +247,7 @@ impl EthTxAggregator { pub(super) fn parse_multicall_data( &self, token: Token, + evm_emulator_hash_requested: bool, ) -> Result { let parse_error = |tokens: &[Token]| { Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( @@ -238,8 +256,9 @@ impl EthTxAggregator { }; if let Token::Array(call_results) = token { - // 5 calls are aggregated in multicall - if call_results.len() != 5 { + let number_of_calls = if evm_emulator_hash_requested { 6 } else { 5 }; + // 5 or 6 calls are aggregated in multicall + if call_results.len() != number_of_calls { return parse_error(&call_results); } let mut call_results_iterator = call_results.into_iter(); @@ -268,12 +287,31 @@ impl EthTxAggregator { ))); } let default_aa = H256::from_slice(&multicall3_default_aa); + + let evm_emulator = if evm_emulator_hash_requested { + let multicall3_evm_emulator = + Multicall3Result::from_token(call_results_iterator.next().unwrap())? + .return_data; + if multicall3_evm_emulator.len() != 32 { + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( + "multicall3 EVM emulator hash data is not of the len of 32: {:?}", + multicall3_evm_emulator + ), + ))); + } + Some(H256::from_slice(&multicall3_evm_emulator)) + } else { + None + }; + let base_system_contracts_hashes = BaseSystemContractsHashes { bootloader, default_aa, + evm_emulator, }; - call_results_iterator.next().unwrap(); + call_results_iterator.next().unwrap(); // FIXME: why is this value requested? let multicall3_verifier_address = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index 9be1384daae..86a8c477f9f 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -23,6 +23,8 @@ use crate::{ Aggregator, EthTxAggregator, EthTxManager, }; +pub(super) const STATE_TRANSITION_CONTRACT_ADDRESS: Address = Address::repeat_byte(0xa0); + // Alias to conveniently call static methods of `ETHSender`. 
type MockEthTxManager = EthTxManager; @@ -172,7 +174,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -192,7 +194,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); l2_gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -212,7 +214,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); gateway_blobs.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -261,7 +263,7 @@ impl EthSenderTester { // ZKsync contract address Address::random(), contracts_config.l1_multicall3_addr, - Address::random(), + STATE_TRANSITION_CONTRACT_ADDRESS, Default::default(), custom_commit_sender_addr, SettlementMode::SettlesToL1, diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index e03532458f1..9e844a8b853 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -1,7 +1,9 @@ use assert_matches::assert_matches; use test_casing::{test_casing, Product}; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_l1_contract_interface::i_executor::methods::ExecuteBatches; +use zksync_l1_contract_interface::{ + i_executor::methods::ExecuteBatches, multicall3::Multicall3Call, Tokenizable, +}; use zksync_node_test_utils::create_l1_batch; use zksync_types::{ aggregated_operations::AggregatedActionType, @@ -9,16 +11,19 @@ use zksync_types::{ commitment::{ L1BatchCommitmentMode, L1BatchMetaParameters, L1BatchMetadata, L1BatchWithMetadata, }, + ethabi, ethabi::Token, helpers::unix_timestamp_ms, + web3, web3::contract::Error, - ProtocolVersionId, H256, + Address, ProtocolVersionId, H256, }; use crate::{ abstract_l1_interface::OperatorType, aggregated_operations::AggregatedOperation, - tester::{EthSenderTester, TestL1Batch}, + tester::{EthSenderTester, TestL1Batch, STATE_TRANSITION_CONTRACT_ADDRESS}, + zksync_functions::ZkSyncFunctions, EthSenderError, }; @@ -37,21 +42,59 @@ const COMMITMENT_MODES: [L1BatchCommitmentMode; 2] = [ L1BatchCommitmentMode::Validium, ]; -pub(crate) fn mock_multicall_response() -> Token { - Token::Array(vec![ - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 96])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 32])]), - Token::Tuple(vec![ - Token::Bool(true), - Token::Bytes( +pub(crate) fn mock_multicall_response(call: &web3::CallRequest) -> Token { + let functions = ZkSyncFunctions::default(); + let evm_emulator_getter_signature = functions + .get_evm_emulator_bytecode_hash + .as_ref() + .map(ethabi::Function::short_signature); + let bootloader_signature = functions.get_l2_bootloader_bytecode_hash.short_signature(); + let default_aa_signature = functions + .get_l2_default_account_bytecode_hash + 
.short_signature(); + let evm_emulator_getter_signature = evm_emulator_getter_signature.as_ref().map(|sig| &sig[..]); + + let calldata = &call.data.as_ref().expect("no calldata").0; + assert_eq!(calldata[..4], functions.aggregate3.short_signature()); + let mut tokens = functions + .aggregate3 + .decode_input(&calldata[4..]) + .expect("invalid multicall"); + assert_eq!(tokens.len(), 1); + let Token::Array(tokens) = tokens.pop().unwrap() else { + panic!("Unexpected input: {tokens:?}"); + }; + + let calls = tokens.into_iter().map(Multicall3Call::from_token); + let response = calls.map(|call| { + let call = call.unwrap(); + assert_eq!(call.target, STATE_TRANSITION_CONTRACT_ADDRESS); + let output = match &call.calldata[..4] { + selector if selector == bootloader_signature => { + vec![1u8; 32] + } + selector if selector == default_aa_signature => { + vec![2u8; 32] + } + selector if Some(selector) == evm_emulator_getter_signature => { + vec![3u8; 32] + } + selector if selector == functions.get_verifier_params.short_signature() => { + vec![4u8; 96] + } + selector if selector == functions.get_verifier.short_signature() => { + vec![5u8; 32] + } + selector if selector == functions.get_protocol_version.short_signature() => { H256::from_low_u64_be(ProtocolVersionId::default() as u64) .0 - .to_vec(), - ), - ]), - ]) + .to_vec() + } + _ => panic!("unexpected call: {call:?}"), + }; + Token::Tuple(vec![Token::Bool(true), Token::Bytes(output)]) + }); + Token::Array(response.collect()) } pub(crate) fn l1_batch_with_metadata(header: L1BatchHeader) -> L1BatchWithMetadata { @@ -74,6 +117,7 @@ pub(crate) fn default_l1_batch_metadata() -> L1BatchMetadata { zkporter_is_available: false, bootloader_code_hash: H256::default(), default_aa_code_hash: H256::default(), + evm_emulator_code_hash: None, protocol_version: Some(ProtocolVersionId::default()), }, aux_data_hash: H256::default(), @@ -656,22 +700,71 @@ async fn skipped_l1_batch_in_the_middle( Ok(()) } -#[test_casing(2, COMMITMENT_MODES)] +#[test_casing(2, [false, true])] #[test_log::test(tokio::test)] -async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { +async fn parsing_multicall_data(with_evm_emulator: bool) { let tester = EthSenderTester::new( ConnectionPool::::test_pool().await, vec![100; 100], false, true, - commitment_mode, + L1BatchCommitmentMode::Rollup, ) .await; - assert!(tester + let mut mock_response = vec![ + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 96])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![5u8; 32])]), + Token::Tuple(vec![ + Token::Bool(true), + Token::Bytes( + H256::from_low_u64_be(ProtocolVersionId::latest() as u64) + .0 + .to_vec(), + ), + ]), + ]; + if with_evm_emulator { + mock_response.insert( + 2, + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 32])]), + ); + } + let mock_response = Token::Array(mock_response); + + let parsed = tester .aggregator - .parse_multicall_data(mock_multicall_response()) - .is_ok()); + .parse_multicall_data(mock_response, with_evm_emulator) + .unwrap(); + assert_eq!( + parsed.base_system_contracts_hashes.bootloader, + H256::repeat_byte(1) + ); + assert_eq!( + parsed.base_system_contracts_hashes.default_aa, + H256::repeat_byte(2) + ); + let expected_evm_emulator_hash = with_evm_emulator.then(|| H256::repeat_byte(3)); + assert_eq!( + parsed.base_system_contracts_hashes.evm_emulator, + 
expected_evm_emulator_hash + ); + assert_eq!(parsed.verifier_address, Address::repeat_byte(5)); + assert_eq!(parsed.protocol_version_id, ProtocolVersionId::latest()); +} + +#[test_log::test(tokio::test)] +async fn parsing_multicall_data_errors() { + let tester = EthSenderTester::new( + ConnectionPool::::test_pool().await, + vec![100; 100], + false, + true, + L1BatchCommitmentMode::Rollup, + ) + .await; let original_wrong_form_data = vec![ // should contain 5 tuples @@ -722,7 +815,7 @@ async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { assert_matches!( tester .aggregator - .parse_multicall_data(wrong_data_instance.clone()), + .parse_multicall_data(wrong_data_instance.clone(), true), Err(EthSenderError::Parse(Error::InvalidOutputType(_))) ); } @@ -739,6 +832,17 @@ async fn get_multicall_data(commitment_mode: L1BatchCommitmentMode) { commitment_mode, ) .await; - let multicall_data = tester.aggregator.get_multicall_data().await; - assert!(multicall_data.is_ok()); + + let data = tester.aggregator.get_multicall_data().await.unwrap(); + assert_eq!( + data.base_system_contracts_hashes.bootloader, + H256::repeat_byte(1) + ); + assert_eq!( + data.base_system_contracts_hashes.default_aa, + H256::repeat_byte(2) + ); + assert_eq!(data.base_system_contracts_hashes.evm_emulator, None); + assert_eq!(data.verifier_address, Address::repeat_byte(5)); + assert_eq!(data.protocol_version_id, ProtocolVersionId::latest()); } diff --git a/core/node/eth_sender/src/zksync_functions.rs b/core/node/eth_sender/src/zksync_functions.rs index 8f13f0e63ae..85508c71c03 100644 --- a/core/node/eth_sender/src/zksync_functions.rs +++ b/core/node/eth_sender/src/zksync_functions.rs @@ -12,6 +12,7 @@ pub(super) struct ZkSyncFunctions { pub(super) get_l2_bootloader_bytecode_hash: Function, pub(super) get_l2_default_account_bytecode_hash: Function, pub(super) get_verifier: Function, + pub(super) get_evm_emulator_bytecode_hash: Option, pub(super) get_verifier_params: Function, pub(super) get_protocol_version: Function, @@ -59,6 +60,8 @@ impl Default for ZkSyncFunctions { get_function(&zksync_contract, "getL2BootloaderBytecodeHash"); let get_l2_default_account_bytecode_hash = get_function(&zksync_contract, "getL2DefaultAccountBytecodeHash"); + let get_evm_emulator_bytecode_hash = + get_optional_function(&zksync_contract, "getL2EvmSimulatorBytecodeHash"); let get_verifier = get_function(&zksync_contract, "getVerifier"); let get_verifier_params = get_function(&zksync_contract, "getVerifierParams"); let get_protocol_version = get_function(&zksync_contract, "getProtocolVersion"); @@ -74,6 +77,7 @@ impl Default for ZkSyncFunctions { post_shared_bridge_execute, get_l2_bootloader_bytecode_hash, get_l2_default_account_bytecode_hash, + get_evm_emulator_bytecode_hash, get_verifier, get_verifier_params, get_protocol_version, diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index feb9eff35b5..d9faf7b664e 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -245,8 +245,11 @@ fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { received_timestamp_ms: 0, }; // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. 
- let tx = - Transaction::try_from(abi::Transaction::try_from(Transaction::from(tx)).unwrap()).unwrap(); + let tx = Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap(); tx.try_into().unwrap() } @@ -272,10 +275,13 @@ fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx received_timestamp_ms: 0, }; // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. - Transaction::try_from(abi::Transaction::try_from(Transaction::from(tx)).unwrap()) - .unwrap() - .try_into() - .unwrap() + Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap() + .try_into() + .unwrap() } async fn create_test_watcher( diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 1f30d314bb0..5c17add2e98 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -104,6 +104,7 @@ impl GenesisParams { default_aa: config .default_aa_hash .ok_or(GenesisError::MalformedConfig("default_aa_hash"))?, + evm_emulator: config.evm_emulator_hash, }; if base_system_contracts_hashes != base_system_contracts.hashes() { return Err(GenesisError::BaseSystemContractsHashes(Box::new( @@ -124,15 +125,18 @@ impl GenesisParams { } pub fn load_genesis_params(config: GenesisConfig) -> Result { - let base_system_contracts = BaseSystemContracts::load_from_disk(); - let system_contracts = get_system_smart_contracts(); + let mut base_system_contracts = BaseSystemContracts::load_from_disk(); + if config.evm_emulator_hash.is_some() { + base_system_contracts = base_system_contracts.with_latest_evm_emulator(); + } + let system_contracts = get_system_smart_contracts(config.evm_emulator_hash.is_some()); Self::from_genesis_config(config, base_system_contracts, system_contracts) } pub fn mock() -> Self { Self { base_system_contracts: BaseSystemContracts::load_from_disk(), - system_contracts: get_system_smart_contracts(), + system_contracts: get_system_smart_contracts(false), config: mock_genesis_config(), } } @@ -172,6 +176,7 @@ pub fn mock_genesis_config() -> GenesisConfig { genesis_commitment: Some(H256::default()), bootloader_hash: Some(base_system_contracts_hashes.bootloader), default_aa_hash: Some(base_system_contracts_hashes.default_aa), + evm_emulator_hash: base_system_contracts_hashes.evm_emulator, l1_chain_id: L1ChainId(9), sl_chain_id: None, l2_chain_id: L2ChainId::default(), @@ -235,6 +240,7 @@ pub async fn insert_genesis_batch( .config .default_aa_hash .ok_or(GenesisError::MalformedConfig("default_aa_hash"))?, + evm_emulator: genesis_params.config.evm_emulator_hash, }; let commitment_input = CommitmentInput::for_genesis_batch( genesis_root_hash, diff --git a/core/node/genesis/src/utils.rs b/core/node/genesis/src/utils.rs index a6c9513dbde..6042513537c 100644 --- a/core/node/genesis/src/utils.rs +++ b/core/node/genesis/src/utils.rs @@ -130,7 +130,8 @@ pub(super) async fn insert_base_system_contracts_to_factory_deps( contracts: &BaseSystemContracts, ) -> Result<(), GenesisError> { let factory_deps = [&contracts.bootloader, &contracts.default_aa] - .iter() + .into_iter() + .chain(contracts.evm_emulator.as_ref()) .map(|c| (c.hash, be_words_to_bytes(&c.code))) .collect(); diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 6075ff048bf..7687595740a 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -345,6 +345,7 @@ impl StateKeeperIO for ExternalIO { let 
default_account_code_hash = protocol_version .default_account_code_hash() .context("Missing default account code hash")?; + let evm_emulator_code_hash = protocol_version.evm_emulator_code_hash(); let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash(); self.pool .connection_tagged("sync_layer") @@ -362,6 +363,7 @@ impl StateKeeperIO for ExternalIO { BaseSystemContractsHashes { bootloader: bootloader_code_hash, default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, }, l2_system_upgrade_tx_hash, ) @@ -375,9 +377,22 @@ impl StateKeeperIO for ExternalIO { .get_base_system_contract(default_account_code_hash, cursor.next_l2_block) .await .with_context(|| format!("cannot fetch default AA code for {protocol_version:?}"))?; + let evm_emulator = if let Some(hash) = evm_emulator_code_hash { + Some( + self.get_base_system_contract(hash, cursor.next_l2_block) + .await + .with_context(|| { + format!("cannot fetch EVM emulator code for {protocol_version:?}") + })?, + ) + } else { + None + }; + Ok(BaseSystemContracts { bootloader, default_aa, + evm_emulator, }) } diff --git a/core/node/node_sync/src/genesis.rs b/core/node/node_sync/src/genesis.rs index ccc26b417e9..0ff8d0d448c 100644 --- a/core/node/node_sync/src/genesis.rs +++ b/core/node/node_sync/src/genesis.rs @@ -38,6 +38,7 @@ async fn create_genesis_params( let base_system_contracts_hashes = BaseSystemContractsHashes { bootloader: config.bootloader_hash.context("Genesis is not finished")?, default_aa: config.default_aa_hash.context("Genesis is not finished")?, + evm_emulator: config.evm_emulator_hash, }; if zksync_chain_id != config.l2_chain_id { @@ -47,10 +48,11 @@ async fn create_genesis_params( // Load the list of addresses that are known to contain system contracts at any point in time. // Not every of these addresses is guaranteed to be present in the genesis state, but we'll iterate through // them and try to fetch the contract bytecode for each of them. - let system_contract_addresses: Vec<_> = get_system_smart_contracts() - .into_iter() - .map(|contract| *contract.account_id.address()) - .collect(); + let system_contract_addresses: Vec<_> = + get_system_smart_contracts(config.evm_emulator_hash.is_some()) + .into_iter() + .map(|contract| *contract.account_id.address()) + .collect(); // These have to be *initial* base contract hashes of main node // (those that were used during genesis), not necessarily the current ones. @@ -103,6 +105,18 @@ async fn fetch_base_system_contracts( .fetch_system_contract_by_hash(contract_hashes.default_aa) .await? .context("default AA bytecode is missing on main node")?; + let evm_emulator = if let Some(hash) = contract_hashes.evm_emulator { + let bytes = client + .fetch_system_contract_by_hash(hash) + .await? 
+ .context("EVM Simulator bytecode is missing on main node")?; + Some(SystemContractCode { + code: zksync_utils::bytes_to_be_words(bytes), + hash, + }) + } else { + None + }; Ok(BaseSystemContracts { bootloader: SystemContractCode { code: zksync_utils::bytes_to_be_words(bootloader_bytecode), @@ -112,5 +126,6 @@ async fn fetch_base_system_contracts( code: zksync_utils::bytes_to_be_words(default_aa_bytecode), hash: contract_hashes.default_aa, }, + evm_emulator, }) } diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index d9a98c2bce3..3f5791cdf24 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -304,6 +304,7 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo timestamp: snapshot.l2_block_timestamp + 1, bootloader_code_hash: Some(H256::repeat_byte(1)), default_account_code_hash: Some(H256::repeat_byte(1)), + evm_emulator_code_hash: Some(H256::repeat_byte(1)), ..api::ProtocolVersion::default() }; client.insert_protocol_version(next_protocol_version.clone()); @@ -345,6 +346,13 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo next_protocol_version.default_account_code_hash.unwrap() ); + assert_eq!( + persisted_protocol_version + .base_system_contracts_hashes + .evm_emulator, + next_protocol_version.evm_emulator_code_hash + ); + let l2_block = storage .blocks_dal() .get_l2_block_header(snapshot.l2_block_number + 1) diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 86cc5323448..8220aef5da0 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -62,6 +62,7 @@ async fn request_tee_proof_inputs() { code: vec![U256([1; 4])], hash: H256([1; 32]), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, diff --git a/core/node/state_keeper/src/executor/mod.rs b/core/node/state_keeper/src/executor/mod.rs index 2fa5c3b9c12..903dae2f1ca 100644 --- a/core/node/state_keeper/src/executor/mod.rs +++ b/core/node/state_keeper/src/executor/mod.rs @@ -40,7 +40,7 @@ impl TxExecutionResult { _ => Self::Success { tx_metrics: Box::new(ExecutionMetricsForCriteria::new(Some(tx), &res.tx_result)), gas_remaining: res.tx_result.statistics.gas_remaining, - tx_result: res.tx_result, + tx_result: res.tx_result.clone(), compressed_bytecodes: res.compressed_bytecodes, call_tracer_result: res.call_traces, }, diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index 7a1871dbfea..79072f23aed 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -259,7 +259,7 @@ impl Tester { patch: 0.into(), }, &BASE_SYSTEM_CONTRACTS, - &get_system_smart_contracts(), + &get_system_smart_contracts(false), Default::default(), ) .await diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 24b1ffca631..97340d6496a 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -347,7 +347,7 @@ impl StateKeeperOutputHandler for TreeWritesPersistence { #[cfg(test)] mod tests { - use std::collections::HashSet; + use std::collections::{HashMap, HashSet}; use assert_matches::assert_matches; use futures::FutureExt; @@ -462,6 +462,7 @@ mod tests { tx, tx_result, vec![], + HashMap::new(), BlockGasCount::default(), 
VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 7ea01e6af1e..e2a90f30691 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{collections::HashMap, time::Duration}; use test_casing::test_casing; use zksync_contracts::BaseSystemContractsHashes; @@ -249,6 +249,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); @@ -267,6 +268,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); @@ -356,6 +358,7 @@ async fn processing_events_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); } @@ -457,6 +460,7 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom tx.into(), create_execution_result([]), vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index 2dc45a5eaaa..02170283e94 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -156,7 +156,7 @@ impl Tester { patch: 0.into(), }, &self.base_system_contracts, - &get_system_smart_contracts(), + &get_system_smart_contracts(false), L1VerifierConfig::default(), ) .await diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index d36ceec7d70..22f24573070 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -498,8 +498,9 @@ impl ZkSyncStateKeeper { updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -624,8 +625,9 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -704,8 +706,9 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, vec![], diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index e3fe849e802..962cc807318 100644 --- a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -277,6 +277,8 @@ impl L2BlockMaxPayloadSizeSealer { #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_utils::time::seconds_since_epoch; use super::*; @@ -287,6 +289,7 @@ mod tests { tx, create_execution_result([]), vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index edcf3ccc4f5..d1e82c44bd6 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -33,6 +33,7 @@ pub(crate) fn 
successful_exec() -> BatchTransactionExecutionResult { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index ffca8dff864..cb282f3b7d6 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -264,6 +264,7 @@ pub(crate) fn successful_exec_with_log() -> BatchTransactionExecutionResult { }, statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], @@ -278,6 +279,7 @@ pub(crate) fn rejected_exec(reason: Halt) -> BatchTransactionExecutionResult { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index 80de0f0beff..9e971541b20 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -138,6 +138,7 @@ pub(super) fn create_execution_result( circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } diff --git a/core/node/state_keeper/src/updates/l1_batch_updates.rs b/core/node/state_keeper/src/updates/l1_batch_updates.rs index aa2e22cac48..2979ebbd8c2 100644 --- a/core/node/state_keeper/src/updates/l1_batch_updates.rs +++ b/core/node/state_keeper/src/updates/l1_batch_updates.rs @@ -49,6 +49,8 @@ impl L1BatchUpdates { #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_multivm::vm_latest::TransactionVmExt; use zksync_types::{L2BlockNumber, ProtocolVersionId, H256}; @@ -76,6 +78,7 @@ mod tests { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); diff --git a/core/node/state_keeper/src/updates/l2_block_updates.rs b/core/node/state_keeper/src/updates/l2_block_updates.rs index d8673088dc3..27995b384ab 100644 --- a/core/node/state_keeper/src/updates/l2_block_updates.rs +++ b/core/node/state_keeper/src/updates/l2_block_updates.rs @@ -1,17 +1,14 @@ use std::collections::HashMap; -use once_cell::sync::Lazy; use zksync_multivm::{ interface::{ Call, CompressedBytecodeInfo, ExecutionResult, L2BlockEnv, TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionMetrics, VmExecutionResultAndLogs, }, - vm_latest::TransactionVmExt, + vm_latest::{utils::extract_bytecodes_marked_as_known, TransactionVmExt}, }; -use zksync_system_constants::KNOWN_CODES_STORAGE_ADDRESS; use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, - ethabi, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, L2BlockNumber, ProtocolVersionId, StorageLogWithPreviousValue, Transaction, H256, }; @@ -19,27 +16,6 @@ use zksync_utils::bytecode::hash_bytecode; use crate::metrics::KEEPER_METRICS; -/// Extracts all bytecodes marked as known on the system contracts. 
-fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec<H256> {
-    static PUBLISHED_BYTECODE_SIGNATURE: Lazy<H256> = Lazy::new(|| {
-        ethabi::long_signature(
-            "MarkedAsKnown",
-            &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool],
-        )
-    });
-
-    all_generated_events
-        .iter()
-        .filter(|event| {
-            // Filter events from the deployer contract that match the expected signature.
-            event.address == KNOWN_CODES_STORAGE_ADDRESS
-                && event.indexed_topics.len() == 3
-                && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE
-        })
-        .map(|event| event.indexed_topics[1])
-        .collect()
-}
-
 #[derive(Debug, Clone, PartialEq)]
 pub struct L2BlockUpdates {
     pub executed_transactions: Vec<TransactionExecutionResult>,
@@ -104,6 +80,7 @@ impl L2BlockUpdates {
         self.block_execution_metrics += execution_metrics;
     }
 
+    #[allow(clippy::too_many_arguments)]
     pub(crate) fn extend_from_executed_transaction(
         &mut self,
         tx: Transaction,
@@ -111,6 +88,7 @@ impl L2BlockUpdates {
         tx_l1_gas_this_tx: BlockGasCount,
         execution_metrics: VmExecutionMetrics,
         compressed_bytecodes: Vec<CompressedBytecodeInfo>,
+        new_known_factory_deps: HashMap<H256, Vec<u8>>,
         call_traces: Vec<Call>,
     ) {
         let saved_factory_deps =
@@ -145,12 +123,15 @@ impl L2BlockUpdates {
 
         // Get transaction factory deps
         let factory_deps = &tx.execute.factory_deps;
-        let tx_factory_deps: HashMap<_, _> = factory_deps
+        let mut tx_factory_deps: HashMap<_, _> = factory_deps
             .iter()
-            .map(|bytecode| (hash_bytecode(bytecode), bytecode))
+            .map(|bytecode| (hash_bytecode(bytecode), bytecode.clone()))
             .collect();
+        // Ensure that *dynamic* factory deps (ones that may be created when executing EVM contracts)
+        // are added into the lookup map as well.
+        tx_factory_deps.extend(new_known_factory_deps);
 
-        // Save all bytecodes that were marked as known on the bootloader
+        // Save all bytecodes that were marked as known in the bootloader
         let known_bytecodes = saved_factory_deps.into_iter().map(|bytecode_hash| {
             let bytecode = tx_factory_deps.get(&bytecode_hash).unwrap_or_else(|| {
                 panic!(
@@ -230,6 +211,7 @@ mod tests {
             BlockGasCount::default(),
             VmExecutionMetrics::default(),
             vec![],
+            HashMap::new(),
             vec![],
         );
 
diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs
index 2fad56a9929..0cebc5d8b47 100644
--- a/core/node/state_keeper/src/updates/mod.rs
+++ b/core/node/state_keeper/src/updates/mod.rs
@@ -1,3 +1,5 @@
+use std::collections::HashMap;
+
 use zksync_contracts::BaseSystemContractsHashes;
 use zksync_multivm::{
     interface::{
@@ -8,7 +10,7 @@ use zksync_multivm::{
 };
 use zksync_types::{
     block::BlockGasCount, fee_model::BatchFeeInput, Address, L1BatchNumber, L2BlockNumber,
-    ProtocolVersionId, Transaction,
+    ProtocolVersionId, Transaction, H256,
 };
 
 pub(crate) use self::{l1_batch_updates::L1BatchUpdates, l2_block_updates::L2BlockUpdates};
@@ -104,11 +106,13 @@ impl UpdatesManager {
         self.protocol_version
     }
 
+    #[allow(clippy::too_many_arguments)]
     pub fn extend_from_executed_transaction(
         &mut self,
         tx: Transaction,
         tx_execution_result: VmExecutionResultAndLogs,
         compressed_bytecodes: Vec<CompressedBytecodeInfo>,
+        new_known_factory_deps: HashMap<H256, Vec<u8>>,
         tx_l1_gas_this_tx: BlockGasCount,
         execution_metrics: VmExecutionMetrics,
         call_traces: Vec<Call>,
@@ -124,6 +128,7 @@ impl UpdatesManager {
             tx_l1_gas_this_tx,
             execution_metrics,
             compressed_bytecodes,
+            new_known_factory_deps,
             call_traces,
         );
         latency.observe();
@@ -233,6 +238,7 @@ mod tests {
             tx,
             create_execution_result([]),
             vec![],
+            HashMap::new(),
             new_block_gas_count(),
             VmExecutionMetrics::default(),
             vec![],
diff --git a/core/node/test_utils/src/lib.rs
b/core/node/test_utils/src/lib.rs
index b9984b78211..9eb53994eee 100644
--- a/core/node/test_utils/src/lib.rs
+++ b/core/node/test_utils/src/lib.rs
@@ -56,6 +56,7 @@ pub fn create_l1_batch(number: u32) -> L1BatchHeader {
         BaseSystemContractsHashes {
             bootloader: H256::repeat_byte(1),
             default_aa: H256::repeat_byte(42),
+            evm_emulator: None,
         },
         ProtocolVersionId::latest(),
     );
@@ -88,6 +89,7 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata {
             zkporter_is_available: ZKPORTER_IS_AVAILABLE,
             bootloader_code_hash: BaseSystemContractsHashes::default().bootloader,
             default_aa_code_hash: BaseSystemContractsHashes::default().default_aa,
+            evm_emulator_code_hash: BaseSystemContractsHashes::default().evm_emulator,
             protocol_version: Some(ProtocolVersionId::latest()),
         },
         aux_data_hash: H256::zero(),
@@ -217,6 +219,7 @@ impl Snapshot {
             l2_block,
             factory_deps: [&contracts.bootloader, &contracts.default_aa]
                 .into_iter()
+                .chain(contracts.evm_emulator.as_ref())
                 .map(|c| (c.hash, zksync_utils::be_words_to_bytes(&c.code)))
                 .collect(),
             storage_logs,
diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs
index 6c2933635b4..dc94752d988 100644
--- a/core/node/vm_runner/src/impls/bwip.rs
+++ b/core/node/vm_runner/src/impls/bwip.rs
@@ -209,6 +209,7 @@ async fn get_updates_manager_witness_input_data(
 ) -> anyhow::Result<VMRunWitnessInputData> {
     let initial_heap_content = output.batch.final_bootloader_memory.clone().unwrap(); // might be just empty
     let default_aa = system_env.base_system_smart_contracts.hashes().default_aa;
+    let evm_emulator = system_env.base_system_smart_contracts.hashes().evm_emulator;
     let bootloader = system_env.base_system_smart_contracts.hashes().bootloader;
     let bootloader_code_bytes = connection
         .factory_deps_dal()
         .get_sealed_factory_dep(bootloader)
         .await?
@@ -240,6 +241,22 @@ async fn get_updates_manager_witness_input_data(
         used_bytecodes.insert(account_code_hash, account_bytecode);
     }
 
+    let evm_emulator_code_hash = if let Some(evm_emulator) = evm_emulator {
+        let evm_emulator_code_hash = h256_to_u256(evm_emulator);
+        if used_contract_hashes.contains(&evm_emulator_code_hash) {
+            let evm_emulator_bytecode = connection
+                .factory_deps_dal()
+                .get_sealed_factory_dep(evm_emulator)
+                .await?
+ .ok_or_else(|| anyhow!("EVM Simulator bytecode should exist"))?; + let evm_emulator_bytecode = bytes_to_chunks(&evm_emulator_bytecode); + used_bytecodes.insert(evm_emulator_code_hash, evm_emulator_bytecode); + } + Some(evm_emulator_code_hash) + } else { + None + }; + let storage_refunds = output.batch.final_execution_state.storage_refunds.clone(); let pubdata_costs = output.batch.final_execution_state.pubdata_costs.clone(); let witness_block_state = WitnessStorageState { @@ -254,6 +271,7 @@ async fn get_updates_manager_witness_input_data( protocol_version: system_env.version, bootloader_code, default_account_code_hash: account_code_hash, + evm_emulator_code_hash, storage_refunds, pubdata_costs, witness_block_state, diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 53bef106a8f..575fd59be04 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -322,6 +322,7 @@ async fn store_l1_batches( .iter() .map(|contract| hash_bytecode(&contract.bytecode)) .chain([genesis_params.base_system_contracts().hashes().default_aa]) + .chain(genesis_params.base_system_contracts().hashes().evm_emulator) .map(h256_to_u256) .collect(); diff --git a/core/node/vm_runner/src/tests/output_handler.rs b/core/node/vm_runner/src/tests/output_handler.rs index 1bf30effdbe..f57814ea449 100644 --- a/core/node/vm_runner/src/tests/output_handler.rs +++ b/core/node/vm_runner/src/tests/output_handler.rs @@ -66,6 +66,7 @@ impl OutputHandlerTester { code: vec![], hash: Default::default(), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index 999ea6eb6e0..39a36694526 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -154,7 +154,7 @@ impl Account { let max_fee_per_gas = U256::from(0u32); let gas_limit = U256::from(20_000_000); let factory_deps = execute.factory_deps; - abi::Transaction::L1 { + let tx = abi::Transaction::L1 { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&self.address), @@ -186,9 +186,8 @@ impl Account { .into(), factory_deps, eth_block: 0, - } - .try_into() - .unwrap() + }; + Transaction::from_abi(tx, false).unwrap() } pub fn get_test_contract_transaction( diff --git a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol new file mode 100644 index 00000000000..5f4de59681f --- /dev/null +++ b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.8.0; + +/** + * Mock `KnownCodeStorage` counterpart producing `MarkedAsKnown` events and having `publishEVMBytecode` method + * added for EVM emulation, calls to which should be traced by the host. 
+ */ +contract MockKnownCodeStorage { + event MarkedAsKnown(bytes32 indexed bytecodeHash, bool indexed sendBytecodeToL1); + + function markFactoryDeps(bool _shouldSendToL1, bytes32[] calldata _hashes) external { + unchecked { + uint256 hashesLen = _hashes.length; + for (uint256 i = 0; i < hashesLen; ++i) { + _markBytecodeAsPublished(_hashes[i], _shouldSendToL1); + } + } + } + + function markBytecodeAsPublished(bytes32 _bytecodeHash) external { + _markBytecodeAsPublished(_bytecodeHash, false); + } + + function _markBytecodeAsPublished(bytes32 _bytecodeHash, bool _shouldSendToL1) internal { + if (getMarker(_bytecodeHash) == 0) { + assembly { + sstore(_bytecodeHash, 1) + } + emit MarkedAsKnown(_bytecodeHash, _shouldSendToL1); + } + } + + bytes32 evmBytecodeHash; // For tests, it's OK to potentially collide with the marker slot for hash `bytes32(0)` + + /// Sets the EVM bytecode hash to be used in the next `publishEVMBytecode` call. + function setEVMBytecodeHash(bytes32 _bytecodeHash) external { + evmBytecodeHash = _bytecodeHash; + } + + function publishEVMBytecode(bytes calldata _bytecode) external { + bytes32 hash = evmBytecodeHash; + require(hash != bytes32(0), "EVM bytecode hash not set"); + + if (getMarker(evmBytecodeHash) == 0) { + assembly { + sstore(hash, 1) + } + } + emit MarkedAsKnown(hash, getMarker(hash) == 0); + evmBytecodeHash = bytes32(0); + } + + function getMarker(bytes32 _hash) public view returns (uint256 marker) { + assembly { + marker := sload(_hash) + } + } +} + +/** + * Mock `ContractDeployer` counterpart focusing on EVM bytecode deployment (via `create`; this isn't how real EVM bytecode deployment works, + * but it's good enough for low-level tests). + */ +contract MockContractDeployer { + enum AccountAbstractionVersion { + None, + Version1 + } + + address constant CODE_ORACLE_ADDR = address(0x8012); + MockKnownCodeStorage constant KNOWN_CODE_STORAGE_CONTRACT = MockKnownCodeStorage(address(0x8004)); + + /// The returned value is obviously incorrect in the general case, but works well enough when called by the bootloader. + function extendedAccountVersion(address _address) public view returns (AccountAbstractionVersion) { + return AccountAbstractionVersion.Version1; + } + + /// Replaces real deployment with publishing a surrogate EVM "bytecode". 
+    /// @param _salt bytecode hash
+    /// @param _bytecodeHash ignored, since it's not possible to set it arbitrarily
+    /// @param _input bytecode to publish
+    function create(
+        bytes32 _salt,
+        bytes32 _bytecodeHash,
+        bytes calldata _input
+    ) external payable returns (address) {
+        KNOWN_CODE_STORAGE_CONTRACT.setEVMBytecodeHash(_salt);
+        KNOWN_CODE_STORAGE_CONTRACT.publishEVMBytecode(_input);
+        return address(0);
+    }
+}
diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml
index 220a75944e0..b7d4ffebcf9 100644
--- a/etc/env/file_based/genesis.yaml
+++ b/etc/env/file_based/genesis.yaml
@@ -13,3 +13,5 @@ prover:
   recursion_scheduler_level_vk_hash: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2
 dummy_verifier: true
 l1_batch_commit_data_generator_mode: Rollup
+# Uncomment to enable EVM emulation (requires running genesis)
+# evm_emulator_hash: 0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91
diff --git a/prover/Cargo.lock b/prover/Cargo.lock
index 92366b0912b..22ec5c53485 100644
--- a/prover/Cargo.lock
+++ b/prover/Cargo.lock
@@ -7697,6 +7697,7 @@ dependencies = [
  "circuit_sequencer_api 0.141.2",
  "circuit_sequencer_api 0.142.2",
  "circuit_sequencer_api 0.150.5",
+ "ethabi",
  "hex",
  "itertools 0.10.5",
  "once_cell",
diff --git a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs
index 23ae1b0f2af..a8bc59bd45e 100644
--- a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs
+++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs
@@ -114,6 +114,10 @@ pub(super) async fn generate_witness(
         }
     };
 
+    let evm_emulator_code_hash = input.vm_run_data.evm_emulator_code_hash;
+    // By convention, default AA is used instead of the EVM emulator if the latter is disabled.
+    let evm_emulator_code_hash =
+        evm_emulator_code_hash.unwrap_or(input.vm_run_data.default_account_code_hash);
     let (scheduler_witness, block_witness) = zkevm_test_harness::external_calls::run(
         Address::zero(),
         BOOTLOADER_ADDRESS,
@@ -121,8 +125,7 @@ pub(super) async fn generate_witness(
         bootloader_contents,
         false,
         input.vm_run_data.default_account_code_hash,
-        // NOTE: this will be evm_simulator_code_hash in future releases
-        input.vm_run_data.default_account_code_hash,
+        evm_emulator_code_hash,
         input.vm_run_data.used_bytecodes,
         Vec::default(),
         MAX_CYCLES_FOR_TX as usize,
diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs
index 30ec0eeb9c4..41ce906f455 100644
--- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs
+++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs
@@ -146,6 +146,7 @@ impl DeployL1Config {
                 .diamond_init_minimal_l2_gas_price,
             bootloader_hash: genesis_config.bootloader_hash.unwrap(),
             default_aa_hash: genesis_config.default_aa_hash.unwrap(),
+            evm_emulator_hash: genesis_config.evm_emulator_hash,
            diamond_init_priority_tx_max_pubdata: initial_deployment_config
                 .diamond_init_priority_tx_max_pubdata,
             diamond_init_pubdata_pricing_mode: initial_deployment_config
@@ -194,6 +195,7 @@ pub struct ContractsDeployL1Config {
     pub diamond_init_minimal_l2_gas_price: u64,
     pub bootloader_hash: H256,
     pub default_aa_hash: H256,
+    pub evm_emulator_hash: Option<H256>,
 }
 
 #[derive(Debug, Deserialize, Serialize, Clone)]

From c84e2718da2fc26368b8b4c42f536c2a7acc27dc Mon Sep 17 00:00:00 2001
From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com>
Date: Tue, 8 Oct 2024 15:37:18 -0300
Subject: [PATCH 23/36] Add dashboard step

---
 eigenda-integration.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/eigenda-integration.md b/eigenda-integration.md
index 85dac512d59..fe18f806404 100644
--- a/eigenda-integration.md
+++ b/eigenda-integration.md
@@ -36,6 +36,12 @@ eigenda-proxy:
 zk_inception containers --observability true
 ```
 
+3. Add the EigenDA dashboard
+
+```bash
+mv era-observability/additional_dashboards/EigenDA.json era-observability/dashboards/EigenDA.json
+```
+
 3.
Create `eigen_da` chain ```bash From 6a573c58786dbad3667b3f44a1206e60c9e033a8 Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Tue, 8 Oct 2024 16:00:02 -0300 Subject: [PATCH 24/36] Update queries --- ...636469ed35acab9237802753bd970cda6835.json} | 18 +- ...046d12cef9ac2629758decfcf33545d7cb462.json | 199 ----------------- ...410d8d7497e18aeeed85eaaf6ecdfbb77e10d.json | 196 ----------------- ...46fc28edbc885e3123584c8d056ba3d3ce96c.json | 197 ----------------- ...2c6539a0bdfc0571df9433d5ce97b6a929ed.json} | 18 +- ...21849f9aad29de043fabb8f781bcb520266ff.json | 184 ++++++++++++++++ ...5e29ed8ab2042aae39403c66ae0a668a7c986.json | 185 ++++++++++++++++ ...81e3d713e93e16523e111989f0de47e9dad8.json} | 18 +- ...23fe5461502b1607d441e5057923313fc3ab8.json | 200 ------------------ ...20db9c633cfc433ade18980a9f6d76293aaf4.json | 187 ++++++++++++++++ ...994cc05ebeb4e5aeeaee50b7c4d8baf58ca44.json | 33 --- ...b0cf8ad6e1046dce9d5e24c1a1eea138d4769.json | 188 ++++++++++++++++ ...ba3af74e8e7b5944cb2943b5badb906167046.json | 30 --- ...4547e783aab9c9f772eaee3d464b3a671f04.json} | 18 +- ...914f15fd7a5fa3d7f7bc56906817c70b04950.json | 34 +++ 15 files changed, 790 insertions(+), 915 deletions(-) rename core/lib/dal/.sqlx/{query-6c2dc484f78fb1a08299c1cda6bbfbc69332a0ad8cd288ae99c960f31d34fae9.json => query-14b6544c4dc13437019c97de62b7636469ed35acab9237802753bd970cda6835.json} (52%) delete mode 100644 core/lib/dal/.sqlx/query-465b5e247ba7105ca138ca39965046d12cef9ac2629758decfcf33545d7cb462.json delete mode 100644 core/lib/dal/.sqlx/query-511b99d53e4559deb1d5fd65193410d8d7497e18aeeed85eaaf6ecdfbb77e10d.json delete mode 100644 core/lib/dal/.sqlx/query-52655240d4c434e9e01246a659646fc28edbc885e3123584c8d056ba3d3ce96c.json rename core/lib/dal/.sqlx/{query-16f6b0ea452c3402a60a41628e9d6fcff54624e85e5c2c5bd1fc21c8956b9948.json => query-680b632b9dd1204d8c024ba03e882c6539a0bdfc0571df9433d5ce97b6a929ed.json} (51%) create mode 100644 core/lib/dal/.sqlx/query-73754563c5e06879304e3a491bd21849f9aad29de043fabb8f781bcb520266ff.json create mode 100644 core/lib/dal/.sqlx/query-768a52f988e097b5a09e6349ce65e29ed8ab2042aae39403c66ae0a668a7c986.json rename core/lib/dal/.sqlx/{query-288e6ac4a1430740be80e89771d20b9c3e72d512db8747bf4c8af2b82855725a.json => query-77cb08989cff02859eac838cae0b81e3d713e93e16523e111989f0de47e9dad8.json} (55%) delete mode 100644 core/lib/dal/.sqlx/query-808d0d7aa2dfe1b27b9f023db9723fe5461502b1607d441e5057923313fc3ab8.json create mode 100644 core/lib/dal/.sqlx/query-8b5b1ef99c268f752f9c81450a020db9c633cfc433ade18980a9f6d76293aaf4.json delete mode 100644 core/lib/dal/.sqlx/query-9f2c06e6b14434ac4f3b556dc97994cc05ebeb4e5aeeaee50b7c4d8baf58ca44.json create mode 100644 core/lib/dal/.sqlx/query-a46d8b286dd078c905523b34643b0cf8ad6e1046dce9d5e24c1a1eea138d4769.json delete mode 100644 core/lib/dal/.sqlx/query-c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046.json rename core/lib/dal/.sqlx/{query-70d2f1a59e5e12c4c0c843fe14c674d9dab0ebd6256b0d3c9142a62f99de111d.json => query-ee2634e9c09cbdbcf039f6c0b6924547e783aab9c9f772eaee3d464b3a671f04.json} (52%) create mode 100644 core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json diff --git a/core/lib/dal/.sqlx/query-6c2dc484f78fb1a08299c1cda6bbfbc69332a0ad8cd288ae99c960f31d34fae9.json b/core/lib/dal/.sqlx/query-14b6544c4dc13437019c97de62b7636469ed35acab9237802753bd970cda6835.json similarity index 52% rename from 
core/lib/dal/.sqlx/query-6c2dc484f78fb1a08299c1cda6bbfbc69332a0ad8cd288ae99c960f31d34fae9.json rename to core/lib/dal/.sqlx/query-14b6544c4dc13437019c97de62b7636469ed35acab9237802753bd970cda6835.json index 87f38f5c0c3..fb7aceb0549 100644 --- a/core/lib/dal/.sqlx/query-6c2dc484f78fb1a08299c1cda6bbfbc69332a0ad8cd288ae99c960f31d34fae9.json +++ b/core/lib/dal/.sqlx/query-14b6544c4dc13437019c97de62b7636469ed35acab9237802753bd970cda6835.json @@ -1,12 +1,6 @@ { "db_name": "PostgreSQL", -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-6c2dc484f78fb1a08299c1cda6bbfbc69332a0ad8cd288ae99c960f31d34fae9.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", -======== - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", ->>>>>>>> main:core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n 
l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -145,7 +139,7 @@ "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, "name": "blob_id?", "type_info": "Text" } @@ -186,11 +180,5 @@ false ] }, -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-6c2dc484f78fb1a08299c1cda6bbfbc69332a0ad8cd288ae99c960f31d34fae9.json - "hash": "6c2dc484f78fb1a08299c1cda6bbfbc69332a0ad8cd288ae99c960f31d34fae9" -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json - "hash": "5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb" -======== - "hash": "860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192" ->>>>>>>> main:core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json + "hash": "14b6544c4dc13437019c97de62b7636469ed35acab9237802753bd970cda6835" } diff --git a/core/lib/dal/.sqlx/query-465b5e247ba7105ca138ca39965046d12cef9ac2629758decfcf33545d7cb462.json b/core/lib/dal/.sqlx/query-465b5e247ba7105ca138ca39965046d12cef9ac2629758decfcf33545d7cb462.json deleted file mode 100644 index 9229b7ae4ed..00000000000 --- a/core/lib/dal/.sqlx/query-465b5e247ba7105ca138ca39965046d12cef9ac2629758decfcf33545d7cb462.json +++ /dev/null @@ -1,199 +0,0 @@ -{ - "db_name": "PostgreSQL", -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-465b5e247ba7105ca138ca39965046d12cef9ac2629758decfcf33545d7cb462.json - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n 
l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", -======== - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", ->>>>>>>> main:core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "bloom", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "priority_ops_onchain_data", - "type_info": "ByteaArray" - }, - { - "ordinal": 6, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "commitment", - "type_info": "Bytea" - }, - { - "ordinal": 8, - "name": "l2_to_l1_messages", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, - "name": "used_contract_hashes", - "type_info": "Jsonb" - }, - { - "ordinal": 10, - "name": "compressed_initial_writes", 
- "type_info": "Bytea" - }, - { - "ordinal": 11, - "name": "compressed_repeated_writes", - "type_info": "Bytea" - }, - { - "ordinal": 12, - "name": "l2_l1_merkle_root", - "type_info": "Bytea" - }, - { - "ordinal": 13, - "name": "rollup_last_leaf_index", - "type_info": "Int8" - }, - { - "ordinal": 14, - "name": "zkporter_is_available", - "type_info": "Bool" - }, - { - "ordinal": 15, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 16, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 17, - "name": "evm_emulator_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 18, - "name": "aux_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 19, - "name": "pass_through_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 20, - "name": "meta_parameters_hash", - "type_info": "Bytea" - }, - { - "ordinal": 21, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 22, - "name": "compressed_state_diffs", - "type_info": "Bytea" - }, - { - "ordinal": 23, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 24, - "name": "events_queue_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 25, - "name": "bootloader_initial_content_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 26, - "name": "pubdata_input", - "type_info": "Bytea" - }, - { - "ordinal": 26, - "name": "blob_id?", - "type_info": "Text" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Int4", - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - true, - true, - true, - false - ] - }, -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-465b5e247ba7105ca138ca39965046d12cef9ac2629758decfcf33545d7cb462.json - "hash": "465b5e247ba7105ca138ca39965046d12cef9ac2629758decfcf33545d7cb462" -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json - "hash": "ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3" -======== - "hash": "9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361" ->>>>>>>> main:core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json -} diff --git a/core/lib/dal/.sqlx/query-511b99d53e4559deb1d5fd65193410d8d7497e18aeeed85eaaf6ecdfbb77e10d.json b/core/lib/dal/.sqlx/query-511b99d53e4559deb1d5fd65193410d8d7497e18aeeed85eaaf6ecdfbb77e10d.json deleted file mode 100644 index 51d60e0dca7..00000000000 --- a/core/lib/dal/.sqlx/query-511b99d53e4559deb1d5fd65193410d8d7497e18aeeed85eaaf6ecdfbb77e10d.json +++ /dev/null @@ -1,196 +0,0 @@ -{ - "db_name": "PostgreSQL", -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-511b99d53e4559deb1d5fd65193410d8d7497e18aeeed85eaaf6ecdfbb77e10d.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON 
commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", -======== - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", ->>>>>>>> main:core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "bloom", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "priority_ops_onchain_data", - "type_info": "ByteaArray" - }, - { - "ordinal": 6, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "commitment", - "type_info": "Bytea" - }, - { - "ordinal": 8, - "name": "l2_to_l1_messages", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, - "name": "used_contract_hashes", - "type_info": "Jsonb" - }, - { - "ordinal": 10, - "name": "compressed_initial_writes", - "type_info": "Bytea" - }, - { - "ordinal": 11, - "name": "compressed_repeated_writes", - "type_info": "Bytea" - }, - { - "ordinal": 12, - "name": "l2_l1_merkle_root", - "type_info": "Bytea" - }, - { - "ordinal": 13, - "name": "rollup_last_leaf_index", - "type_info": "Int8" - }, - { - "ordinal": 14, - "name": "zkporter_is_available", - "type_info": "Bool" - }, - { - "ordinal": 15, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 16, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 17, - "name": "evm_emulator_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 18, - "name": "aux_data_hash", - 
"type_info": "Bytea" - }, - { - "ordinal": 19, - "name": "pass_through_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 20, - "name": "meta_parameters_hash", - "type_info": "Bytea" - }, - { - "ordinal": 21, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 22, - "name": "compressed_state_diffs", - "type_info": "Bytea" - }, - { - "ordinal": 23, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 24, - "name": "events_queue_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 25, - "name": "bootloader_initial_content_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 26, - "name": "pubdata_input", - "type_info": "Bytea" - }, - { - "ordinal": 26, - "name": "blob_id?", - "type_info": "Text" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - true, - true, - true, - false - ] - }, -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-511b99d53e4559deb1d5fd65193410d8d7497e18aeeed85eaaf6ecdfbb77e10d.json - "hash": "511b99d53e4559deb1d5fd65193410d8d7497e18aeeed85eaaf6ecdfbb77e10d" -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json - "hash": "4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862" -======== - "hash": "da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7" ->>>>>>>> main:core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json -} diff --git a/core/lib/dal/.sqlx/query-52655240d4c434e9e01246a659646fc28edbc885e3123584c8d056ba3d3ce96c.json b/core/lib/dal/.sqlx/query-52655240d4c434e9e01246a659646fc28edbc885e3123584c8d056ba3d3ce96c.json deleted file mode 100644 index 3ce387f5e79..00000000000 --- a/core/lib/dal/.sqlx/query-52655240d4c434e9e01246a659646fc28edbc885e3123584c8d056ba3d3ce96c.json +++ /dev/null @@ -1,197 +0,0 @@ -{ - "db_name": "PostgreSQL", -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-52655240d4c434e9e01246a659646fc28edbc885e3123584c8d056ba3d3ce96c.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n 
used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", -======== - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", ->>>>>>>> main:core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "bloom", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "priority_ops_onchain_data", - "type_info": "ByteaArray" - }, - { - "ordinal": 6, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "commitment", - "type_info": "Bytea" - }, - { - "ordinal": 8, - "name": "l2_to_l1_messages", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, - "name": "used_contract_hashes", - "type_info": "Jsonb" - }, - { - "ordinal": 10, - "name": "compressed_initial_writes", - "type_info": "Bytea" - }, - { - "ordinal": 11, - "name": "compressed_repeated_writes", - "type_info": "Bytea" - }, - { - "ordinal": 12, - "name": "l2_l1_merkle_root", - "type_info": "Bytea" - }, - { - "ordinal": 13, - "name": "rollup_last_leaf_index", - "type_info": "Int8" - }, - { - "ordinal": 14, - "name": "zkporter_is_available", - "type_info": "Bool" - }, - { - "ordinal": 15, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 16, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 17, - "name": "evm_emulator_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 18, - "name": "aux_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 19, - "name": "pass_through_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 20, - "name": "meta_parameters_hash", - "type_info": 
"Bytea" - }, - { - "ordinal": 21, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 22, - "name": "compressed_state_diffs", - "type_info": "Bytea" - }, - { - "ordinal": 23, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 24, - "name": "events_queue_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 25, - "name": "bootloader_initial_content_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 26, - "name": "pubdata_input", - "type_info": "Bytea" - }, - { - "ordinal": 26, - "name": "blob_id?", - "type_info": "Text" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - true, - true, - true, - true, - true, - false - ] - }, -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-52655240d4c434e9e01246a659646fc28edbc885e3123584c8d056ba3d3ce96c.json - "hash": "52655240d4c434e9e01246a659646fc28edbc885e3123584c8d056ba3d3ce96c" -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json - "hash": "ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa" -======== - "hash": "05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32" ->>>>>>>> main:core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json -} diff --git a/core/lib/dal/.sqlx/query-16f6b0ea452c3402a60a41628e9d6fcff54624e85e5c2c5bd1fc21c8956b9948.json b/core/lib/dal/.sqlx/query-680b632b9dd1204d8c024ba03e882c6539a0bdfc0571df9433d5ce97b6a929ed.json similarity index 51% rename from core/lib/dal/.sqlx/query-16f6b0ea452c3402a60a41628e9d6fcff54624e85e5c2c5bd1fc21c8956b9948.json rename to core/lib/dal/.sqlx/query-680b632b9dd1204d8c024ba03e882c6539a0bdfc0571df9433d5ce97b6a929ed.json index 335ce8afd9e..ffe9e7e39cd 100644 --- a/core/lib/dal/.sqlx/query-16f6b0ea452c3402a60a41628e9d6fcff54624e85e5c2c5bd1fc21c8956b9948.json +++ b/core/lib/dal/.sqlx/query-680b632b9dd1204d8c024ba03e882c6539a0bdfc0571df9433d5ce97b6a929ed.json @@ -1,12 +1,6 @@ { "db_name": "PostgreSQL", -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-16f6b0ea452c3402a60a41628e9d6fcff54624e85e5c2c5bd1fc21c8956b9948.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n 
compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", -======== - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", ->>>>>>>> main:core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -145,7 +139,7 @@ "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, "name": "blob_id?", "type_info": "Text" } @@ -184,11 +178,5 @@ false ] }, -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-16f6b0ea452c3402a60a41628e9d6fcff54624e85e5c2c5bd1fc21c8956b9948.json - "hash": "16f6b0ea452c3402a60a41628e9d6fcff54624e85e5c2c5bd1fc21c8956b9948" -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json - "hash": "30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e" -======== - "hash": "9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24" ->>>>>>>> main:core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json + "hash": "680b632b9dd1204d8c024ba03e882c6539a0bdfc0571df9433d5ce97b6a929ed" } diff --git a/core/lib/dal/.sqlx/query-73754563c5e06879304e3a491bd21849f9aad29de043fabb8f781bcb520266ff.json b/core/lib/dal/.sqlx/query-73754563c5e06879304e3a491bd21849f9aad29de043fabb8f781bcb520266ff.json new file mode 
100644 index 00000000000..c6c616db7ce --- /dev/null +++ b/core/lib/dal/.sqlx/query-73754563c5e06879304e3a491bd21849f9aad29de043fabb8f781bcb520266ff.json @@ -0,0 +1,184 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "bloom", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "priority_ops_onchain_data", + "type_info": "ByteaArray" + }, + { + "ordinal": 6, + "name": "hash", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "commitment", + "type_info": "Bytea" + }, + { + "ordinal": 8, + "name": "l2_to_l1_messages", + "type_info": "ByteaArray" + }, + { + "ordinal": 9, + "name": "used_contract_hashes", + "type_info": "Jsonb" + }, + { + "ordinal": 10, + "name": "compressed_initial_writes", + "type_info": "Bytea" + }, + { + "ordinal": 11, + "name": "compressed_repeated_writes", + "type_info": "Bytea" + }, + { + "ordinal": 12, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + }, + { + "ordinal": 13, + "name": "rollup_last_leaf_index", + "type_info": "Int8" + }, + { + "ordinal": 14, + "name": "zkporter_is_available", + "type_info": "Bool" + }, + { + "ordinal": 15, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 16, + "name": "default_aa_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 17, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "aux_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pass_through_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 22, + "name": "compressed_state_diffs", + "type_info": "Bytea" + }, + { + "ordinal": 23, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 24, + "name": "events_queue_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 25, + "name": "bootloader_initial_content_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 26, + "name": "pubdata_input", + "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "blob_id?", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true, + false, + 
false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + true, + false + ] + }, + "hash": "73754563c5e06879304e3a491bd21849f9aad29de043fabb8f781bcb520266ff" +} diff --git a/core/lib/dal/.sqlx/query-768a52f988e097b5a09e6349ce65e29ed8ab2042aae39403c66ae0a668a7c986.json b/core/lib/dal/.sqlx/query-768a52f988e097b5a09e6349ce65e29ed8ab2042aae39403c66ae0a668a7c986.json new file mode 100644 index 00000000000..55009c6bf24 --- /dev/null +++ b/core/lib/dal/.sqlx/query-768a52f988e097b5a09e6349ce65e29ed8ab2042aae39403c66ae0a668a7c986.json @@ -0,0 +1,185 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "bloom", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "priority_ops_onchain_data", + "type_info": "ByteaArray" + }, + { + "ordinal": 6, + "name": "hash", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "commitment", + "type_info": "Bytea" + }, + { + "ordinal": 8, + "name": "l2_to_l1_messages", + "type_info": "ByteaArray" + }, + { + "ordinal": 9, + "name": "used_contract_hashes", + "type_info": "Jsonb" + }, + { + "ordinal": 10, + "name": "compressed_initial_writes", + "type_info": "Bytea" + }, + { + "ordinal": 11, + "name": "compressed_repeated_writes", + "type_info": "Bytea" + }, + { + "ordinal": 12, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + }, + { + "ordinal": 13, + "name": "rollup_last_leaf_index", + "type_info": "Int8" + }, + { + "ordinal": 14, + "name": "zkporter_is_available", + "type_info": "Bool" + }, + { + "ordinal": 15, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 16, + "name": "default_aa_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 17, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "aux_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pass_through_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 22, + 
"name": "compressed_state_diffs", + "type_info": "Bytea" + }, + { + "ordinal": 23, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 24, + "name": "events_queue_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 25, + "name": "bootloader_initial_content_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 26, + "name": "pubdata_input", + "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "blob_id?", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + true, + true, + true, + false + ] + }, + "hash": "768a52f988e097b5a09e6349ce65e29ed8ab2042aae39403c66ae0a668a7c986" +} diff --git a/core/lib/dal/.sqlx/query-288e6ac4a1430740be80e89771d20b9c3e72d512db8747bf4c8af2b82855725a.json b/core/lib/dal/.sqlx/query-77cb08989cff02859eac838cae0b81e3d713e93e16523e111989f0de47e9dad8.json similarity index 55% rename from core/lib/dal/.sqlx/query-288e6ac4a1430740be80e89771d20b9c3e72d512db8747bf4c8af2b82855725a.json rename to core/lib/dal/.sqlx/query-77cb08989cff02859eac838cae0b81e3d713e93e16523e111989f0de47e9dad8.json index ab1f05241fa..31a0869a64a 100644 --- a/core/lib/dal/.sqlx/query-288e6ac4a1430740be80e89771d20b9c3e72d512db8747bf4c8af2b82855725a.json +++ b/core/lib/dal/.sqlx/query-77cb08989cff02859eac838cae0b81e3d713e93e16523e111989f0de47e9dad8.json @@ -1,12 +1,6 @@ { "db_name": "PostgreSQL", -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-288e6ac4a1430740be80e89771d20b9c3e72d512db8747bf4c8af2b82855725a.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", -======== - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n 
compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", ->>>>>>>> main:core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -145,7 +139,7 @@ "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, "name": "blob_id?", "type_info": "Text" } @@ -186,11 +180,5 @@ false ] }, -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-288e6ac4a1430740be80e89771d20b9c3e72d512db8747bf4c8af2b82855725a.json - "hash": "288e6ac4a1430740be80e89771d20b9c3e72d512db8747bf4c8af2b82855725a" -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json - "hash": "60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7" -======== - "hash": "4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98" ->>>>>>>> main:core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json + "hash": "77cb08989cff02859eac838cae0b81e3d713e93e16523e111989f0de47e9dad8" } diff --git a/core/lib/dal/.sqlx/query-808d0d7aa2dfe1b27b9f023db9723fe5461502b1607d441e5057923313fc3ab8.json b/core/lib/dal/.sqlx/query-808d0d7aa2dfe1b27b9f023db9723fe5461502b1607d441e5057923313fc3ab8.json deleted file mode 100644 index 47788aef0f0..00000000000 --- a/core/lib/dal/.sqlx/query-808d0d7aa2dfe1b27b9f023db9723fe5461502b1607d441e5057923313fc3ab8.json +++ /dev/null @@ -1,200 +0,0 @@ -{ - "db_name": "PostgreSQL", -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-808d0d7aa2dfe1b27b9f023db9723fe5461502b1607d441e5057923313fc3ab8.json - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN 
commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", -======== - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", ->>>>>>>> main:core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - }, - { - "ordinal": 1, - "name": "timestamp", - "type_info": "Int8" - }, - { - "ordinal": 2, - "name": "l1_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 3, - "name": "l2_tx_count", - "type_info": "Int4" - }, - { - "ordinal": 4, - "name": "bloom", - "type_info": "Bytea" - }, - { - 
"ordinal": 5, - "name": "priority_ops_onchain_data", - "type_info": "ByteaArray" - }, - { - "ordinal": 6, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "commitment", - "type_info": "Bytea" - }, - { - "ordinal": 8, - "name": "l2_to_l1_messages", - "type_info": "ByteaArray" - }, - { - "ordinal": 9, - "name": "used_contract_hashes", - "type_info": "Jsonb" - }, - { - "ordinal": 10, - "name": "compressed_initial_writes", - "type_info": "Bytea" - }, - { - "ordinal": 11, - "name": "compressed_repeated_writes", - "type_info": "Bytea" - }, - { - "ordinal": 12, - "name": "l2_l1_merkle_root", - "type_info": "Bytea" - }, - { - "ordinal": 13, - "name": "rollup_last_leaf_index", - "type_info": "Int8" - }, - { - "ordinal": 14, - "name": "zkporter_is_available", - "type_info": "Bool" - }, - { - "ordinal": 15, - "name": "bootloader_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 16, - "name": "default_aa_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 17, - "name": "evm_emulator_code_hash", - "type_info": "Bytea" - }, - { - "ordinal": 18, - "name": "aux_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 19, - "name": "pass_through_data_hash", - "type_info": "Bytea" - }, - { - "ordinal": 20, - "name": "meta_parameters_hash", - "type_info": "Bytea" - }, - { - "ordinal": 21, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 22, - "name": "compressed_state_diffs", - "type_info": "Bytea" - }, - { - "ordinal": 23, - "name": "system_logs", - "type_info": "ByteaArray" - }, - { - "ordinal": 24, - "name": "events_queue_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 25, - "name": "bootloader_initial_content_commitment", - "type_info": "Bytea" - }, - { - "ordinal": 26, - "name": "pubdata_input", - "type_info": "Bytea" - }, - { - "ordinal": 26, - "name": "blob_id?", - "type_info": "Text" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Int4", - "Bool", - "Int8" - ] - }, - "nullable": [ - false, - false, - false, - false, - false, - false, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - true, - false, - true, - true, - true, - false - ] - }, -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-808d0d7aa2dfe1b27b9f023db9723fe5461502b1607d441e5057923313fc3ab8.json - "hash": "808d0d7aa2dfe1b27b9f023db9723fe5461502b1607d441e5057923313fc3ab8" -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json - "hash": "2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada" -======== - "hash": "16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870" ->>>>>>>> main:core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json -} diff --git a/core/lib/dal/.sqlx/query-8b5b1ef99c268f752f9c81450a020db9c633cfc433ade18980a9f6d76293aaf4.json b/core/lib/dal/.sqlx/query-8b5b1ef99c268f752f9c81450a020db9c633cfc433ade18980a9f6d76293aaf4.json new file mode 100644 index 00000000000..b78b7965ac7 --- /dev/null +++ b/core/lib/dal/.sqlx/query-8b5b1ef99c268f752f9c81450a020db9c633cfc433ade18980a9f6d76293aaf4.json @@ -0,0 +1,187 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n 
zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "bloom", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "priority_ops_onchain_data", + "type_info": "ByteaArray" + }, + { + "ordinal": 6, + "name": "hash", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "commitment", + "type_info": "Bytea" + }, + { + "ordinal": 8, + "name": "l2_to_l1_messages", + "type_info": "ByteaArray" + }, + { + "ordinal": 9, + "name": "used_contract_hashes", + "type_info": "Jsonb" + }, + { + "ordinal": 10, + "name": "compressed_initial_writes", + "type_info": "Bytea" + }, + { + "ordinal": 11, + "name": "compressed_repeated_writes", + "type_info": "Bytea" + }, + { + "ordinal": 12, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + }, + { + "ordinal": 13, + "name": "rollup_last_leaf_index", + "type_info": "Int8" + }, + { + "ordinal": 14, + "name": "zkporter_is_available", + "type_info": "Bool" + }, + { + "ordinal": 15, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 16, + "name": "default_aa_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 17, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "aux_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pass_through_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 22, + "name": "compressed_state_diffs", + "type_info": "Bytea" + }, + { + "ordinal": 23, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 24, + "name": "events_queue_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 25, + "name": "bootloader_initial_content_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 26, + "name": "pubdata_input", + "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "blob_id?", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Int4", + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + true, + 
false + ] + }, + "hash": "8b5b1ef99c268f752f9c81450a020db9c633cfc433ade18980a9f6d76293aaf4" +} diff --git a/core/lib/dal/.sqlx/query-9f2c06e6b14434ac4f3b556dc97994cc05ebeb4e5aeeaee50b7c4d8baf58ca44.json b/core/lib/dal/.sqlx/query-9f2c06e6b14434ac4f3b556dc97994cc05ebeb4e5aeeaee50b7c4d8baf58ca44.json deleted file mode 100644 index 54f0d27bab2..00000000000 --- a/core/lib/dal/.sqlx/query-9f2c06e6b14434ac4f3b556dc97994cc05ebeb4e5aeeaee50b7c4d8baf58ca44.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4", - "Int8", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Bytea", - "Bytea", - "Int4", - "ByteaArray", - "Int8Array", - "Int8Array", - "Bytea", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "9f2c06e6b14434ac4f3b556dc97994cc05ebeb4e5aeeaee50b7c4d8baf58ca44" -} diff --git a/core/lib/dal/.sqlx/query-a46d8b286dd078c905523b34643b0cf8ad6e1046dce9d5e24c1a1eea138d4769.json b/core/lib/dal/.sqlx/query-a46d8b286dd078c905523b34643b0cf8ad6e1046dce9d5e24c1a1eea138d4769.json new file mode 100644 index 00000000000..0fa6af063d7 --- /dev/null +++ b/core/lib/dal/.sqlx/query-a46d8b286dd078c905523b34643b0cf8ad6e1046dce9d5e24c1a1eea138d4769.json @@ -0,0 +1,188 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + 
"ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "l1_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "l2_tx_count", + "type_info": "Int4" + }, + { + "ordinal": 4, + "name": "bloom", + "type_info": "Bytea" + }, + { + "ordinal": 5, + "name": "priority_ops_onchain_data", + "type_info": "ByteaArray" + }, + { + "ordinal": 6, + "name": "hash", + "type_info": "Bytea" + }, + { + "ordinal": 7, + "name": "commitment", + "type_info": "Bytea" + }, + { + "ordinal": 8, + "name": "l2_to_l1_messages", + "type_info": "ByteaArray" + }, + { + "ordinal": 9, + "name": "used_contract_hashes", + "type_info": "Jsonb" + }, + { + "ordinal": 10, + "name": "compressed_initial_writes", + "type_info": "Bytea" + }, + { + "ordinal": 11, + "name": "compressed_repeated_writes", + "type_info": "Bytea" + }, + { + "ordinal": 12, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + }, + { + "ordinal": 13, + "name": "rollup_last_leaf_index", + "type_info": "Int8" + }, + { + "ordinal": 14, + "name": "zkporter_is_available", + "type_info": "Bool" + }, + { + "ordinal": 15, + "name": "bootloader_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 16, + "name": "default_aa_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 17, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "aux_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pass_through_data_hash", + "type_info": "Bytea" + }, + { + "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 22, + "name": "compressed_state_diffs", + "type_info": "Bytea" + }, + { + "ordinal": 23, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 24, + "name": "events_queue_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 25, + "name": "bootloader_initial_content_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 26, + "name": "pubdata_input", + "type_info": "Bytea" + }, + { + "ordinal": 27, + "name": "blob_id?", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Int4", + "Bool", + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + true, + true, + true, + false + ] + }, + "hash": "a46d8b286dd078c905523b34643b0cf8ad6e1046dce9d5e24c1a1eea138d4769" +} diff --git a/core/lib/dal/.sqlx/query-c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046.json b/core/lib/dal/.sqlx/query-c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046.json deleted file mode 100644 index 9ae9d2e50cd..00000000000 --- a/core/lib/dal/.sqlx/query-c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n NOW(),\n NOW()\n )\n ", - 
"describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - "Bytea", - "Int4", - "Int4", - "Bytea", - "Numeric", - "Int8", - "Int8", - "Int8", - "Bytea", - "Bytea", - "Int4", - "Int8", - "Int8", - "Int8", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "c4835d40921af47bfb4f60102bbba3af74e8e7b5944cb2943b5badb906167046" -} diff --git a/core/lib/dal/.sqlx/query-70d2f1a59e5e12c4c0c843fe14c674d9dab0ebd6256b0d3c9142a62f99de111d.json b/core/lib/dal/.sqlx/query-ee2634e9c09cbdbcf039f6c0b6924547e783aab9c9f772eaee3d464b3a671f04.json similarity index 52% rename from core/lib/dal/.sqlx/query-70d2f1a59e5e12c4c0c843fe14c674d9dab0ebd6256b0d3c9142a62f99de111d.json rename to core/lib/dal/.sqlx/query-ee2634e9c09cbdbcf039f6c0b6924547e783aab9c9f772eaee3d464b3a671f04.json index 898bfd9e5f5..53579e54209 100644 --- a/core/lib/dal/.sqlx/query-70d2f1a59e5e12c4c0c843fe14c674d9dab0ebd6256b0d3c9142a62f99de111d.json +++ b/core/lib/dal/.sqlx/query-ee2634e9c09cbdbcf039f6c0b6924547e783aab9c9f772eaee3d464b3a671f04.json @@ -1,12 +1,6 @@ { "db_name": "PostgreSQL", -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-70d2f1a59e5e12c4c0c843fe14c674d9dab0ebd6256b0d3c9142a62f99de111d.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", -======== - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN 
commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", ->>>>>>>> main:core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -145,7 +139,7 @@ "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, "name": "blob_id?", "type_info": "Text" } @@ -188,11 +182,5 @@ false ] }, -<<<<<<<< HEAD:core/lib/dal/.sqlx/query-70d2f1a59e5e12c4c0c843fe14c674d9dab0ebd6256b0d3c9142a62f99de111d.json - "hash": "70d2f1a59e5e12c4c0c843fe14c674d9dab0ebd6256b0d3c9142a62f99de111d" -|||||||| 3fd2fb14e:core/lib/dal/.sqlx/query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json - "hash": "932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d" -======== - "hash": "b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731" ->>>>>>>> main:core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json + "hash": "ee2634e9c09cbdbcf039f6c0b6924547e783aab9c9f772eaee3d464b3a671f04" } diff --git a/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json b/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json new file mode 100644 index 00000000000..4fe32531a3f --- /dev/null +++ b/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n $21,\n NOW(),\n NOW()\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int4", + "Int8", + "ByteaArray", + "Bytea", + "ByteaArray", + "Int8", + "Int8", + "Int8", + "Jsonb", + "Jsonb", + "Bytea", + "Bytea", + "Bytea", + "Int4", + "ByteaArray", + "Int8Array", + "Int8Array", + "Bytea", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950" +} From 
bf93f781e672c318eaa1b5531b7f53f962b1a05b Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Tue, 8 Oct 2024 16:28:47 -0300 Subject: [PATCH 25/36] Format code --- core/lib/dal/src/blocks_dal.rs | 90 ++++++++++++++++++++++------------ 1 file changed, 58 insertions(+), 32 deletions(-) diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index d444fed76c4..9030214e9b1 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -338,8 +338,10 @@ impl BlocksDal<'_, '_> { data_availability.blob_id AS "blob_id?" FROM l1_batches - LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number - LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number + LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE number = $1 "#, @@ -1006,9 +1008,13 @@ impl BlocksDal<'_, '_> { sqlx::query!( r#" INSERT INTO - commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment) + commitments ( + l1_batch_number, + events_queue_commitment, + bootloader_initial_content_commitment + ) VALUES - ($1, $2, $3) + ($1, $2, $3) ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::from(number.0), @@ -1067,8 +1073,10 @@ impl BlocksDal<'_, '_> { data_availability.blob_id AS "blob_id?" FROM l1_batches - LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number - LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number + LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE number = 0 OR eth_commit_tx_id IS NOT NULL @@ -1100,7 +1108,9 @@ impl BlocksDal<'_, '_> { number FROM l1_batches - LEFT JOIN eth_txs_history AS commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) + LEFT JOIN + eth_txs_history AS commit_tx + ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) WHERE commit_tx.confirmed_at IS NOT NULL ORDER BY @@ -1167,7 +1177,9 @@ impl BlocksDal<'_, '_> { number FROM l1_batches - LEFT JOIN eth_txs_history AS prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id) + LEFT JOIN + eth_txs_history AS prove_tx + ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id) WHERE prove_tx.confirmed_at IS NOT NULL ORDER BY @@ -1192,7 +1204,9 @@ impl BlocksDal<'_, '_> { number FROM l1_batches - LEFT JOIN eth_txs_history AS execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id) + LEFT JOIN + eth_txs_history AS execute_tx + ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id) WHERE execute_tx.confirmed_at IS NOT NULL ORDER BY @@ -1250,8 +1264,10 @@ impl BlocksDal<'_, '_> { data_availability.blob_id AS "blob_id?" 
FROM l1_batches - LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number - LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number + LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE eth_commit_tx_id IS NOT NULL AND eth_prove_tx_id IS NULL @@ -1338,7 +1354,7 @@ impl BlocksDal<'_, '_> { ROW_NUMBER() OVER ( ORDER BY number ASC - ) AS ROW_NUMBER + ) AS row_number FROM l1_batches WHERE @@ -1350,10 +1366,10 @@ impl BlocksDal<'_, '_> { LIMIT $2 ) inn - LEFT JOIN commitments ON commitments.l1_batch_number = inn.number - LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number + LEFT JOIN commitments ON commitments.l1_batch_number = inn.number + LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number WHERE - number - ROW_NUMBER = $1 + number - row_number = $1 "#, last_proved_batch_number.0 as i32, limit as i32 @@ -1409,8 +1425,10 @@ impl BlocksDal<'_, '_> { data_availability.blob_id AS "blob_id?" FROM l1_batches - LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number - LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number + LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL @@ -1482,8 +1500,10 @@ impl BlocksDal<'_, '_> { MAX(l1_batches.number) FROM l1_batches - JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id) - JOIN eth_txs_history AS commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id) + JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id) + JOIN + eth_txs_history AS commit_tx + ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id) WHERE commit_tx.confirmed_at IS NOT NULL AND eth_prove_tx_id IS NOT NULL @@ -1491,7 +1511,7 @@ impl BlocksDal<'_, '_> { AND EXTRACT( EPOCH FROM - commit_tx.confirmed_at + commit_tx.confirmed_at ) < $1 "#, max_l1_batch_timestamp_seconds_bd, @@ -1537,8 +1557,10 @@ impl BlocksDal<'_, '_> { data_availability.blob_id AS "blob_id?" FROM l1_batches - LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number - LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number + LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE number BETWEEN $1 AND $2 ORDER BY @@ -1604,9 +1626,11 @@ impl BlocksDal<'_, '_> { data_availability.blob_id AS "blob_id?" FROM l1_batches - LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number - JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version - LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number + LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number WHERE eth_commit_tx_id IS NULL AND number != 0 @@ -1685,9 +1709,11 @@ impl BlocksDal<'_, '_> { data_availability.blob_id AS "blob_id?" 
FROM l1_batches - LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number - LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number - JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version + LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number + LEFT JOIN + data_availability + ON data_availability.l1_batch_number = l1_batches.number + JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version WHERE eth_commit_tx_id IS NULL AND number != 0 @@ -1886,7 +1912,7 @@ impl BlocksDal<'_, '_> { bytecode FROM factory_deps - INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number + INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number WHERE miniblocks.l1_batch_number = $1 "#, @@ -2382,7 +2408,7 @@ impl BlocksDal<'_, '_> { value FROM l2_to_l1_logs - JOIN miniblocks ON l2_to_l1_logs.miniblock_number = miniblocks.number + JOIN miniblocks ON l2_to_l1_logs.miniblock_number = miniblocks.number WHERE l1_batch_number = $1 ORDER BY @@ -2480,8 +2506,8 @@ impl BlocksDal<'_, '_> { FROM ( SELECT - UNNEST($1::BIGINT[]) AS number, - UNNEST($2::BYTEA[]) AS logs_bloom + UNNEST($1::BIGINT []) AS number, + UNNEST($2::BYTEA []) AS logs_bloom ) AS data WHERE miniblocks.number = data.number From 2e49a43d6e2f921c5f872a9353f87dd1a53417cb Mon Sep 17 00:00:00 2001 From: Juan Munoz Date: Tue, 8 Oct 2024 17:12:08 -0300 Subject: [PATCH 26/36] query changes --- ...c2563a3e061bcc6505d8c295b1b2517f85f1b.json | 20 ----------------- ...6852faf20600198aa78f66ad8ba9a37ac455f.json | 22 ------------------- ...154185671d13f3b234ce1f16aaeeab54b2a16.json | 16 ++++++++++++++ ...2f75a4055a9c1e7c3b767455df804f09f7d4c.json | 20 +++++++++++++++++ ...1eda9057c7fb71b8f0d96f69fda78662ecec5.json | 20 +++++++++++++++++ ...87ed365c0369f94bc457cb162a5c876a123c7.json | 16 -------------- ...446e93a47622f4c6753c46dd24e8733d4ad4.json} | 4 ++-- ...82aa15db6b572c8bf041399cca6776d6a7a3.json} | 4 ++-- ...3a2976d6c4fd581e66dd27439ae42a04b7f5.json} | 4 ++-- ...d0f52d6a11c0b27aa346a47a1545d2527639.json} | 4 ++-- ...c102d83e3e851a1c291d718f22f71c535109.json} | 4 ++-- ...1835d00426c893355931aeac42d86beba431.json} | 4 ++-- ...2846ec9d5a814df33abe2f9a70861ab6ed94.json} | 4 ++-- ...8505d908840f5e52a3777a0d844cd229480e.json} | 4 ++-- ...39de67f5c24d7a798e29668d36278ed775c0.json} | 4 ++-- ...a4b4e2af48907fa9321064ddb24ac02ab17cd.json | 20 ----------------- ...db71f3457e1619a170683e03ddf0e78abaf29.json | 22 +++++++++++++++++++ ...3c5b416cbb2c1a1e2712dce1f01835181c3b.json} | 4 ++-- ...acdfc5af364d7271c06db0c95e0f1b0e33f9.json} | 4 ++-- ...8242aad3e9a47400f6d6837a35f4c54a216b9.json | 20 ----------------- ...ce3d957afdb5f9809929dbe7a67a4a164e741.json | 20 +++++++++++++++++ 21 files changed, 120 insertions(+), 120 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-026ab7dd7407f10074a2966b5eac2563a3e061bcc6505d8c295b1b2517f85f1b.json delete mode 100644 core/lib/dal/.sqlx/query-13701f55aab0a278a29e21ac2326852faf20600198aa78f66ad8ba9a37ac455f.json create mode 100644 core/lib/dal/.sqlx/query-1767de83aa4ef34f080da6d30d6154185671d13f3b234ce1f16aaeeab54b2a16.json create mode 100644 core/lib/dal/.sqlx/query-1c9468bead5b80f209c4e94ab652f75a4055a9c1e7c3b767455df804f09f7d4c.json create mode 100644 core/lib/dal/.sqlx/query-2a4bfb1ed34f0d508352a5f3f761eda9057c7fb71b8f0d96f69fda78662ecec5.json delete mode 100644 core/lib/dal/.sqlx/query-307f15e00a97440868189f25e4487ed365c0369f94bc457cb162a5c876a123c7.json rename 
core/lib/dal/.sqlx/{query-73754563c5e06879304e3a491bd21849f9aad29de043fabb8f781bcb520266ff.json => query-38d7647ec6368e08e3872a6fcbde446e93a47622f4c6753c46dd24e8733d4ad4.json} (88%) rename core/lib/dal/.sqlx/{query-77cb08989cff02859eac838cae0b81e3d713e93e16523e111989f0de47e9dad8.json => query-4c606e5527402d3a3c9e30bed63082aa15db6b572c8bf041399cca6776d6a7a3.json} (92%) rename core/lib/dal/.sqlx/{query-8903ba5db3f87851c12da133573b4207b69cc48b4ba648e797211631be612b69.json => query-5730de5e37535cf126667b89da6e3a2976d6c4fd581e66dd27439ae42a04b7f5.json} (66%) rename core/lib/dal/.sqlx/{query-a46d8b286dd078c905523b34643b0cf8ad6e1046dce9d5e24c1a1eea138d4769.json => query-60cfe2cbb047f702e181d3ec0018d0f52d6a11c0b27aa346a47a1545d2527639.json} (79%) rename core/lib/dal/.sqlx/{query-680b632b9dd1204d8c024ba03e882c6539a0bdfc0571df9433d5ce97b6a929ed.json => query-8813dc03aadba85a7c8d0b1f0f19c102d83e3e851a1c291d718f22f71c535109.json} (88%) rename core/lib/dal/.sqlx/{query-ee2634e9c09cbdbcf039f6c0b6924547e783aab9c9f772eaee3d464b3a671f04.json => query-8ee904c22462b1c9b57be0af039a1835d00426c893355931aeac42d86beba431.json} (89%) rename core/lib/dal/.sqlx/{query-14b6544c4dc13437019c97de62b7636469ed35acab9237802753bd970cda6835.json => query-905d3dc2a7757ab78175dbc5fdbd2846ec9d5a814df33abe2f9a70861ab6ed94.json} (89%) rename core/lib/dal/.sqlx/{query-8b5b1ef99c268f752f9c81450a020db9c633cfc433ade18980a9f6d76293aaf4.json => query-ae7dc0868a7d7e1ff5054fb9b2b58505d908840f5e52a3777a0d844cd229480e.json} (83%) rename core/lib/dal/.sqlx/{query-768a52f988e097b5a09e6349ce65e29ed8ab2042aae39403c66ae0a668a7c986.json => query-b833a1e7984705508d8fb0fad54539de67f5c24d7a798e29668d36278ed775c0.json} (92%) delete mode 100644 core/lib/dal/.sqlx/query-bd51c9d93b103292f5acbdb266ba4b4e2af48907fa9321064ddb24ac02ab17cd.json create mode 100644 core/lib/dal/.sqlx/query-c138d84e1af2e2ef20395201f55db71f3457e1619a170683e03ddf0e78abaf29.json rename core/lib/dal/.sqlx/{query-58f900812efdb615f6286eb4212c3252680594256dccb0d7e51444f613011980.json => query-cda64a69c8df8eaf67d09324f8283c5b416cbb2c1a1e2712dce1f01835181c3b.json} (85%) rename core/lib/dal/.sqlx/{query-29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f.json => query-cda66c56ab9ff1dcb1b2882a23c4acdfc5af364d7271c06db0c95e0f1b0e33f9.json} (56%) delete mode 100644 core/lib/dal/.sqlx/query-d3b09cbcddf6238b358d32d57678242aad3e9a47400f6d6837a35f4c54a216b9.json create mode 100644 core/lib/dal/.sqlx/query-f551ac609521345b0dbcce65df2ce3d957afdb5f9809929dbe7a67a4a164e741.json diff --git a/core/lib/dal/.sqlx/query-026ab7dd7407f10074a2966b5eac2563a3e061bcc6505d8c295b1b2517f85f1b.json b/core/lib/dal/.sqlx/query-026ab7dd7407f10074a2966b5eac2563a3e061bcc6505d8c295b1b2517f85f1b.json deleted file mode 100644 index d98798241f7..00000000000 --- a/core/lib/dal/.sqlx/query-026ab7dd7407f10074a2966b5eac2563a3e061bcc6505d8c295b1b2517f85f1b.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number\n FROM\n l1_batches\n LEFT JOIN eth_txs_history AS prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id)\n WHERE\n prove_tx.confirmed_at IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false - ] - }, - "hash": "026ab7dd7407f10074a2966b5eac2563a3e061bcc6505d8c295b1b2517f85f1b" -} diff --git a/core/lib/dal/.sqlx/query-13701f55aab0a278a29e21ac2326852faf20600198aa78f66ad8ba9a37ac455f.json 
b/core/lib/dal/.sqlx/query-13701f55aab0a278a29e21ac2326852faf20600198aa78f66ad8ba9a37ac455f.json deleted file mode 100644 index 86393dce8ab..00000000000 --- a/core/lib/dal/.sqlx/query-13701f55aab0a278a29e21ac2326852faf20600198aa78f66ad8ba9a37ac455f.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(l1_batches.number)\n FROM\n l1_batches\n JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id)\n JOIN eth_txs_history AS commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id)\n WHERE\n commit_tx.confirmed_at IS NOT NULL\n AND eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n AND EXTRACT(\n EPOCH\n FROM\n commit_tx.confirmed_at\n ) < $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "max", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Numeric" - ] - }, - "nullable": [ - null - ] - }, - "hash": "13701f55aab0a278a29e21ac2326852faf20600198aa78f66ad8ba9a37ac455f" -} diff --git a/core/lib/dal/.sqlx/query-1767de83aa4ef34f080da6d30d6154185671d13f3b234ce1f16aaeeab54b2a16.json b/core/lib/dal/.sqlx/query-1767de83aa4ef34f080da6d30d6154185671d13f3b234ce1f16aaeeab54b2a16.json new file mode 100644 index 00000000000..84fc44f7b5f --- /dev/null +++ b/core/lib/dal/.sqlx/query-1767de83aa4ef34f080da6d30d6154185671d13f3b234ce1f16aaeeab54b2a16.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n commitments (\n l1_batch_number,\n events_queue_commitment,\n bootloader_initial_content_commitment\n )\n VALUES\n ($1, $2, $3)\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Bytea", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "1767de83aa4ef34f080da6d30d6154185671d13f3b234ce1f16aaeeab54b2a16" +} diff --git a/core/lib/dal/.sqlx/query-1c9468bead5b80f209c4e94ab652f75a4055a9c1e7c3b767455df804f09f7d4c.json b/core/lib/dal/.sqlx/query-1c9468bead5b80f209c4e94ab652f75a4055a9c1e7c3b767455df804f09f7d4c.json new file mode 100644 index 00000000000..39eaca5a943 --- /dev/null +++ b/core/lib/dal/.sqlx/query-1c9468bead5b80f209c4e94ab652f75a4055a9c1e7c3b767455df804f09f7d4c.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number\n FROM\n l1_batches\n LEFT JOIN\n eth_txs_history AS prove_tx\n ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id)\n WHERE\n prove_tx.confirmed_at IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "1c9468bead5b80f209c4e94ab652f75a4055a9c1e7c3b767455df804f09f7d4c" +} diff --git a/core/lib/dal/.sqlx/query-2a4bfb1ed34f0d508352a5f3f761eda9057c7fb71b8f0d96f69fda78662ecec5.json b/core/lib/dal/.sqlx/query-2a4bfb1ed34f0d508352a5f3f761eda9057c7fb71b8f0d96f69fda78662ecec5.json new file mode 100644 index 00000000000..7444f11734a --- /dev/null +++ b/core/lib/dal/.sqlx/query-2a4bfb1ed34f0d508352a5f3f761eda9057c7fb71b8f0d96f69fda78662ecec5.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number\n FROM\n l1_batches\n LEFT JOIN\n eth_txs_history AS commit_tx\n ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id)\n WHERE\n commit_tx.confirmed_at IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": 
"2a4bfb1ed34f0d508352a5f3f761eda9057c7fb71b8f0d96f69fda78662ecec5" +} diff --git a/core/lib/dal/.sqlx/query-307f15e00a97440868189f25e4487ed365c0369f94bc457cb162a5c876a123c7.json b/core/lib/dal/.sqlx/query-307f15e00a97440868189f25e4487ed365c0369f94bc457cb162a5c876a123c7.json deleted file mode 100644 index 6c55bf34eab..00000000000 --- a/core/lib/dal/.sqlx/query-307f15e00a97440868189f25e4487ed365c0369f94bc457cb162a5c876a123c7.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment)\n VALUES\n ($1, $2, $3)\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Bytea", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "307f15e00a97440868189f25e4487ed365c0369f94bc457cb162a5c876a123c7" -} diff --git a/core/lib/dal/.sqlx/query-73754563c5e06879304e3a491bd21849f9aad29de043fabb8f781bcb520266ff.json b/core/lib/dal/.sqlx/query-38d7647ec6368e08e3872a6fcbde446e93a47622f4c6753c46dd24e8733d4ad4.json similarity index 88% rename from core/lib/dal/.sqlx/query-73754563c5e06879304e3a491bd21849f9aad29de043fabb8f781bcb520266ff.json rename to core/lib/dal/.sqlx/query-38d7647ec6368e08e3872a6fcbde446e93a47622f4c6753c46dd24e8733d4ad4.json index c6c616db7ce..ce61817fcd3 100644 --- a/core/lib/dal/.sqlx/query-73754563c5e06879304e3a491bd21849f9aad29de043fabb8f781bcb520266ff.json +++ b/core/lib/dal/.sqlx/query-38d7647ec6368e08e3872a6fcbde446e93a47622f4c6753c46dd24e8733d4ad4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -180,5 +180,5 @@ false ] }, - "hash": 
"73754563c5e06879304e3a491bd21849f9aad29de043fabb8f781bcb520266ff" + "hash": "38d7647ec6368e08e3872a6fcbde446e93a47622f4c6753c46dd24e8733d4ad4" } diff --git a/core/lib/dal/.sqlx/query-77cb08989cff02859eac838cae0b81e3d713e93e16523e111989f0de47e9dad8.json b/core/lib/dal/.sqlx/query-4c606e5527402d3a3c9e30bed63082aa15db6b572c8bf041399cca6776d6a7a3.json similarity index 92% rename from core/lib/dal/.sqlx/query-77cb08989cff02859eac838cae0b81e3d713e93e16523e111989f0de47e9dad8.json rename to core/lib/dal/.sqlx/query-4c606e5527402d3a3c9e30bed63082aa15db6b572c8bf041399cca6776d6a7a3.json index 31a0869a64a..905dd90d9bf 100644 --- a/core/lib/dal/.sqlx/query-77cb08989cff02859eac838cae0b81e3d713e93e16523e111989f0de47e9dad8.json +++ b/core/lib/dal/.sqlx/query-4c606e5527402d3a3c9e30bed63082aa15db6b572c8bf041399cca6776d6a7a3.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -180,5 +180,5 @@ false ] }, - "hash": "77cb08989cff02859eac838cae0b81e3d713e93e16523e111989f0de47e9dad8" + "hash": "4c606e5527402d3a3c9e30bed63082aa15db6b572c8bf041399cca6776d6a7a3" } diff --git a/core/lib/dal/.sqlx/query-8903ba5db3f87851c12da133573b4207b69cc48b4ba648e797211631be612b69.json b/core/lib/dal/.sqlx/query-5730de5e37535cf126667b89da6e3a2976d6c4fd581e66dd27439ae42a04b7f5.json similarity index 66% rename from core/lib/dal/.sqlx/query-8903ba5db3f87851c12da133573b4207b69cc48b4ba648e797211631be612b69.json rename to core/lib/dal/.sqlx/query-5730de5e37535cf126667b89da6e3a2976d6c4fd581e66dd27439ae42a04b7f5.json index 3d47a756f3e..275191d04f5 100644 --- a/core/lib/dal/.sqlx/query-8903ba5db3f87851c12da133573b4207b69cc48b4ba648e797211631be612b69.json +++ b/core/lib/dal/.sqlx/query-5730de5e37535cf126667b89da6e3a2976d6c4fd581e66dd27439ae42a04b7f5.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n bytecode_hash,\n 
bytecode\n FROM\n factory_deps\n INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number\n WHERE\n miniblocks.l1_batch_number = $1\n ", + "query": "\n SELECT\n bytecode_hash,\n bytecode\n FROM\n factory_deps\n INNER JOIN miniblocks ON miniblocks.number = factory_deps.miniblock_number\n WHERE\n miniblocks.l1_batch_number = $1\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ false ] }, - "hash": "8903ba5db3f87851c12da133573b4207b69cc48b4ba648e797211631be612b69" + "hash": "5730de5e37535cf126667b89da6e3a2976d6c4fd581e66dd27439ae42a04b7f5" } diff --git a/core/lib/dal/.sqlx/query-a46d8b286dd078c905523b34643b0cf8ad6e1046dce9d5e24c1a1eea138d4769.json b/core/lib/dal/.sqlx/query-60cfe2cbb047f702e181d3ec0018d0f52d6a11c0b27aa346a47a1545d2527639.json similarity index 79% rename from core/lib/dal/.sqlx/query-a46d8b286dd078c905523b34643b0cf8ad6e1046dce9d5e24c1a1eea138d4769.json rename to core/lib/dal/.sqlx/query-60cfe2cbb047f702e181d3ec0018d0f52d6a11c0b27aa346a47a1545d2527639.json index 0fa6af063d7..d9d5b0666c0 100644 --- a/core/lib/dal/.sqlx/query-a46d8b286dd078c905523b34643b0cf8ad6e1046dce9d5e24c1a1eea138d4769.json +++ b/core/lib/dal/.sqlx/query-60cfe2cbb047f702e181d3ec0018d0f52d6a11c0b27aa346a47a1545d2527639.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON 
data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -184,5 +184,5 @@ false ] }, - "hash": "a46d8b286dd078c905523b34643b0cf8ad6e1046dce9d5e24c1a1eea138d4769" + "hash": "60cfe2cbb047f702e181d3ec0018d0f52d6a11c0b27aa346a47a1545d2527639" } diff --git a/core/lib/dal/.sqlx/query-680b632b9dd1204d8c024ba03e882c6539a0bdfc0571df9433d5ce97b6a929ed.json b/core/lib/dal/.sqlx/query-8813dc03aadba85a7c8d0b1f0f19c102d83e3e851a1c291d718f22f71c535109.json similarity index 88% rename from core/lib/dal/.sqlx/query-680b632b9dd1204d8c024ba03e882c6539a0bdfc0571df9433d5ce97b6a929ed.json rename to core/lib/dal/.sqlx/query-8813dc03aadba85a7c8d0b1f0f19c102d83e3e851a1c291d718f22f71c535109.json index ffe9e7e39cd..029d477eb6e 100644 --- a/core/lib/dal/.sqlx/query-680b632b9dd1204d8c024ba03e882c6539a0bdfc0571df9433d5ce97b6a929ed.json +++ b/core/lib/dal/.sqlx/query-8813dc03aadba85a7c8d0b1f0f19c102d83e3e851a1c291d718f22f71c535109.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -178,5 +178,5 @@ false ] }, - "hash": 
"680b632b9dd1204d8c024ba03e882c6539a0bdfc0571df9433d5ce97b6a929ed" + "hash": "8813dc03aadba85a7c8d0b1f0f19c102d83e3e851a1c291d718f22f71c535109" } diff --git a/core/lib/dal/.sqlx/query-ee2634e9c09cbdbcf039f6c0b6924547e783aab9c9f772eaee3d464b3a671f04.json b/core/lib/dal/.sqlx/query-8ee904c22462b1c9b57be0af039a1835d00426c893355931aeac42d86beba431.json similarity index 89% rename from core/lib/dal/.sqlx/query-ee2634e9c09cbdbcf039f6c0b6924547e783aab9c9f772eaee3d464b3a671f04.json rename to core/lib/dal/.sqlx/query-8ee904c22462b1c9b57be0af039a1835d00426c893355931aeac42d86beba431.json index 53579e54209..5bae808826e 100644 --- a/core/lib/dal/.sqlx/query-ee2634e9c09cbdbcf039f6c0b6924547e783aab9c9f772eaee3d464b3a671f04.json +++ b/core/lib/dal/.sqlx/query-8ee904c22462b1c9b57be0af039a1835d00426c893355931aeac42d86beba431.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -182,5 +182,5 @@ false ] }, - "hash": "ee2634e9c09cbdbcf039f6c0b6924547e783aab9c9f772eaee3d464b3a671f04" + "hash": "8ee904c22462b1c9b57be0af039a1835d00426c893355931aeac42d86beba431" } diff --git a/core/lib/dal/.sqlx/query-14b6544c4dc13437019c97de62b7636469ed35acab9237802753bd970cda6835.json b/core/lib/dal/.sqlx/query-905d3dc2a7757ab78175dbc5fdbd2846ec9d5a814df33abe2f9a70861ab6ed94.json similarity index 89% rename from core/lib/dal/.sqlx/query-14b6544c4dc13437019c97de62b7636469ed35acab9237802753bd970cda6835.json rename to core/lib/dal/.sqlx/query-905d3dc2a7757ab78175dbc5fdbd2846ec9d5a814df33abe2f9a70861ab6ed94.json index fb7aceb0549..14268ad0bb7 100644 --- a/core/lib/dal/.sqlx/query-14b6544c4dc13437019c97de62b7636469ed35acab9237802753bd970cda6835.json +++ 
b/core/lib/dal/.sqlx/query-905d3dc2a7757ab78175dbc5fdbd2846ec9d5a814df33abe2f9a70861ab6ed94.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -180,5 +180,5 @@ false ] }, - "hash": "14b6544c4dc13437019c97de62b7636469ed35acab9237802753bd970cda6835" + "hash": "905d3dc2a7757ab78175dbc5fdbd2846ec9d5a814df33abe2f9a70861ab6ed94" } diff --git a/core/lib/dal/.sqlx/query-8b5b1ef99c268f752f9c81450a020db9c633cfc433ade18980a9f6d76293aaf4.json b/core/lib/dal/.sqlx/query-ae7dc0868a7d7e1ff5054fb9b2b58505d908840f5e52a3777a0d844cd229480e.json similarity index 83% rename from core/lib/dal/.sqlx/query-8b5b1ef99c268f752f9c81450a020db9c633cfc433ade18980a9f6d76293aaf4.json rename to core/lib/dal/.sqlx/query-ae7dc0868a7d7e1ff5054fb9b2b58505d908840f5e52a3777a0d844cd229480e.json index b78b7965ac7..45643410361 100644 --- a/core/lib/dal/.sqlx/query-8b5b1ef99c268f752f9c81450a020db9c633cfc433ade18980a9f6d76293aaf4.json +++ b/core/lib/dal/.sqlx/query-ae7dc0868a7d7e1ff5054fb9b2b58505d908840f5e52a3777a0d844cd229480e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n 
bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN data_availability ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -183,5 +183,5 @@ false ] }, - "hash": "8b5b1ef99c268f752f9c81450a020db9c633cfc433ade18980a9f6d76293aaf4" + "hash": "ae7dc0868a7d7e1ff5054fb9b2b58505d908840f5e52a3777a0d844cd229480e" } diff --git a/core/lib/dal/.sqlx/query-768a52f988e097b5a09e6349ce65e29ed8ab2042aae39403c66ae0a668a7c986.json b/core/lib/dal/.sqlx/query-b833a1e7984705508d8fb0fad54539de67f5c24d7a798e29668d36278ed775c0.json similarity index 92% rename from core/lib/dal/.sqlx/query-768a52f988e097b5a09e6349ce65e29ed8ab2042aae39403c66ae0a668a7c986.json rename to core/lib/dal/.sqlx/query-b833a1e7984705508d8fb0fad54539de67f5c24d7a798e29668d36278ed775c0.json index 55009c6bf24..3a89808e85f 100644 --- a/core/lib/dal/.sqlx/query-768a52f988e097b5a09e6349ce65e29ed8ab2042aae39403c66ae0a668a7c986.json +++ b/core/lib/dal/.sqlx/query-b833a1e7984705508d8fb0fad54539de67f5c24d7a798e29668d36278ed775c0.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n (\n SELECT\n l1_batches.*,\n 
ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS ROW_NUMBER\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - ROW_NUMBER = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n data_availability.blob_id AS \"blob_id?\"\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -181,5 +181,5 @@ false ] }, - "hash": "768a52f988e097b5a09e6349ce65e29ed8ab2042aae39403c66ae0a668a7c986" + "hash": "b833a1e7984705508d8fb0fad54539de67f5c24d7a798e29668d36278ed775c0" } diff --git a/core/lib/dal/.sqlx/query-bd51c9d93b103292f5acbdb266ba4b4e2af48907fa9321064ddb24ac02ab17cd.json b/core/lib/dal/.sqlx/query-bd51c9d93b103292f5acbdb266ba4b4e2af48907fa9321064ddb24ac02ab17cd.json deleted file mode 100644 index 7f1fc9b176c..00000000000 --- a/core/lib/dal/.sqlx/query-bd51c9d93b103292f5acbdb266ba4b4e2af48907fa9321064ddb24ac02ab17cd.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number\n FROM\n l1_batches\n LEFT JOIN eth_txs_history AS commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id)\n WHERE\n commit_tx.confirmed_at IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false - ] - }, - "hash": "bd51c9d93b103292f5acbdb266ba4b4e2af48907fa9321064ddb24ac02ab17cd" -} diff --git a/core/lib/dal/.sqlx/query-c138d84e1af2e2ef20395201f55db71f3457e1619a170683e03ddf0e78abaf29.json b/core/lib/dal/.sqlx/query-c138d84e1af2e2ef20395201f55db71f3457e1619a170683e03ddf0e78abaf29.json new file mode 100644 index 00000000000..0d80845d8f7 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c138d84e1af2e2ef20395201f55db71f3457e1619a170683e03ddf0e78abaf29.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MAX(l1_batches.number)\n FROM\n l1_batches\n JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id)\n JOIN\n eth_txs_history AS commit_tx\n ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id)\n WHERE\n commit_tx.confirmed_at IS NOT NULL\n AND eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n AND EXTRACT(\n EPOCH\n FROM\n commit_tx.confirmed_at\n ) < $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "max", + "type_info": "Int8" + } + ], 
+ "parameters": { + "Left": [ + "Numeric" + ] + }, + "nullable": [ + null + ] + }, + "hash": "c138d84e1af2e2ef20395201f55db71f3457e1619a170683e03ddf0e78abaf29" +} diff --git a/core/lib/dal/.sqlx/query-58f900812efdb615f6286eb4212c3252680594256dccb0d7e51444f613011980.json b/core/lib/dal/.sqlx/query-cda64a69c8df8eaf67d09324f8283c5b416cbb2c1a1e2712dce1f01835181c3b.json similarity index 85% rename from core/lib/dal/.sqlx/query-58f900812efdb615f6286eb4212c3252680594256dccb0d7e51444f613011980.json rename to core/lib/dal/.sqlx/query-cda64a69c8df8eaf67d09324f8283c5b416cbb2c1a1e2712dce1f01835181c3b.json index 0b45e2c25c2..69718cebdbe 100644 --- a/core/lib/dal/.sqlx/query-58f900812efdb615f6286eb4212c3252680594256dccb0d7e51444f613011980.json +++ b/core/lib/dal/.sqlx/query-cda64a69c8df8eaf67d09324f8283c5b416cbb2c1a1e2712dce1f01835181c3b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblock_number,\n log_index_in_miniblock,\n log_index_in_tx,\n tx_hash,\n l1_batch_number,\n shard_id,\n is_service,\n tx_index_in_miniblock,\n tx_index_in_l1_batch,\n sender,\n key,\n value\n FROM\n l2_to_l1_logs\n JOIN miniblocks ON l2_to_l1_logs.miniblock_number = miniblocks.number\n WHERE\n l1_batch_number = $1\n ORDER BY\n miniblock_number,\n log_index_in_miniblock\n ", + "query": "\n SELECT\n miniblock_number,\n log_index_in_miniblock,\n log_index_in_tx,\n tx_hash,\n l1_batch_number,\n shard_id,\n is_service,\n tx_index_in_miniblock,\n tx_index_in_l1_batch,\n sender,\n key,\n value\n FROM\n l2_to_l1_logs\n JOIN miniblocks ON l2_to_l1_logs.miniblock_number = miniblocks.number\n WHERE\n l1_batch_number = $1\n ORDER BY\n miniblock_number,\n log_index_in_miniblock\n ", "describe": { "columns": [ { @@ -84,5 +84,5 @@ false ] }, - "hash": "58f900812efdb615f6286eb4212c3252680594256dccb0d7e51444f613011980" + "hash": "cda64a69c8df8eaf67d09324f8283c5b416cbb2c1a1e2712dce1f01835181c3b" } diff --git a/core/lib/dal/.sqlx/query-29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f.json b/core/lib/dal/.sqlx/query-cda66c56ab9ff1dcb1b2882a23c4acdfc5af364d7271c06db0c95e0f1b0e33f9.json similarity index 56% rename from core/lib/dal/.sqlx/query-29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f.json rename to core/lib/dal/.sqlx/query-cda66c56ab9ff1dcb1b2882a23c4acdfc5af364d7271c06db0c95e0f1b0e33f9.json index 7582e0f64e7..ec79549adbd 100644 --- a/core/lib/dal/.sqlx/query-29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f.json +++ b/core/lib/dal/.sqlx/query-cda66c56ab9ff1dcb1b2882a23c4acdfc5af364d7271c06db0c95e0f1b0e33f9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE miniblocks\n SET\n logs_bloom = data.logs_bloom\n FROM\n (\n SELECT\n UNNEST($1::BIGINT[]) AS number,\n UNNEST($2::BYTEA[]) AS logs_bloom\n ) AS data\n WHERE\n miniblocks.number = data.number\n ", + "query": "\n UPDATE miniblocks\n SET\n logs_bloom = data.logs_bloom\n FROM\n (\n SELECT\n UNNEST($1::BIGINT []) AS number,\n UNNEST($2::BYTEA []) AS logs_bloom\n ) AS data\n WHERE\n miniblocks.number = data.number\n ", "describe": { "columns": [], "parameters": { @@ -11,5 +11,5 @@ }, "nullable": [] }, - "hash": "29a9350164fc0b2983f753e105a70e583b455383eec526eee3acfe6670e30f2f" + "hash": "cda66c56ab9ff1dcb1b2882a23c4acdfc5af364d7271c06db0c95e0f1b0e33f9" } diff --git a/core/lib/dal/.sqlx/query-d3b09cbcddf6238b358d32d57678242aad3e9a47400f6d6837a35f4c54a216b9.json b/core/lib/dal/.sqlx/query-d3b09cbcddf6238b358d32d57678242aad3e9a47400f6d6837a35f4c54a216b9.json deleted file mode 100644 index 
8770a9b596e..00000000000 --- a/core/lib/dal/.sqlx/query-d3b09cbcddf6238b358d32d57678242aad3e9a47400f6d6837a35f4c54a216b9.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number\n FROM\n l1_batches\n LEFT JOIN eth_txs_history AS execute_tx ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id)\n WHERE\n execute_tx.confirmed_at IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false - ] - }, - "hash": "d3b09cbcddf6238b358d32d57678242aad3e9a47400f6d6837a35f4c54a216b9" -} diff --git a/core/lib/dal/.sqlx/query-f551ac609521345b0dbcce65df2ce3d957afdb5f9809929dbe7a67a4a164e741.json b/core/lib/dal/.sqlx/query-f551ac609521345b0dbcce65df2ce3d957afdb5f9809929dbe7a67a4a164e741.json new file mode 100644 index 00000000000..9297d4a9cf8 --- /dev/null +++ b/core/lib/dal/.sqlx/query-f551ac609521345b0dbcce65df2ce3d957afdb5f9809929dbe7a67a4a164e741.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number\n FROM\n l1_batches\n LEFT JOIN\n eth_txs_history AS execute_tx\n ON (l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id)\n WHERE\n execute_tx.confirmed_at IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "f551ac609521345b0dbcce65df2ce3d957afdb5f9809929dbe7a67a4a164e741" +} From 9c612a738a04bb4ff191ec78b985ff5f93a955ee Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Tue, 8 Oct 2024 17:39:14 -0300 Subject: [PATCH 27/36] Remove formatting and configs --- Cargo.toml | 2 +- docker-compose.yml | 3 ++- docs/guides/build-docker.md | 3 +-- etc/env/base/eigen_da_client.toml | 2 -- etc/env/configs/dev_validium.toml | 20 ++++++++++---------- etc/env/file_based/general.yaml | 8 ++++++-- 6 files changed, 20 insertions(+), 18 deletions(-) delete mode 100644 etc/env/base/eigen_da_client.toml diff --git a/Cargo.toml b/Cargo.toml index 31f61f2b2d5..691341f71ba 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -185,7 +185,7 @@ tower-http = "0.5.2" tracing = "0.1" tracing-subscriber = "0.3" tracing-opentelemetry = "0.25.0" -time = "0.3.36" # Has to be same as used by `tracing-subscriber` +time = "0.3.36" # Has to be same as used by `tracing-subscriber` url = "2" web3 = "0.19.0" fraction = "0.15.3" diff --git a/docker-compose.yml b/docker-compose.yml index 0046b0d5db9..1e3a273ec9a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,4 @@ -version: "3.2" +version: '3.2' services: reth: restart: always @@ -15,6 +15,7 @@ services: command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config + postgres: image: "postgres:14" command: postgres -c 'max_connections=1000' diff --git a/docs/guides/build-docker.md b/docs/guides/build-docker.md index 6b0608275d8..5dd9cff022b 100644 --- a/docs/guides/build-docker.md +++ b/docs/guides/build-docker.md @@ -10,8 +10,7 @@ Install prerequisites: see ## Build docker files -You may build all images with [Makefile](../../docker/Makefile) located in [docker](../../docker) directory in this -repository +You may build all images with [Makefile](../../docker/Makefile) located in [docker](../../docker) directory in this repository > All 
commands should be run from the root directory of the repository diff --git a/etc/env/base/eigen_da_client.toml b/etc/env/base/eigen_da_client.toml deleted file mode 100644 index 93a1c59fb0a..00000000000 --- a/etc/env/base/eigen_da_client.toml +++ /dev/null @@ -1,2 +0,0 @@ -[eigen_da_client] -api_node_url = "http://127.0.0.1:4242" diff --git a/etc/env/configs/dev_validium.toml b/etc/env/configs/dev_validium.toml index 7b26cfe096f..5ed4ccb38e4 100644 --- a/etc/env/configs/dev_validium.toml +++ b/etc/env/configs/dev_validium.toml @@ -1,21 +1,21 @@ -__imports__ = ["base", "l1-inits/.init.env", "l2-inits/dev_validium.init.env"] +__imports__ = [ "base", "l1-inits/.init.env", "l2-inits/dev_validium.init.env" ] [chain.state_keeper] -compute_overhead_part = 1.0 -pubdata_overhead_part = 0.0 -batch_overhead_l1_gas = 1000000 +compute_overhead_part=1.0 +pubdata_overhead_part=0.0 +batch_overhead_l1_gas=1000000 # This value should be higher for Validium, but now it is not compatible with the current blobs model. # For this config to be set to its proper value we need to fully integrate Validium + Blobs. -max_pubdata_per_batch = 2097152 -fee_model_version = "V2" -l1_batch_commit_data_generator_mode = "Validium" +max_pubdata_per_batch=100000 +fee_model_version="V2" +l1_batch_commit_data_generator_mode="Validium" [eth_sender] -sender_pubdata_sending_mode = "Custom" +sender_pubdata_sending_mode="Custom" # This override will be removed soon but it is needed for now. [eth_sender.gas_adjuster] -max_blob_base_fee = 0 +max_blob_base_fee=0 [_metadata] -base = ['dev.toml'] +base=['dev.toml'] diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 28b421ad915..a4ba8c0201a 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -41,7 +41,7 @@ api: estimate_gas_scale_factor: 1.3 estimate_gas_acceptable_overestimation: 5000 max_tx_size: 1000000 - api_namespaces: [en, eth, net, web3, zks, pubsub, debug] + api_namespaces: [ en,eth,net,web3,zks,pubsub,debug ] state_keeper: transaction_slots: 8192 max_allowed_l2_tx_gas_limit: 15000000000 @@ -104,7 +104,7 @@ eth: aggregated_block_execute_deadline: 10 timestamp_criteria_max_allowed_lag: 30 max_eth_tx_data_size: 120000 - aggregated_proof_sizes: [1] + aggregated_proof_sizes: [ 1 ] max_aggregated_tx_gas: 15000000 max_acceptable_priority_fee_in_gwei: 100000000000 pubdata_sending_mode: BLOBS @@ -121,6 +121,7 @@ eth: confirmations_for_eth_event: 0 eth_node_poll_interval: 300 + snapshot_creator: object_store: file_backed: @@ -129,6 +130,7 @@ snapshot_creator: concurrent_queries_count: 25 storage_logs_chunk_size: 1000000 + prover: prover_object_store: file_backed: @@ -287,6 +289,7 @@ prover_job_monitor: witness_job_queuer_run_interval_ms: 10000 http_port: 3074 + base_token_adjuster: price_polling_interval_ms: 30000 price_cache_update_interval_ms: 2000 @@ -298,6 +301,7 @@ external_price_api_client: forced_numerator: 3 forced_denominator: 2 + house_keeper: l1_batch_metrics_reporting_interval_ms: 10000 From d6c4c26f2c6ea36118b5c7d6094ce11fb05a93d1 Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Tue, 8 Oct 2024 17:40:10 -0300 Subject: [PATCH 28/36] Update contracts --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 80ab7a821dd..85b4b49b73a 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 80ab7a821ddab19a04867d3437ed1064f70b53a2 +Subproject commit 85b4b49b73a573cae1444b50aae8a46c4ad03bd7 From 
22da3329c5093f43d320cf5a7b72e3ed3ceb5ee4 Mon Sep 17 00:00:00 2001 From: Juan Munoz Date: Tue, 8 Oct 2024 18:03:56 -0300 Subject: [PATCH 29/36] add max blob size to doc --- eigenda-integration.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/eigenda-integration.md b/eigenda-integration.md index fe18f806404..2ff5c1ae4b9 100644 --- a/eigenda-integration.md +++ b/eigenda-integration.md @@ -18,10 +18,16 @@ da_client: eigenda-proxy: image: ghcr.io/layr-labs/eigenda-proxy ports: - - '4242:4242' + - "4242:4242" command: ./eigenda-proxy --addr 0.0.0.0 --port 4242 --memstore.enabled --eigenda-max-blob-length "2MiB" ``` +3. (optional) for using pubdata with 2MiB (as per specification), modify general config: + +```yaml +max_pubdata_per_batch: 2097152 +``` + ## Local Setup 1. Install `zk_inception` & `zk_supervisor` From 24256c8f3cd4aefd4c22c392473a1df8306db9e3 Mon Sep 17 00:00:00 2001 From: juan518munoz <62400508+juan518munoz@users.noreply.github.com> Date: Tue, 8 Oct 2024 18:48:07 -0300 Subject: [PATCH 30/36] Update eigenda-integration.md --- eigenda-integration.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eigenda-integration.md b/eigenda-integration.md index 2ff5c1ae4b9..4750f6783d2 100644 --- a/eigenda-integration.md +++ b/eigenda-integration.md @@ -4,7 +4,7 @@ Changes needed both for local and mainnet/testnet setup. -1. Add `da_client` to general config: +1. Add `da_client` to `etc/env/file_based/general.yaml`: ```yaml da_client: @@ -22,7 +22,7 @@ eigenda-proxy: command: ./eigenda-proxy --addr 0.0.0.0 --port 4242 --memstore.enabled --eigenda-max-blob-length "2MiB" ``` -3. (optional) for using pubdata with 2MiB (as per specification), modify general config: +3. (optional) for using pubdata with 2MiB (as per specification), modify `etc/env/file_based/general.yaml`: ```yaml max_pubdata_per_batch: 2097152 From 65d3a4ea940f9774856506721600b988c66f88f4 Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Thu, 10 Oct 2024 10:30:41 -0300 Subject: [PATCH 31/36] Format code --- core/bin/zksync_server/src/node_builder.rs | 5 +- core/lib/contracts/src/lib.rs | 8 +- core/node/da_clients/src/blob_info.rs | 86 ++++++++++++------- core/node/da_clients/src/lib.rs | 2 +- .../layers/da_clients/eigen_da.rs | 11 ++- eigenda-integration.md | 2 +- .../forge_interface/deploy_ecosystem/input.rs | 3 +- 7 files changed, 76 insertions(+), 41 deletions(-) diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index e5c95d431fa..c38c6b2ac1d 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -524,7 +524,10 @@ impl MainNodeBuilder { .add_layer(ObjectStorageClientWiringLayer::new(config)); } (DAClient::EigenDA(config), _) => { - self.node.add_layer(EigenDAWiringLayer::new(config, self.contracts_config.eigenda_verifier_addr.unwrap())); + self.node.add_layer(EigenDAWiringLayer::new( + config, + self.contracts_config.eigenda_verifier_addr.unwrap(), + )); } } diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index 551179d57df..e3544b0c439 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -64,10 +64,8 @@ const LOADNEXT_CONTRACT_FILE: &str = "etc/contracts-test-data/artifacts-zk/contracts/loadnext/loadnext_contract.sol/LoadnextContract.json"; const LOADNEXT_SIMPLE_CONTRACT_FILE: &str = "etc/contracts-test-data/artifacts-zk/contracts/loadnext/loadnext_contract.sol/Foo.json"; 
-const EIGENDA_VERIFIER_CONTRACT_FILE: (&str, &str) = ( - "eigenda", - "EigendaVerifier.sol/EigendaVerifier.json", -); +const EIGENDA_VERIFIER_CONTRACT_FILE: (&str, &str) = + ("eigenda", "EigendaVerifier.sol/EigendaVerifier.json"); fn home_path() -> PathBuf { Workspace::locate().core() } @@ -167,7 +165,7 @@ pub fn verifier_contract() -> Contract { } pub fn eigenda_verifier_contract() -> Contract { - load_contract_for_both_compilers(EIGENDA_VERIFIER_CONTRACT_FILE) + load_contract_for_both_compilers(EIGENDA_VERIFIER_CONTRACT_FILE) } #[derive(Debug, Clone)] diff --git a/core/node/da_clients/src/blob_info.rs b/core/node/da_clients/src/blob_info.rs index 8460fc399fd..f25900836ca 100644 --- a/core/node/da_clients/src/blob_info.rs +++ b/core/node/da_clients/src/blob_info.rs @@ -1,10 +1,9 @@ -use rlp::Decodable; -use rlp::DecoderError; -use rlp::Rlp; -use zksync_types::web3::contract::Tokenizable; -use zksync_types::web3::contract::Tokenize; -use zksync_types::ethabi::Token; -use zksync_types::U256; +use rlp::{Decodable, DecoderError, Rlp}; +use zksync_types::{ + ethabi::Token, + web3::contract::{Tokenizable, Tokenize}, + U256, +}; #[derive(Debug)] pub struct G1Commitment { @@ -14,8 +13,8 @@ pub struct G1Commitment { impl Decodable for G1Commitment { fn decode(rlp: &Rlp) -> Result<Self, DecoderError> { - let x: Vec<u8> = rlp.val_at(0)?; // Decode first element as Vec<u8> - let y: Vec<u8> = rlp.val_at(1)?; // Decode second element as Vec<u8> + let x: Vec<u8> = rlp.val_at(0)?; // Decode first element as Vec<u8> + let y: Vec<u8> = rlp.val_at(1)?; // Decode second element as Vec<u8> Ok(G1Commitment { x, y }) } @@ -23,7 +22,6 @@ impl Decodable for G1Commitment { impl Tokenize for G1Commitment { fn into_tokens(self) -> Vec<Token> { - let x = Token::Uint(U256::from_big_endian(&self.x)); let y = Token::Uint(U256::from_big_endian(&self.y)); @@ -36,7 +34,7 @@ pub struct BlobQuorumParam { pub quorum_number: u32, pub adversary_threshold_percentage: u32, pub confirmation_threshold_percentage: u32, - pub chunk_length: u32 + pub chunk_length: u32, } impl Decodable for BlobQuorumParam { @@ -52,13 +50,19 @@ impl Decodable for BlobQuorumParam { impl Tokenize for BlobQuorumParam { fn into_tokens(self) -> Vec<Token> { - let quorum_number = Token::Uint(U256::from(self.quorum_number)); - let adversary_threshold_percentage = Token::Uint(U256::from(self.adversary_threshold_percentage)); - let confirmation_threshold_percentage = Token::Uint(U256::from(self.confirmation_threshold_percentage)); + let adversary_threshold_percentage = + Token::Uint(U256::from(self.adversary_threshold_percentage)); + let confirmation_threshold_percentage = + Token::Uint(U256::from(self.confirmation_threshold_percentage)); let chunk_length = Token::Uint(U256::from(self.chunk_length)); - vec![quorum_number, adversary_threshold_percentage,confirmation_threshold_percentage,chunk_length] + vec![ + quorum_number, + adversary_threshold_percentage, + confirmation_threshold_percentage, + chunk_length, + ] } } @@ -66,7 +70,7 @@ impl Tokenize for BlobQuorumParam { pub struct BlobHeader { pub commitment: G1Commitment, pub data_length: u32, - pub blob_quorum_params: Vec<BlobQuorumParam> + pub blob_quorum_params: Vec<BlobQuorumParam>, } impl Decodable for BlobHeader { @@ -87,9 +91,17 @@ impl Tokenize for BlobHeader { fn into_tokens(self) -> Vec<Token> { let commitment = self.commitment.into_tokens(); let data_length = Token::Uint(U256::from(self.data_length)); - let blob_quorum_params = self.blob_quorum_params.into_iter().map(|quorum| Token::Tuple(quorum.into_tokens())).collect(); - - vec![Token::Tuple(commitment), data_length,Token::Array(blob_quorum_params)] +
let blob_quorum_params = self + .blob_quorum_params + .into_iter() + .map(|quorum| Token::Tuple(quorum.into_tokens())) + .collect(); + + vec![ + Token::Tuple(commitment), + data_length, + Token::Array(blob_quorum_params), + ] } } @@ -98,7 +110,7 @@ pub struct BatchHeader { pub batch_root: Vec<u8>, pub quorum_numbers: Vec<u8>, pub quorum_signed_percentages: Vec<u8>, - pub reference_block_number: u32 + pub reference_block_number: u32, } impl Decodable for BatchHeader { @@ -119,7 +131,12 @@ impl Tokenize for BatchHeader { let quorum_signed_percentages = self.quorum_signed_percentages.into_token(); let reference_block_number = Token::Uint(U256::from(self.reference_block_number)); - vec![batch_root, quorum_numbers,quorum_signed_percentages,reference_block_number] + vec![ + batch_root, + quorum_numbers, + quorum_signed_percentages, + reference_block_number, + ] } } @@ -129,7 +146,7 @@ pub struct BatchMetadata { pub signatory_record_hash: Vec<u8>, pub fee: Vec<u8>, pub confirmation_block_number: u32, - pub batch_header_hash: Vec<u8> + pub batch_header_hash: Vec<u8>, } impl Decodable for BatchMetadata { @@ -152,7 +169,11 @@ impl Tokenize for BatchMetadata { let signatory_record_hash = Token::FixedBytes(self.signatory_record_hash); let confirmation_block_number = Token::Uint(U256::from(self.confirmation_block_number)); - vec![Token::Tuple(batch_header), signatory_record_hash,confirmation_block_number] + vec![ + Token::Tuple(batch_header), + signatory_record_hash, + confirmation_block_number, + ] } } @@ -162,7 +183,7 @@ pub struct BlobVerificationProof { pub blob_index: u32, pub batch_medatada: BatchMetadata, pub inclusion_proof: Vec<u8>, - pub quorum_indexes: Vec<u8> + pub quorum_indexes: Vec<u8>, } impl Decodable for BlobVerificationProof { @@ -185,14 +206,20 @@ impl Tokenize for BlobVerificationProof { let inclusion_proof = self.inclusion_proof.into_token(); let quorum_indexes = self.quorum_indexes.into_token(); - vec![batch_id, blob_index,Token::Tuple(batch_medatada),inclusion_proof,quorum_indexes] + vec![ + batch_id, + blob_index, + Token::Tuple(batch_medatada), + inclusion_proof, + quorum_indexes, + ] } } #[derive(Debug)] pub struct BlobInfo { pub blob_header: BlobHeader, - pub blob_verification_proof: BlobVerificationProof + pub blob_verification_proof: BlobVerificationProof, } impl Decodable for BlobInfo { @@ -212,8 +239,9 @@ impl Tokenize for BlobInfo { let blob_header = self.blob_header.into_tokens(); let blob_verification_proof = self.blob_verification_proof.into_tokens(); - vec![Token::Tuple(vec![Token::Tuple(blob_header),Token::Tuple(blob_verification_proof)])] + vec![Token::Tuple(vec![ + Token::Tuple(blob_header), + Token::Tuple(blob_verification_proof), + ])] } } - - diff --git a/core/node/da_clients/src/lib.rs b/core/node/da_clients/src/lib.rs index 07bac46e923..e4cf987d257 100644 --- a/core/node/da_clients/src/lib.rs +++ b/core/node/da_clients/src/lib.rs @@ -1,5 +1,5 @@ pub mod avail; +pub mod blob_info; pub mod eigen_da; pub mod no_da; pub mod object_store; -pub mod blob_info; diff --git a/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs b/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs index f29f4e1d312..1365e06f66e 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/eigen_da.rs @@ -5,7 +5,9 @@ use zksync_node_framework_derive::FromContext; use zksync_types::Address; use crate::{ - implementations::resources::{da_client::DAClientResource,
eth_interface::EthInterfaceResource}, + implementations::resources::{ + da_client::DAClientResource, eth_interface::EthInterfaceResource, + }, wiring_layer::{WiringError, WiringLayer}, IntoContext, }; @@ -18,7 +20,10 @@ pub struct EigenDAWiringLayer { impl EigenDAWiringLayer { pub fn new(config: EigenDAConfig, verifier_address: Address) -> Self { - Self { config, verifier_address } + Self { + config, + verifier_address, + } } } @@ -46,7 +51,7 @@ impl WiringLayer for EigenDAWiringLayer { async fn wire(self, input: Self::Input) -> Result { let EthInterfaceResource(query_client) = input.eth_client; let client: Box = - Box::new(EigenDAClient::new(self.config,query_client, self.verifier_address).await?); + Box::new(EigenDAClient::new(self.config, query_client, self.verifier_address).await?); Ok(Self::Output { client: DAClientResource(client), diff --git a/eigenda-integration.md b/eigenda-integration.md index 4750f6783d2..f5606512f96 100644 --- a/eigenda-integration.md +++ b/eigenda-integration.md @@ -18,7 +18,7 @@ da_client: eigenda-proxy: image: ghcr.io/layr-labs/eigenda-proxy ports: - - "4242:4242" + - '4242:4242' command: ./eigenda-proxy --addr 0.0.0.0 --port 4242 --memstore.enabled --eigenda-max-blob-length "2MiB" ``` diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs index 8f083fe6195..f338158b60a 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs @@ -54,7 +54,8 @@ impl Default for InitialDeploymentConfig { // toml crate u64 support is backed by i64 implementation // https://github.com/toml-rs/toml/issues/705 bridgehub_create_new_chain_salt: rand::thread_rng().gen_range(0..=i64::MAX) as u64, - eigen_service_manager: Address::from_str("0x0000000000000000000000000000000000000000").unwrap(), + eigen_service_manager: Address::from_str("0x0000000000000000000000000000000000000000") + .unwrap(), } } } From 70aab5c215510f9fd15d5f433e77a8851f3647db Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Thu, 10 Oct 2024 10:30:57 -0300 Subject: [PATCH 32/36] Update contracts --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 9f98e7bb897..424aca7d33a 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 9f98e7bb8973c59f280c4bf177fdafff8cbfb477 +Subproject commit 424aca7d33a102fe99267def947956730cc7b052 From 2b7fb1a1eea5580598784ecfb3c18ad0e55749fd Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:00:17 -0300 Subject: [PATCH 33/36] Complete merge --- Cargo.lock | 284 +++++++++++++++++---- core/bin/zksync_server/src/node_builder.rs | 2 +- zk_toolbox/Cargo.lock | 15 +- zk_toolbox/crates/zk_supervisor/Cargo.toml | 2 +- 4 files changed, 240 insertions(+), 63 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ac134d1286c..92b38ca7d26 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -252,12 +252,43 @@ dependencies = [ "term", ] +[[package]] +name = "assert-json-diff" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "assert_matches" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote 1.0.36", + "syn 1.0.109", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + [[package]] name = "async-channel" version = "2.3.1" @@ -294,6 +325,21 @@ dependencies = [ "futures-lite", ] +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.3.1", + "async-executor", + "async-io", + "async-lock", + "blocking", + "futures-lite", + "once_cell", +] + [[package]] name = "async-io" version = "2.3.4" @@ -335,13 +381,22 @@ dependencies = [ "futures-lite", ] +[[package]] +name = "async-object-pool" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "333c456b97c3f2d50604e8b2624253b7f787208cb72eb75e64b0ad11b221652c" +dependencies = [ + "async-std", +] + [[package]] name = "async-process" version = "2.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8a07789659a4d385b79b18b9127fc27e1a59e1e89117c78c5ea3b806f016374" dependencies = [ - "async-channel", + "async-channel 2.3.1", "async-io", "async-lock", "async-signal", @@ -384,6 +439,34 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "async-std" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" +dependencies = [ + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io", + "async-lock", + "async-process", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite", + "gloo-timers 0.3.0", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -635,6 +718,17 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "basic-cookies" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67bd8fd42c16bdb08688243dc5f0cc117a3ca9efeeaba3a345a18a6159ad96f7" +dependencies = [ + "lalrpop", + "lalrpop-util", + "regex", +] + [[package]] name = "basic-toml" version = "0.1.4" @@ -918,7 +1012,7 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel", + "async-channel 2.3.1", "async-task", "futures-io", "futures-lite", @@ -2089,18 +2183,18 @@ dependencies = [ [[package]] name = "derive_more" -version = "1.0.0-beta.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7abbfc297053be59290e3152f8cbcd52c8642e0728b69ee187d991d4c1af08d" +checksum = 
"4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "1.0.0-beta.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bba3e9872d7c58ce7ef0fcf1844fcc3e23ef2a58377b50df35dd98e42a5726e" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -2760,6 +2854,12 @@ dependencies = [ "yansi", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + [[package]] name = "event-listener" version = "4.0.3" @@ -2827,18 +2927,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ff_ce" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" -dependencies = [ - "byteorder", - "hex", - "rand 0.4.6", - "serde", -] - [[package]] name = "fiat-crypto" version = "0.2.3" @@ -3150,7 +3238,7 @@ version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" dependencies = [ - "gloo-timers", + "gloo-timers 0.2.6", "send_wrapper 0.4.0", ] @@ -3295,6 +3383,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "gloo-utils" version = "0.2.0" @@ -3693,6 +3793,34 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "httpmock" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08ec9586ee0910472dec1a1f0f8acf52f0fdde93aea74d70d4a3107b4be0fd5b" +dependencies = [ + "assert-json-diff", + "async-object-pool", + "async-std", + "async-trait", + "base64 0.21.5", + "basic-cookies", + "crossbeam-utils", + "form_urlencoded", + "futures-util", + "hyper 0.14.29", + "lazy_static", + "levenshtein", + "log", + "regex", + "serde", + "serde_json", + "serde_regex", + "similar", + "tokio", + "url", +] + [[package]] name = "hyper" version = "0.14.29" @@ -4410,6 +4538,15 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + [[package]] name = "lalrpop" version = "0.20.2" @@ -4422,6 +4559,7 @@ dependencies = [ "itertools 0.11.0", "lalrpop-util", "petgraph", + "pico-args", "regex", "regex-syntax 0.8.2", "string_cache", @@ -4461,6 +4599,12 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" +[[package]] +name = "levenshtein" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760" + [[package]] name = "libc" version = "0.2.155" @@ -4638,6 +4782,9 @@ name = "log" version = 
"0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +dependencies = [ + "value-bag", +] [[package]] name = "logos" @@ -5636,6 +5783,12 @@ dependencies = [ "siphasher 0.3.11", ] +[[package]] +name = "pico-args" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" + [[package]] name = "pin-project" version = "1.1.3" @@ -5986,7 +6139,7 @@ checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" dependencies = [ "bytes", "heck 0.4.1", - "itertools 0.10.5", + "itertools 0.11.0", "log", "multimap", "once_cell", @@ -6007,7 +6160,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" dependencies = [ "anyhow", - "itertools 0.10.5", + "itertools 0.11.0", "proc-macro2 1.0.86", "quote 1.0.36", "syn 2.0.72", @@ -7343,6 +7496,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_regex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8136f1a4ea815d7eac4101cfd0b16dc0cb5e1fe1b8609dfd728058656b7badf" +dependencies = [ + "regex", + "serde", +] + [[package]] name = "serde_spanned" version = "0.6.8" @@ -7621,7 +7784,7 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" dependencies = [ - "async-channel", + "async-channel 2.3.1", "async-executor", "async-fs", "async-io", @@ -7693,7 +7856,7 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5496f2d116b7019a526b1039ec2247dd172b8670633b1a64a614c9ea12c9d8c7" dependencies = [ - "async-channel", + "async-channel 2.3.1", "async-lock", "base64 0.21.5", "blake2-rfc", @@ -9318,6 +9481,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "value-bag" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" + [[package]] name = "vcpkg" version = "0.2.15" @@ -10307,6 +10476,7 @@ dependencies = [ "ethabi", "hex", "num_enum 0.7.2", + "secrecy", "serde", "serde_json", "serde_with", @@ -10412,9 +10582,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c8cf6c689ab5922b52d81b775cd2d9cffbfc8fb8da65985e11b06546dfb3bf" +checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" dependencies = [ "anyhow", "once_cell", @@ -10449,9 +10619,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c409ae915056cf9cadd9304dbc8718fa38edfcb346d06e5b3582dcd2489ef9" +checksum = "a1e7199c07aa14d9c3319839b98ad0496aac6e72327e70ded77ddb66329766db" dependencies = [ "anyhow", "async-trait", @@ -10471,20 +10641,18 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7baced4e811015038322dad10239f2d631d9e339e8d6b7b6e6b146bee30f41" 
+checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" dependencies = [ "anyhow", "blst", "ed25519-dalek", "elliptic-curve 0.13.8", - "ff_ce", "hex", "k256 0.13.3", "num-bigint 0.4.6", "num-traits", - "rand 0.4.6", "rand 0.8.5", "sha3 0.10.8", "thiserror", @@ -10494,9 +10662,9 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b018b8a76fc2cbecb51683ce97532501c45d44cbc8bb856d1956e5998259335" +checksum = "db07f7329b29737d8fd6860b350c809ae1b56ad53e26a7d0eddf3664ccb9dacb" dependencies = [ "anyhow", "async-trait", @@ -10516,9 +10684,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5bb2988e41af3083cebfc11f47f2615adae8d829bf9237aa084dede9629a687" +checksum = "a89a2d60db1ccd41438d29724a8d0d57fcf9506eb4443ea4b9205fd78c9c8e59" dependencies = [ "anyhow", "async-trait", @@ -10552,9 +10720,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aab4ddf62f6001903c5fe9f65afb1bdc42464928c9d1c6ce52e4d7e9944f5dc" +checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" dependencies = [ "anyhow", "bit-vec", @@ -10574,9 +10742,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b9dbcb923fa201af03f49f70c11a923b416915d2ddf8b2de3a2e861f22898a4" +checksum = "ff43cfd03ea205c763e74362dc6ec5a4d74b6b1baef0fb134dde92a8880397f7" dependencies = [ "anyhow", "async-trait", @@ -10594,9 +10762,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29e69dffc0fbc7c096548c997f5ca157a490b34b3d49fd524fa3d51840f7fb22" +checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" dependencies = [ "anyhow", "rand 0.8.5", @@ -10931,8 +11099,8 @@ dependencies = [ "async-trait", "rlp", "thiserror", - "tokio", - "zksync_types", + "zksync_basic_types", + "zksync_crypto_primitives", ] [[package]] @@ -10942,6 +11110,7 @@ dependencies = [ "anyhow", "async-recursion", "async-trait", + "test-log", "thiserror", "tokio", "tracing", @@ -10957,7 +11126,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.24.0" +version = "24.28.0" dependencies = [ "anyhow", "assert_matches", @@ -11020,6 +11189,7 @@ dependencies = [ "bigdecimal", "chrono", "fraction", + "httpmock", "rand 0.8.5", "reqwest 0.12.5", "serde", @@ -11251,7 +11421,6 @@ dependencies = [ "once_cell", "pretty_assertions", "thiserror", - "tokio", "tracing", "vise", "zk_evm 0.131.0-rc.2", @@ -11298,6 +11467,7 @@ dependencies = [ "tower-http", "tracing", "vise", + "zk_evm 0.150.5", "zksync_config", "zksync_consensus_roles", "zksync_contracts", @@ -11327,7 +11497,6 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "hex", "rand 0.8.5", "secrecy", "semver", @@ -11361,6 +11530,7 @@ dependencies = [ "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm_executor", "zksync_vm_interface", "zksync_web3_decl", ] @@ -11630,9 +11800,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.1" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "df5467dfe2f845ca1fd6ceec623bbd32187589793d3c4023dcd2f5172369d198" +checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" dependencies = [ "anyhow", "bit-vec", @@ -11651,9 +11821,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d35280660b11be2a4ebdf531184eb729acebfdc3368d27176ec104f8bf9c5f" +checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" dependencies = [ "anyhow", "heck 0.5.0", @@ -11854,6 +12024,7 @@ dependencies = [ "hex", "itertools 0.10.5", "once_cell", + "rand 0.8.5", "tempfile", "test-casing", "thiserror", @@ -11989,7 +12160,7 @@ dependencies = [ "bincode", "blake2 0.10.6", "chrono", - "derive_more 1.0.0-beta.6", + "derive_more 1.0.0", "hex", "itertools 0.10.5", "num", @@ -12000,6 +12171,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", + "serde_with", "strum", "thiserror", "tokio", @@ -12066,8 +12238,8 @@ dependencies = [ [[package]] name = "zksync_vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "enum_dispatch", "primitive-types", @@ -12078,8 +12250,8 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "primitive-types", ] @@ -12109,6 +12281,7 @@ dependencies = [ "assert_matches", "async-trait", "hex", + "pretty_assertions", "serde", "serde_json", "thiserror", @@ -12131,6 +12304,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "serde", + "serde_json", "tempfile", "test-casing", "tokio", diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index c38c6b2ac1d..3cb147ebf1b 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -523,7 +523,7 @@ impl MainNodeBuilder { self.node .add_layer(ObjectStorageClientWiringLayer::new(config)); } - (DAClient::EigenDA(config), _) => { + (DAClientConfig::EigenDA(config), _) => { self.node.add_layer(EigenDAWiringLayer::new( config, self.contracts_config.eigenda_verifier_addr.unwrap(), diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 297ef404698..5dbe3089d0a 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -5165,8 +5165,9 @@ dependencies = [ [[package]] name = "sqruff-lib" -version = "0.18.2" -source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "676775189e83a98fc603d59fc6d760a66895d511502a538081dac993fde1a09a" dependencies = [ "ahash", "anstyle", @@ -5199,8 +5200,9 @@ dependencies = [ [[package]] name = "sqruff-lib-core" -version = "0.18.2" -source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +version = "0.19.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "48ec5ba65376ae9ba3e3dda153668dcb6452a7212ee7b4c9d48e053eb4f0f3fa" dependencies = [ "ahash", "enum_dispatch", @@ -5219,8 +5221,9 @@ dependencies = [ [[package]] name = "sqruff-lib-dialects" -version = "0.18.2" -source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00fa1cd168dad593f8f6996d805acc1fd52c6d0ad0f6f5847a9cc22a6198cfc2" dependencies = [ "ahash", "itertools 0.13.0", diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml index 158abe4e2ec..d343e7af43e 100644 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ b/zk_toolbox/crates/zk_supervisor/Cargo.toml @@ -29,4 +29,4 @@ futures.workspace = true types.workspace = true serde_yaml.workspace = true zksync_basic_types.workspace = true -sqruff-lib = { git = "https://github.com/quarylabs/sqruff", version = "0.18.2" } +sqruff-lib = "0.19.0" From b35a7d82c4b7c18155944be43569b9c6881e2444 Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Thu, 10 Oct 2024 16:33:58 -0300 Subject: [PATCH 34/36] Send blob info to L1 --- Cargo.lock | 1 + contracts | 2 +- core/lib/l1_contract_interface/Cargo.toml | 1 + .../src/i_executor/structures/blob_info.rs | 340 ++++++++++++++++++ .../structures/commit_batch_info.rs | 25 +- .../src/i_executor/structures/mod.rs | 1 + core/node/da_clients/src/eigen_da.rs | 2 +- 7 files changed, 362 insertions(+), 10 deletions(-) create mode 100644 core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs diff --git a/Cargo.lock b/Cargo.lock index 92b38ca7d26..02bb3189bad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11298,6 +11298,7 @@ version = "0.1.0" dependencies = [ "hex", "once_cell", + "rlp", "serde", "serde_json", "serde_with", diff --git a/contracts b/contracts index 424aca7d33a..3adf2e24b93 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 424aca7d33a102fe99267def947956730cc7b052 +Subproject commit 3adf2e24b9301dd954fc30ca441cdfc875c8d674 diff --git a/core/lib/l1_contract_interface/Cargo.toml b/core/lib/l1_contract_interface/Cargo.toml index 8b68df854e7..4eab89faa5c 100644 --- a/core/lib/l1_contract_interface/Cargo.toml +++ b/core/lib/l1_contract_interface/Cargo.toml @@ -23,6 +23,7 @@ sha2.workspace = true sha3.workspace = true hex.workspace = true once_cell.workspace = true +rlp.workspace = true [dev-dependencies] serde.workspace = true diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs new file mode 100644 index 00000000000..e30e96da904 --- /dev/null +++ b/core/lib/l1_contract_interface/src/i_executor/structures/blob_info.rs @@ -0,0 +1,340 @@ +use rlp::{Decodable, DecoderError, Rlp}; +use zksync_types::{ + blob, + ethabi::Token, + web3::contract::{Tokenizable, Tokenize}, + U256, +}; + +#[derive(Debug)] +pub struct G1Commitment { + pub x: Vec, + pub y: Vec, +} + +impl G1Commitment { + pub fn into_bytes(&self) -> Vec { + let mut bytes = vec![]; + bytes.extend(&self.x.len().to_be_bytes()); + bytes.extend(&self.x); + bytes.extend(&self.y.len().to_be_bytes()); + bytes.extend(&self.y); + + bytes + } +} + +impl Decodable for G1Commitment { + fn decode(rlp: &Rlp) -> Result { + let x: Vec = rlp.val_at(0)?; // Decode first element as Vec + let y: Vec = rlp.val_at(1)?; // Decode second element as Vec 
+
+        Ok(G1Commitment { x, y })
+    }
+}
+
+impl Tokenize for G1Commitment {
+    fn into_tokens(self) -> Vec<Token> {
+        let x = Token::Uint(U256::from_big_endian(&self.x));
+        let y = Token::Uint(U256::from_big_endian(&self.y));
+
+        vec![x, y]
+    }
+}
+
+#[derive(Debug)]
+pub struct BlobQuorumParam {
+    pub quorum_number: u32,
+    pub adversary_threshold_percentage: u32,
+    pub confirmation_threshold_percentage: u32,
+    pub chunk_length: u32,
+}
+
+impl BlobQuorumParam {
+    pub fn into_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        bytes.extend(&self.quorum_number.to_be_bytes());
+        bytes.extend(&self.adversary_threshold_percentage.to_be_bytes());
+        bytes.extend(&self.confirmation_threshold_percentage.to_be_bytes());
+        bytes.extend(&self.chunk_length.to_be_bytes());
+
+        bytes
+    }
+}
+
+impl Decodable for BlobQuorumParam {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BlobQuorumParam {
+            quorum_number: rlp.val_at(0)?,
+            adversary_threshold_percentage: rlp.val_at(1)?,
+            confirmation_threshold_percentage: rlp.val_at(2)?,
+            chunk_length: rlp.val_at(3)?,
+        })
+    }
+}
+
+impl Tokenize for BlobQuorumParam {
+    fn into_tokens(self) -> Vec<Token> {
+        let quorum_number = Token::Uint(U256::from(self.quorum_number));
+        let adversary_threshold_percentage =
+            Token::Uint(U256::from(self.adversary_threshold_percentage));
+        let confirmation_threshold_percentage =
+            Token::Uint(U256::from(self.confirmation_threshold_percentage));
+        let chunk_length = Token::Uint(U256::from(self.chunk_length));
+
+        vec![
+            quorum_number,
+            adversary_threshold_percentage,
+            confirmation_threshold_percentage,
+            chunk_length,
+        ]
+    }
+}
+
+#[derive(Debug)]
+pub struct BlobHeader {
+    pub commitment: G1Commitment,
+    pub data_length: u32,
+    pub blob_quorum_params: Vec<BlobQuorumParam>,
+}
+
+impl BlobHeader {
+    pub fn into_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        bytes.extend(self.commitment.into_bytes());
+        bytes.extend(&self.data_length.to_be_bytes());
+        bytes.extend(&self.blob_quorum_params.len().to_be_bytes());
+
+        for quorum in &self.blob_quorum_params {
+            bytes.extend(quorum.into_bytes());
+        }
+
+        bytes
+    }
+}
+
+impl Decodable for BlobHeader {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let commitment: G1Commitment = rlp.val_at(0)?;
+        let data_length: u32 = rlp.val_at(1)?;
+        let blob_quorum_params: Vec<BlobQuorumParam> = rlp.list_at(2)?;
+
+        Ok(BlobHeader {
+            commitment,
+            data_length,
+            blob_quorum_params,
+        })
+    }
+}
+
+impl Tokenize for BlobHeader {
+    fn into_tokens(self) -> Vec<Token> {
+        let commitment = self.commitment.into_tokens();
+        let data_length = Token::Uint(U256::from(self.data_length));
+        let blob_quorum_params = self
+            .blob_quorum_params
+            .into_iter()
+            .map(|quorum| Token::Tuple(quorum.into_tokens()))
+            .collect();
+
+        vec![
+            Token::Tuple(commitment),
+            data_length,
+            Token::Array(blob_quorum_params),
+        ]
+    }
+}
+
+#[derive(Debug)]
+pub struct BatchHeader {
+    pub batch_root: Vec<u8>,
+    pub quorum_numbers: Vec<u8>,
+    pub quorum_signed_percentages: Vec<u8>,
+    pub reference_block_number: u32,
+}
+
+impl BatchHeader {
+    pub fn into_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        bytes.extend(&self.batch_root.len().to_be_bytes());
+        bytes.extend(&self.batch_root);
+        bytes.extend(&self.quorum_numbers.len().to_be_bytes());
+        bytes.extend(&self.quorum_numbers);
+        bytes.extend(&self.quorum_signed_percentages.len().to_be_bytes());
+        bytes.extend(&self.quorum_signed_percentages);
+        bytes.extend(&self.reference_block_number.to_be_bytes());
+
+        bytes
+    }
+}
+
+impl Decodable for BatchHeader {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BatchHeader {
+            batch_root: rlp.val_at(0)?,
+            quorum_numbers: rlp.val_at(1)?,
+            quorum_signed_percentages: rlp.val_at(2)?,
+            reference_block_number: rlp.val_at(3)?,
+        })
+    }
+}
+
+impl Tokenize for BatchHeader {
+    fn into_tokens(self) -> Vec<Token> {
+        let batch_root = Token::FixedBytes(self.batch_root);
+        let quorum_numbers = self.quorum_numbers.into_token();
+        let quorum_signed_percentages = self.quorum_signed_percentages.into_token();
+        let reference_block_number = Token::Uint(U256::from(self.reference_block_number));
+
+        vec![
+            batch_root,
+            quorum_numbers,
+            quorum_signed_percentages,
+            reference_block_number,
+        ]
+    }
+}
+
+#[derive(Debug)]
+pub struct BatchMetadata {
+    pub batch_header: BatchHeader,
+    pub signatory_record_hash: Vec<u8>,
+    pub fee: Vec<u8>,
+    pub confirmation_block_number: u32,
+    pub batch_header_hash: Vec<u8>,
+}
+
+impl BatchMetadata {
+    pub fn into_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        bytes.extend(self.batch_header.into_bytes());
+        bytes.extend(&self.signatory_record_hash);
+        bytes.extend(&self.confirmation_block_number.to_be_bytes());
+
+        bytes
+    }
+}
+
+impl Decodable for BatchMetadata {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        let batch_header: BatchHeader = rlp.val_at(0)?;
+
+        Ok(BatchMetadata {
+            batch_header,
+            signatory_record_hash: rlp.val_at(1)?,
+            fee: rlp.val_at(2)?,
+            confirmation_block_number: rlp.val_at(3)?,
+            batch_header_hash: rlp.val_at(4)?,
+        })
+    }
+}
+
+impl Tokenize for BatchMetadata {
+    fn into_tokens(self) -> Vec<Token> {
+        let batch_header = self.batch_header.into_tokens();
+        let signatory_record_hash = Token::FixedBytes(self.signatory_record_hash);
+        let confirmation_block_number = Token::Uint(U256::from(self.confirmation_block_number));
+
+        vec![
+            Token::Tuple(batch_header),
+            signatory_record_hash,
+            confirmation_block_number,
+        ]
+    }
+}
+
+#[derive(Debug)]
+pub struct BlobVerificationProof {
+    pub batch_id: u32,
+    pub blob_index: u32,
+    pub batch_medatada: BatchMetadata,
+    pub inclusion_proof: Vec<u8>,
+    pub quorum_indexes: Vec<u8>,
+}
+
+impl BlobVerificationProof {
+    pub fn into_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        bytes.extend(&self.batch_id.to_be_bytes());
+        bytes.extend(&self.blob_index.to_be_bytes());
+        bytes.extend(self.batch_medatada.into_bytes());
+        bytes.extend(&self.inclusion_proof.len().to_be_bytes());
+        bytes.extend(&self.inclusion_proof);
+        bytes.extend(&self.quorum_indexes.len().to_be_bytes());
+        bytes.extend(&self.quorum_indexes);
+
+        bytes
+    }
+}
+
+impl Decodable for BlobVerificationProof {
+    fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
+        Ok(BlobVerificationProof {
+            batch_id: rlp.val_at(0)?,
+            blob_index: rlp.val_at(1)?,
+            batch_medatada: rlp.val_at(2)?,
+            inclusion_proof: rlp.val_at(3)?,
+            quorum_indexes: rlp.val_at(4)?,
+        })
+    }
+}
+
+impl Tokenize for BlobVerificationProof {
+    fn into_tokens(self) -> Vec<Token> {
+        let batch_id = Token::Uint(U256::from(self.batch_id));
+        let blob_index = Token::Uint(U256::from(self.blob_index));
+        let batch_medatada = self.batch_medatada.into_tokens();
+        let inclusion_proof = self.inclusion_proof.into_token();
+        let quorum_indexes = self.quorum_indexes.into_token();
+
+        vec![
+            batch_id,
+            blob_index,
+            Token::Tuple(batch_medatada),
+            inclusion_proof,
+            quorum_indexes,
+        ]
+    }
+}
+
+#[derive(Debug)]
+pub struct BlobInfo {
+    pub blob_header: BlobHeader,
+    pub blob_verification_proof: BlobVerificationProof,
+}
+
+impl BlobInfo {
+    pub fn into_bytes(&self) -> Vec<u8> {
+        let mut bytes = vec![];
+        let blob_header_bytes = self.blob_header.into_bytes();
+        bytes.extend(blob_header_bytes.len().to_be_bytes());
+        bytes.extend(blob_header_bytes);
+        let blob_verification_proof_bytes =
self.blob_verification_proof.into_bytes(); + bytes.extend(blob_verification_proof_bytes); + bytes + } +} + +impl Decodable for BlobInfo { + fn decode(rlp: &Rlp) -> Result { + let blob_header: BlobHeader = rlp.val_at(0)?; + let blob_verification_proof: BlobVerificationProof = rlp.val_at(1)?; + + Ok(BlobInfo { + blob_header, + blob_verification_proof, + }) + } +} + +impl Tokenize for BlobInfo { + fn into_tokens(self) -> Vec { + let blob_header = self.blob_header.into_tokens(); + let blob_verification_proof = self.blob_verification_proof.into_tokens(); + + vec![Token::Tuple(vec![ + Token::Tuple(blob_header), + Token::Tuple(blob_verification_proof), + ])] + } +} diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index cf2b3a0d089..6136f20663f 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -1,3 +1,4 @@ +use rlp::decode; use zksync_types::{ commitment::{ pre_boojum_serialize_commitments, serialize_commitments, L1BatchCommitmentMode, @@ -9,6 +10,7 @@ use zksync_types::{ ProtocolVersionId, U256, }; +use super::blob_info::BlobInfo; use crate::{ i_executor::commit::kzg::{KzgInfo, ZK_SYNC_BYTES_PER_BLOB}, Tokenizable, @@ -217,14 +219,21 @@ impl Tokenizable for CommitBatchInfo<'_> { } (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { let mut operator_da_input = vec![PUBDATA_SOURCE_CUSTOM]; - operator_da_input.extend( - &self - .l1_batch_with_metadata - .metadata - .da_blob_id - .clone() - .unwrap_or_default(), - ); + let commitment = &self + .l1_batch_with_metadata + .metadata + .da_blob_id + .clone() + .unwrap_or_default(); + + let blob_info: BlobInfo = match decode(commitment) { + Ok(info) => info, + Err(_) => return Token::Tuple(vec![]), + }; + + /*operator_da_input.extend( + &blob_info.into_bytes(), + );*/ operator_da_input } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs index d1ed57e41f2..ce0f203841c 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs @@ -1,5 +1,6 @@ //! Structures exposed by the `IExecutor.sol`. +mod blob_info; mod commit_batch_info; mod stored_batch_info; diff --git a/core/node/da_clients/src/eigen_da.rs b/core/node/da_clients/src/eigen_da.rs index fcdcde9192d..ee58159cd2e 100644 --- a/core/node/da_clients/src/eigen_da.rs +++ b/core/node/da_clients/src/eigen_da.rs @@ -84,7 +84,7 @@ impl DataAvailabilityClient for EigenDAClient { .map_err(to_non_retriable_da_error)? 
.to_vec(); - self.verify_blob(hex::encode(request_id.clone())).await?; + //self.verify_blob(hex::encode(request_id.clone())).await?; Ok(types::DispatchResponse { blob_id: hex::encode(request_id), From 672c843ace50294579af27a9b727fd6490e1413b Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Thu, 10 Oct 2024 17:06:58 -0300 Subject: [PATCH 35/36] Add blob info --- contracts | 2 +- .../src/i_executor/structures/commit_batch_info.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/contracts b/contracts index 3adf2e24b93..3e1b3c26cad 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 3adf2e24b9301dd954fc30ca441cdfc875c8d674 +Subproject commit 3e1b3c26cad6e667a2058f347e6d91b1c704f866 diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index 6136f20663f..3a5d25bab4f 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -226,14 +226,14 @@ impl Tokenizable for CommitBatchInfo<'_> { .clone() .unwrap_or_default(); - let blob_info: BlobInfo = match decode(commitment) { + let data = &hex::decode(commitment).unwrap()[3..]; + + let blob_info: BlobInfo = match decode(data) { Ok(info) => info, Err(_) => return Token::Tuple(vec![]), }; - /*operator_da_input.extend( - &blob_info.into_bytes(), - );*/ + operator_da_input.extend(&blob_info.into_bytes()); operator_da_input } From f8eac20f8e07b334746a5f2d5559cc75fdc31f7a Mon Sep 17 00:00:00 2001 From: Gianbelinche <39842759+gianbelinche@users.noreply.github.com> Date: Thu, 10 Oct 2024 17:25:39 -0300 Subject: [PATCH 36/36] Update contracts --- contracts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts b/contracts index 3e1b3c26cad..b3e1673b962 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 3e1b3c26cad6e667a2058f347e6d91b1c704f866 +Subproject commit b3e1673b962453dca5ad481dbf16474bd7f78e45
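
Note on the pubdata flow wired up by patches 34-36: the EigenDA client stores the hex-encoded request id as `da_blob_id`, and the Validium/Custom arm of `CommitBatchInfo::into_token` skips a 3-byte prefix, RLP-decodes the remainder into `BlobInfo`, and appends its byte encoding after the `PUBDATA_SOURCE_CUSTOM` marker. The sketch below is not part of any patch; it is a minimal illustration assuming the `rlp` and `hex` crates and the `BlobInfo` type from `blob_info.rs`, and the helper name and the constant value shown are illustrative only.

    use rlp::decode;

    use super::blob_info::BlobInfo; // as declared in structures/mod.rs by patch 34

    // Illustrative placeholder; the actual value is the constant defined in
    // commit_batch_info.rs.
    const PUBDATA_SOURCE_CUSTOM: u8 = 2;

    // Hypothetical helper mirroring the (Validium, Custom) arm of
    // `CommitBatchInfo::into_token` in these patches.
    fn operator_da_input_from_commitment(da_blob_id: &str) -> Result<Vec<u8>, String> {
        // `da_blob_id` is the hex string stored in the batch metadata by the
        // EigenDA client; the first 3 bytes are skipped, as in
        // `&hex::decode(commitment).unwrap()[3..]`.
        let raw = hex::decode(da_blob_id).map_err(|e| e.to_string())?;
        let data = raw
            .get(3..)
            .ok_or_else(|| "commitment shorter than 3 bytes".to_string())?;

        // RLP-decode into the BlobInfo structure defined in blob_info.rs.
        let blob_info: BlobInfo = decode(data).map_err(|e| e.to_string())?;

        // Tag the input as custom-DA pubdata, then append the re-encoded blob info.
        let mut operator_da_input = vec![PUBDATA_SOURCE_CUSTOM];
        operator_da_input.extend(blob_info.into_bytes());
        Ok(operator_da_input)
    }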