From f13648cf10c0a225dc7b4f64cb3b8195c1a52814 Mon Sep 17 00:00:00 2001 From: Stanislav Bezkorovainyi Date: Mon, 13 Nov 2023 12:22:36 +0100 Subject: [PATCH 001/115] fix(boojnet): various boojnet fixes (#462) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - sanity checks for some system log values - bug fixed that real proofs can be sent before batches are committed - commitment is set only by full tree - fix eth_watcher index out of range for EOA-controlled upgrades ## Why ❔ bug fixes ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: “perekopskiy” Co-authored-by: Lyova Potyomkin Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com> --- core/lib/constants/src/lib.rs | 2 + core/lib/constants/src/system_logs.rs | 5 + core/lib/dal/sqlx-data.json | 112 +++++++++++------- core/lib/dal/src/blocks_dal.rs | 78 ++++++++---- .../vm_latest/tracers/pubdata_tracer.rs | 1 + core/lib/types/src/commitment.rs | 34 +++++- core/lib/types/src/protocol_version.rs | 20 ++-- .../zksync_core/src/eth_sender/aggregator.rs | 8 ++ 8 files changed, 185 insertions(+), 75 deletions(-) create mode 100644 core/lib/constants/src/system_logs.rs diff --git a/core/lib/constants/src/lib.rs b/core/lib/constants/src/lib.rs index 8baf3548d4b..6aab79ad71f 100644 --- a/core/lib/constants/src/lib.rs +++ b/core/lib/constants/src/lib.rs @@ -4,6 +4,7 @@ pub mod crypto; pub mod ethereum; pub mod fees; pub mod system_context; +pub mod system_logs; pub mod trusted_slots; pub use blocks::*; @@ -12,4 +13,5 @@ pub use crypto::*; pub use ethereum::*; pub use fees::*; pub use system_context::*; +pub use system_logs::*; pub use trusted_slots::*; diff --git a/core/lib/constants/src/system_logs.rs 
b/core/lib/constants/src/system_logs.rs new file mode 100644 index 00000000000..081670f0cb5 --- /dev/null +++ b/core/lib/constants/src/system_logs.rs @@ -0,0 +1,5 @@ +/// The key of the system log with value of the L2->L1 logs tree root hash +pub const L2_TO_L1_LOGS_TREE_ROOT_KEY: u32 = 0; + +/// The key of the system log with value of the state diff hash +pub const STATE_DIFF_HASH_KEY: u32 = 2; diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index 79758c4274f..b69e48b178e 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -298,6 +298,26 @@ }, "query": "\n WITH events_select AS (\n SELECT\n address, topic1, topic2, topic3, topic4, value,\n miniblock_number, tx_hash, tx_index_in_block,\n event_index_in_block, event_index_in_tx\n FROM events\n WHERE miniblock_number > $1\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n )\n SELECT miniblocks.hash as \"block_hash?\",\n address as \"address!\", topic1 as \"topic1!\", topic2 as \"topic2!\", topic3 as \"topic3!\", topic4 as \"topic4!\", value as \"value!\",\n miniblock_number as \"miniblock_number!\", miniblocks.l1_batch_number as \"l1_batch_number?\", tx_hash as \"tx_hash!\",\n tx_index_in_block as \"tx_index_in_block!\", event_index_in_block as \"event_index_in_block!\", event_index_in_tx as \"event_index_in_tx!\"\n FROM events_select\n INNER JOIN miniblocks ON events_select.miniblock_number = miniblocks.number\n ORDER BY miniblock_number ASC, event_index_in_block ASC\n " }, + "06d90ea65c1e06bd871f090a0fb0e8772ea5e923f1da5310bedd8dc90e0827f4": { + "describe": { + "columns": [ + { + "name": "eth_commit_tx_id", + "ordinal": 0, + "type_info": "Int4" + } + ], + "nullable": [ + true + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT eth_commit_tx_id FROM l1_batches WHERE number = $1" + }, "07310d96fc7e258154ad510684e33d196907ebd599e926d305e5ef9f26afa2fa": { "describe": { "columns": [ @@ -320,6 +340,30 @@ }, "query": "INSERT INTO 
eth_txs_history (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at, confirmed_at) VALUES ($1, 0, 0, $2, '\\x00', now(), now(), $3) RETURNING id" }, + "09768b376996b96add16a02d1a59231cb9b525cd5bd19d22a76149962d4c91c2": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bytea", + "Bool", + "Bytea", + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Int8" + ] + } + }, + "query": "UPDATE l1_batches SET hash = $1, merkle_root_hash = $2, compressed_repeated_writes = $3, compressed_initial_writes = $4, l2_l1_compressed_messages = $5, l2_l1_merkle_root = $6, zkporter_is_available = $7, parent_hash = $8, rollup_last_leaf_index = $9, pass_through_data_hash = $10, meta_parameters_hash = $11, compressed_state_diffs = $12, updated_at = now() WHERE number = $13 AND hash IS NULL" + }, "0c212f47b9a0e719f947a419be8284837b1b01aa23994ba6401b420790b802b8": { "describe": { "columns": [], @@ -1942,6 +1986,20 @@ }, "query": "SELECT recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash, recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash\n FROM protocol_versions\n WHERE id = $1\n " }, + "21c29846f4253081057b86cc1b7ce4ef3ae618c5561c876502dc7f4e773ee91e": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Bytea", + "Bytea" + ] + } + }, + "query": "INSERT INTO commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment) VALUES ($1, $2, $3) ON CONFLICT (l1_batch_number) DO NOTHING" + }, "22b57675a726d9cfeb82a60ba50c36cab1548d197ea56a7658d3f005df07c60b": { "describe": { "columns": [ @@ -5653,20 +5711,6 @@ }, "query": "\n UPDATE leaf_aggregation_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN leaf_aggregation_witness_jobs lawj ON prover_jobs.l1_batch_number = lawj.l1_batch_number\n WHERE 
lawj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 0\n GROUP BY prover_jobs.l1_batch_number, lawj.number_of_basic_circuits\n HAVING COUNT(*) = lawj.number_of_basic_circuits)\n RETURNING l1_batch_number;\n " }, - "68a9ba78f60674bc047e4af6eb2a379725da047f2e6c06bce96a33852565cc95": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Bytea", - "Bytea" - ] - } - }, - "query": "INSERT INTO commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment) VALUES ($1, $2, $3) ON CONFLICT (l1_batch_number) DO UPDATE SET events_queue_commitment = $2, bootloader_initial_content_commitment = $3" - }, "6939e766e122458b2ac618d19b2759c4a7298ef72b81e8c3957e0a5cf35c9552": { "describe": { "columns": [ @@ -9736,6 +9780,20 @@ }, "query": "SELECT bytecode, bytecode_hash FROM factory_deps WHERE bytecode_hash = ANY($1)" }, + "c0904ee4179531cfb9d458a17f753085dc2ed957b30a89119d7534112add3876": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Bytea", + "Bytea" + ] + } + }, + "query": "UPDATE l1_batches SET commitment = $2, aux_data_hash = $3, updated_at = now() WHERE number = $1" + }, "c178e1574d2a16cb90bcc5d5333a4f8dd2a69e0c12b4e7e108a8dcc6000669a5": { "describe": { "columns": [ @@ -10731,32 +10789,6 @@ }, "query": "\n UPDATE witness_inputs\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now()\n WHERE l1_batch_number = (\n SELECT l1_batch_number\n FROM witness_inputs\n WHERE l1_batch_number <= $3\n AND\n ( status = 'queued'\n OR (status = 'in_progress' AND processing_started_at < now() - $1::interval)\n OR (status = 'failed' AND attempts < $2)\n )\n AND protocol_version = ANY($4)\n ORDER BY l1_batch_number ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING witness_inputs.*\n " }, - "e03756d19dfdf4cdffa81154e690dc7c36024dad5363e0c5440606a5a50eef53": { - 
"describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Bool", - "Bytea", - "Int8", - "Bytea", - "Bytea", - "Bytea", - "Bytea", - "Int8" - ] - } - }, - "query": "UPDATE l1_batches SET hash = $1, merkle_root_hash = $2, commitment = $3, compressed_repeated_writes = $4, compressed_initial_writes = $5, l2_l1_compressed_messages = $6, l2_l1_merkle_root = $7, zkporter_is_available = $8, parent_hash = $9, rollup_last_leaf_index = $10, aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, compressed_state_diffs = $14, updated_at = now() WHERE number = $15 AND hash IS NULL" - }, "e05a8c74653afc78c892ddfd08e60ab040d2b2f7c4b5ee110988eac2dd0dd90d": { "describe": { "columns": [ diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index a74b2654927..082f7318e4e 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -587,16 +587,15 @@ impl BlocksDal<'_, '_> { let update_result = sqlx::query!( "UPDATE l1_batches \ - SET hash = $1, merkle_root_hash = $2, commitment = $3, \ - compressed_repeated_writes = $4, compressed_initial_writes = $5, \ - l2_l1_compressed_messages = $6, l2_l1_merkle_root = $7, \ - zkporter_is_available = $8, parent_hash = $9, rollup_last_leaf_index = $10, \ - aux_data_hash = $11, pass_through_data_hash = $12, meta_parameters_hash = $13, \ - compressed_state_diffs = $14, updated_at = now() \ - WHERE number = $15 AND hash IS NULL", + SET hash = $1, merkle_root_hash = $2, \ + compressed_repeated_writes = $3, compressed_initial_writes = $4, \ + l2_l1_compressed_messages = $5, l2_l1_merkle_root = $6, \ + zkporter_is_available = $7, parent_hash = $8, rollup_last_leaf_index = $9, \ + pass_through_data_hash = $10, meta_parameters_hash = $11, \ + compressed_state_diffs = $12, updated_at = now() \ + WHERE number = $13 AND hash IS NULL", metadata.root_hash.as_bytes(), 
metadata.merkle_root_hash.as_bytes(), - metadata.commitment.as_bytes(), metadata.repeated_writes_compressed, metadata.initial_writes_compressed, metadata.l2_l1_messages_compressed, @@ -604,7 +603,6 @@ impl BlocksDal<'_, '_> { metadata.block_meta_params.zkporter_is_available, previous_root_hash.as_bytes(), metadata.rollup_last_leaf_index as i64, - metadata.aux_data_hash.as_bytes(), metadata.pass_through_data_hash.as_bytes(), metadata.meta_parameters_hash.as_bytes(), metadata.state_diffs_compressed, @@ -616,21 +614,38 @@ impl BlocksDal<'_, '_> { .execute(transaction.conn()) .await?; - sqlx::query!( - "INSERT INTO commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment) \ - VALUES ($1, $2, $3) \ - ON CONFLICT (l1_batch_number) DO UPDATE SET events_queue_commitment = $2, bootloader_initial_content_commitment = $3", - number.0 as i64, - metadata.events_queue_commitment.map(|h| h.0.to_vec()), - metadata - .bootloader_initial_content_commitment - .map(|h| h.0.to_vec()), - ) - .instrument("save_batch_commitments") - .with_arg("number", &number) - .report_latency() - .execute(transaction.conn()) - .await?; + if metadata.events_queue_commitment.is_some() { + // Save `commitment`, `aux_data_hash`, `events_queue_commitment`, `bootloader_initial_content_commitment`. 
+ sqlx::query!( + "INSERT INTO commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment) \ + VALUES ($1, $2, $3) \ + ON CONFLICT (l1_batch_number) DO NOTHING", + number.0 as i64, + metadata.events_queue_commitment.map(|h| h.0.to_vec()), + metadata + .bootloader_initial_content_commitment + .map(|h| h.0.to_vec()), + ) + .instrument("save_batch_commitments") + .with_arg("number", &number) + .report_latency() + .execute(transaction.conn()) + .await?; + + sqlx::query!( + "UPDATE l1_batches \ + SET commitment = $2, aux_data_hash = $3, updated_at = now() \ + WHERE number = $1", + number.0 as i64, + metadata.commitment.as_bytes(), + metadata.aux_data_hash.as_bytes(), + ) + .instrument("save_batch_aux_commitment") + .with_arg("number", &number) + .report_latency() + .execute(transaction.conn()) + .await?; + } if update_result.rows_affected() == 0 { tracing::debug!( @@ -739,6 +754,21 @@ impl BlocksDal<'_, '_> { Ok(L1BatchNumber(row.number as u32)) } + pub async fn get_eth_commit_tx_id( + &mut self, + l1_batch_number: L1BatchNumber, + ) -> sqlx::Result> { + let row = sqlx::query!( + "SELECT eth_commit_tx_id FROM l1_batches \ + WHERE number = $1", + l1_batch_number.0 as i64 + ) + .fetch_optional(self.storage.conn()) + .await?; + + Ok(row.and_then(|row| row.eth_commit_tx_id.map(|n| n as u64))) + } + /// Returns the number of the last L1 batch for which an Ethereum prove tx was sent and confirmed. 
pub async fn get_number_of_last_l1_batch_proven_on_eth( &mut self, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs index af2745b68b0..59a9d8eb452 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs @@ -196,6 +196,7 @@ impl VmTracer for PubdataTracer { // Apply the pubdata to the current memory let mut memory_to_apply = vec![]; + apply_pubdata_to_memory(&mut memory_to_apply, pubdata_input); state.memory.populate_page( BOOTLOADER_HEAP_PAGE as usize, diff --git a/core/lib/types/src/commitment.rs b/core/lib/types/src/commitment.rs index 296d3ea87b6..8f2a4620a86 100644 --- a/core/lib/types/src/commitment.rs +++ b/core/lib/types/src/commitment.rs @@ -7,11 +7,14 @@ //! transactions, thus the calculations are done separately and asynchronously. use serde::{Deserialize, Serialize}; +use zksync_utils::u256_to_h256; use std::{collections::HashMap, convert::TryFrom}; use zksync_mini_merkle_tree::MiniMerkleTree; -use zksync_system_constants::ZKPORTER_IS_AVAILABLE; +use zksync_system_constants::{ + L2_TO_L1_LOGS_TREE_ROOT_KEY, STATE_DIFF_HASH_KEY, ZKPORTER_IS_AVAILABLE, +}; use crate::{ block::L1BatchHeader, @@ -353,6 +356,22 @@ impl L1BatchAuxiliaryOutput { events_state_queue_hash: H256, protocol_version: ProtocolVersionId, ) -> Self { + let state_diff_hash_from_logs = system_logs.iter().find_map(|log| { + if log.0.key == u256_to_h256(STATE_DIFF_HASH_KEY.into()) { + Some(log.0.value) + } else { + None + } + }); + + let merke_tree_root_from_logs = system_logs.iter().find_map(|log| { + if log.0.key == u256_to_h256(L2_TO_L1_LOGS_TREE_ROOT_KEY.into()) { + Some(log.0.value) + } else { + None + } + }); + let ( l2_l1_logs_compressed, initial_writes_compressed, @@ -403,6 +422,19 @@ impl L1BatchAuxiliaryOutput { let l2_l1_logs_merkle_root = MiniMerkleTree::new(merkle_tree_leaves, 
Some(min_tree_size)).merkle_root(); + if !system_logs.is_empty() { + assert_eq!( + state_diffs_hash, + state_diff_hash_from_logs.unwrap(), + "State diff hash mismatch" + ); + assert_eq!( + l2_l1_logs_merkle_root, + merke_tree_root_from_logs.unwrap(), + "L2 L1 logs tree root mismatch" + ); + } + Self { l2_l1_logs_compressed, initial_writes_compressed, diff --git a/core/lib/types/src/protocol_version.rs b/core/lib/types/src/protocol_version.rs index fa7a07e9d6f..5047a035002 100644 --- a/core/lib/types/src/protocol_version.rs +++ b/core/lib/types/src/protocol_version.rs @@ -274,18 +274,19 @@ impl TryFrom for ProtocolUpgrade { ParamType::Uint(256), // version id ParamType::Address, // allow list address ])], - &init_calldata[4..], + init_calldata + .get(4..) + .ok_or(crate::ethabi::Error::InvalidData)?, )?; - let mut decoded = match decoded.remove(0) { - Token::Tuple(x) => x, - _ => unreachable!(), + let Token::Tuple(mut decoded) = decoded.remove(0) else { + unreachable!(); }; - let mut transaction = match decoded.remove(0) { - Token::Tuple(x) => x, - _ => unreachable!(), + let Token::Tuple(mut transaction) = decoded.remove(0) else { + unreachable!() }; + let factory_deps = decoded.remove(0).into_array().unwrap(); let tx = { @@ -399,9 +400,8 @@ impl TryFrom for ProtocolUpgrade { let default_account_code_hash = H256::from_slice(&decoded.remove(0).into_fixed_bytes().unwrap()); let verifier_address = decoded.remove(0).into_address().unwrap(); - let mut verifier_params = match decoded.remove(0) { - Token::Tuple(tx) => tx, - _ => unreachable!(), + let Token::Tuple(mut verifier_params) = decoded.remove(0) else { + unreachable!() }; let recursion_node_level_vk_hash = H256::from_slice(&verifier_params.remove(0).into_fixed_bytes().unwrap()); diff --git a/core/lib/zksync_core/src/eth_sender/aggregator.rs b/core/lib/zksync_core/src/eth_sender/aggregator.rs index 92a2cb324d8..9b6cd1d16ce 100644 --- a/core/lib/zksync_core/src/eth_sender/aggregator.rs +++ 
b/core/lib/zksync_core/src/eth_sender/aggregator.rs @@ -234,6 +234,14 @@ impl Aggregator { .await .unwrap(); let batch_to_prove = previous_proven_batch_number + 1; + + // Return `None` if batch is not committed yet. + storage + .blocks_dal() + .get_eth_commit_tx_id(batch_to_prove) + .await + .unwrap()?; + if let Some(version_id) = storage .blocks_dal() .get_batch_protocol_version_id(batch_to_prove) From c4a12b156cab8b84d8d55fbd2c5a8531d97ad0f0 Mon Sep 17 00:00:00 2001 From: CrytoInsight <150222426+CrytoInsight@users.noreply.github.com> Date: Mon, 13 Nov 2023 20:15:57 +0800 Subject: [PATCH 002/115] chore: the errors in the document have been corrected (#442) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - fixed document ## Why ❔ - fixed document ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Igor Aleksanov --- core/tests/ts-integration/tests/system.test.ts | 2 +- .../contracts/custom-account/custom-paymaster.sol | 2 +- .../contracts/custom-account/interfaces/IPaymaster.sol | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/core/tests/ts-integration/tests/system.test.ts b/core/tests/ts-integration/tests/system.test.ts index f1727d952c5..0eaf8c23b46 100644 --- a/core/tests/ts-integration/tests/system.test.ts +++ b/core/tests/ts-integration/tests/system.test.ts @@ -380,7 +380,7 @@ export interface TransactionData { // is to be passed to account and any changes to its structure // would mean a breaking change to these accounts. In order to prevent this, // we should keep some fields as "reserved". 
- // It is also recommneded that their length is fixed, since + // It is also recommended that their length is fixed, since // it would allow easier proof integration (in case we will need // some special circuit for preprocessing transactions). reserved: BigNumberish[]; diff --git a/etc/contracts-test-data/contracts/custom-account/custom-paymaster.sol b/etc/contracts-test-data/contracts/custom-account/custom-paymaster.sol index 743412c95b8..af9fec30f7d 100644 --- a/etc/contracts-test-data/contracts/custom-account/custom-paymaster.sol +++ b/etc/contracts-test-data/contracts/custom-account/custom-paymaster.sol @@ -56,7 +56,7 @@ contract CustomPaymaster is IPaymaster { bool success = _transaction.payToTheBootloader(); require(success, "Failed to transfer funds to the bootloader"); - // For now, refunds are not supported, so we just test the fact that the transfered context is correct + // For now, refunds are not supported, so we just test the fact that the transferred context is correct txCounter += 1; context = abi.encode(txCounter); } else { diff --git a/etc/contracts-test-data/contracts/custom-account/interfaces/IPaymaster.sol b/etc/contracts-test-data/contracts/custom-account/interfaces/IPaymaster.sol index cf5ced94878..1bd5b81f32b 100644 --- a/etc/contracts-test-data/contracts/custom-account/interfaces/IPaymaster.sol +++ b/etc/contracts-test-data/contracts/custom-account/interfaces/IPaymaster.sol @@ -37,7 +37,7 @@ interface IPaymaster { /// @param _context, the context of the execution, returned by the "validateAndPayForPaymasterTransaction" method. /// @param _transaction, the users' transaction. /// @param _txResult, the result of the transaction execution (success or failure). - /// @param _maxRefundedGas, the upper bound on the amout of gas that could be refunded to the paymaster. + /// @param _maxRefundedGas, the upper bound on the amount of gas that could be refunded to the paymaster. 
/// @dev The exact amount refunded depends on the gas spent by the "postOp" itself and so the developers should /// take that into account. function postTransaction( From 664ce33622af220a24360f7f11a52a14141c3fdc Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Mon, 13 Nov 2023 15:50:52 +0200 Subject: [PATCH 003/115] fix(metadata-calculator): Save commitment for pre-boojum (#481) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Saves commitment for pre-boojum batches unconditionally ## Why ❔ They don't have `events_queue_commitment` but `commitment` should be saved anyway ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- core/lib/dal/src/blocks_dal.rs | 3 ++- core/lib/zksync_core/src/eth_sender/tests.rs | 1 + core/lib/zksync_core/src/metadata_calculator/updater.rs | 7 ++++++- core/lib/zksync_core/src/state_keeper/io/tests/mod.rs | 7 ++++++- core/lib/zksync_core/src/sync_layer/tests.rs | 7 ++++++- 5 files changed, 21 insertions(+), 4 deletions(-) diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 082f7318e4e..8f73c84d3b1 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -582,6 +582,7 @@ impl BlocksDal<'_, '_> { number: L1BatchNumber, metadata: &L1BatchMetadata, previous_root_hash: H256, + protocol_version: ProtocolVersionId, ) -> anyhow::Result<()> { let mut transaction = self.storage.start_transaction().await?; @@ -614,7 +615,7 @@ impl BlocksDal<'_, '_> { .execute(transaction.conn()) .await?; - if metadata.events_queue_commitment.is_some() { + if metadata.events_queue_commitment.is_some() || protocol_version.is_pre_boojum() { // Save `commitment`, `aux_data_hash`, 
`events_queue_commitment`, `bootloader_initial_content_commitment`. sqlx::query!( "INSERT INTO commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment) \ diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs index 5c7a1cba678..e15d8e91eff 100644 --- a/core/lib/zksync_core/src/eth_sender/tests.rs +++ b/core/lib/zksync_core/src/eth_sender/tests.rs @@ -897,6 +897,7 @@ async fn insert_l1_batch(tester: &EthSenderTester, number: L1BatchNumber) -> L1B header.number, &default_l1_batch_metadata(), Default::default(), + Default::default(), ) .await .unwrap(); diff --git a/core/lib/zksync_core/src/metadata_calculator/updater.rs b/core/lib/zksync_core/src/metadata_calculator/updater.rs index a1f58e7350f..08652300434 100644 --- a/core/lib/zksync_core/src/metadata_calculator/updater.rs +++ b/core/lib/zksync_core/src/metadata_calculator/updater.rs @@ -167,7 +167,12 @@ impl TreeUpdater { let save_postgres_latency = METRICS.start_stage(TreeUpdateStage::SavePostgres); storage .blocks_dal() - .save_l1_batch_metadata(l1_batch_number, &metadata, previous_root_hash) + .save_l1_batch_metadata( + l1_batch_number, + &metadata, + previous_root_hash, + header.protocol_version.unwrap(), + ) .await .unwrap(); // ^ Note that `save_l1_batch_metadata()` will not blindly overwrite changes if L1 batch diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index e70964c4957..634483e8421 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -353,7 +353,12 @@ async fn test_miniblock_and_l1_batch_processing( // Save metadata for the genesis L1 batch so that we don't hang in `seal_l1_batch`. 
let metadata = create_l1_batch_metadata(0); conn.blocks_dal() - .save_l1_batch_metadata(L1BatchNumber(0), &metadata, H256::zero()) + .save_l1_batch_metadata( + L1BatchNumber(0), + &metadata, + H256::zero(), + ProtocolVersionId::latest(), + ) .await .unwrap(); drop(conn); diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index e454c0098e1..71d98fb6f73 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -411,7 +411,12 @@ async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number: u32) { let metadata = create_l1_batch_metadata(number); storage .blocks_dal() - .save_l1_batch_metadata(L1BatchNumber(1), &metadata, H256::zero()) + .save_l1_batch_metadata( + L1BatchNumber(1), + &metadata, + H256::zero(), + ProtocolVersionId::latest(), + ) .await .unwrap(); break; From 580cada003bdfe2fff686a1fc3ce001b4959aa4d Mon Sep 17 00:00:00 2001 From: "Ramon \"9Tails\" Canales" Date: Mon, 13 Nov 2023 14:13:14 +0000 Subject: [PATCH 004/115] feat(hyperchain): Adding prover related commands to zk stack (#440) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add commands to zk stack to setup a prover (for now only CPU, it blocks the GPU option), and adds the necessary components to the compose file ## Why ❔ "Without a prover, a hyperchain is nothing more than a even worst OP rollup" - Canales, Ramon ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- .gitignore | 3 + .../docker-compose-hyperchain-template | 116 ++++----- infrastructure/zk/src/hyperchain_wizard.ts | 246 +++++++++++++++--- infrastructure/zk/src/index.ts | 2 + infrastructure/zk/src/init.ts | 19 +- infrastructure/zk/src/prover_setup.ts | 235 +++++++++++++++++ prover/prover_fri/README.md | 4 +- yarn.lock | 185 +++++++++---- 8 files changed, 648 insertions(+), 162 deletions(-) create mode 100644 infrastructure/zk/src/prover_setup.ts diff --git a/.gitignore b/.gitignore index 2da2c3d986d..eff8079e75d 100644 --- a/.gitignore +++ b/.gitignore @@ -71,4 +71,7 @@ cache-zk/ zksolc verified_sources +# Hyperchain related hyperchain-*.yml +/etc/hyperchains/prover-keys +/etc/hyperchains/artifacts diff --git a/etc/hyperchains/docker-compose-hyperchain-template b/etc/hyperchains/docker-compose-hyperchain-template index 3666770d531..00cb0ebc2a7 100644 --- a/etc/hyperchains/docker-compose-hyperchain-template +++ b/etc/hyperchains/docker-compose-hyperchain-template @@ -3,24 +3,27 @@ networks: zkstack: driver: bridge volumes: - prover_artifacts: - server_artifacts: + artifacts: services: - zkstack_core: + zkstack-core: networks: - zkstack image: {{orgName}}/server-v2:latest - command: ["--components", "tree_new,eth,data_fetcher,state_keeper,housekeeper"] + command: ["--components", "tree_new,eth,data_fetcher,state_keeper,housekeeper,proof_data_handler"] + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:3071/health"] + interval: 10s + timeout: 5s + retries: 10 env_file: - {{envFilePath}} environment: ZKSYNC_HOME: / ports: # assumes default ports in .env - # - "3312:3312" # prometheus metrics # we need a separate metrics port for each component - - "3075:3075" # proof_data_handler api + #- "3312:3312" # prometheus metrics # we need a separate metrics port for each component + - "3320:3320" # proof_data_handler api volumes: - - prover_artifacts:/etc_prover_artifacts - - server_artifacts:/etc_server_artifacts + - artifacts:{{artifactsPath}} zkstack-apis: 
networks: - zkstack @@ -44,101 +47,88 @@ services: zkstack-prover-fri-gateway: networks: - zkstack - build: - context: . - dockerfile: ./docker/prover-fri-gateway/Dockerfile + image: matterlabs/prover-fri-gateway:latest + depends_on: + zkstack-core: + condition: "service_healthy" env_file: - - ./common.env - env: - FRI_PROVER_GATEWAY_API_URL: http://zkstack-core:3075 - ports: # assumes default ports in .env - - "3312:3312" # prometheus metrics + - {{envFilePath}} + environment: + FRI_PROVER_GATEWAY_API_URL: http://zkstack-core:3320 + # ports: # assumes default ports in .env + # - "3312:3312" # prometheus metrics volumes: - - prover_artifacts:/etc_prover_artifacts - - server_artifacts:/etc_server_artifacts + - artifacts:{{artifactsPath}} + - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} zkstack-witness-generator-basic-circuits: networks: - zkstack - build: - context: . - dockerfile: ./docker/witness-generator/Dockerfile + image: matterlabs/witness-generator:latest command: ["--round", "basic_circuits"] env_file: - {{envFilePath}} - ports: # assumes default ports in .env - - "3312:3312" # prometheus metrics + # ports: # assumes default ports in .env + # - "3312:3312" # prometheus metrics volumes: - - prover_artifacts:/etc_prover_artifacts - - server_artifacts:/etc_server_artifacts + - artifacts:{{artifactsPath}} + - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} zkstack-witness-generator-leaf-aggregation: networks: - zkstack - build: - context: . 
- dockerfile: ./docker/witness-generator/Dockerfile + image: matterlabs/witness-generator:latest command: ["--round", "leaf_aggregation"] env_file: - {{envFilePath}} - ports: # assumes default ports in .env - - "3312:3312" # prometheus metrics + # ports: # assumes default ports in .env + # - "3312:3312" # prometheus metrics volumes: - - prover_artifacts:/etc_prover_artifacts - - server_artifacts:/etc_server_artifacts + - artifacts:{{artifactsPath}} + - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} zkstack-witness-generator-node-aggregation: networks: - zkstack - build: - context: . - dockerfile: ./docker/witness-generator/Dockerfile + image: matterlabs/witness-generator:latest command: ["--round", "node_aggregation"] env_file: - {{envFilePath}} - ports: # assumes default ports in .env - - "3312:3312" # prometheus metrics + # ports: # assumes default ports in .env + # - "3312:3312" # prometheus metrics volumes: - - prover_artifacts:/etc_prover_artifacts - - server_artifacts:/etc_server_artifacts + - artifacts:{{artifactsPath}} + - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} zkstack-witness-generator-scheduler: networks: - zkstack - build: - context: . - dockerfile: ./docker/witness-generator/Dockerfile + image: matterlabs/witness-generator:latest command: ["--round", "scheduler"] env_file: - {{envFilePath}} - ports: # assumes default ports in .env - - "3312:3312" # prometheus metrics + # ports: # assumes default ports in .env + # - "3312:3312" # prometheus metrics volumes: - - prover_artifacts:/etc_prover_artifacts - - server_artifacts:/etc_server_artifacts + - artifacts:{{artifactsPath}} + - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} zkstack-prover-fri: networks: - zkstack - build: - context: . 
- dockerfile: ./docker/prover-fri/Dockerfile + image: matterlabs/prover-fri:latest env_file: - {{envFilePath}} - env: - FRI_PROVER_SETUP_DATA_PATH: /etc/prover_setup_data - ports: # assumes default ports in .env - - "3312:3312" # prometheus metrics + # ports: # assumes default ports in .env + # - "3312:3312" # prometheus metrics volumes: - - prover_artifacts:/etc_prover_artifacts - - server_artifacts:/etc_server_artifacts - - ./prover_setup-data:/etc/prover_setup_data + - artifacts:{{artifactsPath}} + - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} zkstack-proof-fri-compressor: networks: - zkstack - build: - context: . - dockerfile: ./docker/proof-fri-compressor/Dockerfile + image: matterlabs/proof-fri-compressor:latest env_file: - {{envFilePath}} - ports: # assumes default ports in .env - - "3312:3312" # prometheus metrics + # ports: # assumes default ports in .env + # - "3312:3312" # prometheus metrics volumes: - - prover_artifacts:/etc_prover_artifacts - - server_artifacts:/etc_server_artifacts + - artifacts:{{artifactsPath}} + - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} {{/if}} + \ No newline at end of file diff --git a/infrastructure/zk/src/hyperchain_wizard.ts b/infrastructure/zk/src/hyperchain_wizard.ts index efacedf0d1c..1fdc191ec92 100644 --- a/infrastructure/zk/src/hyperchain_wizard.ts +++ b/infrastructure/zk/src/hyperchain_wizard.ts @@ -12,6 +12,7 @@ import * as fs from 'fs'; import fetch from 'node-fetch'; import { up } from './up'; import * as Handlebars from 'handlebars'; +import { ProverType, setupProver } from './prover_setup'; const title = chalk.blueBright; const warning = chalk.yellowBright; @@ -26,30 +27,35 @@ enum BaseNetwork { MAINNET = 'mainnet' } -interface BasePromptOptions { +enum ProverTypeOption { + NONE = 'No (this hyperchain is for testing purposes only)', + CPU = 'Yes - With a CPU implementation', + GPU = 'Yes - With a GPU implementation (Coming soon)' +} + +export interface BasePromptOptions { name: string | (() => 
string); type: string | (() => string); message: string | (() => string) | (() => Promise); initial?: any; required?: boolean; - choices?: string[]; + choices?: string[] | object[]; skip?: ((state: object) => boolean | Promise) | boolean; } -// An init command that allows configuring and spinning up a new Hyperchain network. +// An init command that allows configuring and spinning up a new hyperchain network. async function initHyperchain() { - await announced('Initializing Hyperchain creation', setupConfiguration()); + await announced('Initializing hyperchain creation', setupConfiguration()); const deployerPrivateKey = process.env.DEPLOYER_PRIVATE_KEY; const governorPrivateKey = process.env.GOVERNOR_PRIVATE_KEY; - const governorAddress = process.env.GOVERNOR_ADDRESS; const deployL2Weth = Boolean(process.env.DEPLOY_L2_WETH || false); const deployTestTokens = Boolean(process.env.DEPLOY_TEST_TOKENS || false); const initArgs: InitArgs = { skipSubmodulesCheckout: false, skipEnvSetup: true, - deployerL1ContractInputArgs: ['--private-key', deployerPrivateKey, '--governor-address', governorAddress], + skipPlonkStep: true, governorPrivateKeyArgs: ['--private-key', governorPrivateKey], deployerL2ContractInput: { args: ['--private-key', deployerPrivateKey], @@ -66,7 +72,9 @@ async function initHyperchain() { env.mergeInitToEnv(); - console.log(announce(`\nYour Hyperchain configuration is available at ${process.env.ENV_FILE}\n`)); + console.log(announce(`\nYour hyperchain configuration is available at ${process.env.ENV_FILE}\n`)); + + console.log(warning(`\nIf you want to add a prover to your hyperchain, please run zk stack prover-setup now.\n`)); await announced('Start server', startServer()); } @@ -86,8 +94,8 @@ async function setupConfiguration() { const results: any = await enquirer.prompt(questions); if (results.config === CONFIGURE) { - await announced('Setting Hyperchain configuration', setHyperchainMetadata()); - await announced('Validating information and balances to 
deploy Hyperchain', checkReadinessToDeploy()); + await announced('Setting hyperchain configuration', setHyperchainMetadata()); + await announced('Validating information and balances to deploy hyperchain', checkReadinessToDeploy()); } else { const envName = await selectHyperchainConfiguration(); @@ -107,19 +115,19 @@ async function setHyperchainMetadata() { const INSERT_KEYS = 'Insert keys'; const questions: BasePromptOptions[] = [ { - message: 'What is your Hyperchain name?', + message: 'What is your hyperchain name?', name: 'chainName', type: 'input', required: true }, { - message: 'What is your Hyperchain id? Make sure this is not used by other chains.', + message: 'What is your hyperchain id? Make sure this is not used by other chains.', name: 'chainId', type: 'numeral', required: true }, { - message: 'To which L1 Network will your Hyperchain rollup to?', + message: 'To which L1 Network will your hyperchain rollup to?', name: 'l1Chain', type: 'select', required: true, @@ -323,6 +331,8 @@ async function setHyperchainMetadata() { wrapEnvModify('GOVERNOR_PRIVATE_KEY', governor.privateKey); wrapEnvModify('GOVERNOR_ADDRESS', governor.address); wrapEnvModify('CHAIN_STATE_KEEPER_FEE_ACCOUNT_ADDR', feeReceiverAddress); + wrapEnvModify('ETH_SENDER_SENDER_PROOF_SENDING_MODE', 'SkipEveryProof'); + if (feeReceiver) { wrapEnvModify('FEE_RECEIVER_PRIVATE_KEY', feeReceiver.privateKey); } @@ -335,6 +345,50 @@ async function setHyperchainMetadata() { env.load(); } +async function setupHyperchainProver() { + let proverType = ProverTypeOption.NONE; + + const proverQuestions: BasePromptOptions[] = [ + { + message: 'Which ZK Prover implementation you want for your hyperchain?', + name: 'prover', + type: 'select', + required: true, + choices: [ProverTypeOption.NONE, ProverTypeOption.CPU, ProverTypeOption.GPU] + } + ]; + + const proverResults: any = await enquirer.prompt(proverQuestions); + + proverType = proverResults.prover; + + if (proverType === ProverTypeOption.GPU) { + const 
gpuQuestions: BasePromptOptions[] = [ + { + message: 'GPU prover is not yet available. Do you want to use the CPU implementation?', + name: 'prover', + type: 'confirm', + required: true + } + ]; + + const gpuResults: any = await enquirer.prompt(gpuQuestions); + + if (gpuResults.prover) { + proverType = ProverTypeOption.CPU; + } + } + + switch (proverType) { + case ProverTypeOption.NONE: + wrapEnvModify('ETH_SENDER_SENDER_PROOF_SENDING_MODE', 'SkipEveryProof'); + env.mergeInitToEnv(); + break; + default: + await setupProver(proverType === ProverTypeOption.CPU ? ProverType.CPU : ProverType.GPU); + } +} + function printAddressInfo(name: string, address: string) { console.log(title(name)); console.log(`Address - ${address}`); @@ -344,7 +398,7 @@ function printAddressInfo(name: string, address: string) { async function initializeTestERC20s() { const questions: BasePromptOptions[] = [ { - message: 'Do you want to deploy some test ERC20s to your Hyperchain (only use on testing scenarios)?', + message: 'Do you want to deploy some test ERC20s to your hyperchain (only use on testing scenarios)?', name: 'deployERC20s', type: 'confirm' } @@ -367,7 +421,7 @@ async function initializeTestERC20s() { async function initializeWethTokenForHyperchain() { const questions: BasePromptOptions[] = [ { - message: 'Do you want to deploy Wrapped ETH to your Hyperchain?', + message: 'Do you want to deploy Wrapped ETH to your hyperchain?', name: 'deployWeth', type: 'confirm' } @@ -424,7 +478,7 @@ async function startServer() { const questions: BasePromptOptions[] = [ { - message: 'Do you want to start your Hyperchain server now?', + message: 'Do you want to start your hyperchain server now?', name: 'start', type: 'select', choices: [YES_DEFAULT, YES_CUSTOM, NO] @@ -457,7 +511,7 @@ async function startServer() { } // The current env.modify requires to write down the variable name twice. This wraps it so the caller only writes the name and the value. 
-function wrapEnvModify(variable: string, assignedVariable: string) { +export function wrapEnvModify(variable: string, assignedVariable: string) { env.modify(variable, `${variable}=${assignedVariable}`); } @@ -500,7 +554,7 @@ async function checkReadinessToDeploy() { const fundResults: any = await enquirer.prompt(fundQuestions); if (fundResults.fund === EXIT) { - console.log('Exiting Hyperchain initializer.'); + console.log('Exiting hyperchain initializer.'); process.exit(0); } } @@ -580,7 +634,7 @@ async function selectHyperchainConfiguration() { const envQuestions = [ { - message: 'Which Hyperchain configuration do you want to use?', + message: 'Which hyperchain configuration do you want to use?', name: 'env', type: 'select', choices: [...envs].sort() @@ -592,35 +646,55 @@ async function selectHyperchainConfiguration() { } async function generateDockerImages(cmd: Command) { + await _generateDockerImages(cmd.customDockerOrg); +} + +async function _generateDockerImages(_orgName?: string) { console.log(warning(`\nThis process will build the docker images and it can take a while. Please be patient.\n`)); const envName = await selectHyperchainConfiguration(); - const orgName = cmd.customDockerOrg ?? 
envName; + env.set(envName); + + const orgName = _orgName || envName; await docker.customBuildForHyperchain('server-v2', orgName); console.log(warning(`\nDocker image for server created: Server image: ${orgName}/server-v2:latest\n`)); let hasProver = false; + let artifactsPath, proverSetupArtifacts; if (process.env.ETH_SENDER_SENDER_PROOF_SENDING_MODE !== 'SkipEveryProof') { hasProver = true; - // TODO: (PRO-48) Hyperchain is using prover, so we must include Boojum images - wait for Boojum merge - // proof-fri-compressor, prover-fri, witness-generator, prover-fri-gateway - // Must be added to the init flow - // Setup key is downloaded and added somewhere - reference: https://github.com/matter-labs/zksync-era/blob/7b23ab0ba14cb6600ecf7e596a9e9536ffa5fda2/.github/workflows/build-core-template.yml#L72C1-L73C1 - // Data keys are already downloaded from: https://console.cloud.google.com/storage/browser/matterlabs-zksync-v2-infra-blob-store/prover_setup_data/2d33a27?pageState=(%22StorageObjectListTable%22:(%22f%22:%22%255B%255D%22))&orgonly=true&project=matterlabs-infra&supportedpurview=organizationId&prefix=&forceOnObjectsSortingFiltering=false - // to: ./prover_setup-data - // - Following should be added to the hyperchain env file: - // OBJECT_STORE_FILE_BACKED_BASE_PATH: /path/to/server/artifacts - // PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH: /path/to/prover/artifacts - // - Inspired by https://github.com/matter-labs/zksync-era/tree/main/prover/prover_fri + if (process.env.OBJECT_STORE_MODE === 'FileBacked') { + artifactsPath = process.env.OBJECT_STORE_FILE_BACKED_BASE_PATH; + proverSetupArtifacts = process.env.FRI_PROVER_SETUP_DATA_PATH; + } + + if (process.env.PROVER_TYPE === ProverType.GPU) { + throw new Error('GPU prover configuration not available yet'); + } + + // For Now use only the public images. 
Too soon to allow prover to be customized + // await docker.customBuildForHyperchain('witness-generator', orgName); + // await docker.customBuildForHyperchain('witness-vector-generator', orgName); + // await docker.customBuildForHyperchain('prover-fri-gateway', orgName); + // await docker.customBuildForHyperchain('proof-fri-compressor', orgName); + // if (process.env.PROVER_TYPE === ProverType.CPU) { + // isCPUProver = true; + // await docker.customBuildForHyperchain('prover-fri', orgName); + // } else { + // await docker.customBuildForHyperchain('witness-vector-generator', orgName); + // await docker.customBuildForHyperchain('prover-gpu-fri', orgName); + // } } const composeArgs = { envFilePath: `./etc/env/${envName}.env`, orgName, - hasProver + hasProver, + artifactsPath, + proverSetupArtifacts }; const templateFileName = './etc/hyperchains/docker-compose-hyperchain-template'; @@ -632,19 +706,123 @@ async function generateDockerImages(cmd: Command) { console.log( announce( - `Docker images generated successfully, and compose file generate (hyperchain-${envName}.yml). Run the images with "docker compose -f hyperchain-${envName} up)".\n\n` + `Docker images generated successfully, and compose file generate (hyperchain-${envName}.yml). 
Run the images with "docker compose -f hyperchain-${envName}.yml up -d".\n\n` ) ); } -export const initHyperchainCommand = new Command('stack').description('ZK Stack Hyperchains management'); +async function configDemoHyperchain(cmd: Command) { + fs.existsSync('/etc/env/demo.env') && fs.unlinkSync('/etc/env/demo.env'); + fs.existsSync('/etc/hyperchains/hyperchain-demo.yml') && fs.unlinkSync('/etc/hyperchains/hyperchain-demo.yml'); + await compileConfig('demo'); + env.set('demo'); + + wrapEnvModify('CHAIN_ETH_ZKSYNC_NETWORK', 'Zeek hyperchain'); + wrapEnvModify('CHAIN_ETH_ZKSYNC_NETWORK_ID', '1337'); + wrapEnvModify('ETH_SENDER_SENDER_PROOF_SENDING_MODE', 'SkipEveryProof'); + wrapEnvModify('ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS', '20'); + + const richWalletsRaw = await fetch( + 'https://raw.githubusercontent.com/matter-labs/local-setup/main/rich-wallets.json' + ); + + const richWallets = await richWalletsRaw.json(); + + const deployer = new ethers.Wallet(richWallets[0].privateKey); + const governor = new ethers.Wallet(richWallets[1].privateKey); + + wrapEnvModify('DEPLOYER_PRIVATE_KEY', deployer.privateKey); + wrapEnvModify('GOVERNOR_PRIVATE_KEY', governor.privateKey); + wrapEnvModify('GOVERNOR_ADDRESS', governor.address); + + env.load(); + + const deployerPrivateKey = process.env.DEPLOYER_PRIVATE_KEY; + const governorPrivateKey = process.env.GOVERNOR_PRIVATE_KEY; + const deployL2Weth = Boolean(process.env.DEPLOY_L2_WETH || false); + const deployTestTokens = Boolean(process.env.DEPLOY_TEST_TOKENS || false); + + const initArgs: InitArgs = { + skipSubmodulesCheckout: false, + skipEnvSetup: cmd.skipEnvSetup, + skipPlonkStep: true, + governorPrivateKeyArgs: ['--private-key', governorPrivateKey], + deployerL2ContractInput: { + args: ['--private-key', deployerPrivateKey], + includePaymaster: false, + includeL2WETH: deployL2Weth + }, + testTokens: { + deploy: deployTestTokens, + args: ['--private-key', deployerPrivateKey, '--envFile', 
process.env.CHAIN_ETH_NETWORK!] + } + }; + + if (!cmd.skipEnvSetup) { + await up(); + } + await init(initArgs); + + env.mergeInitToEnv(); + + if (cmd.prover) { + await setupProver(cmd.prover === 'gpu' ? ProverType.GPU : ProverType.CPU); + } +} + +function printReadme() { + console.log( + title( + '-----------------------------------\nWelcome to ZK Stack hyperchain CLI\n-----------------------------------\n' + ) + ); + + console.log( + announce('Please follow these steps/commands to get your hyperchain tailored to your (and your users) needs.\n') + ); + + console.log( + `${chalk.bgBlueBright('zk stack init')} ${chalk.blueBright('- Wizard for hyperchain creation/configuration')}` + ); + console.log( + `${chalk.bgBlueBright('zk stack prover-setup')} ${chalk.blueBright( + '- Configure the ZK Prover instance for your hyperchain' + )}` + ); + console.log( + `${chalk.bgBlueBright('zk stack docker-setup')} ${chalk.blueBright( + '- Generate docker images and compose file for your hyperchain' + )}` + ); + console.log( + `${chalk.bgBlueBright('zk stack demo')} ${chalk.blueBright( + '- Spin up a demo hyperchain with default settings for testing purposes' + )}` + ); + + console.log('\n'); +} + +export const initHyperchainCommand = new Command('stack') + .description('ZK Stack hyperchains management') + .action(printReadme); initHyperchainCommand .command('init') - .description('Wizard for Hyperchain creation/configuration') + .description('Wizard for hyperchain creation/configuration') .action(initHyperchain); initHyperchainCommand .command('docker-setup') .option('--custom-docker-org ', 'Custom organization name for the docker images') - .description('Generate docker images and compose file for your Hyperchain') + .description('Generate docker images and compose file for your hyperchain') .action(generateDockerImages); +initHyperchainCommand + .command('prover-setup') + .description('Configure the ZK Prover instance for your hyperchain') + .action(setupHyperchainProver); 
+initHyperchainCommand + .command('demo') + .option('--prover ', 'Add a cpu or gpu prover to the hyperchain') + .option('--skip-env-setup', 'Run env setup automatically (pull docker containers, etc)') + .description('Spin up a demo hyperchain with default settings for testing purposes') + .action(configDemoHyperchain); diff --git a/infrastructure/zk/src/index.ts b/infrastructure/zk/src/index.ts index ea06029609e..5d65df824d7 100644 --- a/infrastructure/zk/src/index.ts +++ b/infrastructure/zk/src/index.ts @@ -20,6 +20,7 @@ import { command as config } from './config'; import { command as clean } from './clean'; import { command as db } from './database'; import { command as verifyUpgrade } from './verify-upgrade'; +import { proverCommand } from './prover_setup'; import { command as status } from './status'; import * as env from './env'; @@ -44,6 +45,7 @@ const COMMANDS = [ clean, compiler, verifyUpgrade, + proverCommand, env.command, status, completion(program as Command) diff --git a/infrastructure/zk/src/init.ts b/infrastructure/zk/src/init.ts index b3753e62aef..0fe3c113763 100644 --- a/infrastructure/zk/src/init.ts +++ b/infrastructure/zk/src/init.ts @@ -21,9 +21,8 @@ export async function init(initArgs: InitArgs = DEFAULT_ARGS) { const { skipSubmodulesCheckout, skipEnvSetup, + skipPlonkStep, testTokens, - // eslint-disable-next-line @typescript-eslint/no-unused-vars - deployerL1ContractInputArgs, governorPrivateKeyArgs, deployerL2ContractInput } = initArgs; @@ -33,7 +32,7 @@ export async function init(initArgs: InitArgs = DEFAULT_ARGS) { await announced('Checking environment', checkEnv()); await announced('Checking git hooks', env.gitHooks()); await announced('Setting up containers', up()); - await announced('Checking PLONK setup', run.plonkSetup()); + !skipPlonkStep && (await announced('Checking PLONK setup', run.plonkSetup())); } if (!skipSubmodulesCheckout) { await announced('Checkout system-contracts submodule', submoduleUpdate()); @@ -67,7 +66,13 @@ export 
async function init(initArgs: InitArgs = DEFAULT_ARGS) { if (deployerL2ContractInput.includeL2WETH) { await announced('Initializing L2 WETH token', contract.initializeWethToken(governorPrivateKeyArgs)); } - await announced('Initializing governance', contract.initializeGovernance(governorPrivateKeyArgs)); + await announced( + 'Initializing governance', + contract.initializeGovernance([ + ...governorPrivateKeyArgs, + !deployerL2ContractInput.includeL2WETH ? ['--skip-weth-bridge'] : [] + ]) + ); } // A smaller version of `init` that "resets" the localhost environment, for which `init` was already called before. @@ -144,7 +149,7 @@ async function checkEnv() { export interface InitArgs { skipSubmodulesCheckout: boolean; skipEnvSetup: boolean; - deployerL1ContractInputArgs: any[]; + skipPlonkStep: boolean; governorPrivateKeyArgs: any[]; deployerL2ContractInput: { args: any[]; @@ -160,7 +165,7 @@ export interface InitArgs { const DEFAULT_ARGS: InitArgs = { skipSubmodulesCheckout: false, skipEnvSetup: false, - deployerL1ContractInputArgs: [], + skipPlonkStep: false, governorPrivateKeyArgs: [], deployerL2ContractInput: { args: [], includePaymaster: true, includeL2WETH: true }, testTokens: { deploy: true, args: [] } @@ -174,7 +179,7 @@ export const initCommand = new Command('init') const initArgs: InitArgs = { skipSubmodulesCheckout: cmd.skipSubmodulesCheckout, skipEnvSetup: cmd.skipEnvSetup, - deployerL1ContractInputArgs: [], + skipPlonkStep: false, governorPrivateKeyArgs: [], deployerL2ContractInput: { args: [], includePaymaster: true, includeL2WETH: true }, testTokens: { deploy: true, args: [] } diff --git a/infrastructure/zk/src/prover_setup.ts b/infrastructure/zk/src/prover_setup.ts new file mode 100644 index 00000000000..b438eea055d --- /dev/null +++ b/infrastructure/zk/src/prover_setup.ts @@ -0,0 +1,235 @@ +import { Command } from 'commander'; +import * as utils from './utils'; +import fs from 'fs'; +import enquirer from 'enquirer'; +import { BasePromptOptions, 
wrapEnvModify } from './hyperchain_wizard'; +import fetch from 'node-fetch'; +import chalk from 'chalk'; +import * as env from './env'; + +export enum ProverType { + CPU = 'cpu', + GPU = 'gpu' +} + +export async function setupProver(proverType: ProverType) { + // avoid doing work if receives the wrong param from the CLI + if (proverType == ProverType.GPU || proverType == ProverType.CPU) { + wrapEnvModify('PROVER_TYPE', proverType); + wrapEnvModify('ETH_SENDER_SENDER_PROOF_SENDING_MODE', 'OnlyRealProofs'); + wrapEnvModify('ETH_SENDER_SENDER_PROOF_LOADING_MODE', 'FriProofFromGcs'); + wrapEnvModify('FRI_PROVER_GATEWAY_API_POLL_DURATION_SECS', '120'); + await setupArtifactsMode(); + if (!process.env.CI) { + await setupProverKeys(proverType); + } else { + wrapEnvModify( + 'FRI_PROVER_SETUP_DATA_PATH', + `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${process.env.ZKSYNC_ENV}/${ + proverType === ProverType.GPU ? 'gpu' : 'cpu' + }/` + ); + } + env.mergeInitToEnv(); + } else { + console.error(`Unknown prover type: ${proverType}`); + process.exit(1); + } +} + +async function downloadCSR(proverType: ProverType) { + const currentEnv = env.get(); + fs.mkdirSync(`${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/`, { + recursive: true + }); + process.chdir(`${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/`); + console.log(chalk.yellow('Downloading ceremony (CSR) file')); + await utils.spawn('wget -c https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^24.key'); + await utils.sleep(1); + process.chdir(process.env.ZKSYNC_HOME as string); + wrapEnvModify('CRS_FILE', `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/`); +} + +async function setupProverKeys(proverType: ProverType) { + const DOWNLOAD = 'Download default keys'; + const GENERATE = 'Generate locally'; + const questions: BasePromptOptions[] = [ + { + message: + 'Do you want to download 
default Boojum prover setup keys, or generate them locally (takes some time - only needed if you changed anything on the prover code)?', + name: 'proverKeys', + type: 'select', + choices: [DOWNLOAD, GENERATE] + } + ]; + + const results: any = await enquirer.prompt(questions); + + await downloadCSR(proverType); + if (results.proverKeys == DOWNLOAD) { + await downloadDefaultSetupKeys(proverType); + } else { + await generateAllSetupData(proverType); + } + + wrapEnvModify( + 'FRI_PROVER_SETUP_DATA_PATH', + `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${process.env.ZKSYNC_ENV}/${ + proverType === ProverType.GPU ? 'gpu' : 'cpu' + }/` + ); +} + +async function setupArtifactsMode() { + if (process.env.CI) { + const currentEnv = env.get(); + const path = `${process.env.ZKSYNC_HOME}/etc/hyperchains/artifacts/${currentEnv}/`; + wrapEnvModify('OBJECT_STORE_MODE', 'FileBacked'); + wrapEnvModify('PUBLIC_OBJECT_STORE_MODE', 'FileBacked'); + wrapEnvModify('PROVER_OBJECT_STORE_MODE', 'FileBacked'); + wrapEnvModify('OBJECT_STORE_FILE_BACKED_BASE_PATH', path); + wrapEnvModify('PUBLIC_OBJECT_STORE_FILE_BACKED_BASE_PATH', path); + wrapEnvModify('PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH', path); + return; + } + + const LOCAL = 'Local folder'; + const GCP = 'GCP'; + const questions: BasePromptOptions[] = [ + { + message: 'Will you use a local folder for storing prover artifacts, or Google Cloud Platform (GCP)?', + name: 'mode', + type: 'select', + choices: [LOCAL, GCP] + } + ]; + + const results: any = await enquirer.prompt(questions); + + if (results.mode == LOCAL) { + const currentEnv = env.get(); + + const folderQuestion: BasePromptOptions[] = [ + { + message: 'Please select the path to store the proving process artifacts.', + name: 'path', + type: 'input', + required: true, + initial: `${process.env.ZKSYNC_HOME}/etc/hyperchains/artifacts/${currentEnv}/` + } + ]; + + const folder: any = await enquirer.prompt(folderQuestion); + + wrapEnvModify('OBJECT_STORE_MODE', 
'FileBacked'); + wrapEnvModify('PUBLIC_OBJECT_STORE_MODE', 'FileBacked'); + wrapEnvModify('PROVER_OBJECT_STORE_MODE', 'FileBacked'); + wrapEnvModify('OBJECT_STORE_FILE_BACKED_BASE_PATH', folder.path); + wrapEnvModify('PUBLIC_OBJECT_STORE_FILE_BACKED_BASE_PATH', folder.path); + wrapEnvModify('PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH', folder.path); + } else { + const gcpQuestions: BasePromptOptions[] = [ + { + message: 'Please provide the path for a GCP credential file.', + name: 'gcpPath', + type: 'input', + required: true + }, + { + message: 'Please provide the bucket name on GCP where artifacts should be stored.', + name: 'bucket', + type: 'input', + required: true + } + ]; + + const gcp: any = await enquirer.prompt(gcpQuestions); + + wrapEnvModify('OBJECT_STORE_MODE', 'GCSWithCredentialFile'); + wrapEnvModify('PUBLIC_OBJECT_STORE_MODE', 'GCSWithCredentialFile'); + wrapEnvModify('PROVER_OBJECT_STORE_MODE', 'GCSWithCredentialFile'); + wrapEnvModify('OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH', gcp.gcpPath); + wrapEnvModify('PUBLIC_OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH', gcp.gcpPath); + wrapEnvModify('PUBLIC_OBJECT_STORE_BUCKET_BASE_URL', gcp.bucket); + wrapEnvModify('PROVER_OBJECT_STORE_BUCKET_BASE_URL', gcp.bucket); + } +} + +async function generateSetupDataForBaseLayer(proverType: ProverType) { + await generateSetupData(true, proverType); +} + +async function generateSetupDataForRecursiveLayers(proverType: ProverType) { + await generateSetupData(false, proverType); +} + +async function generateSetupData(isBaseLayer: boolean, proverType: ProverType) { + const currentEnv = env.get(); + fs.mkdirSync(`${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/`, { + recursive: true + }); + process.chdir(`${process.env.ZKSYNC_HOME}/prover`); + await utils.spawn( + `for i in {1..${isBaseLayer ? '13' : '15'}}; do zk f cargo run ${ + proverType == ProverType.GPU ? 
'--features "gpu"' : '' + } --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i ${ + isBaseLayer ? '--is_base_layer' : '' + }; done` + ); + process.chdir(process.env.ZKSYNC_HOME as string); +} + +async function generateAllSetupData(proverType: ProverType) { + await generateSetupDataForBaseLayer(proverType); + await generateSetupDataForRecursiveLayers(proverType); +} + +async function downloadDefaultSetupKeys(proverType: ProverType, region: 'us' | 'asia' | 'europe' = 'us') { + const proverKeysUrls = require(`${process.env.ZKSYNC_HOME}/prover/setup-data-${proverType}-keys.json`); + const currentEnv = env.get(); + await downloadFilesFromGCP( + proverKeysUrls[region], + `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/` + ); + + await utils.spawn( + `cp -r ${process.env.ZKSYNC_HOME}/prover/vk_setup_data_generator_server_fri/data/* ${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/` + ); +} + +async function listFilesFromGCP(gcpUri: string): Promise { + const matches = gcpUri.match(/gs:\/\/([^\/]*)\/([^\/]*)\/?/); + if (matches != null) { + const url = `https://storage.googleapis.com/storage/v1/b/${matches[1]}/o?prefix=${matches[2]}%2F`; + const response = await fetch(url); + if (response.ok) { + const json = await response.json(); + return json.items.map((item: any) => `https://storage.googleapis.com/${matches[1]}/${item.name}`); + } + } + return []; +} + +async function downloadFilesFromGCP(gcpUri: string, destination: string): Promise { + const files = await listFilesFromGCP(gcpUri); + + fs.mkdirSync(destination, { recursive: true }); + process.chdir(destination); + + const length = files.length; + for (const index in files) { + console.log(chalk.yellow(`Downloading file ${Number(index) + 1} of ${length}`)); + const file = files[index]; + await utils.spawn(`wget -c ${file}`); + await utils.sleep(1); + console.log(``); + } + process.chdir(process.env.ZKSYNC_HOME as string); +} + 
+export const proverCommand = new Command('prover').description('Prover setup related commands'); + +proverCommand + .command('setup') + .arguments('[type]') + .action((type: ProverType) => setupProver(type)); diff --git a/prover/prover_fri/README.md b/prover/prover_fri/README.md index 0e9490358e5..cec915494b6 100644 --- a/prover/prover_fri/README.md +++ b/prover/prover_fri/README.md @@ -23,9 +23,9 @@ Machine specs: ```markdown for i in {1..13}; do zk f cargo run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i - --is_base_layer done + --is_base_layer; done - for i in {1..15}; do zk f cargo run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i done + for i in {1..15}; do zk f cargo run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i; done ``` 3. Initialize DB and run migrations: `zk init` diff --git a/yarn.lock b/yarn.lock index 6877daf3fa2..eefeb367168 100644 --- a/yarn.lock +++ b/yarn.lock @@ -418,10 +418,10 @@ minimatch "^3.0.4" strip-json-comments "^3.1.1" -"@eslint/eslintrc@^2.1.2": - version "2.1.2" - resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-2.1.2.tgz#c6936b4b328c64496692f76944e755738be62396" - integrity sha512-+wvgpDsrB1YqAMdEUCcnTlpfVBH7Vqn6A/NT3D8WVXFIaKMlErPIZT3oCIAVCOtarRpMtelZLqJeU3t7WY6X6g== +"@eslint/eslintrc@^2.1.3": + version "2.1.3" + resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-2.1.3.tgz#797470a75fe0fbd5a53350ee715e85e87baff22d" + integrity sha512-yZzuIG+jnVu6hNSzFEN07e8BxF3uAzYtQb6uDkaYZLo6oYZDCq454c5kB8zxnzfCYyP4MIuyBn10L0DqwujTmA== dependencies: ajv "^6.12.4" debug "^4.3.2" @@ -433,10 +433,10 @@ minimatch "^3.1.2" strip-json-comments "^3.1.1" -"@eslint/js@8.52.0": - version "8.52.0" - resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.52.0.tgz#78fe5f117840f69dc4a353adf9b9cd926353378c" - integrity sha512-mjZVbpaeMZludF2fsWLD0Z9gCref1Tk4i9+wddjRvpUNqqcndPkBD09N/Mapey0b3jaXbLm2kICwFv2E64QinA== +"@eslint/js@8.53.0": + version 
"8.53.0" + resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.53.0.tgz#bea56f2ed2b5baea164348ff4d5a879f6f81f20d" + integrity sha512-Kn7K8dx/5U6+cT1yEhpX1w4PCSg0M+XyRILPgvwcEBjerFWCwQj5sbr3/VmxqV0JGHCBCzyd6LxypEuehypY1w== "@ethereum-waffle/chai@^3.4.4": version "3.4.4" @@ -1569,10 +1569,10 @@ chalk "4.1.2" dockerode "^3.3.4" -"@matterlabs/hardhat-zksync-vyper@^0.2.0": - version "0.2.2" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-0.2.2.tgz#d42e171b58c7c3bfacc79817ff975fb3952587a9" - integrity sha512-FU22i4XrSR6WqISrsjVXDIY9BEEGUtb5ictOH4SR+EwzGgfSPm4C7cCTKXdtJ5ZHCOR99cwx/okVfKCwv7XVFw== +"@matterlabs/hardhat-zksync-vyper@^1.0.0": + version "1.0.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.0.0.tgz#661227401db08ffa6f3934f29d5a0c83ebac713f" + integrity sha512-RKrzuy/SqnB9AS62lTfm70Z0Wg571Di3B/IOSf3fqiEhwt6Jjc8og3acEUdIdn0TNQUNBoLqZdQdVG9T00yDJA== dependencies: "@nomiclabs/hardhat-docker" "^2.0.0" chalk "4.1.2" @@ -2050,10 +2050,10 @@ fs-extra "^7.0.1" solpp "^0.11.5" -"@nomiclabs/hardhat-vyper@^3.0.3": - version "3.0.4" - resolved "https://registry.yarnpkg.com/@nomiclabs/hardhat-vyper/-/hardhat-vyper-3.0.4.tgz#ebd5590812225bea63a2e8a297be07802ae707e6" - integrity sha512-VSmNCs0MQCn7qgWubfSkJkFEJmsTvbimPsbxknM5jLFi7pNeDVB5eO00GaoweuZiWKBVTHYJsylVK5686GoPrQ== +"@nomiclabs/hardhat-vyper@^3.0.5": + version "3.0.5" + resolved "https://registry.yarnpkg.com/@nomiclabs/hardhat-vyper/-/hardhat-vyper-3.0.5.tgz#44594b8a27e9c627534013fdebe6a485275f846e" + integrity sha512-i/Q771sr4vnSTaNTMGe3kX4Nl2on7hiXHHcz1MrW0+MKAJfi3A4sEloXX3aim6TylCPFq0M1/esDX+Y0WPmfbQ== dependencies: debug "^4.1.1" fs-extra "^7.0.1" @@ -2419,13 +2419,18 @@ "@types/responselike" "^1.0.0" "@types/chai-as-promised@^7.1.3", "@types/chai-as-promised@^7.1.4": - version "7.1.7" - resolved 
"https://registry.yarnpkg.com/@types/chai-as-promised/-/chai-as-promised-7.1.7.tgz#fd16a981ba9542c83d4e1d2f40c7899aae82aa38" - integrity sha512-APucaP5rlmTRYKtRA6FE5QPP87x76ejw5t5guRJ4y5OgMnwtsvigw7HHhKZlx2MGXLeZd6R/GNZR/IqDHcbtQw== + version "7.1.8" + resolved "https://registry.yarnpkg.com/@types/chai-as-promised/-/chai-as-promised-7.1.8.tgz#f2b3d82d53c59626b5d6bbc087667ccb4b677fe9" + integrity sha512-ThlRVIJhr69FLlh6IctTXFkmhtP3NpMZ2QGq69StYLyKZFp/HOp1VdKZj7RvfNWYYcJ1xlbLGLLWj1UvP5u/Gw== dependencies: "@types/chai" "*" -"@types/chai@*", "@types/chai@^4.2.21", "@types/chai@^4.3.1": +"@types/chai@*", "@types/chai@^4.3.1": + version "4.3.10" + resolved "https://registry.yarnpkg.com/@types/chai/-/chai-4.3.10.tgz#2ad2959d1767edee5b0e4efb1a0cd2b500747317" + integrity sha512-of+ICnbqjmFCiixUnqRulbylyXQrPqIGf/B3Jax1wIF3DvSheysQxAWvqHhZiW3IQrycvokcLcFQlveGp+vyNg== + +"@types/chai@^4.2.21": version "4.3.9" resolved "https://registry.yarnpkg.com/@types/chai/-/chai-4.3.9.tgz#144d762491967db8c6dea38e03d2206c2623feec" integrity sha512-69TtiDzu0bcmKQv3yg1Zx409/Kd7r0b5F1PfpYJfSHzLGtB53547V4u+9iqKYsTu/O2ai6KTb0TInNpvuQ3qmg== @@ -2465,9 +2470,9 @@ "@types/node" "*" "@types/http-cache-semantics@*": - version "4.0.3" - resolved "https://registry.yarnpkg.com/@types/http-cache-semantics/-/http-cache-semantics-4.0.3.tgz#a3ff232bf7d5c55f38e4e45693eda2ebb545794d" - integrity sha512-V46MYLFp08Wf2mmaBhvgjStM3tPa+2GAdy/iqoX+noX1//zje2x4XmrIU0cAwyClATsTmahbtoQ2EwP7I5WSiA== + version "4.0.4" + resolved "https://registry.yarnpkg.com/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz#b979ebad3919799c979b17c72621c0bc0a31c6c4" + integrity sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA== "@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0", "@types/istanbul-lib-coverage@^2.0.1": version "2.0.5" @@ -2514,9 +2519,9 @@ "@types/node" "*" "@types/lodash@^4.14.199": - version "4.14.200" - resolved 
"https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.200.tgz#435b6035c7eba9cdf1e039af8212c9e9281e7149" - integrity sha512-YI/M/4HRImtNf3pJgbF+W6FrXovqj+T+/HpENLTooK9PnkacBsDpeP3IpHab40CClUfhNmdM2WTNP2sa2dni5Q== + version "4.14.201" + resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.201.tgz#76f47cb63124e806824b6c18463daf3e1d480239" + integrity sha512-y9euML0cim1JrykNxADLfaG0FgD1g/yTHwUs/Jg9ZIU7WKj2/4IW9Lbb1WZbvck78W/lfGXFfe+u2EGfIJXdLQ== "@types/lru-cache@^5.1.0": version "5.1.1" @@ -2557,7 +2562,15 @@ resolved "https://registry.yarnpkg.com/@types/mocha/-/mocha-9.1.1.tgz#e7c4f1001eefa4b8afbd1eee27a237fee3bf29c4" integrity sha512-Z61JK7DKDtdKTWwLeElSEBcWGRLY8g95ic5FoQqI9CMx0ns/Ghep3B4DfcEimiKMvtamNVULVNKEsiwV3aQmXw== -"@types/node-fetch@^2.5.5", "@types/node-fetch@^2.5.7": +"@types/node-fetch@^2.5.5": + version "2.6.9" + resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.9.tgz#15f529d247f1ede1824f7e7acdaa192d5f28071e" + integrity sha512-bQVlnMLFJ2d35DkPNjEPmd9ueO/rh5EiaZt2bhqiSarPjZIuIV6bPQVqcrEyvNo+AfTrRGVazle1tl597w3gfA== + dependencies: + "@types/node" "*" + form-data "^4.0.0" + +"@types/node-fetch@^2.5.7": version "2.6.7" resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.7.tgz#a1abe2ce24228b58ad97f99480fdcf9bbc6ab16d" integrity sha512-lX17GZVpJ/fuCjguZ5b3TjEbSENxmEk1B2z02yoXSK9WMEWRivhdSY73wWMn6bpcCDAOh6qAdktpKHIlkDk2lg== @@ -2619,9 +2632,9 @@ integrity sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA== "@types/qs@^6.2.31": - version "6.9.9" - resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.9.tgz#66f7b26288f6799d279edf13da7ccd40d2fa9197" - integrity sha512-wYLxw35euwqGvTDx6zfY1vokBFnsK0HNrzc6xNHchxfO2hpuRg74GbkEW7e3sSmPvj0TjCDT1VCa6OtHXnubsg== + version "6.9.10" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.10.tgz#0af26845b5067e1c9a622658a51f60a3934d51e8" + integrity 
sha512-3Gnx08Ns1sEoCrWssEgTSJs/rsT2vhGP+Ja9cnnk9k4ALxinORlQneLXFeFKOTJMOeZUFD1s7w+w2AphTpvzZw== "@types/readable-stream@^2.3.13": version "2.3.15" @@ -2639,9 +2652,9 @@ "@types/node" "*" "@types/responselike@^1.0.0": - version "1.0.2" - resolved "https://registry.yarnpkg.com/@types/responselike/-/responselike-1.0.2.tgz#8de1b0477fd7c12df77e50832fa51701a8414bd6" - integrity sha512-/4YQT5Kp6HxUDb4yhRkm0bJ7TbjvTddqX7PZ5hz6qV3pxSo72f/6YPRo+Mu2DU307tm9IioO69l7uAwn5XNcFA== + version "1.0.3" + resolved "https://registry.yarnpkg.com/@types/responselike/-/responselike-1.0.3.tgz#cc29706f0a397cfe6df89debfe4bf5cea159db50" + integrity sha512-H/+L+UkTV33uf49PH5pCAUBVPNj2nDBXTN+qS1dOwyyg24l3CcicicCA7ca+HMvJBZcFgl5r8e+RR6elsb4Lyw== dependencies: "@types/node" "*" @@ -2709,14 +2722,14 @@ debug "^4.3.1" "@typescript-eslint/parser@^6.7.4": - version "6.9.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.9.1.tgz#4f685f672f8b9580beb38d5fb99d52fc3e34f7a3" - integrity sha512-C7AK2wn43GSaCUZ9do6Ksgi2g3mwFkMO3Cis96kzmgudoVaKyt62yNzJOktP0HDLb/iO2O0n2lBOzJgr6Q/cyg== - dependencies: - "@typescript-eslint/scope-manager" "6.9.1" - "@typescript-eslint/types" "6.9.1" - "@typescript-eslint/typescript-estree" "6.9.1" - "@typescript-eslint/visitor-keys" "6.9.1" + version "6.10.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.10.0.tgz#578af79ae7273193b0b6b61a742a2bc8e02f875a" + integrity sha512-+sZwIj+s+io9ozSxIWbNB5873OSdfeBEH/FR0re14WLI6BaKuSOnnwCJ2foUiu8uXf4dRp1UqHP0vrZ1zXGrog== + dependencies: + "@typescript-eslint/scope-manager" "6.10.0" + "@typescript-eslint/types" "6.10.0" + "@typescript-eslint/typescript-estree" "6.10.0" + "@typescript-eslint/visitor-keys" "6.10.0" debug "^4.3.4" "@typescript-eslint/scope-manager@4.33.0": @@ -2727,6 +2740,14 @@ "@typescript-eslint/types" "4.33.0" "@typescript-eslint/visitor-keys" "4.33.0" +"@typescript-eslint/scope-manager@6.10.0": + version "6.10.0" + resolved 
"https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.10.0.tgz#b0276118b13d16f72809e3cecc86a72c93708540" + integrity sha512-TN/plV7dzqqC2iPNf1KrxozDgZs53Gfgg5ZHyw8erd6jd5Ta/JIEcdCheXFt9b1NYb93a1wmIIVW/2gLkombDg== + dependencies: + "@typescript-eslint/types" "6.10.0" + "@typescript-eslint/visitor-keys" "6.10.0" + "@typescript-eslint/scope-manager@6.9.1": version "6.9.1" resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.9.1.tgz#e96afeb9a68ad1cd816dba233351f61e13956b75" @@ -2750,6 +2771,11 @@ resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-4.33.0.tgz#a1e59036a3b53ae8430ceebf2a919dc7f9af6d72" integrity sha512-zKp7CjQzLQImXEpLt2BUw1tvOMPfNoTAfb8l51evhYbOEEzdWyQNmHWWGPR6hwKJDAi+1VXSBmnhL9kyVTTOuQ== +"@typescript-eslint/types@6.10.0": + version "6.10.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.10.0.tgz#f4f0a84aeb2ac546f21a66c6e0da92420e921367" + integrity sha512-36Fq1PWh9dusgo3vH7qmQAj5/AZqARky1Wi6WpINxB6SkQdY5vQoT2/7rW7uBIsPDcvvGCLi4r10p0OJ7ITAeg== + "@typescript-eslint/types@6.9.1": version "6.9.1" resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.9.1.tgz#a6cfc20db0fcedcb2f397ea728ef583e0ee72459" @@ -2768,6 +2794,19 @@ semver "^7.3.5" tsutils "^3.21.0" +"@typescript-eslint/typescript-estree@6.10.0": + version "6.10.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.10.0.tgz#667381eed6f723a1a8ad7590a31f312e31e07697" + integrity sha512-ek0Eyuy6P15LJVeghbWhSrBCj/vJpPXXR+EpaRZqou7achUWL8IdYnMSC5WHAeTWswYQuP2hAZgij/bC9fanBg== + dependencies: + "@typescript-eslint/types" "6.10.0" + "@typescript-eslint/visitor-keys" "6.10.0" + debug "^4.3.4" + globby "^11.1.0" + is-glob "^4.0.3" + semver "^7.5.4" + ts-api-utils "^1.0.1" + "@typescript-eslint/typescript-estree@6.9.1": version "6.9.1" resolved 
"https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.9.1.tgz#8c77910a49a04f0607ba94d78772da07dab275ad" @@ -2802,6 +2841,14 @@ "@typescript-eslint/types" "4.33.0" eslint-visitor-keys "^2.0.0" +"@typescript-eslint/visitor-keys@6.10.0": + version "6.10.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.10.0.tgz#b9eaf855a1ac7e95633ae1073af43d451e8f84e3" + integrity sha512-xMGluxQIEtOM7bqFCo+rCMh5fqI+ZxV5RUUOa29iVPz1OgCZrtc7rFnz5cLUazlkPKYqX+75iuDq7m0HQ48nCg== + dependencies: + "@typescript-eslint/types" "6.10.0" + eslint-visitor-keys "^3.4.1" + "@typescript-eslint/visitor-keys@6.9.1": version "6.9.1" resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.9.1.tgz#6753a9225a0ba00459b15d6456b9c2780b66707d" @@ -4464,7 +4511,12 @@ camelcase@^6.0.0, camelcase@^6.2.0: resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== -caniuse-lite@^1.0.30000844, caniuse-lite@^1.0.30001541: +caniuse-lite@^1.0.30000844: + version "1.0.30001561" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001561.tgz#752f21f56f96f1b1a52e97aae98c57c562d5d9da" + integrity sha512-NTt0DNoKe958Q0BE0j0c1V9jbUzhBxHIEJy7asmGrpE0yG63KTV7PLHPnK2E1O9RsQrQ081I3NLuXGS6zht3cw== + +caniuse-lite@^1.0.30001541: version "1.0.30001558" resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001558.tgz#d2c6e21fdbfe83817f70feab902421a19b7983ee" integrity sha512-/Et7DwLqpjS47JPEcz6VnxU9PwcIdVi0ciLXRWBQdj1XFye68pSQYpV0QtPTfUKWuOaEig+/Vez2l74eDc1tPQ== @@ -4947,9 +4999,9 @@ copy-descriptor@^0.1.0: integrity sha512-XgZ0pFcakEUlbwQEVNg3+QAis1FyTL3Qel9FYy8pSkQqoG3PNoT0bOCQtOXcOkur21r2Eq2kI+IE+gsmAEVlYw== core-js-pure@^3.0.1: - version "3.33.1" - resolved 
"https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.33.1.tgz#7f27dd239da8eb97dbea30120071be8e5565cb0e" - integrity sha512-wCXGbLjnsP10PlK/thHSQlOLlLKNEkaWbTzVvHHZ79fZNeN1gUmw2gBlpItxPv/pvqldevEXFh/d5stdNvl6EQ== + version "3.33.2" + resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.33.2.tgz#644830db2507ef84d068a70980ccd99c275f5fa6" + integrity sha512-a8zeCdyVk7uF2elKIGz67AjcXOxjRbwOLz8SbklEso1V+2DoW4OkAMZN9S9GBgvZIaqQi/OemFX4OiSoQEmg1Q== core-js@^2.4.0, core-js@^2.5.0: version "2.6.12" @@ -5507,7 +5559,12 @@ ee-first@1.1.1: resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== -electron-to-chromium@^1.3.47, electron-to-chromium@^1.4.535: +electron-to-chromium@^1.3.47: + version "1.4.578" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.578.tgz#7a3510f333bcd55e87882799ebeb7518d6ab4d95" + integrity sha512-V0ZhSu1BQZKfG0yNEL6Dadzik8E1vAzfpVOapdSiT9F6yapEJ3Bk+4tZ4SMPdWiUchCgnM/ByYtBzp5ntzDMIA== + +electron-to-chromium@^1.4.535: version "1.4.569" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.569.tgz#1298b67727187ffbaac005a7425490d157f3ad03" integrity sha512-LsrJjZ0IbVy12ApW3gpYpcmHS3iRxH4bkKOW98y1/D+3cvDUWGcbzbsFinfUS8knpcZk/PG/2p/RnkMCYN7PVg== @@ -5908,14 +5965,14 @@ eslint@^7.16.0: v8-compile-cache "^2.0.3" eslint@^8.51.0: - version "8.52.0" - resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.52.0.tgz#d0cd4a1fac06427a61ef9242b9353f36ea7062fc" - integrity sha512-zh/JHnaixqHZsolRB/w9/02akBk9EPrOs9JwcTP2ek7yL5bVvXuRariiaAjjoJ5DvuwQ1WAE/HsMz+w17YgBCg== + version "8.53.0" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.53.0.tgz#14f2c8244298fcae1f46945459577413ba2697ce" + integrity sha512-N4VuiPjXDUa4xVeV/GC/RV3hQW9Nw+Y463lkWaKKXKYMvmRiRDAtfpuPFLN+E1/6ZhyR8J2ig+eVREnYgUsiag== dependencies: 
"@eslint-community/eslint-utils" "^4.2.0" "@eslint-community/regexpp" "^4.6.1" - "@eslint/eslintrc" "^2.1.2" - "@eslint/js" "8.52.0" + "@eslint/eslintrc" "^2.1.3" + "@eslint/js" "8.53.0" "@humanwhocodes/config-array" "^0.11.13" "@humanwhocodes/module-importer" "^1.0.1" "@nodelib/fs.walk" "^1.2.8" @@ -6820,7 +6877,18 @@ fast-diff@^1.1.2, fast-diff@^1.2.0: resolved "https://registry.yarnpkg.com/fast-diff/-/fast-diff-1.3.0.tgz#ece407fa550a64d638536cd727e129c61616e0f0" integrity sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw== -fast-glob@^3.0.3, fast-glob@^3.2.12, fast-glob@^3.2.9, fast-glob@^3.3.0, fast-glob@^3.3.1: +fast-glob@^3.0.3, fast-glob@^3.3.0, fast-glob@^3.3.1: + version "3.3.2" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129" + integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.2" + merge2 "^1.3.0" + micromatch "^4.0.4" + +fast-glob@^3.2.12, fast-glob@^3.2.9: version "3.3.1" resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.1.tgz#784b4e897340f3dbbef17413b3f11acf03c874c4" integrity sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg== @@ -11351,11 +11419,16 @@ punycode@^1.4.1: resolved "https://registry.yarnpkg.com/punycode/-/punycode-1.4.1.tgz#c0d5a63b2718800ad8e1eb0fa5269c84dd41845e" integrity sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ== -punycode@^2.1.0, punycode@^2.1.1: +punycode@^2.1.0: version "2.3.0" resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.0.tgz#f67fa67c94da8f4d0cfff981aee4118064199b8f" integrity sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA== +punycode@^2.1.1: + version "2.3.1" + resolved 
"https://registry.yarnpkg.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" + integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== + pure-rand@^6.0.0: version "6.0.4" resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.0.4.tgz#50b737f6a925468679bff00ad20eade53f37d5c7" @@ -14293,9 +14366,9 @@ yocto-queue@^1.0.0: integrity sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g== zksync-web3@^0.14.3: - version "0.14.3" - resolved "https://registry.yarnpkg.com/zksync-web3/-/zksync-web3-0.14.3.tgz#64ac2a16d597464c3fc4ae07447a8007631c57c9" - integrity sha512-hT72th4AnqyLW1d5Jlv8N2B/qhEnl2NePK2A3org7tAa24niem/UAaHMkEvmWI3SF9waYUPtqAtjpf+yvQ9zvQ== + version "0.14.4" + resolved "https://registry.yarnpkg.com/zksync-web3/-/zksync-web3-0.14.4.tgz#0b70a7e1a9d45cc57c0971736079185746d46b1f" + integrity sha512-kYehMD/S6Uhe1g434UnaMN+sBr9nQm23Ywn0EUP5BfQCsbjcr3ORuS68PosZw8xUTu3pac7G6YMSnNHk+fwzvg== zksync-web3@^0.15.4, zksync-web3@^0.15.5: version "0.15.5" From 4421be4e2ec92371d3c4e5ae9a1ec100c949c158 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 13 Nov 2023 16:20:21 +0200 Subject: [PATCH 005/115] refactor(merkle tree): Remove lifetime in `MerkleTree` (#472) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Removes the lifetime parameter in `MerkleTree`, `MiniMerkleTree` and related types. ## Why ❔ The lifetime is there because of `HashTree` / `HashEmptySubtree` traits, which only have stateless `'static` implementations now and for the foreseeable future. Thus, these lifetimes can be replaced with `'static`. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- core/lib/merkle_tree/src/consistency.rs | 8 ++--- core/lib/merkle_tree/src/domain.rs | 4 +-- core/lib/merkle_tree/src/getters.rs | 15 ++++---- core/lib/merkle_tree/src/hasher/mod.rs | 35 ++++++++++++++----- core/lib/merkle_tree/src/hasher/nodes.rs | 5 ++- core/lib/merkle_tree/src/hasher/proofs.rs | 2 +- core/lib/merkle_tree/src/lib.rs | 22 ++++++------ core/lib/merkle_tree/src/recovery.rs | 30 ++++++++-------- core/lib/merkle_tree/src/storage/tests.rs | 2 +- .../tests/integration/merkle_tree.rs | 4 +-- core/lib/mini_merkle_tree/src/lib.rs | 30 +++++++--------- 11 files changed, 85 insertions(+), 72 deletions(-) diff --git a/core/lib/merkle_tree/src/consistency.rs b/core/lib/merkle_tree/src/consistency.rs index 2cbe1691b39..afe0111f9a3 100644 --- a/core/lib/merkle_tree/src/consistency.rs +++ b/core/lib/merkle_tree/src/consistency.rs @@ -6,6 +6,7 @@ use std::sync::atomic::{AtomicU64, Ordering}; use crate::{ errors::DeserializeError, + hasher::{HashTree, HasherWithStats}, types::{LeafNode, Nibbles, Node, NodeKey, Root}, Database, Key, MerkleTree, ValueHash, }; @@ -65,10 +66,7 @@ pub enum ConsistencyError { RootVersionMismatch { max_child_version: u64 }, } -impl MerkleTree<'_, DB> -where - DB: Database, -{ +impl MerkleTree { /// Verifies the internal tree consistency as stored in the database. /// /// # Errors @@ -169,7 +167,7 @@ where } let level = key.nibbles.nibble_count() * 4; - Ok(node.hash(&mut self.hasher.into(), level)) + Ok(node.hash(&mut HasherWithStats::new(&self.hasher), level)) } } diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index a4992e8f687..bb82233aec2 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -49,7 +49,7 @@ enum TreeMode { /// or discarded via [`Self::reset()`]. 
#[derive(Debug)] pub struct ZkSyncTree { - tree: MerkleTree<'static, Patched>, + tree: MerkleTree>, thread_pool: Option, mode: TreeMode, } @@ -426,7 +426,7 @@ impl ZkSyncTree { /// Readonly handle to a [`ZkSyncTree`]. #[derive(Debug)] -pub struct ZkSyncTreeReader(MerkleTree<'static, RocksDBWrapper>); +pub struct ZkSyncTreeReader(MerkleTree); // While cloning `MerkleTree` is logically unsound, cloning a reader is reasonable since it is readonly. impl Clone for ZkSyncTreeReader { diff --git a/core/lib/merkle_tree/src/getters.rs b/core/lib/merkle_tree/src/getters.rs index 66b9c303c50..67ce2aa9877 100644 --- a/core/lib/merkle_tree/src/getters.rs +++ b/core/lib/merkle_tree/src/getters.rs @@ -4,13 +4,10 @@ use crate::{ hasher::HasherWithStats, storage::{LoadAncestorsResult, SortedKeys, WorkingPatchSet}, types::{Nibbles, Node, TreeEntry, TreeEntryWithProof}, - Database, Key, MerkleTree, NoVersionError, ValueHash, + Database, HashTree, Key, MerkleTree, NoVersionError, ValueHash, }; -impl MerkleTree<'_, DB> -where - DB: Database, -{ +impl MerkleTree { /// Reads entries with the specified keys from the tree. The entries are returned in the same order /// as requested. 
/// @@ -72,7 +69,7 @@ where version: u64, leaf_keys: &[Key], ) -> Result, NoVersionError> { - let mut hasher = HasherWithStats::from(self.hasher); + let mut hasher = HasherWithStats::new(&self.hasher); self.load_and_transform_entries( version, leaf_keys, @@ -110,7 +107,7 @@ mod tests { let entries = tree.entries_with_proofs(0, &[missing_key]).unwrap(); assert_eq!(entries.len(), 1); assert!(entries[0].base.is_empty()); - entries[0].verify(tree.hasher, missing_key, tree.hasher.empty_tree_hash()); + entries[0].verify(&tree.hasher, missing_key, tree.hasher.empty_tree_hash()); } #[test] @@ -128,8 +125,8 @@ mod tests { let entries = tree.entries_with_proofs(0, &[key, missing_key]).unwrap(); assert_eq!(entries.len(), 2); assert!(!entries[0].base.is_empty()); - entries[0].verify(tree.hasher, key, output.root_hash); + entries[0].verify(&tree.hasher, key, output.root_hash); assert!(entries[1].base.is_empty()); - entries[1].verify(tree.hasher, missing_key, output.root_hash); + entries[1].verify(&tree.hasher, missing_key, output.root_hash); } } diff --git a/core/lib/merkle_tree/src/hasher/mod.rs b/core/lib/merkle_tree/src/hasher/mod.rs index cf64c5ec3ae..8b2478c43d3 100644 --- a/core/lib/merkle_tree/src/hasher/mod.rs +++ b/core/lib/merkle_tree/src/hasher/mod.rs @@ -29,13 +29,32 @@ pub trait HashTree: Send + Sync { /// Returns the hash of an empty subtree with the given depth. Implementations /// are encouraged to cache the returned values. fn empty_subtree_hash(&self, depth: usize) -> ValueHash; -} -impl dyn HashTree + '_ { - pub(crate) fn empty_tree_hash(&self) -> ValueHash { + /// Returns the hash of the empty tree. The default implementation uses [`Self::empty_subtree_hash()`]. 
+ fn empty_tree_hash(&self) -> ValueHash { self.empty_subtree_hash(TREE_DEPTH) } +} + +impl HashTree for &H { + fn name(&self) -> &'static str { + (**self).name() + } + + fn hash_leaf(&self, value_hash: &ValueHash, leaf_index: u64) -> ValueHash { + (**self).hash_leaf(value_hash, leaf_index) + } + fn hash_branch(&self, lhs: &ValueHash, rhs: &ValueHash) -> ValueHash { + (**self).hash_branch(lhs, rhs) + } + + fn empty_subtree_hash(&self, depth: usize) -> ValueHash { + (**self).empty_subtree_hash(depth) + } +} + +impl dyn HashTree + '_ { /// Extends the provided `path` to length `TREE_DEPTH`. fn extend_merkle_path<'a>( &'a self, @@ -68,7 +87,7 @@ impl dyn HashTree + '_ { pub(crate) fn with_stats<'a>(&'a self, stats: &'a HashingStats) -> HasherWithStats<'a> { HasherWithStats { shared_metrics: Some(stats), - ..HasherWithStats::from(self) + ..HasherWithStats::new(self) } } } @@ -143,8 +162,8 @@ pub(crate) struct HasherWithStats<'a> { local_hashed_bytes: u64, } -impl<'a> From<&'a dyn HashTree> for HasherWithStats<'a> { - fn from(inner: &'a dyn HashTree) -> Self { +impl<'a> HasherWithStats<'a> { + pub fn new(inner: &'a dyn HashTree) -> Self { Self { inner, shared_metrics: None, @@ -153,7 +172,7 @@ impl<'a> From<&'a dyn HashTree> for HasherWithStats<'a> { } } -impl<'a> AsRef for HasherWithStats<'a> { +impl<'a> AsRef<(dyn HashTree + 'a)> for HasherWithStats<'a> { fn as_ref(&self) -> &(dyn HashTree + 'a) { self.inner } @@ -257,7 +276,7 @@ mod tests { let key = key.hashed_key_u256(); let leaf = LeafNode::new(key, H256([1; 32]), 1); - let mut hasher = (&Blake2Hasher as &dyn HashTree).into(); + let mut hasher = HasherWithStats::new(&Blake2Hasher); let leaf_hash = leaf.hash(&mut hasher, 2); assert!(key.bit(254) && !key.bit(255)); let merkle_path = [H256([2; 32]), H256([3; 32])]; diff --git a/core/lib/merkle_tree/src/hasher/nodes.rs b/core/lib/merkle_tree/src/hasher/nodes.rs index e4432b86b69..d36c58c0ae1 100644 --- a/core/lib/merkle_tree/src/hasher/nodes.rs +++ 
b/core/lib/merkle_tree/src/hasher/nodes.rs @@ -259,7 +259,6 @@ impl Node { #[cfg(test)] mod tests { use super::*; - use crate::hasher::HashTree; use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; use zksync_types::H256; @@ -272,7 +271,7 @@ mod tests { internal_node.child_ref_mut(nibble).unwrap().hash = H256([nibble; 32]); } - let mut hasher = (&Blake2Hasher as &dyn HashTree).into(); + let mut hasher = HasherWithStats::new(&Blake2Hasher); let node_hash = InternalNode::hash_inner(internal_node.child_hashes(), &mut hasher, 252, None); @@ -311,7 +310,7 @@ mod tests { fn test_updating_child_hash_in_internal_node(child_indexes: &[u8]) { let mut internal_node = InternalNode::default(); - let mut hasher = (&Blake2Hasher as &dyn HashTree).into(); + let mut hasher = HasherWithStats::new(&Blake2Hasher); for (child_idx, &nibble) in child_indexes.iter().enumerate() { internal_node.insert_child_ref(nibble, ChildRef::leaf(1)); diff --git a/core/lib/merkle_tree/src/hasher/proofs.rs b/core/lib/merkle_tree/src/hasher/proofs.rs index e496acb3f88..d97df0ad97d 100644 --- a/core/lib/merkle_tree/src/hasher/proofs.rs +++ b/core/lib/merkle_tree/src/hasher/proofs.rs @@ -145,7 +145,7 @@ impl<'a> TreeRangeDigest<'a> { }); let left_contour: Vec<_> = left_contour.collect(); Self { - hasher: hasher.into(), + hasher: HasherWithStats::new(hasher), current_leaf: LeafNode::new( start_key, start_entry.base.value_hash, diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 07a9668a61a..166400cbb64 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -78,7 +78,7 @@ pub use crate::{ }, }; -use crate::{storage::Storage, types::Root}; +use crate::{hasher::HasherWithStats, storage::Storage, types::Root}; use zksync_crypto::hasher::blake2::Blake2Hasher; /// Binary Merkle tree implemented using AR16MT from Diem [Jellyfish Merkle tree] white paper. 
@@ -123,31 +123,33 @@ use zksync_crypto::hasher::blake2::Blake2Hasher; /// /// [Jellyfish Merkle tree]: https://developers.diem.com/papers/jellyfish-merkle-tree/2021-01-14.pdf #[derive(Debug)] -pub struct MerkleTree<'a, DB> { +pub struct MerkleTree { db: DB, - hasher: &'a dyn HashTree, + hasher: H, } -impl<'a, DB: Database> MerkleTree<'a, DB> { +impl MerkleTree { /// Loads a tree with the default Blake2 hasher. /// /// # Panics /// /// Panics in the same situations as [`Self::with_hasher()`]. pub fn new(db: DB) -> Self { - Self::with_hasher(db, &Blake2Hasher) + Self::with_hasher(db, Blake2Hasher) } +} +impl MerkleTree { /// Loads a tree with the specified hasher. /// /// # Panics /// /// Panics if the hasher or basic tree parameters (e.g., the tree depth) /// do not match those of the tree loaded from the database. - pub fn with_hasher(db: DB, hasher: &'a dyn HashTree) -> Self { + pub fn with_hasher(db: DB, hasher: H) -> Self { let tags = db.manifest().and_then(|manifest| manifest.tags); if let Some(tags) = tags { - tags.assert_consistency(hasher, false); + tags.assert_consistency(&hasher, false); } // If there are currently no tags in the tree, we consider that it fits // for backward compatibility. The tags will be added the next time the tree is saved. @@ -162,7 +164,7 @@ impl<'a, DB: Database> MerkleTree<'a, DB> { let Root::Filled { node, .. } = root else { return Some(self.hasher.empty_tree_hash()); }; - Some(node.hash(&mut self.hasher.into(), 0)) + Some(node.hash(&mut HasherWithStats::new(&self.hasher), 0)) } pub(crate) fn root(&self, version: u64) -> Option { @@ -209,7 +211,7 @@ impl<'a, DB: Database> MerkleTree<'a, DB> { /// Returns information about the update such as the final tree hash. 
pub fn extend(&mut self, key_value_pairs: Vec<(Key, ValueHash)>) -> BlockOutput { let next_version = self.db.manifest().unwrap_or_default().version_count; - let storage = Storage::new(&self.db, self.hasher, next_version, true); + let storage = Storage::new(&self.db, &self.hasher, next_version, true); let (output, patch) = storage.extend(key_value_pairs); self.db.apply_patch(patch); output @@ -227,7 +229,7 @@ impl<'a, DB: Database> MerkleTree<'a, DB> { instructions: Vec<(Key, TreeInstruction)>, ) -> BlockOutputWithProofs { let next_version = self.db.manifest().unwrap_or_default().version_count; - let storage = Storage::new(&self.db, self.hasher, next_version, true); + let storage = Storage::new(&self.db, &self.hasher, next_version, true); let (output, patch) = storage.extend_with_proofs(instructions); self.db.apply_patch(patch); output diff --git a/core/lib/merkle_tree/src/recovery.rs b/core/lib/merkle_tree/src/recovery.rs index 7e7450596d8..9700e401fa2 100644 --- a/core/lib/merkle_tree/src/recovery.rs +++ b/core/lib/merkle_tree/src/recovery.rs @@ -38,7 +38,7 @@ use std::time::Instant; use crate::{ - hasher::HashTree, + hasher::{HashTree, HasherWithStats}, storage::{PatchSet, PruneDatabase, PrunePatchSet, Storage}, types::{Key, Manifest, Root, TreeTags, ValueHash}, MerkleTree, @@ -59,22 +59,24 @@ pub struct RecoveryEntry { /// Handle to a Merkle tree during its recovery. #[derive(Debug)] -pub struct MerkleTreeRecovery<'a, DB> { +pub struct MerkleTreeRecovery { db: DB, - hasher: &'a dyn HashTree, + hasher: H, recovered_version: u64, } -impl<'a, DB: PruneDatabase> MerkleTreeRecovery<'a, DB> { +impl MerkleTreeRecovery { /// Creates tree recovery with the default Blake2 hasher. /// /// # Panics /// /// Panics in the same situations as [`Self::with_hasher()`]. 
pub fn new(db: DB, recovered_version: u64) -> Self { - Self::with_hasher(db, recovered_version, &Blake2Hasher) + Self::with_hasher(db, recovered_version, Blake2Hasher) } +} +impl MerkleTreeRecovery { /// Loads a tree with the specified hasher. /// /// # Panics @@ -83,7 +85,7 @@ impl<'a, DB: PruneDatabase> MerkleTreeRecovery<'a, DB> { /// for a different tree version. /// - Panics if the hasher or basic tree parameters (e.g., the tree depth) /// do not match those of the tree loaded from the database. - pub fn with_hasher(mut db: DB, recovered_version: u64, hasher: &'a dyn HashTree) -> Self { + pub fn with_hasher(mut db: DB, recovered_version: u64, hasher: H) -> Self { let manifest = db.manifest(); let mut manifest = if let Some(manifest) = manifest { if manifest.version_count > 0 { @@ -105,9 +107,9 @@ impl<'a, DB: PruneDatabase> MerkleTreeRecovery<'a, DB> { manifest.version_count = recovered_version + 1; if let Some(tags) = &manifest.tags { - tags.assert_consistency(hasher, true); + tags.assert_consistency(&hasher, true); } else { - let mut tags = TreeTags::new(hasher); + let mut tags = TreeTags::new(&hasher); tags.is_recovering = true; manifest.tags = Some(tags); } @@ -126,12 +128,12 @@ impl<'a, DB: PruneDatabase> MerkleTreeRecovery<'a, DB> { let Some(Root::Filled { node, .. }) = root else { return self.hasher.empty_tree_hash(); }; - node.hash(&mut self.hasher.into(), 0) + node.hash(&mut HasherWithStats::new(&self.hasher), 0) } /// Returns the last key processed during the recovery process. 
pub fn last_processed_key(&self) -> Option { - let storage = Storage::new(&self.db, self.hasher, self.recovered_version, false); + let storage = Storage::new(&self.db, &self.hasher, self.recovered_version, false); storage.greatest_key() } @@ -156,7 +158,7 @@ impl<'a, DB: PruneDatabase> MerkleTreeRecovery<'a, DB> { tracing::debug!("Started extending tree"); let started_at = Instant::now(); - let storage = Storage::new(&self.db, self.hasher, self.recovered_version, false); + let storage = Storage::new(&self.db, &self.hasher, self.recovered_version, false); let patch = storage.extend_during_recovery(entries); tracing::debug!("Finished processing keys; took {:?}", started_at.elapsed()); @@ -172,7 +174,7 @@ impl<'a, DB: PruneDatabase> MerkleTreeRecovery<'a, DB> { fields(recovered_version = self.recovered_version), )] #[allow(clippy::missing_panics_doc, clippy::range_plus_one)] - pub fn finalize(mut self) -> MerkleTree<'a, DB> { + pub fn finalize(mut self) -> MerkleTree { let mut manifest = self.db.manifest().unwrap(); // ^ `unwrap()` is safe: manifest is inserted into the DB on creation @@ -204,7 +206,7 @@ impl<'a, DB: PruneDatabase> MerkleTreeRecovery<'a, DB> { manifest .tags - .get_or_insert_with(|| TreeTags::new(self.hasher)) + .get_or_insert_with(|| TreeTags::new(&self.hasher)) .is_recovering = false; self.db.apply_patch(PatchSet::from_manifest(manifest)); tracing::debug!("Updated tree manifest to mark recovery as complete"); @@ -264,7 +266,7 @@ mod tests { let tree = recovery.finalize(); assert_eq!(tree.latest_version(), Some(42)); - let mut hasher = HasherWithStats::from(&Blake2Hasher as &dyn HashTree); + let mut hasher = HasherWithStats::new(&Blake2Hasher); assert_eq!( tree.latest_root_hash(), LeafNode::new( diff --git a/core/lib/merkle_tree/src/storage/tests.rs b/core/lib/merkle_tree/src/storage/tests.rs index 02ec9d4c800..d00ed4d3e05 100644 --- a/core/lib/merkle_tree/src/storage/tests.rs +++ b/core/lib/merkle_tree/src/storage/tests.rs @@ -261,7 +261,7 @@ fn 
proving_keys_existence_and_absence() { updater.patch_set.ensure_internal_root_node(); // Necessary for proofs to work. updater.insert(FIRST_KEY, H256([1; 32]), &Nibbles::EMPTY, || 1); - let mut hasher = (&() as &dyn HashTree).into(); + let mut hasher = HasherWithStats::new(&()); let (op, merkle_path) = updater.prove(&mut hasher, FIRST_KEY, &Nibbles::EMPTY); assert_matches!(op, TreeLogEntry::Read { .. }); let merkle_path = finalize_merkle_path(merkle_path, &hasher); diff --git a/core/lib/merkle_tree/tests/integration/merkle_tree.rs b/core/lib/merkle_tree/tests/integration/merkle_tree.rs index ad9467b8e5f..eb84bb7248e 100644 --- a/core/lib/merkle_tree/tests/integration/merkle_tree.rs +++ b/core/lib/merkle_tree/tests/integration/merkle_tree.rs @@ -691,7 +691,7 @@ mod rocksdb { let mut tree = MerkleTree::new(&mut db); tree.extend(vec![(U256::zero(), H256::zero())]); - MerkleTree::with_hasher(&mut db, &()); + MerkleTree::with_hasher(&mut db, ()); } #[test] @@ -703,6 +703,6 @@ mod rocksdb { drop(tree); let db = RocksDBWrapper::new(dir.path()); - MerkleTree::with_hasher(db, &()); + MerkleTree::with_hasher(db, ()); } } diff --git a/core/lib/mini_merkle_tree/src/lib.rs b/core/lib/mini_merkle_tree/src/lib.rs index 4eced5a93c0..a6cbf37213c 100644 --- a/core/lib/mini_merkle_tree/src/lib.rs +++ b/core/lib/mini_merkle_tree/src/lib.rs @@ -7,7 +7,7 @@ use once_cell::sync::Lazy; -use std::{fmt, iter, str::FromStr}; +use std::{iter, str::FromStr}; #[cfg(test)] mod tests; @@ -25,13 +25,13 @@ const MAX_TREE_DEPTH: usize = 32; /// can be specified larger than the number of provided leaves. In this case, the remaining leaves /// will be considered to equal `[0_u8; LEAF_SIZE]`. 
#[derive(Debug, Clone)] -pub struct MiniMerkleTree<'a, const LEAF_SIZE: usize> { - hasher: &'a dyn HashEmptySubtree, +pub struct MiniMerkleTree { + hasher: H, hashes: Box<[H256]>, binary_tree_size: usize, } -impl MiniMerkleTree<'static, LEAF_SIZE> +impl MiniMerkleTree where KeccakHasher: HashEmptySubtree, { @@ -46,11 +46,14 @@ where leaves: impl Iterator, min_tree_size: Option, ) -> Self { - Self::with_hasher(&KeccakHasher, leaves, min_tree_size) + Self::with_hasher(KeccakHasher, leaves, min_tree_size) } } -impl<'a, const LEAF_SIZE: usize> MiniMerkleTree<'a, LEAF_SIZE> { +impl MiniMerkleTree +where + H: HashEmptySubtree, +{ /// Creates a new Merkle tree from the supplied leaves. If `min_tree_size` is supplied and is larger than the /// number of the supplied leaves, the leaves are padded to `min_tree_size` with `[0_u8; LEAF_SIZE]` entries. /// @@ -60,7 +63,7 @@ impl<'a, const LEAF_SIZE: usize> MiniMerkleTree<'a, LEAF_SIZE> { /// /// - `min_tree_size` (if supplied) is not a power of 2. pub fn with_hasher( - hasher: &'a dyn HashEmptySubtree, + hasher: H, leaves: impl Iterator, min_tree_size: Option, ) -> Self { @@ -154,21 +157,14 @@ fn tree_depth_by_size(tree_size: usize) -> usize { } /// Hashing of empty binary Merkle trees. -pub trait HashEmptySubtree: Hasher { +pub trait HashEmptySubtree: + 'static + Send + Sync + Hasher +{ /// Returns the hash of an empty subtree with the given depth. Implementations /// are encouraged to cache the returned values. 
fn empty_subtree_hash(&self, depth: usize) -> H256; } -impl fmt::Debug for dyn HashEmptySubtree + '_ { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter - .debug_struct("HashTree") - .field("LEAF_SIZE", &LEAF_SIZE) - .finish() - } -} - impl HashEmptySubtree<88> for KeccakHasher { fn empty_subtree_hash(&self, depth: usize) -> H256 { static EMPTY_TREE_HASHES: Lazy> = Lazy::new(compute_empty_tree_hashes::<88>); From 69a7afd096d83bc395e7665cb199460bd5a9c7f8 Mon Sep 17 00:00:00 2001 From: Igor Borodin Date: Mon, 13 Nov 2023 16:10:25 +0100 Subject: [PATCH 006/115] ci: Move current Prover FRI bucket keys extraction to a separate script (#483) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Moves current prover FRI setup keys paths extraction from copy-pasted inline GHA steps to a separate script ## Why ❔ - DRY - Can be used in another places, like local testing, workflows in another repos, etc ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
--------- Co-authored-by: Artem Makhortov <13339874+artmakh@users.noreply.github.com> --- .github/workflows/build-docker-from-tag.yml | 11 +---------- .github/workflows/ci.yml | 1 + .github/workflows/release-test-stage.yml | 11 +---------- prover/extract-setup-data-keys.sh | 15 +++++++++++++++ 4 files changed, 18 insertions(+), 20 deletions(-) create mode 100755 prover/extract-setup-data-keys.sh diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml index 0d499a33c7a..a5bc7884f28 100644 --- a/.github/workflows/build-docker-from-tag.yml +++ b/.github/workflows/build-docker-from-tag.yml @@ -47,16 +47,7 @@ jobs: - name: Generate outputs with Prover FRI setup data keys IDs id: extract-prover-fri-setup-key-ids run: | - declare -A json_files=( - ["cpu"]="setup-data-cpu-keys.json" - ["gpu"]="setup-data-gpu-keys.json" - ) - for type in "${!json_files[@]}"; do - file=${json_files[$type]} - value=$(jq -r '.us' "./prover/$file") - short_sha=$(echo $value | sed 's|gs://matterlabs-setup-data-us/\(.*\)/|\1|') - echo "${type}_short_commit_sha=$short_sha" >> $GITHUB_OUTPUT - done + ./prover/extract-setup-data-keys.sh >> $GITHUB_OUTPUT build-push-core-images: name: Build and push image diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8bb0530888d..2812f28778a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -38,6 +38,7 @@ jobs: - '!prover/CHANGELOG.md' - '!prover/setup-data-cpu-keys.json' - '!prover/setup-data-gpu-keys.json' + - '!prover/extract-setup-data-keys.sh' - 'docker/prover*/**' - '.github/workflows/build-prover-template.yml' - '.github/workflows/ci-prover-reusable.yml' diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index ba90843f863..ae3294ee62e 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -58,16 +58,7 @@ jobs: - name: Generate outputs with Prover FRI setup data keys IDs id: 
extract-prover-fri-setup-key-ids run: | - declare -A json_files=( - ["cpu"]="setup-data-cpu-keys.json" - ["gpu"]="setup-data-gpu-keys.json" - ) - for type in "${!json_files[@]}"; do - file=${json_files[$type]} - value=$(jq -r '.us' "./prover/$file") - short_sha=$(echo $value | sed 's|gs://matterlabs-setup-data-us/\(.*\)/|\1|') - echo "${type}_short_commit_sha=$short_sha" >> $GITHUB_OUTPUT - done + ./prover/extract-setup-data-keys.sh >> $GITHUB_OUTPUT build-push-core-images: name: Build and push images diff --git a/prover/extract-setup-data-keys.sh b/prover/extract-setup-data-keys.sh new file mode 100755 index 00000000000..1fc4afe23da --- /dev/null +++ b/prover/extract-setup-data-keys.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# Define file names +cpu_file="setup-data-cpu-keys.json" +gpu_file="setup-data-gpu-keys.json" + +# Process CPU file +value=$(jq -r '.us' "./prover/$cpu_file") +short_sha=$(echo $value | sed 's|gs://matterlabs-setup-data-us/\(.*\)/|\1|') +echo "cpu_short_commit_sha=$short_sha" + +# Process GPU file +value=$(jq -r '.us' "./prover/$gpu_file") +short_sha=$(echo $value | sed 's|gs://matterlabs-setup-data-us/\(.*\)/|\1|') +echo "gpu_short_commit_sha=$short_sha" From 4e3cf93305067b5d5fd2b43b1102ef70e70b269d Mon Sep 17 00:00:00 2001 From: "Ramon \"9Tails\" Canales" Date: Mon, 13 Nov 2023 15:26:44 +0000 Subject: [PATCH 007/115] chore(Fri-compressor): Update README.md for FRI compressor (#484) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Changes README for module ## Why ❔ Current title is wrong ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- prover/proof_fri_compressor/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prover/proof_fri_compressor/README.md b/prover/proof_fri_compressor/README.md index 4b0fa52ed9f..bea0c9ac390 100644 --- a/prover/proof_fri_compressor/README.md +++ b/prover/proof_fri_compressor/README.md @@ -1,4 +1,4 @@ -# Witness vector generator +# FRI compressor Used to compress FRI proof to Bellman proof that gets sent to L1. From cb394f3c3ce93d345f24e5b9ee34e22ebca3abb0 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Tue, 14 Nov 2023 16:47:02 +0200 Subject: [PATCH 008/115] fix: change vks upgrade logic (#491) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ vks should be updated if any vk is update ## Why ❔ contract were changed, changing server as well ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- core/lib/types/src/protocol_version.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/lib/types/src/protocol_version.rs b/core/lib/types/src/protocol_version.rs index 5047a035002..09a722c72cd 100644 --- a/core/lib/types/src/protocol_version.rs +++ b/core/lib/types/src/protocol_version.rs @@ -428,8 +428,8 @@ impl TryFrom for ProtocolUpgrade { default_account_code_hash: (default_account_code_hash != H256::zero()) .then_some(default_account_code_hash), verifier_params: (recursion_node_level_vk_hash != H256::zero() - && recursion_leaf_level_vk_hash != H256::zero() - && recursion_circuits_set_vks_hash != H256::zero()) + || recursion_leaf_level_vk_hash != H256::zero() + || recursion_circuits_set_vks_hash != H256::zero()) .then_some(VerifierParams { recursion_node_level_vk_hash, recursion_leaf_level_vk_hash, From 38384e7bba702870440c3508f9370e20a26cefd0 Mon Sep 17 00:00:00 2001 From: Stanislav Bezkorovainyi Date: Tue, 14 Nov 2023 16:01:14 +0100 Subject: [PATCH 009/115] chore: Prepare boojum stage upgrade (#473) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
--- contracts | 2 +- etc/system-contracts | 2 +- etc/upgrades/1699353977-boojum/common.json | 5 + .../1699353977-boojum/stage2/crypto.json | 11 + .../1699353977-boojum/stage2/facetCuts.json | 177 + .../1699353977-boojum/stage2/facets.json | 18 + .../1699353977-boojum/stage2/l2Upgrade.json | 323 ++ .../stage2/transactions.json | 232 ++ infrastructure/protocol-upgrade/README.md | 4 +- .../protocol-upgrade/pre-boojum/IZkSync.d.ts | 3661 +++++++++++++++++ .../pre-boojum/IZkSyncFactory.ts | 1948 +++++++++ .../protocol-upgrade/src/crypto/crypto.ts | 7 +- .../protocol-upgrade/src/crypto/deployer.ts | 3 + .../src/l2upgrade/transactions.ts | 6 +- .../protocol-upgrade/src/transaction.ts | 6 +- yarn.lock | 188 +- 16 files changed, 6468 insertions(+), 125 deletions(-) create mode 100644 etc/upgrades/1699353977-boojum/common.json create mode 100644 etc/upgrades/1699353977-boojum/stage2/crypto.json create mode 100644 etc/upgrades/1699353977-boojum/stage2/facetCuts.json create mode 100644 etc/upgrades/1699353977-boojum/stage2/facets.json create mode 100644 etc/upgrades/1699353977-boojum/stage2/l2Upgrade.json create mode 100644 etc/upgrades/1699353977-boojum/stage2/transactions.json create mode 100644 infrastructure/protocol-upgrade/pre-boojum/IZkSync.d.ts create mode 100644 infrastructure/protocol-upgrade/pre-boojum/IZkSyncFactory.ts diff --git a/contracts b/contracts index 85cf7f814ac..ff745288455 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 85cf7f814ac8ba557722bdf7c01a787100e6d2f1 +Subproject commit ff74528845586bd175d74edc45dca1f1ae2ea454 diff --git a/etc/system-contracts b/etc/system-contracts index 3377d27d7dc..a00ab9a1164 160000 --- a/etc/system-contracts +++ b/etc/system-contracts @@ -1 +1 @@ -Subproject commit 3377d27d7dc26b9f0e1ec0637af34dbc4cb8c2e3 +Subproject commit a00ab9a11643f3a918ed95cdf8a04edff5499d92 diff --git a/etc/upgrades/1699353977-boojum/common.json b/etc/upgrades/1699353977-boojum/common.json new file mode 100644 index 
00000000000..e4b34ed8383 --- /dev/null +++ b/etc/upgrades/1699353977-boojum/common.json @@ -0,0 +1,5 @@ +{ + "name": "boojum", + "creationTimestamp": 1699353977, + "protocolVersion": "18" +} \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/stage2/crypto.json b/etc/upgrades/1699353977-boojum/stage2/crypto.json new file mode 100644 index 00000000000..6496eb39d5a --- /dev/null +++ b/etc/upgrades/1699353977-boojum/stage2/crypto.json @@ -0,0 +1,11 @@ +{ + "verifier": { + "address": "0xB465882F67d236DcC0D090F78ebb0d838e9719D8", + "txHash": "0x1f39124e7dded035a620893118e70a3554a798ed3ca8f410b7e0c438f4e976f5" + }, + "keys": { + "recursionNodeLevelVkHash": "0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080", + "recursionLeafLevelVkHash": "0x14628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/stage2/facetCuts.json b/etc/upgrades/1699353977-boojum/stage2/facetCuts.json new file mode 100644 index 00000000000..f2f6d4affa0 --- /dev/null +++ b/etc/upgrades/1699353977-boojum/stage2/facetCuts.json @@ -0,0 +1,177 @@ +[ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x73fb9297", + "0x36d4eb84", + "0x27ae4c16", + "0x0551448c", + "0x8043760a", + "0xbeda4b12", + "0x17338945", + "0x587809c7" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0xfe10226d", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x1b60e626", + "0xe39d3bff", + "0x33ce93fe", + "0x0ef240a0", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0xa39980a0", + "0x46657fe9", + "0x18e3a941", 
+ "0x3db920ce", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0c4dd810", + "0xce9dcf16", + "0x7739cbe7", + "0xa9a2d18a" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xe58bb639", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x409560DE546e057ce5bD5dB487EdF2bB5E785baB", + "selectors": [ + "0x0e18b681", + "0xe58bb639", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x17338945" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0xF3ACF6a03ea4a914B78Ec788624B25ceC37c14A4", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x63b5EC36B09384fFA7106A80Ec7cfdFCa521fD08", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": 
"0xD059478a564dF1353A54AC0D0e7Fc55A90b92246", + "selectors": [ + "0x701f58c5", + "0xc3d93e7c", + "0x7f61885c", + "0x97c09d34" + ], + "action": 0, + "isFreezable": true + } +] \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/stage2/facets.json b/etc/upgrades/1699353977-boojum/stage2/facets.json new file mode 100644 index 00000000000..112aaeab216 --- /dev/null +++ b/etc/upgrades/1699353977-boojum/stage2/facets.json @@ -0,0 +1,18 @@ +{ + "ExecutorFacet": { + "address": "0xD059478a564dF1353A54AC0D0e7Fc55A90b92246", + "txHash": "0x94fd7d716460787fb58aa523590157d4458ca3058c4494edde40481602b8e73a" + }, + "AdminFacet": { + "address": "0x409560DE546e057ce5bD5dB487EdF2bB5E785baB", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "GettersFacet": { + "address": "0xF3ACF6a03ea4a914B78Ec788624B25ceC37c14A4", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "MailboxFacet": { + "address": "0x63b5EC36B09384fFA7106A80Ec7cfdFCa521fD08", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/stage2/l2Upgrade.json b/etc/upgrades/1699353977-boojum/stage2/l2Upgrade.json new file mode 100644 index 00000000000..19977b5cc2a --- /dev/null +++ b/etc/upgrades/1699353977-boojum/stage2/l2Upgrade.json @@ -0,0 +1,323 @@ +{ + "systemContracts": [ + { + "name": "EmptyContract", + "bytecodeHashes": [ + "0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7" + ], + "address": "0x0000000000000000000000000000000000000000" + }, + { + "name": "Ecrecover", + "bytecodeHashes": [ + "0x010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c" + ], + "address": "0x0000000000000000000000000000000000000001" + }, + { + "name": "SHA256", + "bytecodeHashes": [ + "0x010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d" + ], + "address": "0x0000000000000000000000000000000000000002" 
+ }, + { + "name": "EcAdd", + "bytecodeHashes": [ + "0x010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d433" + ], + "address": "0x0000000000000000000000000000000000000006" + }, + { + "name": "EcMul", + "bytecodeHashes": [ + "0x0100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba571350675" + ], + "address": "0x0000000000000000000000000000000000000007" + }, + { + "name": "EmptyContract", + "bytecodeHashes": [ + "0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7" + ], + "address": "0x0000000000000000000000000000000000008001" + }, + { + "name": "AccountCodeStorage", + "bytecodeHashes": [ + "0x0100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c" + ], + "address": "0x0000000000000000000000000000000000008002" + }, + { + "name": "NonceHolder", + "bytecodeHashes": [ + "0x0100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd470" + ], + "address": "0x0000000000000000000000000000000000008003" + }, + { + "name": "KnownCodesStorage", + "bytecodeHashes": [ + "0x0100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e" + ], + "address": "0x0000000000000000000000000000000000008004" + }, + { + "name": "ImmutableSimulator", + "bytecodeHashes": [ + "0x01000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c5" + ], + "address": "0x0000000000000000000000000000000000008005" + }, + { + "name": "ContractDeployer", + "bytecodeHashes": [ + "0x010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb4212" + ], + "address": "0x0000000000000000000000000000000000008006" + }, + { + "name": "L1Messenger", + "bytecodeHashes": [ + "0x01000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa" + ], + "address": "0x0000000000000000000000000000000000008008" + }, + { + "name": "MsgValueSimulator", + "bytecodeHashes": [ + "0x0100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb" + ], + "address": "0x0000000000000000000000000000000000008009" + }, + { + "name": "L2EthToken", + "bytecodeHashes": [ + 
"0x01000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3" + ], + "address": "0x000000000000000000000000000000000000800a" + }, + { + "name": "SystemContext", + "bytecodeHashes": [ + "0x0100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436" + ], + "address": "0x000000000000000000000000000000000000800b" + }, + { + "name": "BootloaderUtilities", + "bytecodeHashes": [ + "0x010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0" + ], + "address": "0x000000000000000000000000000000000000800c" + }, + { + "name": "EventWriter", + "bytecodeHashes": [ + "0x01000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339" + ], + "address": "0x000000000000000000000000000000000000800d" + }, + { + "name": "Compressor", + "bytecodeHashes": [ + "0x010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496" + ], + "address": "0x000000000000000000000000000000000000800e" + }, + { + "name": "ComplexUpgrader", + "bytecodeHashes": [ + "0x0100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc" + ], + "address": "0x000000000000000000000000000000000000800f" + }, + { + "name": "Keccak256", + "bytecodeHashes": [ + "0x0100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a89" + ], + "address": "0x0000000000000000000000000000000000008010" + } + ], + "defaultAA": { + "name": "DefaultAccount", + "bytecodeHashes": [ + "0x01000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d" + ] + }, + "bootloader": { + "name": "Bootloader", + "bytecodeHashes": [ + "0x01000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b88" + ] + }, + "forcedDeployments": [ + { + "bytecodeHash": "0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7", + "newAddress": "0x0000000000000000000000000000000000000000", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c", + "newAddress": "0x0000000000000000000000000000000000000001", + "value": 0, + 
"input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d", + "newAddress": "0x0000000000000000000000000000000000000002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d433", + "newAddress": "0x0000000000000000000000000000000000000006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba571350675", + "newAddress": "0x0000000000000000000000000000000000000007", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7", + "newAddress": "0x0000000000000000000000000000000000008001", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c", + "newAddress": "0x0000000000000000000000000000000000008002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd470", + "newAddress": "0x0000000000000000000000000000000000008003", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e", + "newAddress": "0x0000000000000000000000000000000000008004", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c5", + "newAddress": "0x0000000000000000000000000000000000008005", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb4212", + "newAddress": "0x0000000000000000000000000000000000008006", + "value": 0, + "input": "0x", + "callConstructor": false + }, 
+ { + "bytecodeHash": "0x01000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa", + "newAddress": "0x0000000000000000000000000000000000008008", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb", + "newAddress": "0x0000000000000000000000000000000000008009", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3", + "newAddress": "0x000000000000000000000000000000000000800a", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436", + "newAddress": "0x000000000000000000000000000000000000800b", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0", + "newAddress": "0x000000000000000000000000000000000000800c", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339", + "newAddress": "0x000000000000000000000000000000000000800d", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496", + "newAddress": "0x000000000000000000000000000000000000800e", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc", + "newAddress": "0x000000000000000000000000000000000000800f", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a89", + "newAddress": "0x0000000000000000000000000000000000008010", + "value": 0, + "input": "0x", + "callConstructor": false + } + ], + "forcedDeploymentCalldata": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c000000000000000000000000000000000000000000000000000000000000800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
0000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f
0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "calldata": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c000000000000000000000000000000000000000000000000000000000000800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
0000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f
0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "tx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": "18", + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c000000000000000000000000000000000000000000000000000000000000800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
0000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f
0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + } +} \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/stage2/transactions.json b/etc/upgrades/1699353977-boojum/stage2/transactions.json new file mode 100644 index 00000000000..d8e72a2bf5f --- /dev/null +++ b/etc/upgrades/1699353977-boojum/stage2/transactions.json @@ -0,0 +1,232 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": "18", + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c000000000000000000000000000000000000000000000000000000000000800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
0000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f
0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x01000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b88", + "defaultAccountHash": "0x01000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d", + "verifier": "0xB465882F67d236DcC0D090F78ebb0d838e9719D8", + "verifierParams": { + "recursionNodeLevelVkHash": "0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080", + "recursionLeafLevelVkHash": "0x14628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x00" + }, + "factoryDeps": [], + "newProtocolVersion": "18", + "newAllowList": 
"0x7fEA2e79176e8AAED17cc9408a284f484b3843dC" + }, + "l1upgradeCalldata": "0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000007fea2e79176e8aaed17cc9408a284f484b3843dc00000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa20000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000
1460000000000000000000000000000000000000000000000000000000000000148000000000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000
000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec
703d0c56f87615964017739def4ab1ee606b8ec6458c00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa000000000000000000000000000000000000
00000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0x5Dc2BD01377b62b1C42654462cb7a2371Fd9d92A", + "protocolVersion": "18", + "diamondUpgradeProposalId": "0", + 
"upgradeTimestamp": "0", + "proposeTransparentUpgradeCalldata": "0x8043760a0000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000005dc2bd01377b62b1c42654462cb7a2371fd9d92a00000000000000000000000000000000000000000000000000000000000015400000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000007c000000000000000000000000000000000000000000000000000000000000009200000000000000000000000000000000000000000000000000000000000000a400000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000d60000000000000000000000000000000000000000000000000000000000000124000000000000000000000000000000000000000000000000000000000000013a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000873fb92970000000000000000000000000000000000000000000000000000000036d4eb840000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000000551448c000000000000000000000000000000000000000000000000000000008043760a00000000000000000000000000000000000000000000000000000000beda4b12000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000587809c7000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000023cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b700000000000000000000000000000000000000000000000000000000fe10226d0000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b7000000000000000000000000000000000000000000000000000000001b60e62600000000000000000000000000000000000000000000000000000000e39d3bff0000000000000000000000000000000000000000000000000000000033ce93fe000000000000000000000000000000000000000000000000000000000ef240a000000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc500000000000000000000000000000000000000000000000000000000a39980a00000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a941000000000000000000000000000000000000000000000000000000003db920ce0000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c54120000000000000
0000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000040c4dd81000000000000000000000000000000000000000000000000000000000ce9dcf16000000000000000000000000000000000000000000000000000000007739cbe700000000000000000000000000000000000000000000000000000000a9a2d18a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000
000000000000000000000000005e58bb63900000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d00000000000000000000000000000000000000000000000000000000000000000000000000000000409560de546e057ce5bd5db487edf2bb5e785bab000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000a0e18b68100000000000000000000000000000000000000000000000000000000e58bb63900000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000000000000000000000000000f3acf6a03ea4a914b78ec788624b25cec37c14a40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000022cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b70000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000
000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d0000000000000000000000000000000000000000000000000000000000000000000000000000000063b5ec36b09384ffa7106a80ec7cfdfca521fd080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000
0000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000000000000000000000000000d059478a564df1353a54ac0d0e7fc55a90b922460000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000017041ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000
000000000007fea2e79176e8aaed17cc9408a284f484b3843dc00000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000001460000000000000000000000000000000000000000000000000000000000000148000000000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000
000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d4330000000000000000000000000000000000000
0000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e000000000000000000000000000000000000000000000000000000000000800400000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000
00000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "transparentUpgrade": { + "facetCuts": [ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x73fb9297", + "0x36d4eb84", + "0x27ae4c16", + "0x0551448c", + "0x8043760a", + "0xbeda4b12", + "0x17338945", + "0x587809c7" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0xfe10226d", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x1b60e626", + "0xe39d3bff", + "0x33ce93fe", + "0x0ef240a0", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0xa39980a0", + "0x46657fe9", + "0x18e3a941", + "0x3db920ce", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": 
"0x0000000000000000000000000000000000000000", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0c4dd810", + "0xce9dcf16", + "0x7739cbe7", + "0xa9a2d18a" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xe58bb639", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x409560DE546e057ce5bD5dB487EdF2bB5E785baB", + "selectors": [ + "0x0e18b681", + "0xe58bb639", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x17338945" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0xF3ACF6a03ea4a914B78Ec788624B25ceC37c14A4", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x63b5EC36B09384fFA7106A80Ec7cfdFCa521fD08", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0xD059478a564dF1353A54AC0D0e7Fc55A90b92246", + "selectors": [ + "0x701f58c5", + "0xc3d93e7c", + "0x7f61885c", + "0x97c09d34" + ], + "action": 0, + "isFreezable": true + } + ], + "initAddress": 
"0x5Dc2BD01377b62b1C42654462cb7a2371Fd9d92A", + "initCalldata": "0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000007fea2e79176e8aaed17cc9408a284f484b3843dc00000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa20000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000146000000
0000000000000000000000000000000000000000000000000000000148000000000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000
000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f
87615964017739def4ab1ee606b8ec6458c00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa000000000000000000000000000000000000000000000
00000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "executeUpgradeCalldata": 
"0x36d4eb840000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000005dc2bd01377b62b1c42654462cb7a2371fd9d92a00000000000000000000000000000000000000000000000000000000000015400000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000007c000000000000000000000000000000000000000000000000000000000000009200000000000000000000000000000000000000000000000000000000000000a400000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000d60000000000000000000000000000000000000000000000000000000000000124000000000000000000000000000000000000000000000000000000000000013a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000873fb92970000000000000000000000000000000000000000000000000000000036d4eb840000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000000551448c000000000000000000000000000000000000000000000000000000008043760a00000000000000000000000000000000000000000000000000000000beda4b12000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000587809c70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000023cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b700000000000000000000000000000000000000000000000000000000fe10226d0000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b7000000000000000000000000000000000000000000000000000000001b60e62600000000000000000000000000000000000000000000000000000000e39d3bff0000000000000000000000000000000000000000000000000000000033ce93fe000000000000000000000000000000000000000000000000000000000ef240a000000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc500000000000000000000000000000000000000000000000000000000a39980a00000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a941000000000000000000000000000000000000000000000000000000003db920ce0000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d70000000000000
0000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000040c4dd81000000000000000000000000000000000000000000000000000000000ce9dcf16000000000000000000000000000000000000000000000000000000007739cbe700000000000000000000000000000000000000000000000000000000a9a2d18a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000005e58bb63900000000000000000000000000000
000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d00000000000000000000000000000000000000000000000000000000000000000000000000000000409560de546e057ce5bd5db487edf2bb5e785bab000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000a0e18b68100000000000000000000000000000000000000000000000000000000e58bb63900000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000000000000000000000000000f3acf6a03ea4a914b78ec788624b25cec37c14a40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000022cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b70000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d75000000000000000000000000000000000000000000000
00000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d0000000000000000000000000000000000000000000000000000000000000000000000000000000063b5ec36b09384ffa7106a80ec7cfdfca521fd0800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000
000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000000000000000000000000000d059478a564df1353a54ac0d0e7fc55a90b922460000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000017041ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000007fea2e79176e8aaed17cc9408a284f484b3843dc0000000000000
0000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000001460000000000000000000000000000000000000000000000000000000000000148000000000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c00000000000000000000000000000000000000000000000000000000000000880000000000000000000000
00000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e0000000000000000000000000000000000000000000000000000000000008004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000
000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01
c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/infrastructure/protocol-upgrade/README.md b/infrastructure/protocol-upgrade/README.md index 676126aca45..595bea8a84f 100644 --- a/infrastructure/protocol-upgrade/README.md +++ b/infrastructure/protocol-upgrade/README.md @@ -80,8 +80,7 @@ $ zk f yarn start facets deploy \ --zksync-address \ --environment \ --executor \ ---governance \ ---diamond-cut \ +--admin \ --getters \ --mailbox ``` @@ -174,7 +173,6 @@ $ zk f yarn start crypto deploy-verifier --gas-price \ --nonce \ --create2-address \ ---zksync-address \ --environment ``` diff --git a/infrastructure/protocol-upgrade/pre-boojum/IZkSync.d.ts b/infrastructure/protocol-upgrade/pre-boojum/IZkSync.d.ts new file mode 100644 index 00000000000..e0a567a9af3 --- /dev/null +++ b/infrastructure/protocol-upgrade/pre-boojum/IZkSync.d.ts @@ -0,0 +1,3661 @@ +/* Autogenerated file. Do not edit manually. 
*/ +/* tslint:disable */ +/* eslint-disable */ + +import { ethers, EventFilter, Signer, BigNumber, BigNumberish, PopulatedTransaction } from 'ethers'; +import { Contract, ContractTransaction, Overrides, PayableOverrides, CallOverrides } from '@ethersproject/contracts'; +import { BytesLike } from '@ethersproject/bytes'; +import { Listener, Provider } from '@ethersproject/providers'; +import { FunctionFragment, EventFragment, Result } from '@ethersproject/abi'; + +interface IZkSyncInterface extends ethers.utils.Interface { + functions: { + 'acceptGovernor()': FunctionFragment; + 'cancelUpgradeProposal(bytes32)': FunctionFragment; + 'commitBlocks(tuple,tuple[])': FunctionFragment; + 'executeBlocks(tuple[])': FunctionFragment; + 'executeUpgrade(tuple,bytes32)': FunctionFragment; + 'facetAddress(bytes4)': FunctionFragment; + 'facetAddresses()': FunctionFragment; + 'facetFunctionSelectors(address)': FunctionFragment; + 'facets()': FunctionFragment; + 'finalizeEthWithdrawal(uint256,uint256,uint16,bytes,bytes32[])': FunctionFragment; + 'freezeDiamond()': FunctionFragment; + 'getAllowList()': FunctionFragment; + 'getCurrentProposalId()': FunctionFragment; + 'getFirstUnprocessedPriorityTx()': FunctionFragment; + 'getGovernor()': FunctionFragment; + 'getL2BootloaderBytecodeHash()': FunctionFragment; + 'getL2DefaultAccountBytecodeHash()': FunctionFragment; + 'getL2SystemContractsUpgradeBlockNumber()': FunctionFragment; + 'getL2SystemContractsUpgradeTxHash()': FunctionFragment; + 'getName()': FunctionFragment; + 'getPendingGovernor()': FunctionFragment; + 'getPriorityQueueSize()': FunctionFragment; + 'getPriorityTxMaxGasLimit()': FunctionFragment; + 'getProposedUpgradeHash()': FunctionFragment; + 'getProposedUpgradeTimestamp()': FunctionFragment; + 'getProtocolVersion()': FunctionFragment; + 'getSecurityCouncil()': FunctionFragment; + 'getTotalBlocksCommitted()': FunctionFragment; + 'getTotalBlocksExecuted()': FunctionFragment; + 'getTotalBlocksVerified()': FunctionFragment; + 
'getTotalPriorityTxs()': FunctionFragment; + 'getUpgradeProposalState()': FunctionFragment; + 'getVerifier()': FunctionFragment; + 'getVerifierParams()': FunctionFragment; + 'isApprovedBySecurityCouncil()': FunctionFragment; + 'isDiamondStorageFrozen()': FunctionFragment; + 'isEthWithdrawalFinalized(uint256,uint256)': FunctionFragment; + 'isFacetFreezable(address)': FunctionFragment; + 'isFunctionFreezable(bytes4)': FunctionFragment; + 'isValidator(address)': FunctionFragment; + 'l2LogsRootHash(uint256)': FunctionFragment; + 'l2TransactionBaseCost(uint256,uint256,uint256)': FunctionFragment; + 'priorityQueueFrontOperation()': FunctionFragment; + 'proposeShadowUpgrade(bytes32,uint40)': FunctionFragment; + 'proposeTransparentUpgrade(tuple,uint40)': FunctionFragment; + 'proveBlocks(tuple,tuple[],tuple)': FunctionFragment; + 'proveL1ToL2TransactionStatus(bytes32,uint256,uint256,uint16,bytes32[],uint8)': FunctionFragment; + 'proveL2LogInclusion(uint256,uint256,tuple,bytes32[])': FunctionFragment; + 'proveL2MessageInclusion(uint256,uint256,tuple,bytes32[])': FunctionFragment; + 'requestL2Transaction(address,uint256,bytes,uint256,uint256,bytes[],address)': FunctionFragment; + 'revertBlocks(uint256)': FunctionFragment; + 'securityCouncilUpgradeApprove(bytes32)': FunctionFragment; + 'setPendingGovernor(address)': FunctionFragment; + 'setPorterAvailability(bool)': FunctionFragment; + 'setPriorityTxMaxGasLimit(uint256)': FunctionFragment; + 'setValidator(address,bool)': FunctionFragment; + 'storedBlockHash(uint256)': FunctionFragment; + 'unfreezeDiamond()': FunctionFragment; + 'upgradeProposalHash(tuple,uint256,bytes32)': FunctionFragment; + }; + + encodeFunctionData(functionFragment: 'acceptGovernor', values?: undefined): string; + encodeFunctionData(functionFragment: 'cancelUpgradeProposal', values: [BytesLike]): string; + encodeFunctionData( + functionFragment: 'commitBlocks', + values: [ + { + blockNumber: BigNumberish; + blockHash: BytesLike; + 
indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + { + blockNumber: BigNumberish; + timestamp: BigNumberish; + indexRepeatedStorageChanges: BigNumberish; + newStateRoot: BytesLike; + numberOfLayer1Txs: BigNumberish; + l2LogsTreeRoot: BytesLike; + priorityOperationsHash: BytesLike; + initialStorageChanges: BytesLike; + repeatedStorageChanges: BytesLike; + l2Logs: BytesLike; + l2ArbitraryLengthMessages: BytesLike[]; + factoryDeps: BytesLike[]; + }[] + ] + ): string; + encodeFunctionData( + functionFragment: 'executeBlocks', + values: [ + { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[] + ] + ): string; + encodeFunctionData( + functionFragment: 'executeUpgrade', + values: [ + { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + BytesLike + ] + ): string; + encodeFunctionData(functionFragment: 'facetAddress', values: [BytesLike]): string; + encodeFunctionData(functionFragment: 'facetAddresses', values?: undefined): string; + encodeFunctionData(functionFragment: 'facetFunctionSelectors', values: [string]): string; + encodeFunctionData(functionFragment: 'facets', values?: undefined): string; + encodeFunctionData( + functionFragment: 'finalizeEthWithdrawal', + values: [BigNumberish, BigNumberish, BigNumberish, BytesLike, BytesLike[]] + ): string; + encodeFunctionData(functionFragment: 'freezeDiamond', values?: undefined): string; + encodeFunctionData(functionFragment: 'getAllowList', values?: undefined): string; + encodeFunctionData(functionFragment: 'getCurrentProposalId', values?: undefined): 
string; + encodeFunctionData(functionFragment: 'getFirstUnprocessedPriorityTx', values?: undefined): string; + encodeFunctionData(functionFragment: 'getGovernor', values?: undefined): string; + encodeFunctionData(functionFragment: 'getL2BootloaderBytecodeHash', values?: undefined): string; + encodeFunctionData(functionFragment: 'getL2DefaultAccountBytecodeHash', values?: undefined): string; + encodeFunctionData(functionFragment: 'getL2SystemContractsUpgradeBlockNumber', values?: undefined): string; + encodeFunctionData(functionFragment: 'getL2SystemContractsUpgradeTxHash', values?: undefined): string; + encodeFunctionData(functionFragment: 'getName', values?: undefined): string; + encodeFunctionData(functionFragment: 'getPendingGovernor', values?: undefined): string; + encodeFunctionData(functionFragment: 'getPriorityQueueSize', values?: undefined): string; + encodeFunctionData(functionFragment: 'getPriorityTxMaxGasLimit', values?: undefined): string; + encodeFunctionData(functionFragment: 'getProposedUpgradeHash', values?: undefined): string; + encodeFunctionData(functionFragment: 'getProposedUpgradeTimestamp', values?: undefined): string; + encodeFunctionData(functionFragment: 'getProtocolVersion', values?: undefined): string; + encodeFunctionData(functionFragment: 'getSecurityCouncil', values?: undefined): string; + encodeFunctionData(functionFragment: 'getTotalBlocksCommitted', values?: undefined): string; + encodeFunctionData(functionFragment: 'getTotalBlocksExecuted', values?: undefined): string; + encodeFunctionData(functionFragment: 'getTotalBlocksVerified', values?: undefined): string; + encodeFunctionData(functionFragment: 'getTotalPriorityTxs', values?: undefined): string; + encodeFunctionData(functionFragment: 'getUpgradeProposalState', values?: undefined): string; + encodeFunctionData(functionFragment: 'getVerifier', values?: undefined): string; + encodeFunctionData(functionFragment: 'getVerifierParams', values?: undefined): string; + 
encodeFunctionData(functionFragment: 'isApprovedBySecurityCouncil', values?: undefined): string; + encodeFunctionData(functionFragment: 'isDiamondStorageFrozen', values?: undefined): string; + encodeFunctionData(functionFragment: 'isEthWithdrawalFinalized', values: [BigNumberish, BigNumberish]): string; + encodeFunctionData(functionFragment: 'isFacetFreezable', values: [string]): string; + encodeFunctionData(functionFragment: 'isFunctionFreezable', values: [BytesLike]): string; + encodeFunctionData(functionFragment: 'isValidator', values: [string]): string; + encodeFunctionData(functionFragment: 'l2LogsRootHash', values: [BigNumberish]): string; + encodeFunctionData( + functionFragment: 'l2TransactionBaseCost', + values: [BigNumberish, BigNumberish, BigNumberish] + ): string; + encodeFunctionData(functionFragment: 'priorityQueueFrontOperation', values?: undefined): string; + encodeFunctionData(functionFragment: 'proposeShadowUpgrade', values: [BytesLike, BigNumberish]): string; + encodeFunctionData( + functionFragment: 'proposeTransparentUpgrade', + values: [ + { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + BigNumberish + ] + ): string; + encodeFunctionData( + functionFragment: 'proveBlocks', + values: [ + { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + { + recursiveAggregationInput: BigNumberish[]; + serializedProof: BigNumberish[]; + } + ] + ): string; + encodeFunctionData( + 
functionFragment: 'proveL1ToL2TransactionStatus', + values: [BytesLike, BigNumberish, BigNumberish, BigNumberish, BytesLike[], BigNumberish] + ): string; + encodeFunctionData( + functionFragment: 'proveL2LogInclusion', + values: [ + BigNumberish, + BigNumberish, + { + l2ShardId: BigNumberish; + isService: boolean; + txNumberInBlock: BigNumberish; + sender: string; + key: BytesLike; + value: BytesLike; + }, + BytesLike[] + ] + ): string; + encodeFunctionData( + functionFragment: 'proveL2MessageInclusion', + values: [ + BigNumberish, + BigNumberish, + { txNumberInBlock: BigNumberish; sender: string; data: BytesLike }, + BytesLike[] + ] + ): string; + encodeFunctionData( + functionFragment: 'requestL2Transaction', + values: [string, BigNumberish, BytesLike, BigNumberish, BigNumberish, BytesLike[], string] + ): string; + encodeFunctionData(functionFragment: 'revertBlocks', values: [BigNumberish]): string; + encodeFunctionData(functionFragment: 'securityCouncilUpgradeApprove', values: [BytesLike]): string; + encodeFunctionData(functionFragment: 'setPendingGovernor', values: [string]): string; + encodeFunctionData(functionFragment: 'setPorterAvailability', values: [boolean]): string; + encodeFunctionData(functionFragment: 'setPriorityTxMaxGasLimit', values: [BigNumberish]): string; + encodeFunctionData(functionFragment: 'setValidator', values: [string, boolean]): string; + encodeFunctionData(functionFragment: 'storedBlockHash', values: [BigNumberish]): string; + encodeFunctionData(functionFragment: 'unfreezeDiamond', values?: undefined): string; + encodeFunctionData( + functionFragment: 'upgradeProposalHash', + values: [ + { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + BigNumberish, + BytesLike + ] + ): string; + + decodeFunctionResult(functionFragment: 'acceptGovernor', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 
'cancelUpgradeProposal', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'commitBlocks', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'executeBlocks', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'executeUpgrade', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'facetAddress', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'facetAddresses', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'facetFunctionSelectors', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'facets', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'finalizeEthWithdrawal', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'freezeDiamond', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getAllowList', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getCurrentProposalId', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getFirstUnprocessedPriorityTx', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getGovernor', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getL2BootloaderBytecodeHash', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getL2DefaultAccountBytecodeHash', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getL2SystemContractsUpgradeBlockNumber', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getL2SystemContractsUpgradeTxHash', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getName', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getPendingGovernor', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getPriorityQueueSize', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getPriorityTxMaxGasLimit', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 
'getProposedUpgradeHash', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getProposedUpgradeTimestamp', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getProtocolVersion', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getSecurityCouncil', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getTotalBlocksCommitted', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getTotalBlocksExecuted', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getTotalBlocksVerified', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getTotalPriorityTxs', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getUpgradeProposalState', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getVerifier', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'getVerifierParams', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'isApprovedBySecurityCouncil', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'isDiamondStorageFrozen', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'isEthWithdrawalFinalized', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'isFacetFreezable', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'isFunctionFreezable', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'isValidator', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'l2LogsRootHash', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'l2TransactionBaseCost', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'priorityQueueFrontOperation', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'proposeShadowUpgrade', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'proposeTransparentUpgrade', data: BytesLike): Result; + 
decodeFunctionResult(functionFragment: 'proveBlocks', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'proveL1ToL2TransactionStatus', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'proveL2LogInclusion', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'proveL2MessageInclusion', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'requestL2Transaction', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'revertBlocks', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'securityCouncilUpgradeApprove', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'setPendingGovernor', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'setPorterAvailability', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'setPriorityTxMaxGasLimit', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'setValidator', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'storedBlockHash', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'unfreezeDiamond', data: BytesLike): Result; + decodeFunctionResult(functionFragment: 'upgradeProposalHash', data: BytesLike): Result; + + events: { + 'BlockCommit(uint256,bytes32,bytes32)': EventFragment; + 'BlockExecution(uint256,bytes32,bytes32)': EventFragment; + 'BlocksRevert(uint256,uint256,uint256)': EventFragment; + 'BlocksVerification(uint256,uint256)': EventFragment; + 'CancelUpgradeProposal(uint256,bytes32)': EventFragment; + 'EthWithdrawalFinalized(address,uint256)': EventFragment; + 'ExecuteUpgrade(uint256,bytes32,bytes32)': EventFragment; + 'Freeze()': EventFragment; + 'IsPorterAvailableStatusUpdate(bool)': EventFragment; + 'NewGovernor(address,address)': EventFragment; + 'NewPendingGovernor(address,address)': EventFragment; + 'NewPriorityRequest(uint256,bytes32,uint64,tuple,bytes[])': EventFragment; + 'NewPriorityTxMaxGasLimit(uint256,uint256)': 
EventFragment; + 'ProposeShadowUpgrade(uint256,bytes32)': EventFragment; + 'ProposeTransparentUpgrade(tuple,uint256,bytes32)': EventFragment; + 'SecurityCouncilUpgradeApprove(uint256,bytes32)': EventFragment; + 'Unfreeze()': EventFragment; + 'ValidatorStatusUpdate(address,bool)': EventFragment; + }; + + getEvent(nameOrSignatureOrTopic: 'BlockCommit'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'BlockExecution'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'BlocksRevert'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'BlocksVerification'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'CancelUpgradeProposal'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'EthWithdrawalFinalized'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'ExecuteUpgrade'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'Freeze'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'IsPorterAvailableStatusUpdate'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'NewGovernor'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'NewPendingGovernor'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'NewPriorityRequest'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'NewPriorityTxMaxGasLimit'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'ProposeShadowUpgrade'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'ProposeTransparentUpgrade'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'SecurityCouncilUpgradeApprove'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'Unfreeze'): EventFragment; + getEvent(nameOrSignatureOrTopic: 'ValidatorStatusUpdate'): EventFragment; +} + +export class IZkSync extends Contract { + connect(signerOrProvider: Signer | Provider | string): this; + attach(addressOrName: string): this; + deployed(): Promise; + + on(event: EventFilter | string, listener: Listener): this; + once(event: EventFilter | string, listener: Listener): this; + addListener(eventName: EventFilter | string, listener: Listener): this; + 
removeAllListeners(eventName: EventFilter | string): this; + removeListener(eventName: any, listener: Listener): this; + + interface: IZkSyncInterface; + + functions: { + acceptGovernor(overrides?: Overrides): Promise; + + 'acceptGovernor()'(overrides?: Overrides): Promise; + + cancelUpgradeProposal(_proposedUpgradeHash: BytesLike, overrides?: Overrides): Promise; + + 'cancelUpgradeProposal(bytes32)'( + _proposedUpgradeHash: BytesLike, + overrides?: Overrides + ): Promise; + + commitBlocks( + _lastCommittedBlockData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _newBlocksData: { + blockNumber: BigNumberish; + timestamp: BigNumberish; + indexRepeatedStorageChanges: BigNumberish; + newStateRoot: BytesLike; + numberOfLayer1Txs: BigNumberish; + l2LogsTreeRoot: BytesLike; + priorityOperationsHash: BytesLike; + initialStorageChanges: BytesLike; + repeatedStorageChanges: BytesLike; + l2Logs: BytesLike; + l2ArbitraryLengthMessages: BytesLike[]; + factoryDeps: BytesLike[]; + }[], + overrides?: Overrides + ): Promise; + + 'commitBlocks((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),tuple[])'( + _lastCommittedBlockData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _newBlocksData: { + blockNumber: BigNumberish; + timestamp: BigNumberish; + indexRepeatedStorageChanges: BigNumberish; + newStateRoot: BytesLike; + numberOfLayer1Txs: BigNumberish; + l2LogsTreeRoot: BytesLike; + priorityOperationsHash: BytesLike; + initialStorageChanges: BytesLike; + repeatedStorageChanges: BytesLike; + l2Logs: BytesLike; + l2ArbitraryLengthMessages: 
BytesLike[]; + factoryDeps: BytesLike[]; + }[], + overrides?: Overrides + ): Promise; + + executeBlocks( + _blocksData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + overrides?: Overrides + ): Promise; + + 'executeBlocks(tuple[])'( + _blocksData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + overrides?: Overrides + ): Promise; + + executeUpgrade( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalSalt: BytesLike, + overrides?: Overrides + ): Promise; + + 'executeUpgrade((tuple[],address,bytes),bytes32)'( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalSalt: BytesLike, + overrides?: Overrides + ): Promise; + + facetAddress( + _selector: BytesLike, + overrides?: CallOverrides + ): Promise<{ + facet: string; + 0: string; + }>; + + 'facetAddress(bytes4)'( + _selector: BytesLike, + overrides?: CallOverrides + ): Promise<{ + facet: string; + 0: string; + }>; + + facetAddresses(overrides?: CallOverrides): Promise<{ + facets: string[]; + 0: string[]; + }>; + + 'facetAddresses()'(overrides?: CallOverrides): Promise<{ + facets: string[]; + 0: string[]; + }>; + + facetFunctionSelectors( + _facet: string, + overrides?: CallOverrides + ): Promise<{ + 0: string[]; + }>; + + 'facetFunctionSelectors(address)'( + _facet: string, + overrides?: CallOverrides + ): Promise<{ 
+ 0: string[]; + }>; + + facets(overrides?: CallOverrides): Promise<{ + 0: { addr: string; selectors: string[]; 0: string; 1: string[] }[]; + }>; + + 'facets()'(overrides?: CallOverrides): Promise<{ + 0: { addr: string; selectors: string[]; 0: string; 1: string[] }[]; + }>; + + finalizeEthWithdrawal( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides + ): Promise; + + 'finalizeEthWithdrawal(uint256,uint256,uint16,bytes,bytes32[])'( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides + ): Promise; + + freezeDiamond(overrides?: Overrides): Promise; + + 'freezeDiamond()'(overrides?: Overrides): Promise; + + getAllowList(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + 'getAllowList()'(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + getCurrentProposalId(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + 'getCurrentProposalId()'(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + getFirstUnprocessedPriorityTx(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + 'getFirstUnprocessedPriorityTx()'(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + getGovernor(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + 'getGovernor()'(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + getL2BootloaderBytecodeHash(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + 'getL2BootloaderBytecodeHash()'(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + getL2DefaultAccountBytecodeHash(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + 'getL2DefaultAccountBytecodeHash()'(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + getL2SystemContractsUpgradeBlockNumber(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + 
}>; + + 'getL2SystemContractsUpgradeBlockNumber()'(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + getL2SystemContractsUpgradeTxHash(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + 'getL2SystemContractsUpgradeTxHash()'(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + getName(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + 'getName()'(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + getPendingGovernor(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + 'getPendingGovernor()'(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + getPriorityQueueSize(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + 'getPriorityQueueSize()'(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + getPriorityTxMaxGasLimit(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + 'getPriorityTxMaxGasLimit()'(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + getProposedUpgradeHash(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + 'getProposedUpgradeHash()'(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + getProposedUpgradeTimestamp(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + 'getProposedUpgradeTimestamp()'(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + getProtocolVersion(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + 'getProtocolVersion()'(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + getSecurityCouncil(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + 'getSecurityCouncil()'(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + getTotalBlocksCommitted(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + 'getTotalBlocksCommitted()'(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + getTotalBlocksExecuted(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + 'getTotalBlocksExecuted()'(overrides?: CallOverrides): Promise<{ + 
0: BigNumber; + }>; + + getTotalBlocksVerified(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + 'getTotalBlocksVerified()'(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + getTotalPriorityTxs(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + 'getTotalPriorityTxs()'(overrides?: CallOverrides): Promise<{ + 0: BigNumber; + }>; + + getUpgradeProposalState(overrides?: CallOverrides): Promise<{ + 0: number; + }>; + + 'getUpgradeProposalState()'(overrides?: CallOverrides): Promise<{ + 0: number; + }>; + + getVerifier(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + 'getVerifier()'(overrides?: CallOverrides): Promise<{ + 0: string; + }>; + + getVerifierParams(overrides?: CallOverrides): Promise<{ + 0: { + recursionNodeLevelVkHash: string; + recursionLeafLevelVkHash: string; + recursionCircuitsSetVksHash: string; + 0: string; + 1: string; + 2: string; + }; + }>; + + 'getVerifierParams()'(overrides?: CallOverrides): Promise<{ + 0: { + recursionNodeLevelVkHash: string; + recursionLeafLevelVkHash: string; + recursionCircuitsSetVksHash: string; + 0: string; + 1: string; + 2: string; + }; + }>; + + isApprovedBySecurityCouncil(overrides?: CallOverrides): Promise<{ + 0: boolean; + }>; + + 'isApprovedBySecurityCouncil()'(overrides?: CallOverrides): Promise<{ + 0: boolean; + }>; + + isDiamondStorageFrozen(overrides?: CallOverrides): Promise<{ + 0: boolean; + }>; + + 'isDiamondStorageFrozen()'(overrides?: CallOverrides): Promise<{ + 0: boolean; + }>; + + isEthWithdrawalFinalized( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + 'isEthWithdrawalFinalized(uint256,uint256)'( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + isFacetFreezable( + _facet: string, + overrides?: CallOverrides + ): Promise<{ + isFreezable: boolean; + 0: boolean; + }>; + + 
'isFacetFreezable(address)'( + _facet: string, + overrides?: CallOverrides + ): Promise<{ + isFreezable: boolean; + 0: boolean; + }>; + + isFunctionFreezable( + _selector: BytesLike, + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + 'isFunctionFreezable(bytes4)'( + _selector: BytesLike, + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + isValidator( + _address: string, + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + 'isValidator(address)'( + _address: string, + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + l2LogsRootHash( + _blockNumber: BigNumberish, + overrides?: CallOverrides + ): Promise<{ + hash: string; + 0: string; + }>; + + 'l2LogsRootHash(uint256)'( + _blockNumber: BigNumberish, + overrides?: CallOverrides + ): Promise<{ + hash: string; + 0: string; + }>; + + l2TransactionBaseCost( + _gasPrice: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + overrides?: CallOverrides + ): Promise<{ + 0: BigNumber; + }>; + + 'l2TransactionBaseCost(uint256,uint256,uint256)'( + _gasPrice: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + overrides?: CallOverrides + ): Promise<{ + 0: BigNumber; + }>; + + priorityQueueFrontOperation(overrides?: CallOverrides): Promise<{ + 0: { + canonicalTxHash: string; + expirationTimestamp: BigNumber; + layer2Tip: BigNumber; + 0: string; + 1: BigNumber; + 2: BigNumber; + }; + }>; + + 'priorityQueueFrontOperation()'(overrides?: CallOverrides): Promise<{ + 0: { + canonicalTxHash: string; + expirationTimestamp: BigNumber; + layer2Tip: BigNumber; + 0: string; + 1: BigNumber; + 2: BigNumber; + }; + }>; + + proposeShadowUpgrade( + _proposalHash: BytesLike, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + 'proposeShadowUpgrade(bytes32,uint40)'( + _proposalHash: BytesLike, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + proposeTransparentUpgrade( + 
_diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + 'proposeTransparentUpgrade((tuple[],address,bytes),uint40)'( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + proveBlocks( + _prevBlock: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _committedBlocks: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + _proof: { + recursiveAggregationInput: BigNumberish[]; + serializedProof: BigNumberish[]; + }, + overrides?: Overrides + ): Promise; + + 'proveBlocks((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),tuple[],(uint256[],uint256[]))'( + _prevBlock: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _committedBlocks: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + _proof: { + recursiveAggregationInput: BigNumberish[]; + 
serializedProof: BigNumberish[]; + }, + overrides?: Overrides + ): Promise; + + proveL1ToL2TransactionStatus( + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + _status: BigNumberish, + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + 'proveL1ToL2TransactionStatus(bytes32,uint256,uint256,uint16,bytes32[],uint8)'( + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + _status: BigNumberish, + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + proveL2LogInclusion( + _blockNumber: BigNumberish, + _index: BigNumberish, + _log: { + l2ShardId: BigNumberish; + isService: boolean; + txNumberInBlock: BigNumberish; + sender: string; + key: BytesLike; + value: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + 'proveL2LogInclusion(uint256,uint256,(uint8,bool,uint16,address,bytes32,bytes32),bytes32[])'( + _blockNumber: BigNumberish, + _index: BigNumberish, + _log: { + l2ShardId: BigNumberish; + isService: boolean; + txNumberInBlock: BigNumberish; + sender: string; + key: BytesLike; + value: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + proveL2MessageInclusion( + _blockNumber: BigNumberish, + _index: BigNumberish, + _message: { + txNumberInBlock: BigNumberish; + sender: string; + data: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + 'proveL2MessageInclusion(uint256,uint256,(uint16,address,bytes),bytes32[])'( + _blockNumber: BigNumberish, + _index: BigNumberish, + _message: { + txNumberInBlock: BigNumberish; + sender: string; + data: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise<{ + 0: boolean; + }>; + + requestL2Transaction( + _contractL2: 
string, + _l2Value: BigNumberish, + _calldata: BytesLike, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + _factoryDeps: BytesLike[], + _refundRecipient: string, + overrides?: PayableOverrides + ): Promise; + + 'requestL2Transaction(address,uint256,bytes,uint256,uint256,bytes[],address)'( + _contractL2: string, + _l2Value: BigNumberish, + _calldata: BytesLike, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + _factoryDeps: BytesLike[], + _refundRecipient: string, + overrides?: PayableOverrides + ): Promise; + + revertBlocks(_newLastBlock: BigNumberish, overrides?: Overrides): Promise; + + 'revertBlocks(uint256)'(_newLastBlock: BigNumberish, overrides?: Overrides): Promise; + + securityCouncilUpgradeApprove( + _upgradeProposalHash: BytesLike, + overrides?: Overrides + ): Promise; + + 'securityCouncilUpgradeApprove(bytes32)'( + _upgradeProposalHash: BytesLike, + overrides?: Overrides + ): Promise; + + setPendingGovernor(_newPendingGovernor: string, overrides?: Overrides): Promise; + + 'setPendingGovernor(address)'(_newPendingGovernor: string, overrides?: Overrides): Promise; + + setPorterAvailability(_zkPorterIsAvailable: boolean, overrides?: Overrides): Promise; + + 'setPorterAvailability(bool)'( + _zkPorterIsAvailable: boolean, + overrides?: Overrides + ): Promise; + + setPriorityTxMaxGasLimit( + _newPriorityTxMaxGasLimit: BigNumberish, + overrides?: Overrides + ): Promise; + + 'setPriorityTxMaxGasLimit(uint256)'( + _newPriorityTxMaxGasLimit: BigNumberish, + overrides?: Overrides + ): Promise; + + setValidator(_validator: string, _active: boolean, overrides?: Overrides): Promise; + + 'setValidator(address,bool)'( + _validator: string, + _active: boolean, + overrides?: Overrides + ): Promise; + + storedBlockHash( + _blockNumber: BigNumberish, + overrides?: CallOverrides + ): Promise<{ + 0: string; + }>; + + 'storedBlockHash(uint256)'( + _blockNumber: BigNumberish, + overrides?: CallOverrides + ): Promise<{ + 0: 
string; + }>; + + unfreezeDiamond(overrides?: Overrides): Promise; + + 'unfreezeDiamond()'(overrides?: Overrides): Promise; + + upgradeProposalHash( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + _salt: BytesLike, + overrides?: CallOverrides + ): Promise<{ + 0: string; + }>; + + 'upgradeProposalHash((tuple[],address,bytes),uint256,bytes32)'( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + _salt: BytesLike, + overrides?: CallOverrides + ): Promise<{ + 0: string; + }>; + }; + + acceptGovernor(overrides?: Overrides): Promise; + + 'acceptGovernor()'(overrides?: Overrides): Promise; + + cancelUpgradeProposal(_proposedUpgradeHash: BytesLike, overrides?: Overrides): Promise; + + 'cancelUpgradeProposal(bytes32)'( + _proposedUpgradeHash: BytesLike, + overrides?: Overrides + ): Promise; + + commitBlocks( + _lastCommittedBlockData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _newBlocksData: { + blockNumber: BigNumberish; + timestamp: BigNumberish; + indexRepeatedStorageChanges: BigNumberish; + newStateRoot: BytesLike; + numberOfLayer1Txs: BigNumberish; + l2LogsTreeRoot: BytesLike; + priorityOperationsHash: BytesLike; + initialStorageChanges: BytesLike; + repeatedStorageChanges: BytesLike; + l2Logs: BytesLike; + l2ArbitraryLengthMessages: BytesLike[]; + factoryDeps: BytesLike[]; + }[], + overrides?: Overrides + ): Promise; + + 'commitBlocks((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),tuple[])'( + 
_lastCommittedBlockData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _newBlocksData: { + blockNumber: BigNumberish; + timestamp: BigNumberish; + indexRepeatedStorageChanges: BigNumberish; + newStateRoot: BytesLike; + numberOfLayer1Txs: BigNumberish; + l2LogsTreeRoot: BytesLike; + priorityOperationsHash: BytesLike; + initialStorageChanges: BytesLike; + repeatedStorageChanges: BytesLike; + l2Logs: BytesLike; + l2ArbitraryLengthMessages: BytesLike[]; + factoryDeps: BytesLike[]; + }[], + overrides?: Overrides + ): Promise; + + executeBlocks( + _blocksData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + overrides?: Overrides + ): Promise; + + 'executeBlocks(tuple[])'( + _blocksData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + overrides?: Overrides + ): Promise; + + executeUpgrade( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalSalt: BytesLike, + overrides?: Overrides + ): Promise; + + 'executeUpgrade((tuple[],address,bytes),bytes32)'( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalSalt: BytesLike, + overrides?: Overrides + ): Promise; + + 
facetAddress(_selector: BytesLike, overrides?: CallOverrides): Promise; + + 'facetAddress(bytes4)'(_selector: BytesLike, overrides?: CallOverrides): Promise; + + facetAddresses(overrides?: CallOverrides): Promise; + + 'facetAddresses()'(overrides?: CallOverrides): Promise; + + facetFunctionSelectors(_facet: string, overrides?: CallOverrides): Promise; + + 'facetFunctionSelectors(address)'(_facet: string, overrides?: CallOverrides): Promise; + + facets(overrides?: CallOverrides): Promise<{ addr: string; selectors: string[]; 0: string; 1: string[] }[]>; + + 'facets()'(overrides?: CallOverrides): Promise<{ addr: string; selectors: string[]; 0: string; 1: string[] }[]>; + + finalizeEthWithdrawal( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides + ): Promise; + + 'finalizeEthWithdrawal(uint256,uint256,uint16,bytes,bytes32[])'( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides + ): Promise; + + freezeDiamond(overrides?: Overrides): Promise; + + 'freezeDiamond()'(overrides?: Overrides): Promise; + + getAllowList(overrides?: CallOverrides): Promise; + + 'getAllowList()'(overrides?: CallOverrides): Promise; + + getCurrentProposalId(overrides?: CallOverrides): Promise; + + 'getCurrentProposalId()'(overrides?: CallOverrides): Promise; + + getFirstUnprocessedPriorityTx(overrides?: CallOverrides): Promise; + + 'getFirstUnprocessedPriorityTx()'(overrides?: CallOverrides): Promise; + + getGovernor(overrides?: CallOverrides): Promise; + + 'getGovernor()'(overrides?: CallOverrides): Promise; + + getL2BootloaderBytecodeHash(overrides?: CallOverrides): Promise; + + 'getL2BootloaderBytecodeHash()'(overrides?: CallOverrides): Promise; + + getL2DefaultAccountBytecodeHash(overrides?: CallOverrides): Promise; + + 
'getL2DefaultAccountBytecodeHash()'(overrides?: CallOverrides): Promise; + + getL2SystemContractsUpgradeBlockNumber(overrides?: CallOverrides): Promise; + + 'getL2SystemContractsUpgradeBlockNumber()'(overrides?: CallOverrides): Promise; + + getL2SystemContractsUpgradeTxHash(overrides?: CallOverrides): Promise; + + 'getL2SystemContractsUpgradeTxHash()'(overrides?: CallOverrides): Promise; + + getName(overrides?: CallOverrides): Promise; + + 'getName()'(overrides?: CallOverrides): Promise; + + getPendingGovernor(overrides?: CallOverrides): Promise; + + 'getPendingGovernor()'(overrides?: CallOverrides): Promise; + + getPriorityQueueSize(overrides?: CallOverrides): Promise; + + 'getPriorityQueueSize()'(overrides?: CallOverrides): Promise; + + getPriorityTxMaxGasLimit(overrides?: CallOverrides): Promise; + + 'getPriorityTxMaxGasLimit()'(overrides?: CallOverrides): Promise; + + getProposedUpgradeHash(overrides?: CallOverrides): Promise; + + 'getProposedUpgradeHash()'(overrides?: CallOverrides): Promise; + + getProposedUpgradeTimestamp(overrides?: CallOverrides): Promise; + + 'getProposedUpgradeTimestamp()'(overrides?: CallOverrides): Promise; + + getProtocolVersion(overrides?: CallOverrides): Promise; + + 'getProtocolVersion()'(overrides?: CallOverrides): Promise; + + getSecurityCouncil(overrides?: CallOverrides): Promise; + + 'getSecurityCouncil()'(overrides?: CallOverrides): Promise; + + getTotalBlocksCommitted(overrides?: CallOverrides): Promise; + + 'getTotalBlocksCommitted()'(overrides?: CallOverrides): Promise; + + getTotalBlocksExecuted(overrides?: CallOverrides): Promise; + + 'getTotalBlocksExecuted()'(overrides?: CallOverrides): Promise; + + getTotalBlocksVerified(overrides?: CallOverrides): Promise; + + 'getTotalBlocksVerified()'(overrides?: CallOverrides): Promise; + + getTotalPriorityTxs(overrides?: CallOverrides): Promise; + + 'getTotalPriorityTxs()'(overrides?: CallOverrides): Promise; + + getUpgradeProposalState(overrides?: CallOverrides): Promise; + + 
'getUpgradeProposalState()'(overrides?: CallOverrides): Promise; + + getVerifier(overrides?: CallOverrides): Promise; + + 'getVerifier()'(overrides?: CallOverrides): Promise; + + getVerifierParams(overrides?: CallOverrides): Promise<{ + recursionNodeLevelVkHash: string; + recursionLeafLevelVkHash: string; + recursionCircuitsSetVksHash: string; + 0: string; + 1: string; + 2: string; + }>; + + 'getVerifierParams()'(overrides?: CallOverrides): Promise<{ + recursionNodeLevelVkHash: string; + recursionLeafLevelVkHash: string; + recursionCircuitsSetVksHash: string; + 0: string; + 1: string; + 2: string; + }>; + + isApprovedBySecurityCouncil(overrides?: CallOverrides): Promise; + + 'isApprovedBySecurityCouncil()'(overrides?: CallOverrides): Promise; + + isDiamondStorageFrozen(overrides?: CallOverrides): Promise; + + 'isDiamondStorageFrozen()'(overrides?: CallOverrides): Promise; + + isEthWithdrawalFinalized( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 'isEthWithdrawalFinalized(uint256,uint256)'( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides + ): Promise; + + isFacetFreezable(_facet: string, overrides?: CallOverrides): Promise; + + 'isFacetFreezable(address)'(_facet: string, overrides?: CallOverrides): Promise; + + isFunctionFreezable(_selector: BytesLike, overrides?: CallOverrides): Promise; + + 'isFunctionFreezable(bytes4)'(_selector: BytesLike, overrides?: CallOverrides): Promise; + + isValidator(_address: string, overrides?: CallOverrides): Promise; + + 'isValidator(address)'(_address: string, overrides?: CallOverrides): Promise; + + l2LogsRootHash(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + 'l2LogsRootHash(uint256)'(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + l2TransactionBaseCost( + _gasPrice: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + overrides?: CallOverrides 
+ ): Promise; + + 'l2TransactionBaseCost(uint256,uint256,uint256)'( + _gasPrice: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + overrides?: CallOverrides + ): Promise; + + priorityQueueFrontOperation(overrides?: CallOverrides): Promise<{ + canonicalTxHash: string; + expirationTimestamp: BigNumber; + layer2Tip: BigNumber; + 0: string; + 1: BigNumber; + 2: BigNumber; + }>; + + 'priorityQueueFrontOperation()'(overrides?: CallOverrides): Promise<{ + canonicalTxHash: string; + expirationTimestamp: BigNumber; + layer2Tip: BigNumber; + 0: string; + 1: BigNumber; + 2: BigNumber; + }>; + + proposeShadowUpgrade( + _proposalHash: BytesLike, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + 'proposeShadowUpgrade(bytes32,uint40)'( + _proposalHash: BytesLike, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + proposeTransparentUpgrade( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + 'proposeTransparentUpgrade((tuple[],address,bytes),uint40)'( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + proveBlocks( + _prevBlock: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _committedBlocks: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + 
timestamp: BigNumberish; + commitment: BytesLike; + }[], + _proof: { + recursiveAggregationInput: BigNumberish[]; + serializedProof: BigNumberish[]; + }, + overrides?: Overrides + ): Promise; + + 'proveBlocks((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),tuple[],(uint256[],uint256[]))'( + _prevBlock: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _committedBlocks: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + _proof: { + recursiveAggregationInput: BigNumberish[]; + serializedProof: BigNumberish[]; + }, + overrides?: Overrides + ): Promise; + + proveL1ToL2TransactionStatus( + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + _status: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 'proveL1ToL2TransactionStatus(bytes32,uint256,uint256,uint16,bytes32[],uint8)'( + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + _status: BigNumberish, + overrides?: CallOverrides + ): Promise; + + proveL2LogInclusion( + _blockNumber: BigNumberish, + _index: BigNumberish, + _log: { + l2ShardId: BigNumberish; + isService: boolean; + txNumberInBlock: BigNumberish; + sender: string; + key: BytesLike; + value: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + 'proveL2LogInclusion(uint256,uint256,(uint8,bool,uint16,address,bytes32,bytes32),bytes32[])'( + _blockNumber: BigNumberish, + _index: 
BigNumberish, + _log: { + l2ShardId: BigNumberish; + isService: boolean; + txNumberInBlock: BigNumberish; + sender: string; + key: BytesLike; + value: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + proveL2MessageInclusion( + _blockNumber: BigNumberish, + _index: BigNumberish, + _message: { + txNumberInBlock: BigNumberish; + sender: string; + data: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + 'proveL2MessageInclusion(uint256,uint256,(uint16,address,bytes),bytes32[])'( + _blockNumber: BigNumberish, + _index: BigNumberish, + _message: { + txNumberInBlock: BigNumberish; + sender: string; + data: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + requestL2Transaction( + _contractL2: string, + _l2Value: BigNumberish, + _calldata: BytesLike, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + _factoryDeps: BytesLike[], + _refundRecipient: string, + overrides?: PayableOverrides + ): Promise; + + 'requestL2Transaction(address,uint256,bytes,uint256,uint256,bytes[],address)'( + _contractL2: string, + _l2Value: BigNumberish, + _calldata: BytesLike, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + _factoryDeps: BytesLike[], + _refundRecipient: string, + overrides?: PayableOverrides + ): Promise; + + revertBlocks(_newLastBlock: BigNumberish, overrides?: Overrides): Promise; + + 'revertBlocks(uint256)'(_newLastBlock: BigNumberish, overrides?: Overrides): Promise; + + securityCouncilUpgradeApprove(_upgradeProposalHash: BytesLike, overrides?: Overrides): Promise; + + 'securityCouncilUpgradeApprove(bytes32)'( + _upgradeProposalHash: BytesLike, + overrides?: Overrides + ): Promise; + + setPendingGovernor(_newPendingGovernor: string, overrides?: Overrides): Promise; + + 'setPendingGovernor(address)'(_newPendingGovernor: string, overrides?: Overrides): Promise; + + setPorterAvailability(_zkPorterIsAvailable: boolean, overrides?: 
Overrides): Promise; + + 'setPorterAvailability(bool)'(_zkPorterIsAvailable: boolean, overrides?: Overrides): Promise; + + setPriorityTxMaxGasLimit( + _newPriorityTxMaxGasLimit: BigNumberish, + overrides?: Overrides + ): Promise; + + 'setPriorityTxMaxGasLimit(uint256)'( + _newPriorityTxMaxGasLimit: BigNumberish, + overrides?: Overrides + ): Promise; + + setValidator(_validator: string, _active: boolean, overrides?: Overrides): Promise; + + 'setValidator(address,bool)'( + _validator: string, + _active: boolean, + overrides?: Overrides + ): Promise; + + storedBlockHash(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + 'storedBlockHash(uint256)'(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + unfreezeDiamond(overrides?: Overrides): Promise; + + 'unfreezeDiamond()'(overrides?: Overrides): Promise; + + upgradeProposalHash( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + _salt: BytesLike, + overrides?: CallOverrides + ): Promise; + + 'upgradeProposalHash((tuple[],address,bytes),uint256,bytes32)'( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + _salt: BytesLike, + overrides?: CallOverrides + ): Promise; + + callStatic: { + acceptGovernor(overrides?: CallOverrides): Promise; + + 'acceptGovernor()'(overrides?: CallOverrides): Promise; + + cancelUpgradeProposal(_proposedUpgradeHash: BytesLike, overrides?: CallOverrides): Promise; + + 'cancelUpgradeProposal(bytes32)'(_proposedUpgradeHash: BytesLike, overrides?: CallOverrides): Promise; + + commitBlocks( + _lastCommittedBlockData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: 
BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _newBlocksData: { + blockNumber: BigNumberish; + timestamp: BigNumberish; + indexRepeatedStorageChanges: BigNumberish; + newStateRoot: BytesLike; + numberOfLayer1Txs: BigNumberish; + l2LogsTreeRoot: BytesLike; + priorityOperationsHash: BytesLike; + initialStorageChanges: BytesLike; + repeatedStorageChanges: BytesLike; + l2Logs: BytesLike; + l2ArbitraryLengthMessages: BytesLike[]; + factoryDeps: BytesLike[]; + }[], + overrides?: CallOverrides + ): Promise; + + 'commitBlocks((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),tuple[])'( + _lastCommittedBlockData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _newBlocksData: { + blockNumber: BigNumberish; + timestamp: BigNumberish; + indexRepeatedStorageChanges: BigNumberish; + newStateRoot: BytesLike; + numberOfLayer1Txs: BigNumberish; + l2LogsTreeRoot: BytesLike; + priorityOperationsHash: BytesLike; + initialStorageChanges: BytesLike; + repeatedStorageChanges: BytesLike; + l2Logs: BytesLike; + l2ArbitraryLengthMessages: BytesLike[]; + factoryDeps: BytesLike[]; + }[], + overrides?: CallOverrides + ): Promise; + + executeBlocks( + _blocksData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + overrides?: CallOverrides + ): Promise; + + 'executeBlocks(tuple[])'( + _blocksData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + 
l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + overrides?: CallOverrides + ): Promise; + + executeUpgrade( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalSalt: BytesLike, + overrides?: CallOverrides + ): Promise; + + 'executeUpgrade((tuple[],address,bytes),bytes32)'( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalSalt: BytesLike, + overrides?: CallOverrides + ): Promise; + + facetAddress(_selector: BytesLike, overrides?: CallOverrides): Promise; + + 'facetAddress(bytes4)'(_selector: BytesLike, overrides?: CallOverrides): Promise; + + facetAddresses(overrides?: CallOverrides): Promise; + + 'facetAddresses()'(overrides?: CallOverrides): Promise; + + facetFunctionSelectors(_facet: string, overrides?: CallOverrides): Promise; + + 'facetFunctionSelectors(address)'(_facet: string, overrides?: CallOverrides): Promise; + + facets(overrides?: CallOverrides): Promise<{ addr: string; selectors: string[]; 0: string; 1: string[] }[]>; + + 'facets()'(overrides?: CallOverrides): Promise<{ addr: string; selectors: string[]; 0: string; 1: string[] }[]>; + + finalizeEthWithdrawal( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + 'finalizeEthWithdrawal(uint256,uint256,uint16,bytes,bytes32[])'( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + freezeDiamond(overrides?: CallOverrides): Promise; + + 'freezeDiamond()'(overrides?: CallOverrides): Promise; + + 
getAllowList(overrides?: CallOverrides): Promise; + + 'getAllowList()'(overrides?: CallOverrides): Promise; + + getCurrentProposalId(overrides?: CallOverrides): Promise; + + 'getCurrentProposalId()'(overrides?: CallOverrides): Promise; + + getFirstUnprocessedPriorityTx(overrides?: CallOverrides): Promise; + + 'getFirstUnprocessedPriorityTx()'(overrides?: CallOverrides): Promise; + + getGovernor(overrides?: CallOverrides): Promise; + + 'getGovernor()'(overrides?: CallOverrides): Promise; + + getL2BootloaderBytecodeHash(overrides?: CallOverrides): Promise; + + 'getL2BootloaderBytecodeHash()'(overrides?: CallOverrides): Promise; + + getL2DefaultAccountBytecodeHash(overrides?: CallOverrides): Promise; + + 'getL2DefaultAccountBytecodeHash()'(overrides?: CallOverrides): Promise; + + getL2SystemContractsUpgradeBlockNumber(overrides?: CallOverrides): Promise; + + 'getL2SystemContractsUpgradeBlockNumber()'(overrides?: CallOverrides): Promise; + + getL2SystemContractsUpgradeTxHash(overrides?: CallOverrides): Promise; + + 'getL2SystemContractsUpgradeTxHash()'(overrides?: CallOverrides): Promise; + + getName(overrides?: CallOverrides): Promise; + + 'getName()'(overrides?: CallOverrides): Promise; + + getPendingGovernor(overrides?: CallOverrides): Promise; + + 'getPendingGovernor()'(overrides?: CallOverrides): Promise; + + getPriorityQueueSize(overrides?: CallOverrides): Promise; + + 'getPriorityQueueSize()'(overrides?: CallOverrides): Promise; + + getPriorityTxMaxGasLimit(overrides?: CallOverrides): Promise; + + 'getPriorityTxMaxGasLimit()'(overrides?: CallOverrides): Promise; + + getProposedUpgradeHash(overrides?: CallOverrides): Promise; + + 'getProposedUpgradeHash()'(overrides?: CallOverrides): Promise; + + getProposedUpgradeTimestamp(overrides?: CallOverrides): Promise; + + 'getProposedUpgradeTimestamp()'(overrides?: CallOverrides): Promise; + + getProtocolVersion(overrides?: CallOverrides): Promise; + + 'getProtocolVersion()'(overrides?: CallOverrides): Promise; + + 
getSecurityCouncil(overrides?: CallOverrides): Promise; + + 'getSecurityCouncil()'(overrides?: CallOverrides): Promise; + + getTotalBlocksCommitted(overrides?: CallOverrides): Promise; + + 'getTotalBlocksCommitted()'(overrides?: CallOverrides): Promise; + + getTotalBlocksExecuted(overrides?: CallOverrides): Promise; + + 'getTotalBlocksExecuted()'(overrides?: CallOverrides): Promise; + + getTotalBlocksVerified(overrides?: CallOverrides): Promise; + + 'getTotalBlocksVerified()'(overrides?: CallOverrides): Promise; + + getTotalPriorityTxs(overrides?: CallOverrides): Promise; + + 'getTotalPriorityTxs()'(overrides?: CallOverrides): Promise; + + getUpgradeProposalState(overrides?: CallOverrides): Promise; + + 'getUpgradeProposalState()'(overrides?: CallOverrides): Promise; + + getVerifier(overrides?: CallOverrides): Promise; + + 'getVerifier()'(overrides?: CallOverrides): Promise; + + getVerifierParams(overrides?: CallOverrides): Promise<{ + recursionNodeLevelVkHash: string; + recursionLeafLevelVkHash: string; + recursionCircuitsSetVksHash: string; + 0: string; + 1: string; + 2: string; + }>; + + 'getVerifierParams()'(overrides?: CallOverrides): Promise<{ + recursionNodeLevelVkHash: string; + recursionLeafLevelVkHash: string; + recursionCircuitsSetVksHash: string; + 0: string; + 1: string; + 2: string; + }>; + + isApprovedBySecurityCouncil(overrides?: CallOverrides): Promise; + + 'isApprovedBySecurityCouncil()'(overrides?: CallOverrides): Promise; + + isDiamondStorageFrozen(overrides?: CallOverrides): Promise; + + 'isDiamondStorageFrozen()'(overrides?: CallOverrides): Promise; + + isEthWithdrawalFinalized( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 'isEthWithdrawalFinalized(uint256,uint256)'( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides + ): Promise; + + isFacetFreezable(_facet: string, overrides?: CallOverrides): Promise; + + 
'isFacetFreezable(address)'(_facet: string, overrides?: CallOverrides): Promise; + + isFunctionFreezable(_selector: BytesLike, overrides?: CallOverrides): Promise; + + 'isFunctionFreezable(bytes4)'(_selector: BytesLike, overrides?: CallOverrides): Promise; + + isValidator(_address: string, overrides?: CallOverrides): Promise; + + 'isValidator(address)'(_address: string, overrides?: CallOverrides): Promise; + + l2LogsRootHash(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + 'l2LogsRootHash(uint256)'(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + l2TransactionBaseCost( + _gasPrice: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 'l2TransactionBaseCost(uint256,uint256,uint256)'( + _gasPrice: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + overrides?: CallOverrides + ): Promise; + + priorityQueueFrontOperation(overrides?: CallOverrides): Promise<{ + canonicalTxHash: string; + expirationTimestamp: BigNumber; + layer2Tip: BigNumber; + 0: string; + 1: BigNumber; + 2: BigNumber; + }>; + + 'priorityQueueFrontOperation()'(overrides?: CallOverrides): Promise<{ + canonicalTxHash: string; + expirationTimestamp: BigNumber; + layer2Tip: BigNumber; + 0: string; + 1: BigNumber; + 2: BigNumber; + }>; + + proposeShadowUpgrade( + _proposalHash: BytesLike, + _proposalId: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 'proposeShadowUpgrade(bytes32,uint40)'( + _proposalHash: BytesLike, + _proposalId: BigNumberish, + overrides?: CallOverrides + ): Promise; + + proposeTransparentUpgrade( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 'proposeTransparentUpgrade((tuple[],address,bytes),uint40)'( + 
_diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + overrides?: CallOverrides + ): Promise; + + proveBlocks( + _prevBlock: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _committedBlocks: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + _proof: { + recursiveAggregationInput: BigNumberish[]; + serializedProof: BigNumberish[]; + }, + overrides?: CallOverrides + ): Promise; + + 'proveBlocks((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),tuple[],(uint256[],uint256[]))'( + _prevBlock: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _committedBlocks: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + _proof: { + recursiveAggregationInput: BigNumberish[]; + serializedProof: BigNumberish[]; + }, + overrides?: CallOverrides + ): Promise; + + proveL1ToL2TransactionStatus( + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + _status: BigNumberish, + overrides?: 
CallOverrides + ): Promise; + + 'proveL1ToL2TransactionStatus(bytes32,uint256,uint256,uint16,bytes32[],uint8)'( + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + _status: BigNumberish, + overrides?: CallOverrides + ): Promise; + + proveL2LogInclusion( + _blockNumber: BigNumberish, + _index: BigNumberish, + _log: { + l2ShardId: BigNumberish; + isService: boolean; + txNumberInBlock: BigNumberish; + sender: string; + key: BytesLike; + value: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + 'proveL2LogInclusion(uint256,uint256,(uint8,bool,uint16,address,bytes32,bytes32),bytes32[])'( + _blockNumber: BigNumberish, + _index: BigNumberish, + _log: { + l2ShardId: BigNumberish; + isService: boolean; + txNumberInBlock: BigNumberish; + sender: string; + key: BytesLike; + value: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + proveL2MessageInclusion( + _blockNumber: BigNumberish, + _index: BigNumberish, + _message: { + txNumberInBlock: BigNumberish; + sender: string; + data: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + 'proveL2MessageInclusion(uint256,uint256,(uint16,address,bytes),bytes32[])'( + _blockNumber: BigNumberish, + _index: BigNumberish, + _message: { + txNumberInBlock: BigNumberish; + sender: string; + data: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + requestL2Transaction( + _contractL2: string, + _l2Value: BigNumberish, + _calldata: BytesLike, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + _factoryDeps: BytesLike[], + _refundRecipient: string, + overrides?: CallOverrides + ): Promise; + + 'requestL2Transaction(address,uint256,bytes,uint256,uint256,bytes[],address)'( + _contractL2: string, + _l2Value: BigNumberish, + _calldata: BytesLike, + _l2GasLimit: BigNumberish, + 
_l2GasPerPubdataByteLimit: BigNumberish, + _factoryDeps: BytesLike[], + _refundRecipient: string, + overrides?: CallOverrides + ): Promise; + + revertBlocks(_newLastBlock: BigNumberish, overrides?: CallOverrides): Promise; + + 'revertBlocks(uint256)'(_newLastBlock: BigNumberish, overrides?: CallOverrides): Promise; + + securityCouncilUpgradeApprove(_upgradeProposalHash: BytesLike, overrides?: CallOverrides): Promise; + + 'securityCouncilUpgradeApprove(bytes32)'( + _upgradeProposalHash: BytesLike, + overrides?: CallOverrides + ): Promise; + + setPendingGovernor(_newPendingGovernor: string, overrides?: CallOverrides): Promise; + + 'setPendingGovernor(address)'(_newPendingGovernor: string, overrides?: CallOverrides): Promise; + + setPorterAvailability(_zkPorterIsAvailable: boolean, overrides?: CallOverrides): Promise; + + 'setPorterAvailability(bool)'(_zkPorterIsAvailable: boolean, overrides?: CallOverrides): Promise; + + setPriorityTxMaxGasLimit(_newPriorityTxMaxGasLimit: BigNumberish, overrides?: CallOverrides): Promise; + + 'setPriorityTxMaxGasLimit(uint256)'( + _newPriorityTxMaxGasLimit: BigNumberish, + overrides?: CallOverrides + ): Promise; + + setValidator(_validator: string, _active: boolean, overrides?: CallOverrides): Promise; + + 'setValidator(address,bool)'(_validator: string, _active: boolean, overrides?: CallOverrides): Promise; + + storedBlockHash(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + 'storedBlockHash(uint256)'(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + unfreezeDiamond(overrides?: CallOverrides): Promise; + + 'unfreezeDiamond()'(overrides?: CallOverrides): Promise; + + upgradeProposalHash( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + _salt: BytesLike, + overrides?: CallOverrides + ): Promise; + + 
'upgradeProposalHash((tuple[],address,bytes),uint256,bytes32)'( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + _salt: BytesLike, + overrides?: CallOverrides + ): Promise; + }; + + filters: { + BlockCommit( + blockNumber: BigNumberish | null, + blockHash: BytesLike | null, + commitment: BytesLike | null + ): EventFilter; + + BlockExecution( + blockNumber: BigNumberish | null, + blockHash: BytesLike | null, + commitment: BytesLike | null + ): EventFilter; + + BlocksRevert(totalBlocksCommitted: null, totalBlocksVerified: null, totalBlocksExecuted: null): EventFilter; + + BlocksVerification( + previousLastVerifiedBlock: BigNumberish | null, + currentLastVerifiedBlock: BigNumberish | null + ): EventFilter; + + CancelUpgradeProposal(proposalId: BigNumberish | null, proposalHash: BytesLike | null): EventFilter; + + EthWithdrawalFinalized(to: string | null, amount: null): EventFilter; + + ExecuteUpgrade( + proposalId: BigNumberish | null, + proposalHash: BytesLike | null, + proposalSalt: null + ): EventFilter; + + Freeze(): EventFilter; + + IsPorterAvailableStatusUpdate(isPorterAvailable: null): EventFilter; + + NewGovernor(oldGovernor: string | null, newGovernor: string | null): EventFilter; + + NewPendingGovernor(oldPendingGovernor: string | null, newPendingGovernor: string | null): EventFilter; + + NewPriorityRequest( + txId: null, + txHash: null, + expirationTimestamp: null, + transaction: null, + factoryDeps: null + ): EventFilter; + + NewPriorityTxMaxGasLimit(oldPriorityTxMaxGasLimit: null, newPriorityTxMaxGasLimit: null): EventFilter; + + ProposeShadowUpgrade(proposalId: BigNumberish | null, proposalHash: BytesLike | null): EventFilter; + + ProposeTransparentUpgrade(diamondCut: null, proposalId: BigNumberish | null, proposalSalt: null): EventFilter; + + SecurityCouncilUpgradeApprove(proposalId: 
BigNumberish | null, proposalHash: BytesLike | null): EventFilter; + + Unfreeze(): EventFilter; + + ValidatorStatusUpdate(validatorAddress: string | null, isActive: null): EventFilter; + }; + + estimateGas: { + acceptGovernor(overrides?: Overrides): Promise; + + 'acceptGovernor()'(overrides?: Overrides): Promise; + + cancelUpgradeProposal(_proposedUpgradeHash: BytesLike, overrides?: Overrides): Promise; + + 'cancelUpgradeProposal(bytes32)'(_proposedUpgradeHash: BytesLike, overrides?: Overrides): Promise; + + commitBlocks( + _lastCommittedBlockData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _newBlocksData: { + blockNumber: BigNumberish; + timestamp: BigNumberish; + indexRepeatedStorageChanges: BigNumberish; + newStateRoot: BytesLike; + numberOfLayer1Txs: BigNumberish; + l2LogsTreeRoot: BytesLike; + priorityOperationsHash: BytesLike; + initialStorageChanges: BytesLike; + repeatedStorageChanges: BytesLike; + l2Logs: BytesLike; + l2ArbitraryLengthMessages: BytesLike[]; + factoryDeps: BytesLike[]; + }[], + overrides?: Overrides + ): Promise; + + 'commitBlocks((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),tuple[])'( + _lastCommittedBlockData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _newBlocksData: { + blockNumber: BigNumberish; + timestamp: BigNumberish; + indexRepeatedStorageChanges: BigNumberish; + newStateRoot: BytesLike; + numberOfLayer1Txs: BigNumberish; + l2LogsTreeRoot: BytesLike; + priorityOperationsHash: BytesLike; + initialStorageChanges: BytesLike; + repeatedStorageChanges: BytesLike; + l2Logs: BytesLike; + 
l2ArbitraryLengthMessages: BytesLike[]; + factoryDeps: BytesLike[]; + }[], + overrides?: Overrides + ): Promise; + + executeBlocks( + _blocksData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + overrides?: Overrides + ): Promise; + + 'executeBlocks(tuple[])'( + _blocksData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + overrides?: Overrides + ): Promise; + + executeUpgrade( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalSalt: BytesLike, + overrides?: Overrides + ): Promise; + + 'executeUpgrade((tuple[],address,bytes),bytes32)'( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalSalt: BytesLike, + overrides?: Overrides + ): Promise; + + facetAddress(_selector: BytesLike, overrides?: CallOverrides): Promise; + + 'facetAddress(bytes4)'(_selector: BytesLike, overrides?: CallOverrides): Promise; + + facetAddresses(overrides?: CallOverrides): Promise; + + 'facetAddresses()'(overrides?: CallOverrides): Promise; + + facetFunctionSelectors(_facet: string, overrides?: CallOverrides): Promise; + + 'facetFunctionSelectors(address)'(_facet: string, overrides?: CallOverrides): Promise; + + facets(overrides?: CallOverrides): Promise; + + 'facets()'(overrides?: CallOverrides): Promise; + + finalizeEthWithdrawal( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: 
BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides + ): Promise; + + 'finalizeEthWithdrawal(uint256,uint256,uint16,bytes,bytes32[])'( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides + ): Promise; + + freezeDiamond(overrides?: Overrides): Promise; + + 'freezeDiamond()'(overrides?: Overrides): Promise; + + getAllowList(overrides?: CallOverrides): Promise; + + 'getAllowList()'(overrides?: CallOverrides): Promise; + + getCurrentProposalId(overrides?: CallOverrides): Promise; + + 'getCurrentProposalId()'(overrides?: CallOverrides): Promise; + + getFirstUnprocessedPriorityTx(overrides?: CallOverrides): Promise; + + 'getFirstUnprocessedPriorityTx()'(overrides?: CallOverrides): Promise; + + getGovernor(overrides?: CallOverrides): Promise; + + 'getGovernor()'(overrides?: CallOverrides): Promise; + + getL2BootloaderBytecodeHash(overrides?: CallOverrides): Promise; + + 'getL2BootloaderBytecodeHash()'(overrides?: CallOverrides): Promise; + + getL2DefaultAccountBytecodeHash(overrides?: CallOverrides): Promise; + + 'getL2DefaultAccountBytecodeHash()'(overrides?: CallOverrides): Promise; + + getL2SystemContractsUpgradeBlockNumber(overrides?: CallOverrides): Promise; + + 'getL2SystemContractsUpgradeBlockNumber()'(overrides?: CallOverrides): Promise; + + getL2SystemContractsUpgradeTxHash(overrides?: CallOverrides): Promise; + + 'getL2SystemContractsUpgradeTxHash()'(overrides?: CallOverrides): Promise; + + getName(overrides?: CallOverrides): Promise; + + 'getName()'(overrides?: CallOverrides): Promise; + + getPendingGovernor(overrides?: CallOverrides): Promise; + + 'getPendingGovernor()'(overrides?: CallOverrides): Promise; + + getPriorityQueueSize(overrides?: CallOverrides): Promise; + + 'getPriorityQueueSize()'(overrides?: CallOverrides): Promise; + + 
getPriorityTxMaxGasLimit(overrides?: CallOverrides): Promise; + + 'getPriorityTxMaxGasLimit()'(overrides?: CallOverrides): Promise; + + getProposedUpgradeHash(overrides?: CallOverrides): Promise; + + 'getProposedUpgradeHash()'(overrides?: CallOverrides): Promise; + + getProposedUpgradeTimestamp(overrides?: CallOverrides): Promise; + + 'getProposedUpgradeTimestamp()'(overrides?: CallOverrides): Promise; + + getProtocolVersion(overrides?: CallOverrides): Promise; + + 'getProtocolVersion()'(overrides?: CallOverrides): Promise; + + getSecurityCouncil(overrides?: CallOverrides): Promise; + + 'getSecurityCouncil()'(overrides?: CallOverrides): Promise; + + getTotalBlocksCommitted(overrides?: CallOverrides): Promise; + + 'getTotalBlocksCommitted()'(overrides?: CallOverrides): Promise; + + getTotalBlocksExecuted(overrides?: CallOverrides): Promise; + + 'getTotalBlocksExecuted()'(overrides?: CallOverrides): Promise; + + getTotalBlocksVerified(overrides?: CallOverrides): Promise; + + 'getTotalBlocksVerified()'(overrides?: CallOverrides): Promise; + + getTotalPriorityTxs(overrides?: CallOverrides): Promise; + + 'getTotalPriorityTxs()'(overrides?: CallOverrides): Promise; + + getUpgradeProposalState(overrides?: CallOverrides): Promise; + + 'getUpgradeProposalState()'(overrides?: CallOverrides): Promise; + + getVerifier(overrides?: CallOverrides): Promise; + + 'getVerifier()'(overrides?: CallOverrides): Promise; + + getVerifierParams(overrides?: CallOverrides): Promise; + + 'getVerifierParams()'(overrides?: CallOverrides): Promise; + + isApprovedBySecurityCouncil(overrides?: CallOverrides): Promise; + + 'isApprovedBySecurityCouncil()'(overrides?: CallOverrides): Promise; + + isDiamondStorageFrozen(overrides?: CallOverrides): Promise; + + 'isDiamondStorageFrozen()'(overrides?: CallOverrides): Promise; + + isEthWithdrawalFinalized( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 
'isEthWithdrawalFinalized(uint256,uint256)'( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides + ): Promise; + + isFacetFreezable(_facet: string, overrides?: CallOverrides): Promise; + + 'isFacetFreezable(address)'(_facet: string, overrides?: CallOverrides): Promise; + + isFunctionFreezable(_selector: BytesLike, overrides?: CallOverrides): Promise; + + 'isFunctionFreezable(bytes4)'(_selector: BytesLike, overrides?: CallOverrides): Promise; + + isValidator(_address: string, overrides?: CallOverrides): Promise; + + 'isValidator(address)'(_address: string, overrides?: CallOverrides): Promise; + + l2LogsRootHash(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + 'l2LogsRootHash(uint256)'(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + l2TransactionBaseCost( + _gasPrice: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 'l2TransactionBaseCost(uint256,uint256,uint256)'( + _gasPrice: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + overrides?: CallOverrides + ): Promise; + + priorityQueueFrontOperation(overrides?: CallOverrides): Promise; + + 'priorityQueueFrontOperation()'(overrides?: CallOverrides): Promise; + + proposeShadowUpgrade( + _proposalHash: BytesLike, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + 'proposeShadowUpgrade(bytes32,uint40)'( + _proposalHash: BytesLike, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + proposeTransparentUpgrade( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + 'proposeTransparentUpgrade((tuple[],address,bytes),uint40)'( + _diamondCut: { + facetCuts: { + facet: string; + action: 
BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + proveBlocks( + _prevBlock: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _committedBlocks: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + _proof: { + recursiveAggregationInput: BigNumberish[]; + serializedProof: BigNumberish[]; + }, + overrides?: Overrides + ): Promise; + + 'proveBlocks((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),tuple[],(uint256[],uint256[]))'( + _prevBlock: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _committedBlocks: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + _proof: { + recursiveAggregationInput: BigNumberish[]; + serializedProof: BigNumberish[]; + }, + overrides?: Overrides + ): Promise; + + proveL1ToL2TransactionStatus( + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + _status: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 
'proveL1ToL2TransactionStatus(bytes32,uint256,uint256,uint16,bytes32[],uint8)'( + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + _status: BigNumberish, + overrides?: CallOverrides + ): Promise; + + proveL2LogInclusion( + _blockNumber: BigNumberish, + _index: BigNumberish, + _log: { + l2ShardId: BigNumberish; + isService: boolean; + txNumberInBlock: BigNumberish; + sender: string; + key: BytesLike; + value: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + 'proveL2LogInclusion(uint256,uint256,(uint8,bool,uint16,address,bytes32,bytes32),bytes32[])'( + _blockNumber: BigNumberish, + _index: BigNumberish, + _log: { + l2ShardId: BigNumberish; + isService: boolean; + txNumberInBlock: BigNumberish; + sender: string; + key: BytesLike; + value: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + proveL2MessageInclusion( + _blockNumber: BigNumberish, + _index: BigNumberish, + _message: { + txNumberInBlock: BigNumberish; + sender: string; + data: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + 'proveL2MessageInclusion(uint256,uint256,(uint16,address,bytes),bytes32[])'( + _blockNumber: BigNumberish, + _index: BigNumberish, + _message: { + txNumberInBlock: BigNumberish; + sender: string; + data: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + requestL2Transaction( + _contractL2: string, + _l2Value: BigNumberish, + _calldata: BytesLike, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + _factoryDeps: BytesLike[], + _refundRecipient: string, + overrides?: PayableOverrides + ): Promise; + + 'requestL2Transaction(address,uint256,bytes,uint256,uint256,bytes[],address)'( + _contractL2: string, + _l2Value: BigNumberish, + _calldata: BytesLike, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + 
_factoryDeps: BytesLike[], + _refundRecipient: string, + overrides?: PayableOverrides + ): Promise; + + revertBlocks(_newLastBlock: BigNumberish, overrides?: Overrides): Promise; + + 'revertBlocks(uint256)'(_newLastBlock: BigNumberish, overrides?: Overrides): Promise; + + securityCouncilUpgradeApprove(_upgradeProposalHash: BytesLike, overrides?: Overrides): Promise; + + 'securityCouncilUpgradeApprove(bytes32)'( + _upgradeProposalHash: BytesLike, + overrides?: Overrides + ): Promise; + + setPendingGovernor(_newPendingGovernor: string, overrides?: Overrides): Promise; + + 'setPendingGovernor(address)'(_newPendingGovernor: string, overrides?: Overrides): Promise; + + setPorterAvailability(_zkPorterIsAvailable: boolean, overrides?: Overrides): Promise; + + 'setPorterAvailability(bool)'(_zkPorterIsAvailable: boolean, overrides?: Overrides): Promise; + + setPriorityTxMaxGasLimit(_newPriorityTxMaxGasLimit: BigNumberish, overrides?: Overrides): Promise; + + 'setPriorityTxMaxGasLimit(uint256)'( + _newPriorityTxMaxGasLimit: BigNumberish, + overrides?: Overrides + ): Promise; + + setValidator(_validator: string, _active: boolean, overrides?: Overrides): Promise; + + 'setValidator(address,bool)'(_validator: string, _active: boolean, overrides?: Overrides): Promise; + + storedBlockHash(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + 'storedBlockHash(uint256)'(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + unfreezeDiamond(overrides?: Overrides): Promise; + + 'unfreezeDiamond()'(overrides?: Overrides): Promise; + + upgradeProposalHash( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + _salt: BytesLike, + overrides?: CallOverrides + ): Promise; + + 'upgradeProposalHash((tuple[],address,bytes),uint256,bytes32)'( + _diamondCut: { + facetCuts: { + facet: string; + action: 
BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + _salt: BytesLike, + overrides?: CallOverrides + ): Promise; + }; + + populateTransaction: { + acceptGovernor(overrides?: Overrides): Promise; + + 'acceptGovernor()'(overrides?: Overrides): Promise; + + cancelUpgradeProposal(_proposedUpgradeHash: BytesLike, overrides?: Overrides): Promise; + + 'cancelUpgradeProposal(bytes32)'( + _proposedUpgradeHash: BytesLike, + overrides?: Overrides + ): Promise; + + commitBlocks( + _lastCommittedBlockData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _newBlocksData: { + blockNumber: BigNumberish; + timestamp: BigNumberish; + indexRepeatedStorageChanges: BigNumberish; + newStateRoot: BytesLike; + numberOfLayer1Txs: BigNumberish; + l2LogsTreeRoot: BytesLike; + priorityOperationsHash: BytesLike; + initialStorageChanges: BytesLike; + repeatedStorageChanges: BytesLike; + l2Logs: BytesLike; + l2ArbitraryLengthMessages: BytesLike[]; + factoryDeps: BytesLike[]; + }[], + overrides?: Overrides + ): Promise; + + 'commitBlocks((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),tuple[])'( + _lastCommittedBlockData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _newBlocksData: { + blockNumber: BigNumberish; + timestamp: BigNumberish; + indexRepeatedStorageChanges: BigNumberish; + newStateRoot: BytesLike; + numberOfLayer1Txs: BigNumberish; + l2LogsTreeRoot: BytesLike; + priorityOperationsHash: BytesLike; + initialStorageChanges: BytesLike; + 
repeatedStorageChanges: BytesLike; + l2Logs: BytesLike; + l2ArbitraryLengthMessages: BytesLike[]; + factoryDeps: BytesLike[]; + }[], + overrides?: Overrides + ): Promise; + + executeBlocks( + _blocksData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + overrides?: Overrides + ): Promise; + + 'executeBlocks(tuple[])'( + _blocksData: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + overrides?: Overrides + ): Promise; + + executeUpgrade( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalSalt: BytesLike, + overrides?: Overrides + ): Promise; + + 'executeUpgrade((tuple[],address,bytes),bytes32)'( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalSalt: BytesLike, + overrides?: Overrides + ): Promise; + + facetAddress(_selector: BytesLike, overrides?: CallOverrides): Promise; + + 'facetAddress(bytes4)'(_selector: BytesLike, overrides?: CallOverrides): Promise; + + facetAddresses(overrides?: CallOverrides): Promise; + + 'facetAddresses()'(overrides?: CallOverrides): Promise; + + facetFunctionSelectors(_facet: string, overrides?: CallOverrides): Promise; + + 'facetFunctionSelectors(address)'(_facet: string, overrides?: CallOverrides): Promise; + + facets(overrides?: CallOverrides): Promise; + + 'facets()'(overrides?: CallOverrides): Promise; + + finalizeEthWithdrawal( 
+ _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides + ): Promise; + + 'finalizeEthWithdrawal(uint256,uint256,uint16,bytes,bytes32[])'( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _message: BytesLike, + _merkleProof: BytesLike[], + overrides?: Overrides + ): Promise; + + freezeDiamond(overrides?: Overrides): Promise; + + 'freezeDiamond()'(overrides?: Overrides): Promise; + + getAllowList(overrides?: CallOverrides): Promise; + + 'getAllowList()'(overrides?: CallOverrides): Promise; + + getCurrentProposalId(overrides?: CallOverrides): Promise; + + 'getCurrentProposalId()'(overrides?: CallOverrides): Promise; + + getFirstUnprocessedPriorityTx(overrides?: CallOverrides): Promise; + + 'getFirstUnprocessedPriorityTx()'(overrides?: CallOverrides): Promise; + + getGovernor(overrides?: CallOverrides): Promise; + + 'getGovernor()'(overrides?: CallOverrides): Promise; + + getL2BootloaderBytecodeHash(overrides?: CallOverrides): Promise; + + 'getL2BootloaderBytecodeHash()'(overrides?: CallOverrides): Promise; + + getL2DefaultAccountBytecodeHash(overrides?: CallOverrides): Promise; + + 'getL2DefaultAccountBytecodeHash()'(overrides?: CallOverrides): Promise; + + getL2SystemContractsUpgradeBlockNumber(overrides?: CallOverrides): Promise; + + 'getL2SystemContractsUpgradeBlockNumber()'(overrides?: CallOverrides): Promise; + + getL2SystemContractsUpgradeTxHash(overrides?: CallOverrides): Promise; + + 'getL2SystemContractsUpgradeTxHash()'(overrides?: CallOverrides): Promise; + + getName(overrides?: CallOverrides): Promise; + + 'getName()'(overrides?: CallOverrides): Promise; + + getPendingGovernor(overrides?: CallOverrides): Promise; + + 'getPendingGovernor()'(overrides?: CallOverrides): Promise; + + getPriorityQueueSize(overrides?: CallOverrides): Promise; + + 'getPriorityQueueSize()'(overrides?: 
CallOverrides): Promise; + + getPriorityTxMaxGasLimit(overrides?: CallOverrides): Promise; + + 'getPriorityTxMaxGasLimit()'(overrides?: CallOverrides): Promise; + + getProposedUpgradeHash(overrides?: CallOverrides): Promise; + + 'getProposedUpgradeHash()'(overrides?: CallOverrides): Promise; + + getProposedUpgradeTimestamp(overrides?: CallOverrides): Promise; + + 'getProposedUpgradeTimestamp()'(overrides?: CallOverrides): Promise; + + getProtocolVersion(overrides?: CallOverrides): Promise; + + 'getProtocolVersion()'(overrides?: CallOverrides): Promise; + + getSecurityCouncil(overrides?: CallOverrides): Promise; + + 'getSecurityCouncil()'(overrides?: CallOverrides): Promise; + + getTotalBlocksCommitted(overrides?: CallOverrides): Promise; + + 'getTotalBlocksCommitted()'(overrides?: CallOverrides): Promise; + + getTotalBlocksExecuted(overrides?: CallOverrides): Promise; + + 'getTotalBlocksExecuted()'(overrides?: CallOverrides): Promise; + + getTotalBlocksVerified(overrides?: CallOverrides): Promise; + + 'getTotalBlocksVerified()'(overrides?: CallOverrides): Promise; + + getTotalPriorityTxs(overrides?: CallOverrides): Promise; + + 'getTotalPriorityTxs()'(overrides?: CallOverrides): Promise; + + getUpgradeProposalState(overrides?: CallOverrides): Promise; + + 'getUpgradeProposalState()'(overrides?: CallOverrides): Promise; + + getVerifier(overrides?: CallOverrides): Promise; + + 'getVerifier()'(overrides?: CallOverrides): Promise; + + getVerifierParams(overrides?: CallOverrides): Promise; + + 'getVerifierParams()'(overrides?: CallOverrides): Promise; + + isApprovedBySecurityCouncil(overrides?: CallOverrides): Promise; + + 'isApprovedBySecurityCouncil()'(overrides?: CallOverrides): Promise; + + isDiamondStorageFrozen(overrides?: CallOverrides): Promise; + + 'isDiamondStorageFrozen()'(overrides?: CallOverrides): Promise; + + isEthWithdrawalFinalized( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 
'isEthWithdrawalFinalized(uint256,uint256)'( + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + overrides?: CallOverrides + ): Promise; + + isFacetFreezable(_facet: string, overrides?: CallOverrides): Promise; + + 'isFacetFreezable(address)'(_facet: string, overrides?: CallOverrides): Promise; + + isFunctionFreezable(_selector: BytesLike, overrides?: CallOverrides): Promise; + + 'isFunctionFreezable(bytes4)'(_selector: BytesLike, overrides?: CallOverrides): Promise; + + isValidator(_address: string, overrides?: CallOverrides): Promise; + + 'isValidator(address)'(_address: string, overrides?: CallOverrides): Promise; + + l2LogsRootHash(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + 'l2LogsRootHash(uint256)'(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + l2TransactionBaseCost( + _gasPrice: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 'l2TransactionBaseCost(uint256,uint256,uint256)'( + _gasPrice: BigNumberish, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + overrides?: CallOverrides + ): Promise; + + priorityQueueFrontOperation(overrides?: CallOverrides): Promise; + + 'priorityQueueFrontOperation()'(overrides?: CallOverrides): Promise; + + proposeShadowUpgrade( + _proposalHash: BytesLike, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + 'proposeShadowUpgrade(bytes32,uint40)'( + _proposalHash: BytesLike, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + proposeTransparentUpgrade( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + 'proposeTransparentUpgrade((tuple[],address,bytes),uint40)'( + _diamondCut: { + facetCuts: { + facet: string; + action: 
BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + overrides?: Overrides + ): Promise; + + proveBlocks( + _prevBlock: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _committedBlocks: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + _proof: { + recursiveAggregationInput: BigNumberish[]; + serializedProof: BigNumberish[]; + }, + overrides?: Overrides + ): Promise; + + 'proveBlocks((uint64,bytes32,uint64,uint256,bytes32,bytes32,uint256,bytes32),tuple[],(uint256[],uint256[]))'( + _prevBlock: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }, + _committedBlocks: { + blockNumber: BigNumberish; + blockHash: BytesLike; + indexRepeatedStorageChanges: BigNumberish; + numberOfLayer1Txs: BigNumberish; + priorityOperationsHash: BytesLike; + l2LogsTreeRoot: BytesLike; + timestamp: BigNumberish; + commitment: BytesLike; + }[], + _proof: { + recursiveAggregationInput: BigNumberish[]; + serializedProof: BigNumberish[]; + }, + overrides?: Overrides + ): Promise; + + proveL1ToL2TransactionStatus( + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + _status: BigNumberish, + overrides?: CallOverrides + ): Promise; + + 
'proveL1ToL2TransactionStatus(bytes32,uint256,uint256,uint16,bytes32[],uint8)'( + _l2TxHash: BytesLike, + _l2BlockNumber: BigNumberish, + _l2MessageIndex: BigNumberish, + _l2TxNumberInBlock: BigNumberish, + _merkleProof: BytesLike[], + _status: BigNumberish, + overrides?: CallOverrides + ): Promise; + + proveL2LogInclusion( + _blockNumber: BigNumberish, + _index: BigNumberish, + _log: { + l2ShardId: BigNumberish; + isService: boolean; + txNumberInBlock: BigNumberish; + sender: string; + key: BytesLike; + value: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + 'proveL2LogInclusion(uint256,uint256,(uint8,bool,uint16,address,bytes32,bytes32),bytes32[])'( + _blockNumber: BigNumberish, + _index: BigNumberish, + _log: { + l2ShardId: BigNumberish; + isService: boolean; + txNumberInBlock: BigNumberish; + sender: string; + key: BytesLike; + value: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + proveL2MessageInclusion( + _blockNumber: BigNumberish, + _index: BigNumberish, + _message: { + txNumberInBlock: BigNumberish; + sender: string; + data: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + 'proveL2MessageInclusion(uint256,uint256,(uint16,address,bytes),bytes32[])'( + _blockNumber: BigNumberish, + _index: BigNumberish, + _message: { + txNumberInBlock: BigNumberish; + sender: string; + data: BytesLike; + }, + _proof: BytesLike[], + overrides?: CallOverrides + ): Promise; + + requestL2Transaction( + _contractL2: string, + _l2Value: BigNumberish, + _calldata: BytesLike, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + _factoryDeps: BytesLike[], + _refundRecipient: string, + overrides?: PayableOverrides + ): Promise; + + 'requestL2Transaction(address,uint256,bytes,uint256,uint256,bytes[],address)'( + _contractL2: string, + _l2Value: BigNumberish, + _calldata: BytesLike, + _l2GasLimit: BigNumberish, + _l2GasPerPubdataByteLimit: BigNumberish, + 
_factoryDeps: BytesLike[], + _refundRecipient: string, + overrides?: PayableOverrides + ): Promise; + + revertBlocks(_newLastBlock: BigNumberish, overrides?: Overrides): Promise; + + 'revertBlocks(uint256)'(_newLastBlock: BigNumberish, overrides?: Overrides): Promise; + + securityCouncilUpgradeApprove( + _upgradeProposalHash: BytesLike, + overrides?: Overrides + ): Promise; + + 'securityCouncilUpgradeApprove(bytes32)'( + _upgradeProposalHash: BytesLike, + overrides?: Overrides + ): Promise; + + setPendingGovernor(_newPendingGovernor: string, overrides?: Overrides): Promise; + + 'setPendingGovernor(address)'( + _newPendingGovernor: string, + overrides?: Overrides + ): Promise; + + setPorterAvailability(_zkPorterIsAvailable: boolean, overrides?: Overrides): Promise; + + 'setPorterAvailability(bool)'( + _zkPorterIsAvailable: boolean, + overrides?: Overrides + ): Promise; + + setPriorityTxMaxGasLimit( + _newPriorityTxMaxGasLimit: BigNumberish, + overrides?: Overrides + ): Promise; + + 'setPriorityTxMaxGasLimit(uint256)'( + _newPriorityTxMaxGasLimit: BigNumberish, + overrides?: Overrides + ): Promise; + + setValidator(_validator: string, _active: boolean, overrides?: Overrides): Promise; + + 'setValidator(address,bool)'( + _validator: string, + _active: boolean, + overrides?: Overrides + ): Promise; + + storedBlockHash(_blockNumber: BigNumberish, overrides?: CallOverrides): Promise; + + 'storedBlockHash(uint256)'( + _blockNumber: BigNumberish, + overrides?: CallOverrides + ): Promise; + + unfreezeDiamond(overrides?: Overrides): Promise; + + 'unfreezeDiamond()'(overrides?: Overrides): Promise; + + upgradeProposalHash( + _diamondCut: { + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + _salt: BytesLike, + overrides?: CallOverrides + ): Promise; + + 'upgradeProposalHash((tuple[],address,bytes),uint256,bytes32)'( + _diamondCut: 
{ + facetCuts: { + facet: string; + action: BigNumberish; + isFreezable: boolean; + selectors: BytesLike[]; + }[]; + initAddress: string; + initCalldata: BytesLike; + }, + _proposalId: BigNumberish, + _salt: BytesLike, + overrides?: CallOverrides + ): Promise; + }; +} diff --git a/infrastructure/protocol-upgrade/pre-boojum/IZkSyncFactory.ts b/infrastructure/protocol-upgrade/pre-boojum/IZkSyncFactory.ts new file mode 100644 index 00000000000..7aa807ca610 --- /dev/null +++ b/infrastructure/protocol-upgrade/pre-boojum/IZkSyncFactory.ts @@ -0,0 +1,1948 @@ +/* Autogenerated file. Do not edit manually. */ +/* tslint:disable */ +/* eslint-disable */ + +import { Contract, Signer } from 'ethers'; +import { Provider } from '@ethersproject/providers'; + +import type { IZkSync } from './IZkSync'; + +export class IZkSyncFactory { + static connect(address: string, signerOrProvider: Signer | Provider): IZkSync { + return new Contract(address, _abi, signerOrProvider) as IZkSync; + } +} + +const _abi = [ + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'uint256', + name: 'blockNumber', + type: 'uint256' + }, + { + indexed: true, + internalType: 'bytes32', + name: 'blockHash', + type: 'bytes32' + }, + { + indexed: true, + internalType: 'bytes32', + name: 'commitment', + type: 'bytes32' + } + ], + name: 'BlockCommit', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'uint256', + name: 'blockNumber', + type: 'uint256' + }, + { + indexed: true, + internalType: 'bytes32', + name: 'blockHash', + type: 'bytes32' + }, + { + indexed: true, + internalType: 'bytes32', + name: 'commitment', + type: 'bytes32' + } + ], + name: 'BlockExecution', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: false, + internalType: 'uint256', + name: 'totalBlocksCommitted', + type: 'uint256' + }, + { + indexed: false, + internalType: 'uint256', + name: 'totalBlocksVerified', + type: 'uint256' + }, + { + indexed: false, + 
internalType: 'uint256', + name: 'totalBlocksExecuted', + type: 'uint256' + } + ], + name: 'BlocksRevert', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'uint256', + name: 'previousLastVerifiedBlock', + type: 'uint256' + }, + { + indexed: true, + internalType: 'uint256', + name: 'currentLastVerifiedBlock', + type: 'uint256' + } + ], + name: 'BlocksVerification', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'uint256', + name: 'proposalId', + type: 'uint256' + }, + { + indexed: true, + internalType: 'bytes32', + name: 'proposalHash', + type: 'bytes32' + } + ], + name: 'CancelUpgradeProposal', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'address', + name: 'to', + type: 'address' + }, + { + indexed: false, + internalType: 'uint256', + name: 'amount', + type: 'uint256' + } + ], + name: 'EthWithdrawalFinalized', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'uint256', + name: 'proposalId', + type: 'uint256' + }, + { + indexed: true, + internalType: 'bytes32', + name: 'proposalHash', + type: 'bytes32' + }, + { + indexed: false, + internalType: 'bytes32', + name: 'proposalSalt', + type: 'bytes32' + } + ], + name: 'ExecuteUpgrade', + type: 'event' + }, + { + anonymous: false, + inputs: [], + name: 'Freeze', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: false, + internalType: 'bool', + name: 'isPorterAvailable', + type: 'bool' + } + ], + name: 'IsPorterAvailableStatusUpdate', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'address', + name: 'oldGovernor', + type: 'address' + }, + { + indexed: true, + internalType: 'address', + name: 'newGovernor', + type: 'address' + } + ], + name: 'NewGovernor', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'address', + name: 
'oldPendingGovernor', + type: 'address' + }, + { + indexed: true, + internalType: 'address', + name: 'newPendingGovernor', + type: 'address' + } + ], + name: 'NewPendingGovernor', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: false, + internalType: 'uint256', + name: 'txId', + type: 'uint256' + }, + { + indexed: false, + internalType: 'bytes32', + name: 'txHash', + type: 'bytes32' + }, + { + indexed: false, + internalType: 'uint64', + name: 'expirationTimestamp', + type: 'uint64' + }, + { + components: [ + { + internalType: 'uint256', + name: 'txType', + type: 'uint256' + }, + { + internalType: 'uint256', + name: 'from', + type: 'uint256' + }, + { + internalType: 'uint256', + name: 'to', + type: 'uint256' + }, + { + internalType: 'uint256', + name: 'gasLimit', + type: 'uint256' + }, + { + internalType: 'uint256', + name: 'gasPerPubdataByteLimit', + type: 'uint256' + }, + { + internalType: 'uint256', + name: 'maxFeePerGas', + type: 'uint256' + }, + { + internalType: 'uint256', + name: 'maxPriorityFeePerGas', + type: 'uint256' + }, + { + internalType: 'uint256', + name: 'paymaster', + type: 'uint256' + }, + { + internalType: 'uint256', + name: 'nonce', + type: 'uint256' + }, + { + internalType: 'uint256', + name: 'value', + type: 'uint256' + }, + { + internalType: 'uint256[4]', + name: 'reserved', + type: 'uint256[4]' + }, + { + internalType: 'bytes', + name: 'data', + type: 'bytes' + }, + { + internalType: 'bytes', + name: 'signature', + type: 'bytes' + }, + { + internalType: 'uint256[]', + name: 'factoryDeps', + type: 'uint256[]' + }, + { + internalType: 'bytes', + name: 'paymasterInput', + type: 'bytes' + }, + { + internalType: 'bytes', + name: 'reservedDynamic', + type: 'bytes' + } + ], + indexed: false, + internalType: 'struct IMailbox.L2CanonicalTransaction', + name: 'transaction', + type: 'tuple' + }, + { + indexed: false, + internalType: 'bytes[]', + name: 'factoryDeps', + type: 'bytes[]' + } + ], + name: 'NewPriorityRequest', + 
type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: false, + internalType: 'uint256', + name: 'oldPriorityTxMaxGasLimit', + type: 'uint256' + }, + { + indexed: false, + internalType: 'uint256', + name: 'newPriorityTxMaxGasLimit', + type: 'uint256' + } + ], + name: 'NewPriorityTxMaxGasLimit', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'uint256', + name: 'proposalId', + type: 'uint256' + }, + { + indexed: true, + internalType: 'bytes32', + name: 'proposalHash', + type: 'bytes32' + } + ], + name: 'ProposeShadowUpgrade', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + components: [ + { + components: [ + { + internalType: 'address', + name: 'facet', + type: 'address' + }, + { + internalType: 'enum Diamond.Action', + name: 'action', + type: 'uint8' + }, + { + internalType: 'bool', + name: 'isFreezable', + type: 'bool' + }, + { + internalType: 'bytes4[]', + name: 'selectors', + type: 'bytes4[]' + } + ], + internalType: 'struct Diamond.FacetCut[]', + name: 'facetCuts', + type: 'tuple[]' + }, + { + internalType: 'address', + name: 'initAddress', + type: 'address' + }, + { + internalType: 'bytes', + name: 'initCalldata', + type: 'bytes' + } + ], + indexed: false, + internalType: 'struct Diamond.DiamondCutData', + name: 'diamondCut', + type: 'tuple' + }, + { + indexed: true, + internalType: 'uint256', + name: 'proposalId', + type: 'uint256' + }, + { + indexed: false, + internalType: 'bytes32', + name: 'proposalSalt', + type: 'bytes32' + } + ], + name: 'ProposeTransparentUpgrade', + type: 'event' + }, + { + anonymous: false, + inputs: [ + { + indexed: true, + internalType: 'uint256', + name: 'proposalId', + type: 'uint256' + }, + { + indexed: true, + internalType: 'bytes32', + name: 'proposalHash', + type: 'bytes32' + } + ], + name: 'SecurityCouncilUpgradeApprove', + type: 'event' + }, + { + anonymous: false, + inputs: [], + name: 'Unfreeze', + type: 'event' + }, + { + anonymous: false, + 
inputs: [ + { + indexed: true, + internalType: 'address', + name: 'validatorAddress', + type: 'address' + }, + { + indexed: false, + internalType: 'bool', + name: 'isActive', + type: 'bool' + } + ], + name: 'ValidatorStatusUpdate', + type: 'event' + }, + { + inputs: [], + name: 'acceptGovernor', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + internalType: 'bytes32', + name: '_proposedUpgradeHash', + type: 'bytes32' + } + ], + name: 'cancelUpgradeProposal', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + components: [ + { + internalType: 'uint64', + name: 'blockNumber', + type: 'uint64' + }, + { + internalType: 'bytes32', + name: 'blockHash', + type: 'bytes32' + }, + { + internalType: 'uint64', + name: 'indexRepeatedStorageChanges', + type: 'uint64' + }, + { + internalType: 'uint256', + name: 'numberOfLayer1Txs', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: 'priorityOperationsHash', + type: 'bytes32' + }, + { + internalType: 'bytes32', + name: 'l2LogsTreeRoot', + type: 'bytes32' + }, + { + internalType: 'uint256', + name: 'timestamp', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: 'commitment', + type: 'bytes32' + } + ], + internalType: 'struct IExecutor.StoredBlockInfo', + name: '_lastCommittedBlockData', + type: 'tuple' + }, + { + components: [ + { + internalType: 'uint64', + name: 'blockNumber', + type: 'uint64' + }, + { + internalType: 'uint64', + name: 'timestamp', + type: 'uint64' + }, + { + internalType: 'uint64', + name: 'indexRepeatedStorageChanges', + type: 'uint64' + }, + { + internalType: 'bytes32', + name: 'newStateRoot', + type: 'bytes32' + }, + { + internalType: 'uint256', + name: 'numberOfLayer1Txs', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: 'l2LogsTreeRoot', + type: 'bytes32' + }, + { + internalType: 'bytes32', + name: 'priorityOperationsHash', + type: 'bytes32' + }, + { + internalType: 'bytes', + name: 
'initialStorageChanges', + type: 'bytes' + }, + { + internalType: 'bytes', + name: 'repeatedStorageChanges', + type: 'bytes' + }, + { + internalType: 'bytes', + name: 'l2Logs', + type: 'bytes' + }, + { + internalType: 'bytes[]', + name: 'l2ArbitraryLengthMessages', + type: 'bytes[]' + }, + { + internalType: 'bytes[]', + name: 'factoryDeps', + type: 'bytes[]' + } + ], + internalType: 'struct IExecutor.CommitBlockInfo[]', + name: '_newBlocksData', + type: 'tuple[]' + } + ], + name: 'commitBlocks', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + components: [ + { + internalType: 'uint64', + name: 'blockNumber', + type: 'uint64' + }, + { + internalType: 'bytes32', + name: 'blockHash', + type: 'bytes32' + }, + { + internalType: 'uint64', + name: 'indexRepeatedStorageChanges', + type: 'uint64' + }, + { + internalType: 'uint256', + name: 'numberOfLayer1Txs', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: 'priorityOperationsHash', + type: 'bytes32' + }, + { + internalType: 'bytes32', + name: 'l2LogsTreeRoot', + type: 'bytes32' + }, + { + internalType: 'uint256', + name: 'timestamp', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: 'commitment', + type: 'bytes32' + } + ], + internalType: 'struct IExecutor.StoredBlockInfo[]', + name: '_blocksData', + type: 'tuple[]' + } + ], + name: 'executeBlocks', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + components: [ + { + components: [ + { + internalType: 'address', + name: 'facet', + type: 'address' + }, + { + internalType: 'enum Diamond.Action', + name: 'action', + type: 'uint8' + }, + { + internalType: 'bool', + name: 'isFreezable', + type: 'bool' + }, + { + internalType: 'bytes4[]', + name: 'selectors', + type: 'bytes4[]' + } + ], + internalType: 'struct Diamond.FacetCut[]', + name: 'facetCuts', + type: 'tuple[]' + }, + { + internalType: 'address', + name: 'initAddress', + type: 'address' + }, + { + 
internalType: 'bytes', + name: 'initCalldata', + type: 'bytes' + } + ], + internalType: 'struct Diamond.DiamondCutData', + name: '_diamondCut', + type: 'tuple' + }, + { + internalType: 'bytes32', + name: '_proposalSalt', + type: 'bytes32' + } + ], + name: 'executeUpgrade', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + internalType: 'bytes4', + name: '_selector', + type: 'bytes4' + } + ], + name: 'facetAddress', + outputs: [ + { + internalType: 'address', + name: 'facet', + type: 'address' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'facetAddresses', + outputs: [ + { + internalType: 'address[]', + name: 'facets', + type: 'address[]' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 'address', + name: '_facet', + type: 'address' + } + ], + name: 'facetFunctionSelectors', + outputs: [ + { + internalType: 'bytes4[]', + name: '', + type: 'bytes4[]' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'facets', + outputs: [ + { + components: [ + { + internalType: 'address', + name: 'addr', + type: 'address' + }, + { + internalType: 'bytes4[]', + name: 'selectors', + type: 'bytes4[]' + } + ], + internalType: 'struct IGetters.Facet[]', + name: '', + type: 'tuple[]' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 'uint256', + name: '_l2BlockNumber', + type: 'uint256' + }, + { + internalType: 'uint256', + name: '_l2MessageIndex', + type: 'uint256' + }, + { + internalType: 'uint16', + name: '_l2TxNumberInBlock', + type: 'uint16' + }, + { + internalType: 'bytes', + name: '_message', + type: 'bytes' + }, + { + internalType: 'bytes32[]', + name: '_merkleProof', + type: 'bytes32[]' + } + ], + name: 'finalizeEthWithdrawal', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [], + name: 'freezeDiamond', + outputs: [], + 
stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [], + name: 'getAllowList', + outputs: [ + { + internalType: 'address', + name: '', + type: 'address' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getCurrentProposalId', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getFirstUnprocessedPriorityTx', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getGovernor', + outputs: [ + { + internalType: 'address', + name: '', + type: 'address' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getL2BootloaderBytecodeHash', + outputs: [ + { + internalType: 'bytes32', + name: '', + type: 'bytes32' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getL2DefaultAccountBytecodeHash', + outputs: [ + { + internalType: 'bytes32', + name: '', + type: 'bytes32' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getL2SystemContractsUpgradeBlockNumber', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getL2SystemContractsUpgradeTxHash', + outputs: [ + { + internalType: 'bytes32', + name: '', + type: 'bytes32' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getName', + outputs: [ + { + internalType: 'string', + name: '', + type: 'string' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getPendingGovernor', + outputs: [ + { + internalType: 'address', + name: '', + type: 'address' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getPriorityQueueSize', + outputs: [ + { + 
internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getPriorityTxMaxGasLimit', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getProposedUpgradeHash', + outputs: [ + { + internalType: 'bytes32', + name: '', + type: 'bytes32' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getProposedUpgradeTimestamp', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getProtocolVersion', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getSecurityCouncil', + outputs: [ + { + internalType: 'address', + name: '', + type: 'address' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getTotalBlocksCommitted', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getTotalBlocksExecuted', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getTotalBlocksVerified', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getTotalPriorityTxs', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getUpgradeProposalState', + outputs: [ + { + internalType: 'enum UpgradeState', + name: '', + type: 'uint8' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], 
+ name: 'getVerifier', + outputs: [ + { + internalType: 'address', + name: '', + type: 'address' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'getVerifierParams', + outputs: [ + { + components: [ + { + internalType: 'bytes32', + name: 'recursionNodeLevelVkHash', + type: 'bytes32' + }, + { + internalType: 'bytes32', + name: 'recursionLeafLevelVkHash', + type: 'bytes32' + }, + { + internalType: 'bytes32', + name: 'recursionCircuitsSetVksHash', + type: 'bytes32' + } + ], + internalType: 'struct VerifierParams', + name: '', + type: 'tuple' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'isApprovedBySecurityCouncil', + outputs: [ + { + internalType: 'bool', + name: '', + type: 'bool' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'isDiamondStorageFrozen', + outputs: [ + { + internalType: 'bool', + name: '', + type: 'bool' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 'uint256', + name: '_l2BlockNumber', + type: 'uint256' + }, + { + internalType: 'uint256', + name: '_l2MessageIndex', + type: 'uint256' + } + ], + name: 'isEthWithdrawalFinalized', + outputs: [ + { + internalType: 'bool', + name: '', + type: 'bool' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 'address', + name: '_facet', + type: 'address' + } + ], + name: 'isFacetFreezable', + outputs: [ + { + internalType: 'bool', + name: 'isFreezable', + type: 'bool' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 'bytes4', + name: '_selector', + type: 'bytes4' + } + ], + name: 'isFunctionFreezable', + outputs: [ + { + internalType: 'bool', + name: '', + type: 'bool' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 'address', + name: '_address', + type: 'address' + } + ], + name: 'isValidator', + outputs: [ + { + 
internalType: 'bool', + name: '', + type: 'bool' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 'uint256', + name: '_blockNumber', + type: 'uint256' + } + ], + name: 'l2LogsRootHash', + outputs: [ + { + internalType: 'bytes32', + name: 'hash', + type: 'bytes32' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 'uint256', + name: '_gasPrice', + type: 'uint256' + }, + { + internalType: 'uint256', + name: '_l2GasLimit', + type: 'uint256' + }, + { + internalType: 'uint256', + name: '_l2GasPerPubdataByteLimit', + type: 'uint256' + } + ], + name: 'l2TransactionBaseCost', + outputs: [ + { + internalType: 'uint256', + name: '', + type: 'uint256' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'priorityQueueFrontOperation', + outputs: [ + { + components: [ + { + internalType: 'bytes32', + name: 'canonicalTxHash', + type: 'bytes32' + }, + { + internalType: 'uint64', + name: 'expirationTimestamp', + type: 'uint64' + }, + { + internalType: 'uint192', + name: 'layer2Tip', + type: 'uint192' + } + ], + internalType: 'struct PriorityOperation', + name: '', + type: 'tuple' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 'bytes32', + name: '_proposalHash', + type: 'bytes32' + }, + { + internalType: 'uint40', + name: '_proposalId', + type: 'uint40' + } + ], + name: 'proposeShadowUpgrade', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + components: [ + { + components: [ + { + internalType: 'address', + name: 'facet', + type: 'address' + }, + { + internalType: 'enum Diamond.Action', + name: 'action', + type: 'uint8' + }, + { + internalType: 'bool', + name: 'isFreezable', + type: 'bool' + }, + { + internalType: 'bytes4[]', + name: 'selectors', + type: 'bytes4[]' + } + ], + internalType: 'struct Diamond.FacetCut[]', + name: 'facetCuts', + type: 'tuple[]' + }, + { + 
internalType: 'address', + name: 'initAddress', + type: 'address' + }, + { + internalType: 'bytes', + name: 'initCalldata', + type: 'bytes' + } + ], + internalType: 'struct Diamond.DiamondCutData', + name: '_diamondCut', + type: 'tuple' + }, + { + internalType: 'uint40', + name: '_proposalId', + type: 'uint40' + } + ], + name: 'proposeTransparentUpgrade', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + components: [ + { + internalType: 'uint64', + name: 'blockNumber', + type: 'uint64' + }, + { + internalType: 'bytes32', + name: 'blockHash', + type: 'bytes32' + }, + { + internalType: 'uint64', + name: 'indexRepeatedStorageChanges', + type: 'uint64' + }, + { + internalType: 'uint256', + name: 'numberOfLayer1Txs', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: 'priorityOperationsHash', + type: 'bytes32' + }, + { + internalType: 'bytes32', + name: 'l2LogsTreeRoot', + type: 'bytes32' + }, + { + internalType: 'uint256', + name: 'timestamp', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: 'commitment', + type: 'bytes32' + } + ], + internalType: 'struct IExecutor.StoredBlockInfo', + name: '_prevBlock', + type: 'tuple' + }, + { + components: [ + { + internalType: 'uint64', + name: 'blockNumber', + type: 'uint64' + }, + { + internalType: 'bytes32', + name: 'blockHash', + type: 'bytes32' + }, + { + internalType: 'uint64', + name: 'indexRepeatedStorageChanges', + type: 'uint64' + }, + { + internalType: 'uint256', + name: 'numberOfLayer1Txs', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: 'priorityOperationsHash', + type: 'bytes32' + }, + { + internalType: 'bytes32', + name: 'l2LogsTreeRoot', + type: 'bytes32' + }, + { + internalType: 'uint256', + name: 'timestamp', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: 'commitment', + type: 'bytes32' + } + ], + internalType: 'struct IExecutor.StoredBlockInfo[]', + name: '_committedBlocks', + type: 'tuple[]' + }, + { + components: [ 
+ { + internalType: 'uint256[]', + name: 'recursiveAggregationInput', + type: 'uint256[]' + }, + { + internalType: 'uint256[]', + name: 'serializedProof', + type: 'uint256[]' + } + ], + internalType: 'struct IExecutor.ProofInput', + name: '_proof', + type: 'tuple' + } + ], + name: 'proveBlocks', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + internalType: 'bytes32', + name: '_l2TxHash', + type: 'bytes32' + }, + { + internalType: 'uint256', + name: '_l2BlockNumber', + type: 'uint256' + }, + { + internalType: 'uint256', + name: '_l2MessageIndex', + type: 'uint256' + }, + { + internalType: 'uint16', + name: '_l2TxNumberInBlock', + type: 'uint16' + }, + { + internalType: 'bytes32[]', + name: '_merkleProof', + type: 'bytes32[]' + }, + { + internalType: 'enum TxStatus', + name: '_status', + type: 'uint8' + } + ], + name: 'proveL1ToL2TransactionStatus', + outputs: [ + { + internalType: 'bool', + name: '', + type: 'bool' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 'uint256', + name: '_blockNumber', + type: 'uint256' + }, + { + internalType: 'uint256', + name: '_index', + type: 'uint256' + }, + { + components: [ + { + internalType: 'uint8', + name: 'l2ShardId', + type: 'uint8' + }, + { + internalType: 'bool', + name: 'isService', + type: 'bool' + }, + { + internalType: 'uint16', + name: 'txNumberInBlock', + type: 'uint16' + }, + { + internalType: 'address', + name: 'sender', + type: 'address' + }, + { + internalType: 'bytes32', + name: 'key', + type: 'bytes32' + }, + { + internalType: 'bytes32', + name: 'value', + type: 'bytes32' + } + ], + internalType: 'struct L2Log', + name: '_log', + type: 'tuple' + }, + { + internalType: 'bytes32[]', + name: '_proof', + type: 'bytes32[]' + } + ], + name: 'proveL2LogInclusion', + outputs: [ + { + internalType: 'bool', + name: '', + type: 'bool' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 
'uint256', + name: '_blockNumber', + type: 'uint256' + }, + { + internalType: 'uint256', + name: '_index', + type: 'uint256' + }, + { + components: [ + { + internalType: 'uint16', + name: 'txNumberInBlock', + type: 'uint16' + }, + { + internalType: 'address', + name: 'sender', + type: 'address' + }, + { + internalType: 'bytes', + name: 'data', + type: 'bytes' + } + ], + internalType: 'struct L2Message', + name: '_message', + type: 'tuple' + }, + { + internalType: 'bytes32[]', + name: '_proof', + type: 'bytes32[]' + } + ], + name: 'proveL2MessageInclusion', + outputs: [ + { + internalType: 'bool', + name: '', + type: 'bool' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [ + { + internalType: 'address', + name: '_contractL2', + type: 'address' + }, + { + internalType: 'uint256', + name: '_l2Value', + type: 'uint256' + }, + { + internalType: 'bytes', + name: '_calldata', + type: 'bytes' + }, + { + internalType: 'uint256', + name: '_l2GasLimit', + type: 'uint256' + }, + { + internalType: 'uint256', + name: '_l2GasPerPubdataByteLimit', + type: 'uint256' + }, + { + internalType: 'bytes[]', + name: '_factoryDeps', + type: 'bytes[]' + }, + { + internalType: 'address', + name: '_refundRecipient', + type: 'address' + } + ], + name: 'requestL2Transaction', + outputs: [ + { + internalType: 'bytes32', + name: 'canonicalTxHash', + type: 'bytes32' + } + ], + stateMutability: 'payable', + type: 'function' + }, + { + inputs: [ + { + internalType: 'uint256', + name: '_newLastBlock', + type: 'uint256' + } + ], + name: 'revertBlocks', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + internalType: 'bytes32', + name: '_upgradeProposalHash', + type: 'bytes32' + } + ], + name: 'securityCouncilUpgradeApprove', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + internalType: 'address', + name: '_newPendingGovernor', + type: 'address' + } + ], + name: 'setPendingGovernor', + 
outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + internalType: 'bool', + name: '_zkPorterIsAvailable', + type: 'bool' + } + ], + name: 'setPorterAvailability', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + internalType: 'uint256', + name: '_newPriorityTxMaxGasLimit', + type: 'uint256' + } + ], + name: 'setPriorityTxMaxGasLimit', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + internalType: 'address', + name: '_validator', + type: 'address' + }, + { + internalType: 'bool', + name: '_active', + type: 'bool' + } + ], + name: 'setValidator', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + internalType: 'uint256', + name: '_blockNumber', + type: 'uint256' + } + ], + name: 'storedBlockHash', + outputs: [ + { + internalType: 'bytes32', + name: '', + type: 'bytes32' + } + ], + stateMutability: 'view', + type: 'function' + }, + { + inputs: [], + name: 'unfreezeDiamond', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + }, + { + inputs: [ + { + components: [ + { + components: [ + { + internalType: 'address', + name: 'facet', + type: 'address' + }, + { + internalType: 'enum Diamond.Action', + name: 'action', + type: 'uint8' + }, + { + internalType: 'bool', + name: 'isFreezable', + type: 'bool' + }, + { + internalType: 'bytes4[]', + name: 'selectors', + type: 'bytes4[]' + } + ], + internalType: 'struct Diamond.FacetCut[]', + name: 'facetCuts', + type: 'tuple[]' + }, + { + internalType: 'address', + name: 'initAddress', + type: 'address' + }, + { + internalType: 'bytes', + name: 'initCalldata', + type: 'bytes' + } + ], + internalType: 'struct Diamond.DiamondCutData', + name: '_diamondCut', + type: 'tuple' + }, + { + internalType: 'uint256', + name: '_proposalId', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: '_salt', + type: 'bytes32' + } + ], + name: 
'upgradeProposalHash', + outputs: [ + { + internalType: 'bytes32', + name: '', + type: 'bytes32' + } + ], + stateMutability: 'pure', + type: 'function' + } +]; diff --git a/infrastructure/protocol-upgrade/src/crypto/crypto.ts b/infrastructure/protocol-upgrade/src/crypto/crypto.ts index 78c013b9479..8c7d666473f 100644 --- a/infrastructure/protocol-upgrade/src/crypto/crypto.ts +++ b/infrastructure/protocol-upgrade/src/crypto/crypto.ts @@ -10,9 +10,10 @@ function saveVerificationKeys( recursionCircuitsSetVksHash: BytesLike, environment: string ) { - recursionNodeLevelVkHash = recursionNodeLevelVkHash ?? process.env.CONTRACTS_RECURSION_NODE_LEVEL_VK_HASH; - recursionLeafLevelVkHash = recursionLeafLevelVkHash ?? process.env.CONTRACTS_RECURSION_LEAF_LEVEL_VK_HASH; - recursionCircuitsSetVksHash = recursionCircuitsSetVksHash ?? process.env.CONTRACTS_RECURSION_CIRCUITS_SET_VKS_HASH; + recursionNodeLevelVkHash = recursionNodeLevelVkHash ?? process.env.CONTRACTS_FRI_RECURSION_NODE_LEVEL_VK_HASH; + recursionLeafLevelVkHash = recursionLeafLevelVkHash ?? process.env.CONTRACTS_FRI_RECURSION_LEAF_LEVEL_VK_HASH; + recursionCircuitsSetVksHash = + recursionCircuitsSetVksHash ?? 
process.env.CONTRACTS_FRI_RECURSION_SCHEDULER_LEVEL_VK_HASH; const verificationParams: VerifierParams = { recursionNodeLevelVkHash, recursionLeafLevelVkHash, diff --git a/infrastructure/protocol-upgrade/src/crypto/deployer.ts b/infrastructure/protocol-upgrade/src/crypto/deployer.ts index b0ca3c5eea7..5b141fe80c5 100644 --- a/infrastructure/protocol-upgrade/src/crypto/deployer.ts +++ b/infrastructure/protocol-upgrade/src/crypto/deployer.ts @@ -14,6 +14,9 @@ export async function deployVerifier( if (l1Rpc) { argsString += ` --l1rpc ${l1Rpc}`; } + if (privateKey) { + argsString += ` --private-key ${privateKey}`; + } if (nonce) { argsString += ` --nonce ${nonce}`; } diff --git a/infrastructure/protocol-upgrade/src/l2upgrade/transactions.ts b/infrastructure/protocol-upgrade/src/l2upgrade/transactions.ts index 2babc5bf04a..a2d05585794 100644 --- a/infrastructure/protocol-upgrade/src/l2upgrade/transactions.ts +++ b/infrastructure/protocol-upgrade/src/l2upgrade/transactions.ts @@ -1,5 +1,5 @@ import { BytesLike } from 'ethers'; -import { ComplexUpgraderFactory, ContractDeployerFactory } from '../../../../etc/system-contracts/typechain'; +import { ComplexUpgrader__factory, ContractDeployer__factory } from '../../../../etc/system-contracts/typechain-types'; import { ForceDeployment, L2CanonicalTransaction } from '../transaction'; import { ForceDeployUpgraderFactory } from 'l2-zksync-contracts/typechain'; import { Command } from 'commander'; @@ -40,13 +40,13 @@ export function forceDeploymentCalldataUpgrader(forcedDeployments: ForceDeployme } export function forceDeploymentCalldataContractDeployer(forcedDeployments: ForceDeployment[]): BytesLike { - let contractDeployer = new ContractDeployerFactory(); + let contractDeployer = new ContractDeployer__factory(); let calldata = contractDeployer.interface.encodeFunctionData('forceDeployOnAddresses', [forcedDeployments]); return calldata; } export function prepareCallDataForComplexUpgrader(calldata: BytesLike, to: string): 
BytesLike { - const upgrader = new ComplexUpgraderFactory(); + const upgrader = new ComplexUpgrader__factory(); let finalCalldata = upgrader.interface.encodeFunctionData('upgrade', [to, calldata]); return finalCalldata; } diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index 82de44e0fbe..3d92127a8b4 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -3,8 +3,8 @@ import { BytesLike, ethers } from 'ethers'; import { ForceDeployUpgraderFactory as ForceDeployUpgraderFactoryL2 } from 'l2-zksync-contracts/typechain'; import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1 } from 'l1-zksync-contracts/typechain'; import { FacetCut } from 'l1-zksync-contracts/src.ts/diamondCut'; -import { IZkSyncFactory } from 'l1-zksync-contracts/typechain/IZkSyncFactory'; -import { ComplexUpgraderFactory } from '../../../etc/system-contracts/typechain'; +import { IZkSyncFactory } from '../pre-boojum/IZkSyncFactory'; +import { ComplexUpgrader__factory } from '../../../etc/system-contracts/typechain-types'; import { getCommonDataFileName, getCryptoFileName, @@ -146,7 +146,7 @@ export function forceDeploymentCalldata(forcedDeployments: ForceDeployment[]): B } export function prepareCallDataForComplexUpgrader(calldata: BytesLike, to: string): BytesLike { - const upgrader = new ComplexUpgraderFactory(); + const upgrader = new ComplexUpgrader__factory(); let finalCalldata = upgrader.interface.encodeFunctionData('upgrade', [to, calldata]); return finalCalldata; } diff --git a/yarn.lock b/yarn.lock index eefeb367168..cd6b83283e5 100644 --- a/yarn.lock +++ b/yarn.lock @@ -2362,9 +2362,9 @@ integrity sha512-ebDJ9b0e702Yr7pWgB0jzm+CX4Srzz8RcXtLJDJB+BSccqMa36uyH/zUsSYao5+BD1ytv3k3rPYCq4mAE1hsXA== "@types/babel__core@^7.1.14": - version "7.20.3" - resolved 
"https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.20.3.tgz#d5625a50b6f18244425a1359a858c73d70340778" - integrity sha512-54fjTSeSHwfan8AyHWrKbfBWiEUrNTZsUwPTDSNaaP1QDQIZbeNUg3a59E9D+375MzUw/x1vx2/0F5LBz+AeYA== + version "7.20.4" + resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.20.4.tgz#26a87347e6c6f753b3668398e34496d6d9ac6ac0" + integrity sha512-mLnSC22IC4vcWiuObSRjrLd9XcBTGf59vUSoq2jkQDJ/QQ8PMI9rSuzE+aEV8karUMbskw07bKYoUJCKTUaygg== dependencies: "@babel/parser" "^7.20.7" "@babel/types" "^7.20.7" @@ -2373,24 +2373,24 @@ "@types/babel__traverse" "*" "@types/babel__generator@*": - version "7.6.6" - resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.6.tgz#676f89f67dc8ddaae923f70ebc5f1fa800c031a8" - integrity sha512-66BXMKb/sUWbMdBNdMvajU7i/44RkrA3z/Yt1c7R5xejt8qh84iU54yUWCtm0QwGJlDcf/gg4zd/x4mpLAlb/w== + version "7.6.7" + resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.7.tgz#a7aebf15c7bc0eb9abd638bdb5c0b8700399c9d0" + integrity sha512-6Sfsq+EaaLrw4RmdFWE9Onp63TOUue71AWb4Gpa6JxzgTYtimbM086WnYTy2U67AofR++QKCo08ZP6pwx8YFHQ== dependencies: "@babel/types" "^7.0.0" "@types/babel__template@*": - version "7.4.3" - resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.4.3.tgz#db9ac539a2fe05cfe9e168b24f360701bde41f5f" - integrity sha512-ciwyCLeuRfxboZ4isgdNZi/tkt06m8Tw6uGbBSBgWrnnZGNXiEyM27xc/PjXGQLqlZ6ylbgHMnm7ccF9tCkOeQ== + version "7.4.4" + resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.4.4.tgz#5672513701c1b2199bc6dad636a9d7491586766f" + integrity sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A== dependencies: "@babel/parser" "^7.1.0" "@babel/types" "^7.0.0" "@types/babel__traverse@*", "@types/babel__traverse@^7.0.6": - version "7.20.3" - resolved 
"https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.20.3.tgz#a971aa47441b28ef17884ff945d0551265a2d058" - integrity sha512-Lsh766rGEFbaxMIDH7Qa+Yha8cMVI3qAK6CHt3OR0YfxOIn5Z54iHiyDRycHrBqeIiqGa20Kpsv1cavfBKkRSw== + version "7.20.4" + resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.20.4.tgz#ec2c06fed6549df8bc0eb4615b683749a4a92e1b" + integrity sha512-mSM/iKUk5fDDrEV/e83qY+Cr3I1+Q3qqTuEn++HAWYjEa1+NxZr6CNrcJGf2ZTnq4HoFGC3zaTPZTobCzCFukA== dependencies: "@babel/types" "^7.20.7" @@ -2402,9 +2402,9 @@ "@types/node" "*" "@types/bn.js@^5.1.0": - version "5.1.3" - resolved "https://registry.yarnpkg.com/@types/bn.js/-/bn.js-5.1.3.tgz#0857f00da3bf888a26a44b4a477c7819b17dacc5" - integrity sha512-wT1B4iIO82ecXkdN6waCK8Ou7E71WU+mP1osDA5Q8c6Ur+ozU2vIKUIhSpUr6uE5L2YHocKS1Z2jG2fBC1YVeg== + version "5.1.5" + resolved "https://registry.yarnpkg.com/@types/bn.js/-/bn.js-5.1.5.tgz#2e0dacdcce2c0f16b905d20ff87aedbc6f7b4bf0" + integrity sha512-V46N0zwKRF5Q00AZ6hWtN0T8gGmDUaUzLWQvHFo5yThtVwK/VCenFY3wXVbOvNfajEpsTfQM4IN9k/d6gUVX3A== dependencies: "@types/node" "*" @@ -2463,9 +2463,9 @@ "@types/node" "*" "@types/graceful-fs@^4.1.3": - version "4.1.8" - resolved "https://registry.yarnpkg.com/@types/graceful-fs/-/graceful-fs-4.1.8.tgz#417e461e4dc79d957dc3107f45fe4973b09c2915" - integrity sha512-NhRH7YzWq8WiNKVavKPBmtLYZHxNY19Hh+az28O/phfp68CF45pMFud+ZzJ8ewnxnC5smIdF3dqFeiSUQ5I+pw== + version "4.1.9" + resolved "https://registry.yarnpkg.com/@types/graceful-fs/-/graceful-fs-4.1.9.tgz#2a06bc0f68a20ab37b3e36aa238be6abdf49e8b4" + integrity sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ== dependencies: "@types/node" "*" @@ -2475,36 +2475,36 @@ integrity sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA== "@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0", "@types/istanbul-lib-coverage@^2.0.1": - version "2.0.5" - resolved 
"https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.5.tgz#fdfdd69fa16d530047d9963635bd77c71a08c068" - integrity sha512-zONci81DZYCZjiLe0r6equvZut0b+dBRPBN5kBDjsONnutYNtJMoWQ9uR2RkL1gLG9NMTzvf+29e5RFfPbeKhQ== + version "2.0.6" + resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz#7739c232a1fee9b4d3ce8985f314c0c6d33549d7" + integrity sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w== "@types/istanbul-lib-report@*": - version "3.0.2" - resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.2.tgz#394798d5f727402eb5ec99eb9618ffcd2b7645a1" - integrity sha512-8toY6FgdltSdONav1XtUHl4LN1yTmLza+EuDazb/fEmRNCwjyqNVIQWs2IfC74IqjHkREs/nQ2FWq5kZU9IC0w== + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz#53047614ae72e19fc0401d872de3ae2b4ce350bf" + integrity sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA== dependencies: "@types/istanbul-lib-coverage" "*" "@types/istanbul-reports@^3.0.0": - version "3.0.3" - resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-3.0.3.tgz#0313e2608e6d6955d195f55361ddeebd4b74c6e7" - integrity sha512-1nESsePMBlf0RPRffLZi5ujYh7IH1BWL4y9pr+Bn3cJBdxz+RTP8bUFljLz9HvzhhOSWKdyBZ4DIivdL6rvgZg== + version "3.0.4" + resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz#0f03e3d2f670fbdac586e34b433783070cc16f54" + integrity sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ== dependencies: "@types/istanbul-lib-report" "*" "@types/jest@^29.0.3": - version "29.5.6" - resolved "https://registry.yarnpkg.com/@types/jest/-/jest-29.5.6.tgz#f4cf7ef1b5b0bfc1aa744e41b24d9cc52533130b" - integrity sha512-/t9NnzkOpXb4Nfvg17ieHE6EeSjDS2SGSpNYfoLbUAeL/EOueU/RSdOWFpfQTXBEM7BguYW1XQ0EbM+6RlIh6w== 
+ version "29.5.7" + resolved "https://registry.yarnpkg.com/@types/jest/-/jest-29.5.7.tgz#2c0dafe2715dd958a455bc10e2ec3e1ec47b5036" + integrity sha512-HLyetab6KVPSiF+7pFcUyMeLsx25LDNDemw9mGsJBkai/oouwrjTycocSDYopMEwFhN2Y4s9oPyOCZNofgSt2g== dependencies: expect "^29.0.0" pretty-format "^29.0.0" "@types/json-schema@^7.0.12": - version "7.0.14" - resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.14.tgz#74a97a5573980802f32c8e47b663530ab3b6b7d1" - integrity sha512-U3PUjAudAdJBeC2pgN8uTIKgxrb4nlDF3SF0++EldXQvQBGkpFZMSnwQiIoDU77tv45VgNkl/L4ouD+rEomujw== + version "7.0.15" + resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841" + integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== "@types/json5@^0.0.29": version "0.0.29" @@ -2541,16 +2541,16 @@ "@types/node" "*" "@types/mocha-steps@^1.3.0": - version "1.3.2" - resolved "https://registry.yarnpkg.com/@types/mocha-steps/-/mocha-steps-1.3.2.tgz#0cbb227ca4328c892302e6aca55b3237ccdbd690" - integrity sha512-0k/IGS2Wax/TwNQ/rUnLPsaR/eaoVX4DgAxVhBeisTvecucTOCNSvgBu3VYL7xziO1qxC92WktC53sFmlGihkg== + version "1.3.3" + resolved "https://registry.yarnpkg.com/@types/mocha-steps/-/mocha-steps-1.3.3.tgz#69e178d2e3ccdcba65bedda07f69a26c4f3bac98" + integrity sha512-9xRpcsshkEF3UeVZNmXuUumQ2+Acj1BkwzyywU+Q0DkpoW7dxFSN0e8OsyBngTjLiLGoJeI/Uz1t9zKdi6rUUg== dependencies: "@types/mocha" "*" "@types/mocha@*": - version "10.0.3" - resolved "https://registry.yarnpkg.com/@types/mocha/-/mocha-10.0.3.tgz#4804fe9cd39da26eb62fa65c15ea77615a187812" - integrity sha512-RsOPImTriV/OE4A9qKjMtk2MnXiuLLbcO3nCXK+kvq4nr0iMfFgpjaX3MPLb6f7+EL1FGSelYvuJMV6REH+ZPQ== + version "10.0.4" + resolved "https://registry.yarnpkg.com/@types/mocha/-/mocha-10.0.4.tgz#b5331955ebca216604691fd4fcd2dbdc2bd559a4" + integrity sha512-xKU7bUjiFTIttpWaIZ9qvgg+22O1nmbA+HRxdlR+u6TWsGfmFdXrheJoK4fFxrHNVIOBDvDNKZG+LYBpMHpX3w== 
"@types/mocha@^8.2.3": version "8.2.3" @@ -2579,9 +2579,9 @@ form-data "^4.0.0" "@types/node@*": - version "20.8.9" - resolved "https://registry.yarnpkg.com/@types/node/-/node-20.8.9.tgz#646390b4fab269abce59c308fc286dcd818a2b08" - integrity sha512-UzykFsT3FhHb1h7yD4CA4YhBHq545JC0YnEz41xkipN88eKQtL6rSgocL5tbAP6Ola9Izm/Aw4Ora8He4x0BHg== + version "20.8.10" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.8.10.tgz#a5448b895c753ae929c26ce85cab557c6d4a365e" + integrity sha512-TlgT8JntpcbmKUFzjhsyhGfP2fsiz1Mv56im6enJ905xG1DAYesxJaeSbGqQmAw8OWPdhyJGhGSQGKRNJ45u9w== dependencies: undici-types "~5.26.4" @@ -2618,9 +2618,9 @@ "@types/node" "*" "@types/pg@^8.10.3": - version "8.10.7" - resolved "https://registry.yarnpkg.com/@types/pg/-/pg-8.10.7.tgz#2f172598272e581e72cba640026e5152b5a3d5c3" - integrity sha512-ksJqHipwYaSEHz9e1fr6H6erjoEdNNaOxwyJgPx9bNeaqOW3iWBQgVHfpwiSAoqGzchfc+ZyRLwEfeCcyYD3uQ== + version "8.10.8" + resolved "https://registry.yarnpkg.com/@types/pg/-/pg-8.10.8.tgz#20e8d653baba70c946df5a12601d8972da7cd043" + integrity sha512-EqSUgXvnbEnMkIdffX1cDsmbvNt5Fzoim17JRwdaHaCpeTz440FocCxcStU9GgJvA+1aqtHEjHgFlH0d8ZCjAg== dependencies: "@types/node" "*" pg-protocol "*" @@ -2688,22 +2688,22 @@ integrity sha512-5qcvofLPbfjmBfKaLfj/+f+Sbd6pN4zl7w7VSVI5uz7m9QZTuB2aZAa2uo1wHFBNN2x6g/SoTkXmd8mQnQF2Cw== "@types/yargs@^17.0.8": - version "17.0.29" - resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.29.tgz#06aabc72497b798c643c812a8b561537fea760cf" - integrity sha512-nacjqA3ee9zRF/++a3FUY1suHTFKZeHba2n8WeDw9cCVdmzmHpIxyzOJBcpHvvEmS8E9KqWlSnWHUkOrkhWcvA== + version "17.0.30" + resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.30.tgz#921094ec92faffd2cd7e5ddb02f95ba158ab5c1d" + integrity sha512-3SJLzYk3yz3EgI9I8OLoH06B3PdXIoU2imrBZzaGqUtUXf5iUNDtmAfCGuQrny1bnmyjh/GM/YNts6WK5jR5Rw== dependencies: "@types/yargs-parser" "*" "@typescript-eslint/eslint-plugin@^6.7.4": - version "6.9.1" - resolved 
"https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.9.1.tgz#d8ce497dc0ed42066e195c8ecc40d45c7b1254f4" - integrity sha512-w0tiiRc9I4S5XSXXrMHOWgHgxbrBn1Ro+PmiYhSg2ZVdxrAJtQgzU5o2m1BfP6UOn7Vxcc6152vFjQfmZR4xEg== + version "6.10.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.10.0.tgz#cfe2bd34e26d2289212946b96ab19dcad64b661a" + integrity sha512-uoLj4g2OTL8rfUQVx2AFO1hp/zja1wABJq77P6IclQs6I/m9GLrm7jCdgzZkvWdDCQf1uEvoa8s8CupsgWQgVg== dependencies: "@eslint-community/regexpp" "^4.5.1" - "@typescript-eslint/scope-manager" "6.9.1" - "@typescript-eslint/type-utils" "6.9.1" - "@typescript-eslint/utils" "6.9.1" - "@typescript-eslint/visitor-keys" "6.9.1" + "@typescript-eslint/scope-manager" "6.10.0" + "@typescript-eslint/type-utils" "6.10.0" + "@typescript-eslint/utils" "6.10.0" + "@typescript-eslint/visitor-keys" "6.10.0" debug "^4.3.4" graphemer "^1.4.0" ignore "^5.2.4" @@ -2748,21 +2748,13 @@ "@typescript-eslint/types" "6.10.0" "@typescript-eslint/visitor-keys" "6.10.0" -"@typescript-eslint/scope-manager@6.9.1": - version "6.9.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.9.1.tgz#e96afeb9a68ad1cd816dba233351f61e13956b75" - integrity sha512-38IxvKB6NAne3g/+MyXMs2Cda/Sz+CEpmm+KLGEM8hx/CvnSRuw51i8ukfwB/B/sESdeTGet1NH1Wj7I0YXswg== - dependencies: - "@typescript-eslint/types" "6.9.1" - "@typescript-eslint/visitor-keys" "6.9.1" - -"@typescript-eslint/type-utils@6.9.1": - version "6.9.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.9.1.tgz#efd5db20ed35a74d3c7d8fba51b830ecba09ce32" - integrity sha512-eh2oHaUKCK58qIeYp19F5V5TbpM52680sB4zNSz29VBQPTWIlE/hCj5P5B1AChxECe/fmZlspAWFuRniep1Skg== +"@typescript-eslint/type-utils@6.10.0": + version "6.10.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.10.0.tgz#1007faede067c78bdbcef2e8abb31437e163e2e1" + integrity 
sha512-wYpPs3hgTFblMYwbYWPT3eZtaDOjbLyIYuqpwuLBBqhLiuvJ+9sEp2gNRJEtR5N/c9G1uTtQQL5AhV0fEPJYcg== dependencies: - "@typescript-eslint/typescript-estree" "6.9.1" - "@typescript-eslint/utils" "6.9.1" + "@typescript-eslint/typescript-estree" "6.10.0" + "@typescript-eslint/utils" "6.10.0" debug "^4.3.4" ts-api-utils "^1.0.1" @@ -2776,11 +2768,6 @@ resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.10.0.tgz#f4f0a84aeb2ac546f21a66c6e0da92420e921367" integrity sha512-36Fq1PWh9dusgo3vH7qmQAj5/AZqARky1Wi6WpINxB6SkQdY5vQoT2/7rW7uBIsPDcvvGCLi4r10p0OJ7ITAeg== -"@typescript-eslint/types@6.9.1": - version "6.9.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.9.1.tgz#a6cfc20db0fcedcb2f397ea728ef583e0ee72459" - integrity sha512-BUGslGOb14zUHOUmDB2FfT6SI1CcZEJYfF3qFwBeUrU6srJfzANonwRYHDpLBuzbq3HaoF2XL2hcr01c8f8OaQ== - "@typescript-eslint/typescript-estree@4.33.0": version "4.33.0" resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-4.33.0.tgz#0dfb51c2908f68c5c08d82aefeaf166a17c24609" @@ -2807,30 +2794,17 @@ semver "^7.5.4" ts-api-utils "^1.0.1" -"@typescript-eslint/typescript-estree@6.9.1": - version "6.9.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.9.1.tgz#8c77910a49a04f0607ba94d78772da07dab275ad" - integrity sha512-U+mUylTHfcqeO7mLWVQ5W/tMLXqVpRv61wm9ZtfE5egz7gtnmqVIw9ryh0mgIlkKk9rZLY3UHygsBSdB9/ftyw== - dependencies: - "@typescript-eslint/types" "6.9.1" - "@typescript-eslint/visitor-keys" "6.9.1" - debug "^4.3.4" - globby "^11.1.0" - is-glob "^4.0.3" - semver "^7.5.4" - ts-api-utils "^1.0.1" - -"@typescript-eslint/utils@6.9.1": - version "6.9.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.9.1.tgz#763da41281ef0d16974517b5f0d02d85897a1c1e" - integrity sha512-L1T0A5nFdQrMVunpZgzqPL6y2wVreSyHhKGZryS6jrEN7bD9NplVAyMryUhXsQ4TWLnZmxc2ekar/lSGIlprCA== +"@typescript-eslint/utils@6.10.0": + version "6.10.0" 
+ resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.10.0.tgz#4d76062d94413c30e402c9b0df8c14aef8d77336" + integrity sha512-v+pJ1/RcVyRc0o4wAGux9x42RHmAjIGzPRo538Z8M1tVx6HOnoQBCX/NoadHQlZeC+QO2yr4nNSFWOoraZCAyg== dependencies: "@eslint-community/eslint-utils" "^4.4.0" "@types/json-schema" "^7.0.12" "@types/semver" "^7.5.0" - "@typescript-eslint/scope-manager" "6.9.1" - "@typescript-eslint/types" "6.9.1" - "@typescript-eslint/typescript-estree" "6.9.1" + "@typescript-eslint/scope-manager" "6.10.0" + "@typescript-eslint/types" "6.10.0" + "@typescript-eslint/typescript-estree" "6.10.0" semver "^7.5.4" "@typescript-eslint/visitor-keys@4.33.0": @@ -2849,14 +2823,6 @@ "@typescript-eslint/types" "6.10.0" eslint-visitor-keys "^3.4.1" -"@typescript-eslint/visitor-keys@6.9.1": - version "6.9.1" - resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.9.1.tgz#6753a9225a0ba00459b15d6456b9c2780b66707d" - integrity sha512-MUaPUe/QRLEffARsmNfmpghuQkW436DvESW+h+M52w0coICHRfD6Np9/K6PdACwnrq1HmuLl+cSPZaJmeVPkSw== - dependencies: - "@typescript-eslint/types" "6.9.1" - eslint-visitor-keys "^3.4.1" - "@ungap/promise-all-settled@1.1.2": version "1.1.2" resolved "https://registry.yarnpkg.com/@ungap/promise-all-settled/-/promise-all-settled-1.1.2.tgz#aa58042711d6e3275dd37dc597e5d31e8c290a44" @@ -8520,9 +8486,9 @@ isstream@~0.1.2: integrity sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g== istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.2.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz#189e7909d0a39fa5a3dfad5b03f71947770191d3" - integrity sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw== + version "3.2.1" + resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.1.tgz#c680fd1544600460367af5811866c34c44c6f3b1" + integrity 
sha512-opCrKqbthmq3SKZ10mFMQG9dk3fTa3quaOLD35kJa5ejwZHd9xAr+kLuziiZz2cG32s4lMZxNdmdcEQnTDP4+g== istanbul-lib-instrument@^5.0.4: version "5.2.1" @@ -13517,9 +13483,9 @@ undici-types@~5.26.4: integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA== undici@^5.14.0: - version "5.27.0" - resolved "https://registry.yarnpkg.com/undici/-/undici-5.27.0.tgz#789f2e40ce982b5507899abc2c2ddeb2712b4554" - integrity sha512-l3ydWhlhOJzMVOYkymLykcRRXqbUaQriERtR70B9LzNkZ4bX52Fc8wbTDneMiwo8T+AemZXvXaTx+9o5ROxrXg== + version "5.27.2" + resolved "https://registry.yarnpkg.com/undici/-/undici-5.27.2.tgz#a270c563aea5b46cc0df2550523638c95c5d4411" + integrity sha512-iS857PdOEy/y3wlM3yRp+6SNQQ6xU0mmZcwRSriqk+et/cwWAtwmIGf6WkoDN2EK/AMdCO/dfXzIwi+rFMrjjQ== dependencies: "@fastify/busboy" "^2.0.0" @@ -13539,9 +13505,9 @@ universalify@^0.1.0: integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg== universalify@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.0.tgz#75a4984efedc4b08975c5aeb73f530d02df25717" - integrity sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ== + version "2.0.1" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-2.0.1.tgz#168efc2180964e6386d061e094df61afe239b18d" + integrity sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw== unorm@^1.3.3: version "1.6.0" From f4313a4e5a67f616d2dfa8f364c47cb73cef1ec7 Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Tue, 14 Nov 2023 16:46:22 +0100 Subject: [PATCH 010/115] feat(core): adds a get proof endpoint in zks namespace (#455) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR adds a new JSON-RPC endpoint `zks_getProof` that allows users to request proofs of data in the tree that can later be verified against storarage roots 
at a given point in batch history. --------- Co-authored-by: Roman Brodetski --- core/lib/config/src/configs/api.rs | 7 +++ core/lib/env_config/src/api.rs | 1 + core/lib/types/src/api/mod.rs | 16 ++++++ core/lib/web3_decl/src/error.rs | 2 + .../zksync_core/src/api_server/tree/mod.rs | 3 +- .../api_server/web3/backend_jsonrpc/error.rs | 1 + .../web3/backend_jsonrpc/namespaces/zks.rs | 25 ++++++++- .../api_server/web3/backend_jsonrpsee/mod.rs | 1 + .../zksync_core/src/api_server/web3/mod.rs | 12 ++++- .../src/api_server/web3/namespaces/zks.rs | 52 ++++++++++++++++--- .../zksync_core/src/api_server/web3/state.rs | 3 ++ core/lib/zksync_core/src/lib.rs | 1 + 12 files changed, 114 insertions(+), 10 deletions(-) diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index dc14514aec4..3b23abea43c 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -87,6 +87,8 @@ pub struct Web3JsonRpcConfig { /// The value is per active connection. /// Note: For HTTP, rate limiting is expected to be configured on the infra level. pub websocket_requests_per_minute_limit: Option, + /// Tree API url, currently used to proxy `getProof` calls to the tree + pub tree_api_url: Option, } impl Web3JsonRpcConfig { @@ -123,6 +125,7 @@ impl Web3JsonRpcConfig { max_batch_request_size: Default::default(), max_response_body_size_mb: Default::default(), websocket_requests_per_minute_limit: Default::default(), + tree_api_url: None, } } @@ -205,6 +208,10 @@ impl Web3JsonRpcConfig { // The default limit is chosen to be reasonably permissive. 
self.websocket_requests_per_minute_limit.unwrap_or(6000) } + + pub fn tree_api_url(&self) -> Option { + self.tree_api_url.clone() + } } #[derive(Debug, Deserialize, Clone, PartialEq)] diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs index 92428fe45f1..20ecfe41e21 100644 --- a/core/lib/env_config/src/api.rs +++ b/core/lib/env_config/src/api.rs @@ -87,6 +87,7 @@ mod tests { max_batch_request_size: Some(200), max_response_body_size_mb: Some(10), websocket_requests_per_minute_limit: Some(10), + tree_api_url: None, }, contract_verification: ContractVerificationApiConfig { port: 3070, diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index c9b157c6629..99b6964408f 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -684,3 +684,19 @@ pub struct L1BatchDetails { #[serde(flatten)] pub base: BlockDetailsBase, } + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct StorageProof { + pub key: H256, + pub proof: Vec, + pub value: H256, + pub index: u64, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Proof { + pub address: Address, + pub storage_proof: Vec, +} diff --git a/core/lib/web3_decl/src/error.rs b/core/lib/web3_decl/src/error.rs index 20235fe65c1..d36bd2531f3 100644 --- a/core/lib/web3_decl/src/error.rs +++ b/core/lib/web3_decl/src/error.rs @@ -37,4 +37,6 @@ pub enum Web3Error { InvalidFilterBlockHash, #[error("Query returned more than {0} results. 
Try smaller range of blocks")] TooManyLogs(usize), + #[error("Tree API is not available")] + TreeApiUnavailable, } diff --git a/core/lib/zksync_core/src/api_server/tree/mod.rs b/core/lib/zksync_core/src/api_server/tree/mod.rs index f59cd171192..74dd3e5b70c 100644 --- a/core/lib/zksync_core/src/api_server/tree/mod.rs +++ b/core/lib/zksync_core/src/api_server/tree/mod.rs @@ -119,14 +119,13 @@ impl TreeApiClient for AsyncTreeReader { /// [`TreeApiClient`] implementation requesting data from a Merkle tree API server. #[derive(Debug, Clone)] -pub(crate) struct TreeApiHttpClient { +pub struct TreeApiHttpClient { inner: reqwest::Client, info_url: String, proofs_url: String, } impl TreeApiHttpClient { - #[cfg(test)] // temporary measure until `TreeApiClient` is required by other components pub fn new(url_base: &str) -> Self { Self { inner: reqwest::Client::new(), diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs index ac498c92799..4a30961c453 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs @@ -22,6 +22,7 @@ pub fn into_jsrpc_error(err: Web3Error) -> Error { Web3Error::SubmitTransactionError(_, _) | Web3Error::SerializationError(_) => 3.into(), Web3Error::PubSubTimeout => 4.into(), Web3Error::RequestTimeout => 5.into(), + Web3Error::TreeApiUnavailable => 6.into(), }, message: match err { Web3Error::SubmitTransactionError(_, _) => err.to_string(), diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs index 5cfb76af62b..bf700a64156 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs @@ -9,7 +9,7 @@ use jsonrpc_derive::rpc; // Workspace uses use 
zksync_types::{ api::{ - BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, ProtocolVersion, + BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, Proof, ProtocolVersion, TransactionDetails, }, fee::Fee, @@ -111,6 +111,14 @@ pub trait ZksNamespaceT { #[rpc(name = "zks_getLogsWithVirtualBlocks")] fn get_logs_with_virtual_blocks(&self, filter: Filter) -> BoxFuture>>; + + #[rpc(name = "zks_getProof")] + fn get_proof( + &self, + address: Address, + keys: Vec, + l1_batch_number: L1BatchNumber, + ) -> BoxFuture>; } impl ZksNamespaceT for ZksNamespace { @@ -308,4 +316,19 @@ impl ZksNamespaceT for ZksNamespa .map_err(into_jsrpc_error) }) } + + fn get_proof( + &self, + address: Address, + keys: Vec, + l1_batch_number: L1BatchNumber, + ) -> BoxFuture> { + let self_ = self.clone(); + Box::pin(async move { + self_ + .get_proofs_impl(address, keys.clone(), l1_batch_number) + .await + .map_err(into_jsrpc_error) + }) + } } diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs index 815f92b70f2..04f6102066f 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs @@ -29,6 +29,7 @@ pub fn into_jsrpc_error(err: Web3Error) -> ErrorObjectOwned { Web3Error::SubmitTransactionError(_, _) | Web3Error::SerializationError(_) => 3, Web3Error::PubSubTimeout => 4, Web3Error::RequestTimeout => 5, + Web3Error::TreeApiUnavailable => 6, }, match err { Web3Error::SubmitTransactionError(ref message, _) => message.clone(), diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs index 6745decd21b..57da568ea9a 100644 --- a/core/lib/zksync_core/src/api_server/web3/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/mod.rs @@ -27,7 +27,7 @@ use zksync_web3_decl::{ use crate::{ api_server::{ - execution_sandbox::VmConcurrencyBarrier, 
tx_sender::TxSender, + execution_sandbox::VmConcurrencyBarrier, tree::TreeApiHttpClient, tx_sender::TxSender, web3::backend_jsonrpc::batch_limiter_middleware::RateLimitMetadata, }, l1_gas_price::L1GasPriceProvider, @@ -136,6 +136,7 @@ pub struct ApiBuilder { polling_interval: Option, namespaces: Option>, logs_translator_enabled: bool, + tree_api_url: Option, } impl ApiBuilder { @@ -159,6 +160,7 @@ impl ApiBuilder { namespaces: None, config, logs_translator_enabled: false, + tree_api_url: None, } } @@ -255,6 +257,11 @@ impl ApiBuilder { self.logs_translator_enabled = true; self } + + pub fn with_tree_api(mut self, tree_api_url: Option) -> Self { + self.tree_api_url = tree_api_url; + self + } } impl ApiBuilder { @@ -280,6 +287,9 @@ impl ApiBuilder { api_config: self.config, last_sealed_miniblock, logs_translator_enabled: self.logs_translator_enabled, + tree_api: self + .tree_api_url + .map(|url| TreeApiHttpClient::new(url.as_str())), } } diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 3e31ea6bb06..849f88615b9 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -6,8 +6,8 @@ use zksync_dal::StorageProcessor; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_types::{ api::{ - BlockDetails, BridgeAddresses, GetLogsFilter, L1BatchDetails, L2ToL1LogProof, - ProtocolVersion, TransactionDetails, + BlockDetails, BridgeAddresses, GetLogsFilter, L1BatchDetails, L2ToL1LogProof, Proof, + ProtocolVersion, StorageProof, TransactionDetails, }, fee::Fee, l1::L1Tx, @@ -15,8 +15,9 @@ use zksync_types::{ l2_to_l1_log::L2ToL1Log, tokens::ETHEREUM_ADDRESS, transaction_request::CallRequest, - L1BatchNumber, MiniblockNumber, Transaction, L1_MESSENGER_ADDRESS, L2_ETH_TOKEN_ADDRESS, - MAX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, U64, + AccountTreeId, L1BatchNumber, MiniblockNumber, 
StorageKey, Transaction, L1_MESSENGER_ADDRESS, + L2_ETH_TOKEN_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, + U64, }; use zksync_utils::{address_to_h256, ratio_to_big_decimal_normalized}; use zksync_web3_decl::{ @@ -24,8 +25,9 @@ use zksync_web3_decl::{ types::{Address, Filter, Log, Token, H256}, }; -use crate::api_server::web3::{ - backend_jsonrpc::error::internal_error, metrics::API_METRICS, RpcState, +use crate::api_server::{ + tree::TreeApiClient, + web3::{backend_jsonrpc::error::internal_error, metrics::API_METRICS, RpcState}, }; use crate::l1_gas_price::L1GasPriceProvider; @@ -620,4 +622,42 @@ impl ZksNamespace { ) -> Result, Web3Error> { self.state.translate_get_logs(filter).await } + + #[tracing::instrument(skip_all)] + pub async fn get_proofs_impl( + &self, + address: Address, + keys: Vec, + l1_batch_number: L1BatchNumber, + ) -> Result { + const METHOD_NAME: &str = "get_proofs"; + + let hashed_keys = keys + .iter() + .map(|key| StorageKey::new(AccountTreeId::new(address), *key).hashed_key_u256()) + .collect(); + + let storage_proof = self + .state + .tree_api + .as_ref() + .ok_or(Web3Error::TreeApiUnavailable)? + .get_proofs(l1_batch_number, hashed_keys) + .await + .map_err(|err| internal_error(METHOD_NAME, err))? 
+ .into_iter() + .zip(keys) + .map(|(proof, key)| StorageProof { + key, + proof: proof.merkle_path, + value: proof.value, + index: proof.index, + }) + .collect(); + + Ok(Proof { + address, + storage_proof, + }) + } } diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index 5d4e91284cc..b143b6ccfc2 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -31,6 +31,7 @@ use super::metrics::API_METRICS; use crate::{ api_server::{ execution_sandbox::BlockArgs, + tree::TreeApiHttpClient, tx_sender::TxSender, web3::{ backend_jsonrpc::error::internal_error, namespaces::eth::EVENT_TOPIC_NUMBER_LIMIT, @@ -167,6 +168,7 @@ impl SealedMiniblockNumber { pub struct RpcState { pub installed_filters: Arc>, pub connection_pool: ConnectionPool, + pub tree_api: Option, pub tx_sender: TxSender, pub sync_state: Option, pub(super) api_config: InternalApiConfig, @@ -185,6 +187,7 @@ impl Clone for RpcState { installed_filters: self.installed_filters.clone(), connection_pool: self.connection_pool.clone(), tx_sender: self.tx_sender.clone(), + tree_api: self.tree_api.clone(), sync_state: self.sync_state.clone(), api_config: self.api_config.clone(), last_sealed_miniblock: self.last_sealed_miniblock.clone(), diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 5c64547d715..be7e75dbc1e 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -1364,6 +1364,7 @@ async fn run_ws_api( ) .with_polling_interval(api_config.web3_json_rpc.pubsub_interval()) .with_threads(api_config.web3_json_rpc.ws_server_threads()) + .with_tree_api(api_config.web3_json_rpc.tree_api_url()) .with_tx_sender(tx_sender, vm_barrier) .enable_api_namespaces(Namespace::NON_DEBUG.to_vec()); From 44cbb9476775b943bfed236253fda70ee067956f Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Wed, 
15 Nov 2023 10:00:23 +0100 Subject: [PATCH 011/115] chore(main): release core 18.0.0 (#424) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit :robot: I have created a release *beep* *boop* --- ## [18.0.0](https://github.com/matter-labs/zksync-era/compare/core-v17.1.0...core-v18.0.0) (2023-11-14) ### ⚠ BREAKING CHANGES * boojum integration ([#112](https://github.com/matter-labs/zksync-era/issues/112)) ### Features * **basic_witness_input_producer:** Witness inputs queued after BWIP run ([#345](https://github.com/matter-labs/zksync-era/issues/345)) ([9c2be91](https://github.com/matter-labs/zksync-era/commit/9c2be91ec1b9bd30c44e210d943539630a857825)) * boojum integration ([#112](https://github.com/matter-labs/zksync-era/issues/112)) ([e76d346](https://github.com/matter-labs/zksync-era/commit/e76d346d02ded771dea380aa8240da32119d7198)) * **core:** adds a get proof endpoint in zks namespace ([#455](https://github.com/matter-labs/zksync-era/issues/455)) ([f4313a4](https://github.com/matter-labs/zksync-era/commit/f4313a4e5a67f616d2dfa8f364c47cb73cef1ec7)) * **core:** Split config definitions and deserialization ([#414](https://github.com/matter-labs/zksync-era/issues/414)) ([c7c6b32](https://github.com/matter-labs/zksync-era/commit/c7c6b321a63dbcc7f1af045aa7416e697beab08f)) * **dal:** Do not load config from env in DAL crate ([#444](https://github.com/matter-labs/zksync-era/issues/444)) ([3fe1bb2](https://github.com/matter-labs/zksync-era/commit/3fe1bb21f8d33557353f447811ca86c60f1fe51a)) * **house_keeper:** Remove GCS Blob Cleaner ([#321](https://github.com/matter-labs/zksync-era/issues/321)) ([9548914](https://github.com/matter-labs/zksync-era/commit/9548914bd1be7b6ada52061d961353a763412150)) * **job-processor:** report attempts metrics ([#448](https://github.com/matter-labs/zksync-era/issues/448)) ([ab31f03](https://github.com/matter-labs/zksync-era/commit/ab31f031dfcaa7ddf296786ddccb78e8edd2d3c5)) * **vm:** Use the one interface 
for all vms ([#277](https://github.com/matter-labs/zksync-era/issues/277)) ([91bb99b](https://github.com/matter-labs/zksync-era/commit/91bb99b232120e29f9ee55208e3325ab37550f0c)) ### Bug Fixes * **boojnet:** various boojnet fixes ([#462](https://github.com/matter-labs/zksync-era/issues/462)) ([f13648c](https://github.com/matter-labs/zksync-era/commit/f13648cf10c0a225dc7b4f64cb3b8195c1a52814)) * change vks upgrade logic ([#491](https://github.com/matter-labs/zksync-era/issues/491)) ([cb394f3](https://github.com/matter-labs/zksync-era/commit/cb394f3c3ce93d345f24e5b9ee34e22ebca3abb0)) * **eth-sender:** Correct ABI for get_verification_key ([#445](https://github.com/matter-labs/zksync-era/issues/445)) ([8af0d85](https://github.com/matter-labs/zksync-era/commit/8af0d85b94cc74f691eb21b59556c0cd6084db01)) * **metadata-calculator:** Save commitment for pre-boojum ([#481](https://github.com/matter-labs/zksync-era/issues/481)) ([664ce33](https://github.com/matter-labs/zksync-era/commit/664ce33622af220a24360f7f11a52a14141c3fdc)) * Versioned L1 batch metadata ([#450](https://github.com/matter-labs/zksync-era/issues/450)) ([8a40dc3](https://github.com/matter-labs/zksync-era/commit/8a40dc38669867c89dfe54bf71c1f461a9db1fc7)) * **vm:** storage_refunds for `vm_refunds_enhancement` ([#449](https://github.com/matter-labs/zksync-era/issues/449)) ([1e1e59f](https://github.com/matter-labs/zksync-era/commit/1e1e59fbbb4e7b0667f080fcd922a5302d819f22)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
--- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 28 ++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 10065ba3b69..d301bdd2b16 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "17.1.0", + "core": "18.0.0", "prover": "9.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 274842c6223..97539ee9365 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,33 @@ # Changelog +## [18.0.0](https://github.com/matter-labs/zksync-era/compare/core-v17.1.0...core-v18.0.0) (2023-11-14) + + +### ⚠ BREAKING CHANGES + +* boojum integration ([#112](https://github.com/matter-labs/zksync-era/issues/112)) + +### Features + +* **basic_witness_input_producer:** Witness inputs queued after BWIP run ([#345](https://github.com/matter-labs/zksync-era/issues/345)) ([9c2be91](https://github.com/matter-labs/zksync-era/commit/9c2be91ec1b9bd30c44e210d943539630a857825)) +* boojum integration ([#112](https://github.com/matter-labs/zksync-era/issues/112)) ([e76d346](https://github.com/matter-labs/zksync-era/commit/e76d346d02ded771dea380aa8240da32119d7198)) +* **core:** adds a get proof endpoint in zks namespace ([#455](https://github.com/matter-labs/zksync-era/issues/455)) ([f4313a4](https://github.com/matter-labs/zksync-era/commit/f4313a4e5a67f616d2dfa8f364c47cb73cef1ec7)) +* **core:** Split config definitions and deserialization ([#414](https://github.com/matter-labs/zksync-era/issues/414)) ([c7c6b32](https://github.com/matter-labs/zksync-era/commit/c7c6b321a63dbcc7f1af045aa7416e697beab08f)) +* **dal:** Do not load config from env in DAL crate ([#444](https://github.com/matter-labs/zksync-era/issues/444)) ([3fe1bb2](https://github.com/matter-labs/zksync-era/commit/3fe1bb21f8d33557353f447811ca86c60f1fe51a)) +* **house_keeper:** Remove GCS Blob 
Cleaner ([#321](https://github.com/matter-labs/zksync-era/issues/321)) ([9548914](https://github.com/matter-labs/zksync-era/commit/9548914bd1be7b6ada52061d961353a763412150)) +* **job-processor:** report attempts metrics ([#448](https://github.com/matter-labs/zksync-era/issues/448)) ([ab31f03](https://github.com/matter-labs/zksync-era/commit/ab31f031dfcaa7ddf296786ddccb78e8edd2d3c5)) +* **vm:** Use the one interface for all vms ([#277](https://github.com/matter-labs/zksync-era/issues/277)) ([91bb99b](https://github.com/matter-labs/zksync-era/commit/91bb99b232120e29f9ee55208e3325ab37550f0c)) + + +### Bug Fixes + +* **boojnet:** various boojnet fixes ([#462](https://github.com/matter-labs/zksync-era/issues/462)) ([f13648c](https://github.com/matter-labs/zksync-era/commit/f13648cf10c0a225dc7b4f64cb3b8195c1a52814)) +* change vks upgrade logic ([#491](https://github.com/matter-labs/zksync-era/issues/491)) ([cb394f3](https://github.com/matter-labs/zksync-era/commit/cb394f3c3ce93d345f24e5b9ee34e22ebca3abb0)) +* **eth-sender:** Correct ABI for get_verification_key ([#445](https://github.com/matter-labs/zksync-era/issues/445)) ([8af0d85](https://github.com/matter-labs/zksync-era/commit/8af0d85b94cc74f691eb21b59556c0cd6084db01)) +* **metadata-calculator:** Save commitment for pre-boojum ([#481](https://github.com/matter-labs/zksync-era/issues/481)) ([664ce33](https://github.com/matter-labs/zksync-era/commit/664ce33622af220a24360f7f11a52a14141c3fdc)) +* Versioned L1 batch metadata ([#450](https://github.com/matter-labs/zksync-era/issues/450)) ([8a40dc3](https://github.com/matter-labs/zksync-era/commit/8a40dc38669867c89dfe54bf71c1f461a9db1fc7)) +* **vm:** storage_refunds for `vm_refunds_enhancement` ([#449](https://github.com/matter-labs/zksync-era/issues/449)) ([1e1e59f](https://github.com/matter-labs/zksync-era/commit/1e1e59fbbb4e7b0667f080fcd922a5302d819f22)) + ## [17.1.0](https://github.com/matter-labs/zksync-era/compare/core-v16.2.0...core-v17.1.0) (2023-11-03) From 
0c454fc6cdd1fb32074389643bd40c899983283f Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Wed, 15 Nov 2023 14:19:14 +0200 Subject: [PATCH 012/115] fix(metadata-calculator): Do not require events_queue for old batches (#492) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - `events_queue_commitment` is not calculated if `events_queue` is missing - `metadata_calculator` doesn't require `header.protocol_version` is present ## Why ❔ `events_queue` is missing for old batches, `header.protocol_version` is none for old batches in ENs databases ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- core/lib/commitment_utils/src/lib.rs | 13 ++++------ core/lib/dal/src/blocks_dal.rs | 4 +-- core/lib/types/src/commitment.rs | 24 ++++++++--------- core/lib/zksync_core/src/eth_sender/tests.rs | 2 +- core/lib/zksync_core/src/gas_tracker/mod.rs | 6 ++++- core/lib/zksync_core/src/genesis.rs | 2 +- .../src/metadata_calculator/mod.rs | 9 +++++-- .../src/metadata_calculator/updater.rs | 26 +++++++++++++------ .../src/state_keeper/io/tests/mod.rs | 7 +---- core/lib/zksync_core/src/sync_layer/tests.rs | 7 +---- 10 files changed, 53 insertions(+), 47 deletions(-) diff --git a/core/lib/commitment_utils/src/lib.rs b/core/lib/commitment_utils/src/lib.rs index b8f8784832e..ac6dc8ff917 100644 --- a/core/lib/commitment_utils/src/lib.rs +++ b/core/lib/commitment_utils/src/lib.rs @@ -2,21 +2,18 @@ use zkevm_test_harness::witness::utils::{ events_queue_commitment_fixed, initial_heap_content_commitment_fixed, }; -use zksync_types::{LogQuery, ProtocolVersionId, H256, U256, USED_BOOTLOADER_MEMORY_BYTES}; +use zksync_types::{LogQuery, H256, U256, USED_BOOTLOADER_MEMORY_BYTES}; use 
zksync_utils::expand_memory_contents; -pub fn events_queue_commitment( - events_queue: &Vec, - protocol_version: ProtocolVersionId, -) -> Option { - (!protocol_version.is_pre_boojum()).then(|| H256(events_queue_commitment_fixed(events_queue))) +pub fn events_queue_commitment(events_queue: &Vec, is_pre_boojum: bool) -> Option { + (!is_pre_boojum).then(|| H256(events_queue_commitment_fixed(events_queue))) } pub fn bootloader_initial_content_commitment( initial_bootloader_contents: &[(usize, U256)], - protocol_version: ProtocolVersionId, + is_pre_boojum: bool, ) -> Option { - (!protocol_version.is_pre_boojum()).then(|| { + (!is_pre_boojum).then(|| { let full_bootloader_memory = expand_memory_contents(initial_bootloader_contents, USED_BOOTLOADER_MEMORY_BYTES); H256(initial_heap_content_commitment_fixed( diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 8f73c84d3b1..662cdf7b501 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -582,7 +582,7 @@ impl BlocksDal<'_, '_> { number: L1BatchNumber, metadata: &L1BatchMetadata, previous_root_hash: H256, - protocol_version: ProtocolVersionId, + is_pre_boojum: bool, ) -> anyhow::Result<()> { let mut transaction = self.storage.start_transaction().await?; @@ -615,7 +615,7 @@ impl BlocksDal<'_, '_> { .execute(transaction.conn()) .await?; - if metadata.events_queue_commitment.is_some() || protocol_version.is_pre_boojum() { + if metadata.events_queue_commitment.is_some() || is_pre_boojum { // Save `commitment`, `aux_data_hash`, `events_queue_commitment`, `bootloader_initial_content_commitment`. 
sqlx::query!( "INSERT INTO commitments (l1_batch_number, events_queue_commitment, bootloader_initial_content_commitment) \ diff --git a/core/lib/types/src/commitment.rs b/core/lib/types/src/commitment.rs index 8f2a4620a86..29750a5c77b 100644 --- a/core/lib/types/src/commitment.rs +++ b/core/lib/types/src/commitment.rs @@ -25,7 +25,7 @@ use crate::{ compress_state_diffs, InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord, PADDED_ENCODED_STORAGE_DIFF_LEN_BYTES, }, - ProtocolVersionId, H256, KNOWN_CODES_STORAGE_ADDRESS, U256, + H256, KNOWN_CODES_STORAGE_ADDRESS, U256, }; /// Type that can be serialized for commitment. @@ -341,7 +341,7 @@ struct L1BatchAuxiliaryOutput { bootloader_heap_hash: H256, #[allow(dead_code)] events_state_queue_hash: H256, - protocol_version: ProtocolVersionId, + is_pre_boojum: bool, } impl L1BatchAuxiliaryOutput { @@ -354,7 +354,7 @@ impl L1BatchAuxiliaryOutput { state_diffs: Vec, bootloader_heap_hash: H256, events_state_queue_hash: H256, - protocol_version: ProtocolVersionId, + is_pre_boojum: bool, ) -> Self { let state_diff_hash_from_logs = system_logs.iter().find_map(|log| { if log.0.key == u256_to_h256(STATE_DIFF_HASH_KEY.into()) { @@ -378,7 +378,7 @@ impl L1BatchAuxiliaryOutput { repeated_writes_compressed, system_logs_compressed, state_diffs_packed, - ) = if protocol_version.is_pre_boojum() { + ) = if is_pre_boojum { ( pre_boojum_serialize_commitments(&l2_l1_logs), pre_boojum_serialize_commitments(&initial_writes), @@ -404,7 +404,7 @@ impl L1BatchAuxiliaryOutput { let repeated_writes_hash = H256::from(keccak256(&repeated_writes_compressed)); let state_diffs_hash = H256::from(keccak256(&(state_diffs_packed))); - let serialized_logs = if protocol_version.is_pre_boojum() { + let serialized_logs = if is_pre_boojum { &l2_l1_logs_compressed[4..] 
} else { &l2_l1_logs_compressed @@ -414,7 +414,7 @@ impl L1BatchAuxiliaryOutput { .chunks(UserL2ToL1Log::SERIALIZED_SIZE) .map(|chunk| <[u8; UserL2ToL1Log::SERIALIZED_SIZE]>::try_from(chunk).unwrap()); // ^ Skip first 4 bytes of the serialized logs (i.e., the number of logs). - let min_tree_size = if protocol_version.is_pre_boojum() { + let min_tree_size = if is_pre_boojum { L2ToL1Log::PRE_BOOJUM_MIN_L2_L1_LOGS_TREE_SIZE } else { L2ToL1Log::MIN_L2_L1_LOGS_TREE_SIZE @@ -453,7 +453,7 @@ impl L1BatchAuxiliaryOutput { bootloader_heap_hash, events_state_queue_hash, - protocol_version, + is_pre_boojum, } } @@ -462,7 +462,7 @@ impl L1BatchAuxiliaryOutput { const SERIALIZED_SIZE: usize = 128; let mut result = Vec::with_capacity(SERIALIZED_SIZE); - if self.protocol_version.is_pre_boojum() { + if self.is_pre_boojum { result.extend(self.l2_l1_logs_merkle_root.as_bytes()); result.extend(self.l2_l1_logs_linear_hash.as_bytes()); result.extend(self.initial_writes_hash.as_bytes()); @@ -566,7 +566,7 @@ impl L1BatchCommitment { state_diffs: Vec, bootloader_heap_hash: H256, events_state_queue_hash: H256, - protocol_version: ProtocolVersionId, + is_pre_boojum: bool, ) -> Self { let meta_parameters = L1BatchMetaParameters { zkporter_is_available: ZKPORTER_IS_AVAILABLE, @@ -596,7 +596,7 @@ impl L1BatchCommitment { state_diffs, bootloader_heap_hash, events_state_queue_hash, - protocol_version, + is_pre_boojum, ), meta_parameters, } @@ -671,7 +671,7 @@ mod tests { }; use crate::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; use crate::writes::{InitialStorageWrite, RepeatedStorageWrite}; - use crate::{ProtocolVersionId, H256, U256}; + use crate::{H256, U256}; #[serde_as] #[derive(Debug, Serialize, Deserialize)] @@ -754,7 +754,7 @@ mod tests { vec![], H256::zero(), H256::zero(), - ProtocolVersionId::latest(), + false, ); let commitment = L1BatchCommitment { diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs index e15d8e91eff..51166fc794a 
100644 --- a/core/lib/zksync_core/src/eth_sender/tests.rs +++ b/core/lib/zksync_core/src/eth_sender/tests.rs @@ -897,7 +897,7 @@ async fn insert_l1_batch(tester: &EthSenderTester, number: L1BatchNumber) -> L1B header.number, &default_l1_batch_metadata(), Default::default(), - Default::default(), + false, ) .await .unwrap(); diff --git a/core/lib/zksync_core/src/gas_tracker/mod.rs b/core/lib/zksync_core/src/gas_tracker/mod.rs index c27d4fb1173..44c59ac2758 100644 --- a/core/lib/zksync_core/src/gas_tracker/mod.rs +++ b/core/lib/zksync_core/src/gas_tracker/mod.rs @@ -111,7 +111,11 @@ pub(crate) fn commit_gas_count_for_l1_batch( .sum(); // Boojum upgrade changes how storage writes are communicated/compressed. - let state_diff_size = if header.protocol_version.unwrap().is_pre_boojum() { + let is_pre_boojum = header + .protocol_version + .map(|v| v.is_pre_boojum()) + .unwrap_or(true); + let state_diff_size = if is_pre_boojum { metadata.initial_writes_compressed.len() as u32 + metadata.repeated_writes_compressed.len() as u32 } else { diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index 231ed4c88a9..39a8645767d 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -111,7 +111,7 @@ pub async fn ensure_genesis_state( vec![], H256::zero(), H256::zero(), - *protocol_version, + protocol_version.is_pre_boojum(), ); save_genesis_l1_batch_metadata( diff --git a/core/lib/zksync_core/src/metadata_calculator/mod.rs b/core/lib/zksync_core/src/metadata_calculator/mod.rs index 8392511858a..7289347fec0 100644 --- a/core/lib/zksync_core/src/metadata_calculator/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/mod.rs @@ -185,6 +185,11 @@ impl MetadataCalculator { events_queue_commitment: Option, bootloader_initial_content_commitment: Option, ) -> L1BatchMetadata { + let is_pre_boojum = header + .protocol_version + .map(|v| v.is_pre_boojum()) + .unwrap_or(true); + let merkle_root_hash = 
tree_metadata.root_hash; let commitment = L1BatchCommitment::new( @@ -199,12 +204,12 @@ impl MetadataCalculator { tree_metadata.state_diffs, bootloader_initial_content_commitment.unwrap_or_default(), events_queue_commitment.unwrap_or_default(), - header.protocol_version.unwrap(), + is_pre_boojum, ); let commitment_hash = commitment.hash(); tracing::trace!("L1 batch commitment: {commitment:?}"); - let l2_l1_messages_compressed = if header.protocol_version.unwrap().is_pre_boojum() { + let l2_l1_messages_compressed = if is_pre_boojum { commitment.l2_l1_logs_compressed().to_vec() } else { commitment.system_logs_compressed().to_vec() diff --git a/core/lib/zksync_core/src/metadata_calculator/updater.rs b/core/lib/zksync_core/src/metadata_calculator/updater.rs index 08652300434..ed38dae14ed 100644 --- a/core/lib/zksync_core/src/metadata_calculator/updater.rs +++ b/core/lib/zksync_core/src/metadata_calculator/updater.rs @@ -165,13 +165,17 @@ impl TreeUpdater { reestimate_gas_cost_latency.observe(); let save_postgres_latency = METRICS.start_stage(TreeUpdateStage::SavePostgres); + let is_pre_boojum = header + .protocol_version + .map(|v| v.is_pre_boojum()) + .unwrap_or(true); storage .blocks_dal() .save_l1_batch_metadata( l1_batch_number, &metadata, previous_root_hash, - header.protocol_version.unwrap(), + is_pre_boojum, ) .await .unwrap(); @@ -244,10 +248,18 @@ impl TreeUpdater { .blocks_dal() .get_events_queue(header.number) .await - .unwrap() .unwrap(); - let events_queue_commitment = - events_queue_commitment(&events_queue, header.protocol_version.unwrap()); + + let is_pre_boojum = header + .protocol_version + .map(|v| v.is_pre_boojum()) + .unwrap_or(true); + let events_queue_commitment = (!is_pre_boojum).then(|| { + let events_queue = + events_queue.expect("Events queue is required for post-boojum batch"); + events_queue_commitment(&events_queue, is_pre_boojum) + .expect("Events queue commitment is required for post-boojum batch") + }); 
events_queue_commitment_latency.observe(); let bootloader_commitment_latency = @@ -258,10 +270,8 @@ impl TreeUpdater { .await .unwrap() .unwrap(); - let bootloader_initial_content_commitment = bootloader_initial_content_commitment( - &initial_bootloader_contents, - header.protocol_version.unwrap(), - ); + let bootloader_initial_content_commitment = + bootloader_initial_content_commitment(&initial_bootloader_contents, is_pre_boojum); bootloader_commitment_latency.observe(); ( diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 634483e8421..0c13a7a614b 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -353,12 +353,7 @@ async fn test_miniblock_and_l1_batch_processing( // Save metadata for the genesis L1 batch so that we don't hang in `seal_l1_batch`. let metadata = create_l1_batch_metadata(0); conn.blocks_dal() - .save_l1_batch_metadata( - L1BatchNumber(0), - &metadata, - H256::zero(), - ProtocolVersionId::latest(), - ) + .save_l1_batch_metadata(L1BatchNumber(0), &metadata, H256::zero(), false) .await .unwrap(); drop(conn); diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 71d98fb6f73..89b8ce86e73 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -411,12 +411,7 @@ async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number: u32) { let metadata = create_l1_batch_metadata(number); storage .blocks_dal() - .save_l1_batch_metadata( - L1BatchNumber(1), - &metadata, - H256::zero(), - ProtocolVersionId::latest(), - ) + .save_l1_batch_metadata(L1BatchNumber(1), &metadata, H256::zero(), false) .await .unwrap(); break; From 97f2c97224eb58cbd696dd0e941f33cb0eac2980 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Wed, 15 Nov 2023 13:47:59 
+0100 Subject: [PATCH 013/115] chore(main): release core 18.0.1 (#493) :robot: I have created a release *beep* *boop* --- ## [18.0.1](https://github.com/matter-labs/zksync-era/compare/core-v18.0.0...core-v18.0.1) (2023-11-15) ### Bug Fixes * **metadata-calculator:** Do not require events_queue for old batches ([#492](https://github.com/matter-labs/zksync-era/issues/492)) ([0c454fc](https://github.com/matter-labs/zksync-era/commit/0c454fc6cdd1fb32074389643bd40c899983283f)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index d301bdd2b16..996ed1e81e8 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "18.0.0", + "core": "18.0.1", "prover": "9.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 97539ee9365..466c6c9ec94 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [18.0.1](https://github.com/matter-labs/zksync-era/compare/core-v18.0.0...core-v18.0.1) (2023-11-15) + + +### Bug Fixes + +* **metadata-calculator:** Do not require events_queue for old batches ([#492](https://github.com/matter-labs/zksync-era/issues/492)) ([0c454fc](https://github.com/matter-labs/zksync-era/commit/0c454fc6cdd1fb32074389643bd40c899983283f)) + ## [18.0.0](https://github.com/matter-labs/zksync-era/compare/core-v17.1.0...core-v18.0.0) (2023-11-14) From cb873bd0da6b421160ce96b8d578f1351861f376 Mon Sep 17 00:00:00 2001 From: Tudor <32748771+RedaOps@users.noreply.github.com> Date: Wed, 15 Nov 2023 14:55:58 +0200 Subject: [PATCH 014/115] fix(api): `debug_trace*` no longer throws error if the `TracerConfig` object 
is incomplete (#468) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ * Derived `Default` for `CallTracerConfig` * Allow default values for `tracer_config` field inside `TracerConfig` * Integration tests for this exact issue to confirm it is solved. * Running `zk test i api` will now also run the test suite for the debug namespace. (this is optional and not required in any way for the bug fix, so I can remove it) ## Why ❔ Check out the discussion in #451 The following `debug` namespace rpc methods would throw an error if you would include a `TracerConfig` as an argument but only specified the `tracer` and not also the `tracer_config` (see issue mentioned above for full description of the issue): * `debug_traceBlockByNumber` * `debug_traceBlockByHash` * `debug_traceCall` * `debug_traceTransaction` Some ETH nodes I tested do not throw an error in this case, so the zkSync node should behave the same way after this fix. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- core/lib/types/src/api/mod.rs | 3 ++- core/tests/ts-integration/package.json | 2 +- core/tests/ts-integration/tests/api/debug.test.ts | 8 ++++++++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 99b6964408f..f0cc7132831 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -627,7 +627,7 @@ pub enum SupportedTracers { CallTracer, } -#[derive(Debug, Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone, Default)] #[serde(rename_all = "camelCase")] pub struct CallTracerConfig { pub only_top_call: bool, @@ -637,6 +637,7 @@ pub struct CallTracerConfig { #[serde(rename_all = "camelCase")] pub struct TracerConfig { pub tracer: SupportedTracers, + #[serde(default)] pub tracer_config: CallTracerConfig, } diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index ef6e4880aa0..63b51a6e179 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -7,7 +7,7 @@ "test": "zk f jest --forceExit --testTimeout 60000", "long-running-test": "zk f jest", "fee-test": "RUN_FEE_TEST=1 zk f jest -- fees.test.ts", - "api-test": "zk f jest -- api/web3.test.ts", + "api-test": "zk f jest -- api/web3.test.ts api/debug.test.ts", "contract-verification-test": "zk f jest -- api/contract-verification.test.ts", "build": "hardhat compile", "build-yul": "hardhat run scripts/compile-yul.ts" diff --git a/core/tests/ts-integration/tests/api/debug.test.ts b/core/tests/ts-integration/tests/api/debug.test.ts index adf0bf31a04..b4585789fb1 100644 --- a/core/tests/ts-integration/tests/api/debug.test.ts +++ b/core/tests/ts-integration/tests/api/debug.test.ts @@ -63,6 +63,9 @@ describe('Debug methods', () => { let blockCallTrace = await testMaster .mainAccount() .provider.send('debug_traceBlockByNumber', [receipt.blockNumber.toString(16)]); + let blockCallTrace_tracer = await testMaster + 
.mainAccount() + .provider.send('debug_traceBlockByNumber', [receipt.blockNumber.toString(16), { tracer: 'callTracer' }]); let expected = { error: null, from: ethers.constants.AddressZero, @@ -78,6 +81,7 @@ describe('Debug methods', () => { }; for (let i = 0; i < blockCallTrace.length; i++) { expect(blockCallTrace[i]).toEqual({ result: expected }); + expect(blockCallTrace[i]).toEqual(blockCallTrace_tracer[i]); } expected = { error: null, @@ -97,7 +101,11 @@ calls: expect.any(Array) }; let txCallTrace = await testMaster.mainAccount().provider.send('debug_traceTransaction', [tx.hash]); + let txCallTrace_tracer = await testMaster + .mainAccount() + .provider.send('debug_traceTransaction', [tx.hash, { tracer: 'callTracer' }]); expect(txCallTrace).toEqual(expected); + expect(txCallTrace).toEqual(txCallTrace_tracer); }); afterAll(async () => { From 38bb4823c7b5e0e651d9f531feede66c24afd19f Mon Sep 17 00:00:00 2001 From: Akash <112477155+akash-chandrakar@users.noreply.github.com> Date: Thu, 16 Nov 2023 13:43:40 +0400 Subject: [PATCH 015/115] fix(crypto): update shivini to switch to era-cuda (#469) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ update shivini dependency to switch to era-cuda ## Why ❔ * take latest fixes from shivini(gpu prover) that includes implementation of batch FRI query ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--------- Co-authored-by: zksync-admin-bot2 <91326834+zksync-admin-bot2@users.noreply.github.com> --- prover/Cargo.lock | 136 ++++++++---------- .../src/gpu_prover_job_processor.rs | 3 +- prover/setup-data-gpu-keys.json | 6 +- .../src/setup_data_generator.rs | 3 + 4 files changed, 68 insertions(+), 80 deletions(-) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 0af717daff5..38ea58ac436 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -77,7 +77,7 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.11", "once_cell", "version_check", ] @@ -628,7 +628,7 @@ dependencies = [ "const_format", "convert_case 0.6.0", "crossbeam 0.8.2", - "crypto-bigint 0.5.3", + "crypto-bigint 0.5.4", "cs_derive 0.1.0 (git+https://github.com/matter-labs/era-boojum?branch=main)", "derivative", "ethereum-types 0.14.1", @@ -652,7 +652,7 @@ dependencies = [ [[package]] name = "boojum-cuda" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum-cuda?branch=main#af269f667e0609f08a9cf303f330461ed5cd00ba" +source = "git+https://github.com/matter-labs/era-boojum-cuda?branch=main#6731d9a8d0c439a4fa3cde14b494c9eca642bb2e" dependencies = [ "boojum", "cmake", @@ -742,11 +742,10 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "0f8e7c90afad890484a21653d08b6e209ae34770fb5ee298f9c699fcc1e5c856" dependencies = [ - "jobserver", "libc", ] @@ -872,18 +871,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.7" +version = "4.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ac495e00dcec98c83465d5ad66c5c4fabd652fd6686e7c6269b117e729a6f17b" +checksum = "2275f18819641850fa26c89acc84d465c1bf91ce57bc2748b28c420473352f64" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.4.7" +version = "4.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77ed9a32a62e6ca27175d00d29d05ca32e396ea1eb5fb01d8256b669cec7663" +checksum = "07cdf1b148b25c1e1f7a42225e30a0d99a615cd4637eae7365548dd4529b95bc" dependencies = [ "anstyle", "clap_lex", @@ -1056,7 +1055,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.7", + "clap 4.4.8", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -1252,9 +1251,9 @@ dependencies = [ [[package]] name = "crypto-bigint" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" +checksum = "28f85c3514d2a6e64160359b45a3918c3b4178bcbf4ae5d03ab2d02e521c479a" dependencies = [ "rand_core 0.6.4", "subtle", @@ -1335,7 +1334,7 @@ dependencies = [ [[package]] name = "cudart" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum-cuda?branch=main#af269f667e0609f08a9cf303f330461ed5cd00ba" +source = "git+https://github.com/matter-labs/era-cuda?branch=main#492f2afad93ad156ac0cfb69e6efbfc03a7a2652" dependencies = [ "bitflags 2.4.1", "criterion", @@ -1345,7 +1344,7 @@ dependencies = [ [[package]] name = "cudart-sys" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-boojum-cuda?branch=main#af269f667e0609f08a9cf303f330461ed5cd00ba" +source = "git+https://github.com/matter-labs/era-cuda?branch=main#492f2afad93ad156ac0cfb69e6efbfc03a7a2652" dependencies = [ "bindgen 0.69.1", "serde_json", @@ -1590,9 +1589,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece" dependencies = [ "humantime", "is-terminal", @@ -1618,9 +1617,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" +checksum = "7c18ee0ed65a5f1f81cac6b1d213b69c35fa47d4252ad41f1486dbd8226fe36e" dependencies = [ "libc", "windows-sys", @@ -2071,9 +2070,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" dependencies = [ "cfg-if 1.0.0", "libc", @@ -2231,9 +2230,9 @@ checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" [[package]] name = "handlebars" -version = "4.4.0" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c39b3bc2a8f715298032cf5087e58573809374b08160aa7d750582bdb82d2683" +checksum = "faa67bab9ff362228eb3d00bd024a4965d8231bbb7921167f0cfa66c6626b225" dependencies = [ "log", "pest", @@ -2395,9 +2394,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "f95b9abcae896730d42b78e09c155ed4ddf82c07b4de772c64aee5b2d8b7c150" dependencies = [ "bytes", "fnv", @@ -2655,15 +2654,6 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" -[[package]] -name = "jobserver" 
-version = "0.1.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" -dependencies = [ - "libc", -] - [[package]] name = "js-sys" version = "0.3.65" @@ -2818,9 +2808,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "local-ip-address" @@ -3448,7 +3438,7 @@ dependencies = [ [[package]] name = "pairing_ce" version = "0.28.5" -source = "git+https://github.com/matter-labs/pairing.git#d06c2a112913b0abfb75996cc29a6b6075717e99" +source = "git+https://github.com/matter-labs/pairing.git#f55393fd366596eac792d78525d26e9c4d6ed1ca" dependencies = [ "byteorder", "cfg-if 1.0.0", @@ -3941,9 +3931,9 @@ dependencies = [ [[package]] name = "proptest" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", @@ -3953,7 +3943,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "rand_xorshift 0.3.0", - "regex-syntax 0.7.5", + "regex-syntax 0.8.2", "rusty-fork", "tempfile", "unarray", @@ -4158,7 +4148,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.11", ] [[package]] @@ -4303,7 +4293,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ - "getrandom 0.2.10", + "getrandom 
0.2.11", "libredox", "thiserror", ] @@ -4346,12 +4336,6 @@ version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" -[[package]] -name = "regex-syntax" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" - [[package]] name = "regex-syntax" version = "0.8.2" @@ -4479,7 +4463,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" dependencies = [ "cc", - "getrandom 0.2.10", + "getrandom 0.2.11", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -4591,9 +4575,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ "base64 0.21.5", ] @@ -4776,9 +4760,9 @@ dependencies = [ [[package]] name = "sentry" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0097a48cd1999d983909f07cb03b15241c5af29e5e679379efac1c06296abecc" +checksum = "6ce4b57f1b521f674df7a1d200be8ff5d74e3712020ee25b553146657b5377d5" dependencies = [ "httpdate", "native-tls", @@ -4795,9 +4779,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a7b80fa1dd6830a348d38a8d3a9761179047757b7dca29aef82db0118b9670" +checksum = "58cc8d4e04a73de8f718dc703943666d03f25d3e9e4d0fb271ca0b8c76dfa00e" dependencies = [ "backtrace", "once_cell", @@ -4807,9 +4791,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.31.7" +version = "0.31.8" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7615dc588930f1fd2e721774f25844ae93add2dbe2d3c2f995ce5049af898147" +checksum = "6436c1bad22cdeb02179ea8ef116ffc217797c028927def303bc593d9320c0d1" dependencies = [ "hostname", "libc", @@ -4821,9 +4805,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f51264e4013ed9b16558cce43917b983fa38170de2ca480349ceb57d71d6053" +checksum = "901f761681f97db3db836ef9e094acdd8756c40215326c194201941947164ef1" dependencies = [ "once_cell", "rand 0.8.5", @@ -4834,9 +4818,9 @@ dependencies = [ [[package]] name = "sentry-debug-images" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fe6180fa564d40bb942c9f0084ffb5de691c7357ead6a2b7a3154fae9e401dd" +checksum = "afdb263e73d22f39946f6022ed455b7561b22ff5553aca9be3c6a047fa39c328" dependencies = [ "findshlibs", "once_cell", @@ -4845,9 +4829,9 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "323160213bba549f9737317b152af116af35c0410f4468772ee9b606d3d6e0fa" +checksum = "74fbf1c163f8b6a9d05912e1b272afa27c652e8b47ea60cb9a57ad5e481eea99" dependencies = [ "sentry-backtrace", "sentry-core", @@ -4855,9 +4839,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38033822128e73f7b6ca74c1631cef8868890c6cb4008a291cf73530f87b4eac" +checksum = "82eabcab0a047040befd44599a1da73d3adb228ff53b5ed9795ae04535577704" dependencies = [ "sentry-backtrace", "sentry-core", @@ -4867,9 +4851,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0e663b3eb62ddfc023c9cf5432daf5f1a4f6acb1df4d78dd80b740b32dd1a740" +checksum = "da956cca56e0101998c8688bc65ce1a96f00673a0e58e663664023d4c7911e82" dependencies = [ "debugid", "hex", @@ -5071,7 +5055,7 @@ dependencies = [ [[package]] name = "shivini" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-shivini.git?branch=main#2a40d9376bdb92d563e1eacaddcbcfaedd09de89" +source = "git+https://github.com/matter-labs/era-shivini.git?branch=main#bb3d4ad1fa454d7be54a819cc0c16561806c49fe" dependencies = [ "bincode", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -5156,9 +5140,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.1" +version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" +checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" dependencies = [ "serde", ] @@ -5661,9 +5645,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.33.0" +version = "1.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" dependencies = [ "backtrace", "bytes", @@ -5680,9 +5664,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2 1.0.69", "quote 1.0.33", @@ -6613,7 +6597,7 @@ dependencies = [ "codegen 0.2.0", "crossbeam 0.8.2", "derivative", - "env_logger 0.10.0", + "env_logger 0.10.1", "hex", "num-bigint 0.4.4", "num-integer", @@ -6640,7 +6624,7 @@ dependencies = [ 
"codegen 0.2.0", "crossbeam 0.8.2", "derivative", - "env_logger 0.10.0", + "env_logger 0.10.1", "hex", "rand 0.4.6", "rayon", diff --git a/prover/prover_fri/src/gpu_prover_job_processor.rs b/prover/prover_fri/src/gpu_prover_job_processor.rs index c624c040ce3..ec022d419d4 100644 --- a/prover/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/prover_fri/src/gpu_prover_job_processor.rs @@ -145,7 +145,7 @@ pub mod gpu_prover { NoPow, _, >( - assembly, + &assembly, &witness_vector, proof_config, &setup_data.setup, @@ -166,6 +166,7 @@ pub mod gpu_prover { started_at.elapsed(), "circuit_type" => circuit_id.to_string() ); + let proof = proof.into(); verify_proof( &prover_job.circuit_wrapper, &proof, diff --git a/prover/setup-data-gpu-keys.json b/prover/setup-data-gpu-keys.json index 1a5eb452059..295d43ddaa4 100644 --- a/prover/setup-data-gpu-keys.json +++ b/prover/setup-data-gpu-keys.json @@ -1,5 +1,5 @@ { - "us": "gs://matterlabs-setup-data-us/8a40dc3-gpu/", - "europe": "gs://matterlabs-setup-data-europe/8a40dc3-gpu/", - "asia": "gs://matterlabs-setup-data-asia/8a40dc3-gpu/" + "us": "gs://matterlabs-setup-data-us/5e22273-gpu/", + "europe": "gs://matterlabs-setup-data-europe/5e22273-gpu/", + "asia": "gs://matterlabs-setup-data-asia/5e22273-gpu/" } diff --git a/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs b/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs index 7681ff1693e..354594a556a 100644 --- a/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs +++ b/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs @@ -186,10 +186,13 @@ fn generate_gpu_setup_data(is_base_layer: bool, numeric_circuit: u8) -> anyhow:: ) } }; + let worker = Worker::new(); let gpu_setup_data = GpuSetup::from_setup_and_hints( cpu_setup_data.setup_base, cpu_setup_data.setup_tree, cpu_setup_data.vars_hint.clone(), + cpu_setup_data.wits_hint, + &worker, ) .context("failed creating GPU base layer setup data")?; let 
gpu_prover_setup_data = GpuProverSetupData { From c3a7651987f6efaeca55ccf328e5aaaa5cc66bde Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 16 Nov 2023 12:44:03 +0200 Subject: [PATCH 016/115] fix(proof-data-handler): Check commitments only for post-boojum (#500) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ `events_queue_commitment`, `bootloader_initial_content_commitment` are checked in proof-data-handler only for post-boojum batches. ## Why ❔ These values are not present for pre-boojum batches. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- .../proof_data_handler/request_processor.rs | 45 +++++++++++++------ 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs index 0e3505ec351..53f3376938a 100644 --- a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs +++ b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs @@ -161,14 +161,34 @@ impl RequestProcessor { .unwrap() .expect("Proved block without metadata"); - let events_queue_state = l1_batch - .metadata - .events_queue_commitment - .expect("No events_queue_commitment"); - let bootloader_heap_initial_content = l1_batch - .metadata - .bootloader_initial_content_commitment - .expect("No bootloader_initial_content_commitment"); + let is_pre_boojum = l1_batch + .header + .protocol_version + .map(|v| v.is_pre_boojum()) + .unwrap_or(true); + if is_pre_boojum { + let events_queue_state = l1_batch + .metadata + .events_queue_commitment + .expect("No events_queue_commitment"); + let bootloader_heap_initial_content = l1_batch + 
.metadata + .bootloader_initial_content_commitment + .expect("No bootloader_initial_content_commitment"); + + if events_queue_state != events_queue_state_from_prover + || bootloader_heap_initial_content + != bootloader_heap_initial_content_from_prover + { + let server_values = format!("events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}"); + let prover_values = format!("events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = {bootloader_heap_initial_content_from_prover}"); + panic!( + "Auxilary output doesn't match, server values: {} prover values: {}", + server_values, prover_values + ); + } + } + let system_logs = serialize_commitments(&l1_batch.header.system_logs); let system_logs_hash = H256(keccak256(&system_logs)); @@ -181,14 +201,11 @@ impl RequestProcessor { .0 .value; - if events_queue_state != events_queue_state_from_prover - || bootloader_heap_initial_content - != bootloader_heap_initial_content_from_prover - || state_diff_hash != state_diff_hash_from_prover + if state_diff_hash != state_diff_hash_from_prover || system_logs_hash != system_logs_hash_from_prover { - let server_values = format!("{system_logs_hash} {state_diff_hash} {events_queue_state} {bootloader_heap_initial_content}"); - let prover_values = format!("{system_logs_hash_from_prover} {state_diff_hash_from_prover} {events_queue_state_from_prover} {bootloader_heap_initial_content_from_prover}"); + let server_values = format!("system_logs_hash = {system_logs_hash}, state_diff_hash = {state_diff_hash}"); + let prover_values = format!("system_logs_hash = {system_logs_hash_from_prover}, state_diff_hash = {state_diff_hash_from_prover}"); panic!( "Auxilary output doesn't match, server values: {} prover values: {}", server_values, prover_values From 87bd22693ec59b9346b92e96537e46c49515b378 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Thu, 16 Nov 2023 
12:37:30 +0100 Subject: [PATCH 017/115] chore(main): release core 18.0.2 (#495) :robot: I have created a release *beep* *boop* --- ## [18.0.2](https://github.com/matter-labs/zksync-era/compare/core-v18.0.1...core-v18.0.2) (2023-11-16) ### Bug Fixes * **api:** `debug_trace*` no longer throws error if the `TracerConfig` object is incomplete ([#468](https://github.com/matter-labs/zksync-era/issues/468)) ([cb873bd](https://github.com/matter-labs/zksync-era/commit/cb873bd0da6b421160ce96b8d578f1351861f376)) * **proof-data-handler:** Check commitments only for post-boojum ([#500](https://github.com/matter-labs/zksync-era/issues/500)) ([c3a7651](https://github.com/matter-labs/zksync-era/commit/c3a7651987f6efaeca55ccf328e5aaaa5cc66bde)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 996ed1e81e8..985c54d1869 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "18.0.1", + "core": "18.0.2", "prover": "9.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 466c6c9ec94..907ece3fca6 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## [18.0.2](https://github.com/matter-labs/zksync-era/compare/core-v18.0.1...core-v18.0.2) (2023-11-16) + + +### Bug Fixes + +* **api:** `debug_trace*` no longer throws error if the `TracerConfig` object is incomplete ([#468](https://github.com/matter-labs/zksync-era/issues/468)) ([cb873bd](https://github.com/matter-labs/zksync-era/commit/cb873bd0da6b421160ce96b8d578f1351861f376)) +* **proof-data-handler:** Check commitments only for post-boojum 
([#500](https://github.com/matter-labs/zksync-era/issues/500)) ([c3a7651](https://github.com/matter-labs/zksync-era/commit/c3a7651987f6efaeca55ccf328e5aaaa5cc66bde)) + ## [18.0.1](https://github.com/matter-labs/zksync-era/compare/core-v18.0.0...core-v18.0.1) (2023-11-15) From ff636ca9250d0276098e4b5b4a5f7a44a0717d06 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 16 Nov 2023 14:29:09 +0200 Subject: [PATCH 018/115] fix(proof-data-handler): Check commitments only for post-boojum (again) (#502) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fix bug ## Why ❔ Fix bug ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- .../lib/zksync_core/src/proof_data_handler/request_processor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs index 53f3376938a..d82e66f0cf5 100644 --- a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs +++ b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs @@ -166,7 +166,7 @@ impl RequestProcessor { .protocol_version .map(|v| v.is_pre_boojum()) .unwrap_or(true); - if is_pre_boojum { + if !is_pre_boojum { let events_queue_state = l1_batch .metadata .events_queue_commitment From ecd68ca64c18e8733d79e1e3a81a6fdb87e5b5e5 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Thu, 16 Nov 2023 13:55:11 +0100 Subject: [PATCH 019/115] chore(main): release core 18.0.3 (#505) :robot: I have created a release *beep* *boop* --- ## [18.0.3](https://github.com/matter-labs/zksync-era/compare/core-v18.0.2...core-v18.0.3) 
(2023-11-16) ### Bug Fixes * **proof-data-handler:** Check commitments only for post-boojum (again) ([#502](https://github.com/matter-labs/zksync-era/issues/502)) ([ff636ca](https://github.com/matter-labs/zksync-era/commit/ff636ca9250d0276098e4b5b4a5f7a44a0717d06)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 985c54d1869..d34f7f5b5f7 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "18.0.2", + "core": "18.0.3", "prover": "9.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 907ece3fca6..92c48c29573 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [18.0.3](https://github.com/matter-labs/zksync-era/compare/core-v18.0.2...core-v18.0.3) (2023-11-16) + + +### Bug Fixes + +* **proof-data-handler:** Check commitments only for post-boojum (again) ([#502](https://github.com/matter-labs/zksync-era/issues/502)) ([ff636ca](https://github.com/matter-labs/zksync-era/commit/ff636ca9250d0276098e4b5b4a5f7a44a0717d06)) + ## [18.0.2](https://github.com/matter-labs/zksync-era/compare/core-v18.0.1...core-v18.0.2) (2023-11-16) From f9ae0ad56b17fffa4b400ec2376517a2b630b862 Mon Sep 17 00:00:00 2001 From: pompon0 Date: Thu, 16 Nov 2023 15:09:01 +0100 Subject: [PATCH 020/115] feat: added consensus types and consensus column to miniblocks table (#490) To execute consensus on miniblocks we need to store extra data per miniblock. 
--- Cargo.lock | 844 +++++++++++++++++- ...d_consensus_fields_for_miniblocks.down.sql | 2 + ...add_consensus_fields_for_miniblocks.up.sql | 2 + core/lib/dal/sqlx-data.json | 203 +++-- core/lib/dal/src/blocks_dal.rs | 23 +- core/lib/dal/src/models/storage_sync.rs | 9 +- core/lib/dal/src/sync_dal.rs | 35 +- core/lib/types/Cargo.toml | 9 + core/lib/types/build.rs | 12 + core/lib/types/src/api/en.rs | 5 +- core/lib/types/src/block.rs | 40 + core/lib/types/src/lib.rs | 2 + core/lib/types/src/proto/mod.proto | 10 + core/lib/types/src/proto/mod.rs | 2 + core/lib/zksync_core/Cargo.toml | 16 +- core/lib/zksync_core/build.rs | 12 + core/lib/zksync_core/src/consensus/mod.rs | 10 + core/lib/zksync_core/src/consensus/payload.rs | 92 ++ .../zksync_core/src/consensus/proto/mod.proto | 21 + .../zksync_core/src/consensus/proto/mod.rs | 2 + core/lib/zksync_core/src/sync_layer/tests.rs | 1 + 21 files changed, 1205 insertions(+), 147 deletions(-) create mode 100644 core/lib/dal/migrations/20231102144901_add_consensus_fields_for_miniblocks.down.sql create mode 100644 core/lib/dal/migrations/20231102144901_add_consensus_fields_for_miniblocks.up.sql create mode 100644 core/lib/types/build.rs create mode 100644 core/lib/types/src/proto/mod.proto create mode 100644 core/lib/types/src/proto/mod.rs create mode 100644 core/lib/zksync_core/build.rs create mode 100644 core/lib/zksync_core/src/consensus/mod.rs create mode 100644 core/lib/zksync_core/src/consensus/payload.rs create mode 100644 core/lib/zksync_core/src/consensus/proto/mod.proto create mode 100644 core/lib/zksync_core/src/consensus/proto/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 7c4e3c25d16..d98b5207003 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -223,6 +223,16 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +[[package]] +name = "aead" +version = "0.5.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array 0.14.7", +] + [[package]] name = "aes" version = "0.6.0" @@ -231,7 +241,18 @@ checksum = "884391ef1066acaa41e766ba8f596341b96e93ce34f9a43e7d24bf0a0eaf0561" dependencies = [ "aes-soft", "aesni", - "cipher", + "cipher 0.2.5", +] + +[[package]] +name = "aes" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +dependencies = [ + "cfg-if 1.0.0", + "cipher 0.4.4", + "cpufeatures", ] [[package]] @@ -242,8 +263,22 @@ checksum = "7729c3cde54d67063be556aeac75a81330d802f0259500ca40cb52967f975763" dependencies = [ "aes-soft", "aesni", - "cipher", - "ctr", + "cipher 0.2.5", + "ctr 0.6.0", +] + +[[package]] +name = "aes-gcm" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" +dependencies = [ + "aead", + "aes 0.8.3", + "cipher 0.4.4", + "ctr 0.9.2", + "ghash", + "subtle", ] [[package]] @@ -252,7 +287,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be14c7498ea50828a38d0e24a765ed2effe92a705885b57d029cd67d45744072" dependencies = [ - "cipher", + "cipher 0.2.5", "opaque-debug 0.3.0", ] @@ -262,7 +297,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea2e11f5e94c2f7d386164cc2aa1f97823fed6f259e486940a71c174dd01b0ce" dependencies = [ - "cipher", + "cipher 0.2.5", "opaque-debug 0.3.0", ] @@ -762,6 +797,15 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +[[package]] +name = "bitmaps" +version = "2.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2" +dependencies = [ + "typenum", +] + [[package]] name = "bitvec" version = "0.20.4" @@ -894,7 +938,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57a0e8073e8baa88212fb5823574c02ebccb395136ba9a164ab89379ec6072f0" dependencies = [ "block-padding 0.2.1", - "cipher", + "cipher 0.2.5", ] [[package]] @@ -928,6 +972,18 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "blst" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c94087b935a822949d3291a9989ad2b2051ea141eda0fd4e478a75f6aa3e604b" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + [[package]] name = "boojum" version = "0.1.0" @@ -1124,6 +1180,30 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chacha20" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" +dependencies = [ + "cfg-if 1.0.0", + "cipher 0.4.4", + "cpufeatures", +] + +[[package]] +name = "chacha20poly1305" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" +dependencies = [ + "aead", + "chacha20", + "cipher 0.4.4", + "poly1305", + "zeroize", +] + [[package]] name = "chrono" version = "0.4.31" @@ -1175,6 +1255,17 @@ dependencies = [ "generic-array 0.14.7", ] +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", + "zeroize", +] + [[package]] name = "circuit_definitions" 
version = "0.1.0" @@ -1696,6 +1787,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array 0.14.7", + "rand_core 0.6.4", "typenum", ] @@ -1748,7 +1840,16 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb4a30d54f7443bf3d6191dcd486aca19e67cb3c49fa7a06a319966346707e7f" dependencies = [ - "cipher", + "cipher 0.2.5", +] + +[[package]] +name = "ctr" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" +dependencies = [ + "cipher 0.4.4", ] [[package]] @@ -1761,6 +1862,34 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "platforms", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.38", +] + [[package]] name = "darling" version = "0.13.4" @@ -1840,6 +1969,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +dependencies = [ + "const-oid 0.9.5", + "zeroize", +] + [[package]] name = "deranged" version = "0.3.9" @@ -1944,7 +2083,31 @@ dependencies = [ "der 0.6.1", "elliptic-curve", "rfc6979", - "signature", + "signature 1.6.4", +] + 
+[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8 0.10.2", + "signature 2.2.0", +] + +[[package]] +name = "ed25519-dalek" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core 0.6.4", + "serde", + "sha2 0.10.8", + "zeroize", ] [[package]] @@ -2196,6 +2359,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "fiat-crypto" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f69037fe1b785e84986b4f2cbcf647381876a00671d25ceef715d7812dd7e1dd" + [[package]] name = "findshlibs" version = "0.10.2" @@ -2244,6 +2413,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + [[package]] name = "flate2" version = "1.0.28" @@ -2539,6 +2714,16 @@ dependencies = [ "wasi 0.11.0+wasi-snapshot-preview1", ] +[[package]] +name = "ghash" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +dependencies = [ + "opaque-debug 0.3.0", + "polyval", +] + [[package]] name = "gimli" version = "0.28.0" @@ -3053,6 +3238,20 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "im" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0acd33ff0285af998aaf9b57342af478078f53492322fafc47450e09397e0e9" +dependencies = [ + "bitmaps", + "rand_core 0.6.4", + "rand_xoshiro", + "sized-chunks", + "typenum", + "version_check", +] + [[package]] name = 
"impl-codec" version = "0.5.1" @@ -3129,6 +3328,15 @@ dependencies = [ "hashbrown 0.14.2", ] +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array 0.14.7", +] + [[package]] name = "insta" version = "1.34.0" @@ -3724,6 +3932,38 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +[[package]] +name = "logos" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c000ca4d908ff18ac99b93a062cb8958d331c3220719c52e77cb19cc6ac5d2c1" +dependencies = [ + "logos-derive", +] + +[[package]] +name = "logos-codegen" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc487311295e0002e452025d6b580b77bb17286de87b57138f3b5db711cded68" +dependencies = [ + "beef", + "fnv", + "proc-macro2 1.0.69", + "quote 1.0.33", + "regex-syntax 0.6.29", + "syn 2.0.38", +] + +[[package]] +name = "logos-derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbfc0d229f1f42d790440136d941afd806bc9e949e2bcb8faa813b0f00d1267e" +dependencies = [ + "logos-codegen", +] + [[package]] name = "mach" version = "0.3.2" @@ -3873,6 +4113,29 @@ dependencies = [ "sketches-ddsketch", ] +[[package]] +name = "miette" +version = "5.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59bb584eaeeab6bd0226ccf3509a69d7936d148cf3d036ad350abe35e8c6856e" +dependencies = [ + "miette-derive", + "once_cell", + "thiserror", + "unicode-width", +] + +[[package]] +name = "miette-derive" +version = "5.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" +dependencies = [ + "proc-macro2 
1.0.69", + "quote 1.0.33", + "syn 2.0.38", +] + [[package]] name = "mime" version = "0.3.17" @@ -3974,6 +4237,12 @@ dependencies = [ "ws2_32-sys", ] +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + [[package]] name = "multivm" version = "0.1.0" @@ -4361,6 +4630,15 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + [[package]] name = "os_info" version = "3.7.0" @@ -4407,6 +4685,18 @@ dependencies = [ "serde", ] +[[package]] +name = "pairing_ce" +version = "0.28.5" +source = "git+https://github.com/matter-labs/pairing.git?rev=f55393f#f55393fd366596eac792d78525d26e9c4d6ed1ca" +dependencies = [ + "byteorder", + "cfg-if 1.0.0", + "ff_ce", + "rand 0.4.6", + "serde", +] + [[package]] name = "pairing_ce" version = "0.28.5" @@ -4425,7 +4715,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b92ea9ddac0d6e1db7c49991e7d397d34a9fd814b4c93cda53788e8eef94e35" dependencies = [ - "aes", + "aes 0.6.0", "aes-ctr", "block-modes", "digest 0.9.0", @@ -4674,6 +4964,16 @@ dependencies = [ "sha2 0.10.8", ] +[[package]] +name = "petgraph" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +dependencies = [ + "fixedbitset", + "indexmap 2.0.2", +] + [[package]] name = "pin-project" version = "1.1.3" @@ -4738,12 +5038,28 @@ dependencies = [ "spki 0.6.0", ] +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + 
"der 0.7.8", + "spki 0.7.2", +] + [[package]] name = "pkg-config" version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +[[package]] +name = "platforms" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14e6ab3f592e6fb464fc9712d8d6e6912de6473954635fd76a589d832cffcbb0" + [[package]] name = "plotters" version = "0.3.5" @@ -4772,6 +5088,29 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures", + "opaque-debug 0.3.0", + "universal-hash", +] + +[[package]] +name = "polyval" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "opaque-debug 0.3.0", + "universal-hash", +] + [[package]] name = "portable-atomic" version = "1.5.0" @@ -4929,28 +5268,125 @@ dependencies = [ ] [[package]] -name = "pulldown-cmark" -version = "0.9.3" +name = "prost" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998" +checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d" dependencies = [ - "bitflags 1.3.2", - "memchr", - "unicase", + "bytes 1.5.0", + "prost-derive", ] [[package]] -name = "quanta" -version = "0.9.3" +name = "prost-build" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" +checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" dependencies = [ - "crossbeam-utils 0.8.16", - "libc", - 
"mach", - "once_cell", - "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", + "bytes 1.5.0", + "heck 0.4.1", + "itertools", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 2.0.38", + "tempfile", + "which", +] + +[[package]] +name = "prost-derive" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.38", +] + +[[package]] +name = "prost-reflect" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "057237efdb71cf4b3f9396302a3d6599a92fa94063ba537b66130980ea9909f3" +dependencies = [ + "base64 0.21.5", + "logos", + "miette", + "once_cell", + "prost", + "prost-types", + "serde", + "serde-value", +] + +[[package]] +name = "prost-types" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e081b29f63d83a4bc75cfc9f3fe424f9156cf92d8a4f0c9407cce9a1b67327cf" +dependencies = [ + "prost", +] + +[[package]] +name = "protox" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00bb76c5f6221de491fe2c8f39b106330bbd9762c6511119c07940e10eb9ff11" +dependencies = [ + "bytes 1.5.0", + "miette", + "prost", + "prost-reflect", + "prost-types", + "protox-parse", + "thiserror", +] + +[[package]] +name = "protox-parse" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4581f441c58863525a3e6bec7b8de98188cf75239a56c725a3e7288450a33f" +dependencies = [ + "logos", + "miette", + "prost-types", + "thiserror", +] + +[[package]] +name = "pulldown-cmark" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998" +dependencies = [ + "bitflags 
1.3.2", + "memchr", + "unicase", +] + +[[package]] +name = "quanta" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" +dependencies = [ + "crossbeam-utils 0.8.16", + "libc", + "mach", + "once_cell", + "raw-cpuid", + "wasi 0.10.2+wasi-snapshot-preview1", "web-sys", "winapi 0.3.9", ] @@ -4971,6 +5407,15 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + [[package]] name = "quote" version = "0.6.13" @@ -5191,6 +5636,15 @@ dependencies = [ "rand_core 0.3.1", ] +[[package]] +name = "rand_xoshiro" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "raw-cpuid" version = "10.7.0" @@ -5598,7 +6052,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "399f290ffc409596022fce5ea5d4138184be4784f2b28c62c59f0d8389059a15" dependencies = [ - "cipher", + "cipher 0.2.5", ] [[package]] @@ -5857,6 +6311,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + [[package]] name = "serde_derive" version = "1.0.189" @@ -6071,6 +6535,15 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "rand_core 
0.6.4", +] + [[package]] name = "similar" version = "2.3.0" @@ -6089,6 +6562,16 @@ dependencies = [ "time", ] +[[package]] +name = "sized-chunks" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16d69225bde7a69b235da73377861095455d298f2b970996eec25ddbb42b3d1e" +dependencies = [ + "bitmaps", + "typenum", +] + [[package]] name = "skeptic" version = "0.13.7" @@ -6138,6 +6621,22 @@ dependencies = [ "rescue_poseidon 0.4.1 (git+https://github.com/matter-labs/rescue-poseidon.git?branch=poseidon2)", ] +[[package]] +name = "snow" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58021967fd0a5eeeb23b08df6cc244a4d4a5b4aec1d27c9e02fad1a58b4cd74e" +dependencies = [ + "aes-gcm", + "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", + "chacha20poly1305", + "curve25519-dalek", + "rand_core 0.6.4", + "rustc_version", + "sha2 0.10.8", + "subtle", +] + [[package]] name = "socket2" version = "0.4.10" @@ -6200,6 +6699,16 @@ dependencies = [ "der 0.6.1", ] +[[package]] +name = "spki" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +dependencies = [ + "base64ct", + "der 0.7.8", +] + [[package]] name = "splitmut" version = "0.2.1" @@ -6555,6 +7064,26 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "test-casing" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2378d657757969a2cec9ec4eb616be8f01be98c21c6467991f91cb182e4653b" +dependencies = [ + "test-casing-macro", +] + +[[package]] +name = "test-casing-macro" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cfbe7811249c4c914b06141b8ac0f2cee2733fb883d05eb19668a45fc60c3d5" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.38", +] + [[package]] name = "test-log" version = "0.2.13" @@ 
-6611,6 +7140,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + [[package]] name = "tikv-jemalloc-sys" version = "0.5.4+5.3.0-patched" @@ -6705,9 +7243,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.33.0" +version = "1.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" dependencies = [ "backtrace", "bytes 1.5.0", @@ -6724,9 +7262,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2 1.0.69", "quote 1.0.33", @@ -7074,6 +7612,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "unroll" version = "0.1.5" @@ -7418,6 +7966,18 @@ version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix", +] + [[package]] name = "whoami" version = "1.4.1" @@ -7690,6 +8250,20 @@ name = "zeroize" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.38", +] [[package]] name = "zk_evm" @@ -7937,6 +8511,24 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_concurrency" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +dependencies = [ + "anyhow", + "once_cell", + "pin-project", + "rand 0.8.5", + "sha3 0.10.8", + "thiserror", + "time", + "tokio", + "tracing", + "tracing-subscriber", + "vise", +] + [[package]] name = "zksync_config" version = "0.1.0" @@ -7946,6 +8538,150 @@ dependencies = [ "zksync_basic_types", ] +[[package]] +name = "zksync_consensus_bft" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +dependencies = [ + "anyhow", + "once_cell", + "rand 0.8.5", + "thiserror", + "tracing", + "vise", + "zksync_concurrency", + "zksync_consensus_crypto", + "zksync_consensus_network", + "zksync_consensus_roles", + "zksync_consensus_storage", + "zksync_consensus_utils", + "zksync_protobuf", +] + +[[package]] +name = "zksync_consensus_crypto" +version = "0.1.0" +source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +dependencies = [ + "anyhow", + "blst", + "ed25519-dalek", + "ff_ce", + "hex", + "pairing_ce 0.28.5 (git+https://github.com/matter-labs/pairing.git?rev=f55393f)", + "rand 0.4.6", + "rand 0.8.5", + "sha3 0.10.8", + "thiserror", + "tracing", +] + +[[package]] +name = "zksync_consensus_executor" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +dependencies = [ + "anyhow", + "prost", + "rand 0.8.5", + "tracing", + "vise", + "zksync_concurrency", + "zksync_consensus_bft", + "zksync_consensus_crypto", + "zksync_consensus_network", + "zksync_consensus_roles", + "zksync_consensus_storage", + "zksync_consensus_sync_blocks", + "zksync_consensus_utils", + "zksync_protobuf", + "zksync_protobuf_build", +] + +[[package]] +name = "zksync_consensus_network" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +dependencies = [ + "anyhow", + "async-trait", + "im", + "once_cell", + "pin-project", + "prost", + "rand 0.8.5", + "snow", + "thiserror", + "tracing", + "vise", + "zksync_concurrency", + "zksync_consensus_crypto", + "zksync_consensus_roles", + "zksync_consensus_utils", + "zksync_protobuf", + "zksync_protobuf_build", +] + +[[package]] +name = "zksync_consensus_roles" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +dependencies = [ + "anyhow", + "bit-vec", + "hex", + "prost", + "rand 0.8.5", + "serde", + "tracing", + "zksync_concurrency", + "zksync_consensus_crypto", + "zksync_consensus_utils", + "zksync_protobuf", + "zksync_protobuf_build", +] + +[[package]] +name = 
"zksync_consensus_storage" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +dependencies = [ + "anyhow", + "async-trait", + "prost", + "rand 0.8.5", + "thiserror", + "tracing", + "zksync_concurrency", + "zksync_consensus_roles", + "zksync_protobuf", + "zksync_protobuf_build", +] + +[[package]] +name = "zksync_consensus_sync_blocks" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +dependencies = [ + "anyhow", + "thiserror", + "tracing", + "zksync_concurrency", + "zksync_consensus_network", + "zksync_consensus_roles", + "zksync_consensus_storage", + "zksync_consensus_utils", +] + +[[package]] +name = "zksync_consensus_utils" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +dependencies = [ + "thiserror", + "zksync_concurrency", +] + [[package]] name = "zksync_contract_verifier" version = "0.1.0" @@ -8020,11 +8756,13 @@ dependencies = [ "num 0.3.1", "once_cell", "prometheus_exporter", + "prost", "rand 0.8.5", "reqwest", "serde", "serde_json", "tempfile", + "test-casing", "thiserror", "tokio", "tower", @@ -8034,7 +8772,11 @@ dependencies = [ "vlog", "zksync_circuit_breaker", "zksync_commitment_utils", + "zksync_concurrency", "zksync_config", + "zksync_consensus_executor", + "zksync_consensus_roles", + "zksync_consensus_storage", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -8044,6 +8786,8 @@ dependencies = [ "zksync_merkle_tree", "zksync_mini_merkle_tree", "zksync_object_store", + "zksync_protobuf", + "zksync_protobuf_build", "zksync_prover_utils", "zksync_queued_job_processor", "zksync_state", @@ -8254,6 +8998,43 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_protobuf" 
+version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +dependencies = [ + "anyhow", + "bit-vec", + "once_cell", + "prost", + "prost-reflect", + "quick-protobuf", + "rand 0.8.5", + "serde", + "serde_json", + "tokio", + "tracing", + "tracing-subscriber", + "zksync_concurrency", + "zksync_protobuf_build", +] + +[[package]] +name = "zksync_protobuf_build" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +dependencies = [ + "anyhow", + "heck 0.4.1", + "prettyplease", + "proc-macro2 1.0.69", + "prost-build", + "prost-reflect", + "protox", + "quote 1.0.33", + "syn 2.0.38", +] + [[package]] name = "zksync_prover_utils" version = "0.1.0" @@ -8368,6 +9149,7 @@ dependencies = [ name = "zksync_types" version = "0.1.0" dependencies = [ + "anyhow", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "chrono", "codegen 0.1.0", @@ -8377,6 +9159,7 @@ dependencies = [ "num_enum", "once_cell", "parity-crypto", + "prost", "rlp", "secp256k1 0.27.0", "serde", @@ -8389,8 +9172,11 @@ dependencies = [ "zk_evm 1.4.0", "zkevm_test_harness 1.3.3", "zksync_basic_types", + "zksync_consensus_roles", "zksync_contracts", "zksync_mini_merkle_tree", + "zksync_protobuf", + "zksync_protobuf_build", "zksync_system_constants", "zksync_utils", ] diff --git a/core/lib/dal/migrations/20231102144901_add_consensus_fields_for_miniblocks.down.sql b/core/lib/dal/migrations/20231102144901_add_consensus_fields_for_miniblocks.down.sql new file mode 100644 index 00000000000..701c5e60854 --- /dev/null +++ b/core/lib/dal/migrations/20231102144901_add_consensus_fields_for_miniblocks.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE miniblocks + DROP COLUMN IF EXISTS consensus; diff --git a/core/lib/dal/migrations/20231102144901_add_consensus_fields_for_miniblocks.up.sql 
b/core/lib/dal/migrations/20231102144901_add_consensus_fields_for_miniblocks.up.sql new file mode 100644 index 00000000000..cdfd74990ea --- /dev/null +++ b/core/lib/dal/migrations/20231102144901_add_consensus_fields_for_miniblocks.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE miniblocks + ADD COLUMN consensus JSONB NULL; diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index b69e48b178e..e031fe7d671 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -4410,98 +4410,6 @@ }, "query": "UPDATE contract_verification_requests SET status = 'failed', updated_at = now(), error = $2, compilation_errors = $3, panic_message = $4 WHERE id = $1" }, - "5190fad25f0c476380af4013761d42ae97dbd55f87e38ceec33f8e148c5cbb14": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "l1_batch_number!", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "last_batch_miniblock?", - "ordinal": 2, - "type_info": "Int8" - }, - { - "name": "timestamp", - "ordinal": 3, - "type_info": "Int8" - }, - { - "name": "root_hash?", - "ordinal": 4, - "type_info": "Bytea" - }, - { - "name": "l1_gas_price", - "ordinal": 5, - "type_info": "Int8" - }, - { - "name": "l2_fair_gas_price", - "ordinal": 6, - "type_info": "Int8" - }, - { - "name": "bootloader_code_hash", - "ordinal": 7, - "type_info": "Bytea" - }, - { - "name": "default_aa_code_hash", - "ordinal": 8, - "type_info": "Bytea" - }, - { - "name": "virtual_blocks", - "ordinal": 9, - "type_info": "Int8" - }, - { - "name": "hash", - "ordinal": 10, - "type_info": "Bytea" - }, - { - "name": "protocol_version!", - "ordinal": 11, - "type_info": "Int4" - }, - { - "name": "fee_account_address?", - "ordinal": 12, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - null, - null, - false, - false, - false, - false, - true, - true, - false, - false, - true, - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "\n SELECT 
miniblocks.number,\n COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\",\n (SELECT max(m2.number) FROM miniblocks m2 WHERE miniblocks.l1_batch_number = m2.l1_batch_number) as \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.hash as \"root_hash?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version as \"protocol_version!\",\n l1_batches.fee_account_address as \"fee_account_address?\"\n FROM miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n WHERE miniblocks.number = $1\n " - }, "51cb712685991ffd600dce59f5ed8b5a1bfce8feed46ebd02471c43802e6e65a": { "describe": { "columns": [ @@ -6178,6 +6086,104 @@ }, "query": "SELECT * FROM call_traces WHERE tx_hash IN (SELECT hash FROM transactions WHERE miniblock_number = $1)" }, + "7947dd8e7d6c138146f7ebe6b1e89fcd494b2679ac4e9fcff6aa2b2944aeed50": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number!", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "last_batch_miniblock?", + "ordinal": 2, + "type_info": "Int8" + }, + { + "name": "timestamp", + "ordinal": 3, + "type_info": "Int8" + }, + { + "name": "root_hash?", + "ordinal": 4, + "type_info": "Bytea" + }, + { + "name": "l1_gas_price", + "ordinal": 5, + "type_info": "Int8" + }, + { + "name": "l2_fair_gas_price", + "ordinal": 6, + "type_info": "Int8" + }, + { + "name": "bootloader_code_hash", + "ordinal": 7, + "type_info": "Bytea" + }, + { + "name": "default_aa_code_hash", + "ordinal": 8, + "type_info": "Bytea" + }, + { + "name": "virtual_blocks", + "ordinal": 9, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 10, + "type_info": "Bytea" + }, + { + "name": "consensus", + "ordinal": 11, + "type_info": "Jsonb" + }, + { + "name": 
"protocol_version!", + "ordinal": 12, + "type_info": "Int4" + }, + { + "name": "fee_account_address?", + "ordinal": 13, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + null, + null, + false, + false, + false, + false, + true, + true, + false, + false, + true, + true, + false + ], + "parameters": { + "Left": [ + "Int8" + ] + } + }, + "query": "SELECT miniblocks.number, COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\", (SELECT max(m2.number) FROM miniblocks m2 WHERE miniblocks.l1_batch_number = m2.l1_batch_number) as \"last_batch_miniblock?\", miniblocks.timestamp, miniblocks.hash as \"root_hash?\", miniblocks.l1_gas_price, miniblocks.l2_fair_gas_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, miniblocks.virtual_blocks, miniblocks.hash, miniblocks.consensus, miniblocks.protocol_version as \"protocol_version!\", l1_batches.fee_account_address as \"fee_account_address?\" FROM miniblocks LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number WHERE miniblocks.number = $1" + }, "79cdb4cdd3c47b3654e6240178985fb4b4420e0634f9482a6ef8169e90200b84": { "describe": { "columns": [ @@ -12321,6 +12327,19 @@ }, "query": "SELECT * FROM eth_txs WHERE id = $1" }, + "fcca1961f34082f7186de607b922fd608166c5af98031e4dcc8a056b89696dbe": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Jsonb" + ] + } + }, + "query": "UPDATE miniblocks SET consensus = $2 WHERE number = $1" + }, "ff7ff36b86b0e8d1cd7280aa447baef172cb054ffe7e1d742c59bf09b4f414cb": { "describe": { "columns": [ diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 662cdf7b501..cd894212c3c 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -10,7 +10,7 @@ use sqlx::Row; use zksync_types::{ aggregated_operations::AggregatedActionType, - block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, + block::{BlockGasCount, 
ConsensusBlockFields, L1BatchHeader, MiniblockHeader}, commitment::{L1BatchMetadata, L1BatchWithMetadata}, Address, L1BatchNumber, LogQuery, MiniblockNumber, ProtocolVersionId, H256, MAX_GAS_PER_PUBDATA_BYTE, U256, @@ -466,6 +466,27 @@ impl BlocksDal<'_, '_> { Ok(()) } + /// Sets consensus-related fields for the specified miniblock. + pub async fn set_miniblock_consensus_fields( + &mut self, + miniblock_number: MiniblockNumber, + consensus: &ConsensusBlockFields, + ) -> anyhow::Result<()> { + let result = sqlx::query!( + "UPDATE miniblocks SET consensus = $2 WHERE number = $1", + miniblock_number.0 as i64, + serde_json::to_value(consensus).unwrap(), + ) + .execute(self.storage.conn()) + .await?; + + anyhow::ensure!( + result.rows_affected() == 1, + "Miniblock #{miniblock_number} is not present in Postgres" + ); + Ok(()) + } + pub async fn update_hashes( &mut self, number_and_hashes: &[(MiniblockNumber, H256)], diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 052fadcf60a..b49cfd98acc 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -2,8 +2,7 @@ use std::convert::TryInto; use zksync_contracts::BaseSystemContractsHashes; use zksync_types::api::en::SyncBlock; -use zksync_types::Transaction; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber, H256}; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction, H256}; #[derive(Debug, Clone, sqlx::FromRow)] pub struct StorageSyncBlock { @@ -22,6 +21,7 @@ pub struct StorageSyncBlock { pub protocol_version: i32, pub virtual_blocks: i64, pub hash: Vec, + pub consensus: Option, } impl StorageSyncBlock { @@ -30,12 +30,14 @@ impl StorageSyncBlock { current_operator_address: Address, transactions: Option>, ) -> SyncBlock { + let number = self.number; + SyncBlock { number: MiniblockNumber(self.number as u32), l1_batch_number: L1BatchNumber(self.l1_batch_number as u32), last_in_batch: self 
.last_batch_miniblock - .map(|n| n == self.number) + .map(|n| n == number) .unwrap_or(false), timestamp: self.timestamp as u64, root_hash: self.root_hash.as_deref().map(H256::from_slice), @@ -60,6 +62,7 @@ impl StorageSyncBlock { virtual_blocks: Some(self.virtual_blocks as u32), hash: Some(H256::from_slice(&self.hash)), protocol_version: (self.protocol_version as u16).try_into().unwrap(), + consensus: self.consensus.map(|v| serde_json::from_value(v).unwrap()), } } } diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index ab905dd5cb2..7b7c1359414 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -23,24 +23,23 @@ impl SyncDal<'_, '_> { let latency = MethodLatency::new("sync_dal_sync_block"); let storage_block_details = sqlx::query_as!( StorageSyncBlock, - r#" - SELECT miniblocks.number, - COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as "l1_batch_number!", - (SELECT max(m2.number) FROM miniblocks m2 WHERE miniblocks.l1_batch_number = m2.l1_batch_number) as "last_batch_miniblock?", - miniblocks.timestamp, - miniblocks.hash as "root_hash?", - miniblocks.l1_gas_price, - miniblocks.l2_fair_gas_price, - miniblocks.bootloader_code_hash, - miniblocks.default_aa_code_hash, - miniblocks.virtual_blocks, - miniblocks.hash, - miniblocks.protocol_version as "protocol_version!", - l1_batches.fee_account_address as "fee_account_address?" 
- FROM miniblocks - LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number - WHERE miniblocks.number = $1 - "#, + "SELECT miniblocks.number, \ + COALESCE(miniblocks.l1_batch_number, (SELECT (max(number) + 1) FROM l1_batches)) as \"l1_batch_number!\", \ + (SELECT max(m2.number) FROM miniblocks m2 WHERE miniblocks.l1_batch_number = m2.l1_batch_number) as \"last_batch_miniblock?\", \ + miniblocks.timestamp, \ + miniblocks.hash as \"root_hash?\", \ + miniblocks.l1_gas_price, \ + miniblocks.l2_fair_gas_price, \ + miniblocks.bootloader_code_hash, \ + miniblocks.default_aa_code_hash, \ + miniblocks.virtual_blocks, \ + miniblocks.hash, \ + miniblocks.consensus, \ + miniblocks.protocol_version as \"protocol_version!\", \ + l1_batches.fee_account_address as \"fee_account_address?\" \ + FROM miniblocks \ + LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number \ + WHERE miniblocks.number = $1", block_number.0 as i64 ) .instrument("sync_dal_sync_block.block") diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 35ccb4a9370..117cbdcec8d 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -10,6 +10,8 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] readme = "README.md" +links = "zksync_types_proto" + [dependencies] zksync_system_constants = { path = "../constants" } zksync_utils = { path = "../utils" } @@ -21,10 +23,14 @@ codegen = { git = "https://github.com/matter-labs/solidity_plonk_verifier.git", zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" } zk_evm_1_4_0 = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.0", package = "zk_evm" } zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } +zksync_protobuf = { version = 
"0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } +anyhow = "1.0.75" chrono = { version = "0.4", features = ["serde"] } num = { version = "0.3.1", features = ["serde"] } once_cell = "1.7" +prost = "0.12.1" rlp = "0.5" serde = "1.0.90" serde_json = "1.0.0" @@ -47,3 +53,6 @@ ethereum_types_old = { package = "ethereum-types", version = "0.12.0" } secp256k1 = { version = "0.27", features = ["recovery"] } tokio = { version = "1", features = ["rt", "macros"] } serde_with = { version = "1", features = ["hex"] } + +[build-dependencies] +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } diff --git a/core/lib/types/build.rs b/core/lib/types/build.rs new file mode 100644 index 00000000000..464a905e47a --- /dev/null +++ b/core/lib/types/build.rs @@ -0,0 +1,12 @@ +//! Generates rust code from protobufs. +fn main() { + zksync_protobuf_build::Config { + input_root: "src/proto".into(), + proto_root: "zksync/types".into(), + dependencies: vec!["::zksync_consensus_roles::proto".parse().unwrap()], + protobuf_crate: "::zksync_protobuf".parse().unwrap(), + is_public: true, + } + .generate() + .expect("generate()"); +} diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index 18a83f9f821..aa3d2955e2e 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -5,7 +5,7 @@ use zk_evm::ethereum_types::Address; use zksync_basic_types::{L1BatchNumber, MiniblockNumber, H256}; use zksync_contracts::BaseSystemContractsHashes; -use crate::ProtocolVersionId; +use crate::{block::ConsensusBlockFields, ProtocolVersionId}; /// Representation of the L2 block, as needed for the EN synchronization. /// This structure has several fields that describe *L1 batch* rather than @@ -44,4 +44,7 @@ pub struct SyncBlock { pub hash: Option, /// Version of the protocol used for this block. 
pub protocol_version: ProtocolVersionId, + /// Consensus-related information about the block. Not present if consensus is not enabled + /// for the environment. + pub consensus: Option, } diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 1896fe0eb50..762733f8e21 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -1,10 +1,13 @@ +use anyhow::Context as _; use serde::{Deserialize, Serialize}; use zksync_system_constants::SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER; use std::{fmt, ops}; use zksync_basic_types::{H2048, H256, U256}; +use zksync_consensus_roles::validator; use zksync_contracts::BaseSystemContractsHashes; +use zksync_protobuf::{read_required, ProtoFmt}; use crate::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, @@ -85,6 +88,43 @@ pub struct MiniblockHeader { pub virtual_blocks: u32, } +/// Consensus-related L2 block (= miniblock) fields. +#[derive(Debug, Clone)] +pub struct ConsensusBlockFields { + /// Hash of the previous consensus block. + pub parent: validator::BlockHeaderHash, + /// Quorum certificate for the block. + pub justification: validator::CommitQC, +} + +impl ProtoFmt for ConsensusBlockFields { + type Proto = crate::proto::ConsensusBlockFields; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + parent: read_required(&r.parent).context("parent")?, + justification: read_required(&r.justification).context("justification")?, + }) + } + fn build(&self) -> Self::Proto { + Self::Proto { + parent: Some(self.parent.build()), + justification: Some(self.justification.build()), + } + } +} + +impl Serialize for ConsensusBlockFields { + fn serialize(&self, s: S) -> Result { + zksync_protobuf::serde::serialize(self, s) + } +} + +impl<'de> Deserialize<'de> for ConsensusBlockFields { + fn deserialize>(d: D) -> Result { + zksync_protobuf::serde::deserialize(d) + } +} + /// Data needed to execute a miniblock in the VM. 
#[derive(Debug)] pub struct MiniblockExecutionData { diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 4715a2f86da..22904eb71b8 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -62,6 +62,8 @@ pub mod utils; pub mod vk_transform; pub mod vm_version; +mod proto; + /// Denotes the first byte of the special zkSync's EIP-712-signed transaction. pub const EIP_712_TX_TYPE: u8 = 0x71; diff --git a/core/lib/types/src/proto/mod.proto b/core/lib/types/src/proto/mod.proto new file mode 100644 index 00000000000..2fc03e285d3 --- /dev/null +++ b/core/lib/types/src/proto/mod.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package zksync.types; + +import "zksync/roles/validator.proto"; + +message ConsensusBlockFields { + optional roles.validator.BlockHeaderHash parent = 1; + optional roles.validator.CommitQC justification = 2; +} diff --git a/core/lib/types/src/proto/mod.rs b/core/lib/types/src/proto/mod.rs new file mode 100644 index 00000000000..660bf4c5b4c --- /dev/null +++ b/core/lib/types/src/proto/mod.rs @@ -0,0 +1,2 @@ +#![allow(warnings)] +include!(concat!(env!("OUT_DIR"), "/src/proto/gen.rs")); diff --git a/core/lib/zksync_core/Cargo.toml b/core/lib/zksync_core/Cargo.toml index 4d84757b6fa..6c8e43763fd 100644 --- a/core/lib/zksync_core/Cargo.toml +++ b/core/lib/zksync_core/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_core" version = "0.1.0" -edition = "2018" +edition = "2021" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" repository = "https://github.com/matter-labs/zksync-era" @@ -39,7 +39,14 @@ zksync_health_check = { path = "../health_check" } vlog = { path = "../vlog" } multivm = { path = "../multivm" } +# Consensus dependenices +zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = 
"bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } +zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } +zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } +prost = "0.12.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" itertools = "0.10.3" @@ -84,8 +91,11 @@ actix-web = "4.0.0-beta.8" tracing = "0.1.26" [dev-dependencies] - -assert_matches = "1.5" zksync_test_account = { path = "../test_account" } +assert_matches = "1.5" tempfile = "3.0.2" +test-casing = "0.1.2" + +[build-dependencies] +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } diff --git a/core/lib/zksync_core/build.rs b/core/lib/zksync_core/build.rs new file mode 100644 index 00000000000..7e8cc45bb8c --- /dev/null +++ b/core/lib/zksync_core/build.rs @@ -0,0 +1,12 @@ +//! Generates rust code from protobufs. 
+fn main() { + zksync_protobuf_build::Config { + input_root: "src/consensus/proto".into(), + proto_root: "zksync/core/consensus".into(), + dependencies: vec![], + protobuf_crate: "::zksync_protobuf".parse().unwrap(), + is_public: false, + } + .generate() + .expect("generate()"); +} diff --git a/core/lib/zksync_core/src/consensus/mod.rs b/core/lib/zksync_core/src/consensus/mod.rs new file mode 100644 index 00000000000..08a02e1dd2a --- /dev/null +++ b/core/lib/zksync_core/src/consensus/mod.rs @@ -0,0 +1,10 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, time}; +use zksync_consensus_roles::validator; +use zksync_types::block::ConsensusBlockFields; +use zksync_types::{Address, MiniblockNumber}; + +mod payload; +mod proto; + +pub(crate) use payload::Payload; diff --git a/core/lib/zksync_core/src/consensus/payload.rs b/core/lib/zksync_core/src/consensus/payload.rs new file mode 100644 index 00000000000..818d63d7414 --- /dev/null +++ b/core/lib/zksync_core/src/consensus/payload.rs @@ -0,0 +1,92 @@ +use anyhow::Context as _; +use zksync_consensus_roles::validator; +use zksync_protobuf::{required, ProtoFmt}; +use zksync_types::api::en::SyncBlock; +use zksync_types::{Address, L1BatchNumber, Transaction, H256}; + +pub(crate) struct Payload { + pub hash: H256, + pub l1_batch_number: L1BatchNumber, + pub timestamp: u64, + pub l1_gas_price: u64, + pub l2_fair_gas_price: u64, + pub virtual_blocks: u32, + pub operator_address: Address, + pub transactions: Vec, +} + +impl ProtoFmt for Payload { + type Proto = super::proto::Payload; + fn read(r: &Self::Proto) -> anyhow::Result { + let mut transactions = vec![]; + for (i, t) in r.transactions.iter().enumerate() { + transactions.push( + required(&t.json) + .and_then(|s| Ok(serde_json::from_str(&*s)?)) + .with_context(|| format!("transaction[{i}]"))?, + ); + } + Ok(Self { + hash: required(&r.hash) + .and_then(|h| Ok(<[u8; 32]>::try_from(h.as_slice())?.into())) + .context("hash")?, + l1_batch_number: L1BatchNumber( + 
*required(&r.l1_batch_number).context("l1_batch_number")?, + ), + timestamp: *required(&r.timestamp).context("timestamp")?, + l1_gas_price: *required(&r.l1_gas_price).context("l1_gas_price")?, + l2_fair_gas_price: *required(&r.l2_fair_gas_price).context("l2_fair_gas_price")?, + virtual_blocks: *required(&r.virtual_blocks).context("virtual_blocks")?, + operator_address: required(&r.operator_address) + .and_then(|a| Ok(<[u8; 20]>::try_from(a.as_slice())?.into())) + .context("operator_address")?, + transactions, + }) + } + fn build(&self) -> Self::Proto { + Self::Proto { + hash: Some(self.hash.as_bytes().into()), + l1_batch_number: Some(self.l1_batch_number.0), + timestamp: Some(self.timestamp), + l1_gas_price: Some(self.l1_gas_price), + l2_fair_gas_price: Some(self.l2_fair_gas_price), + virtual_blocks: Some(self.virtual_blocks), + operator_address: Some(self.operator_address.as_bytes().into()), + // Transactions are stored in execution order, therefore order is deterministic. + transactions: self + .transactions + .iter() + .map(|t| super::proto::Transaction { + // TODO: There is no guarantee that json encoding here will be deterministic. 
+ json: Some(serde_json::to_string(t).unwrap()), + }) + .collect(), + } + } +} + +impl TryFrom for Payload { + type Error = anyhow::Error; + fn try_from(block: SyncBlock) -> anyhow::Result { + Ok(Self { + hash: block.hash.unwrap_or_default(), + l1_batch_number: block.l1_batch_number, + timestamp: block.timestamp, + l1_gas_price: block.l1_gas_price, + l2_fair_gas_price: block.l2_fair_gas_price, + virtual_blocks: block.virtual_blocks.unwrap_or(0), + operator_address: block.operator_address, + transactions: block.transactions.context("Transactions are required")?, + }) + } +} + +impl Payload { + pub fn decode(p: &validator::Payload) -> anyhow::Result { + zksync_protobuf::decode(&p.0) + } + + pub fn encode(&self) -> validator::Payload { + validator::Payload(zksync_protobuf::encode(self)) + } +} diff --git a/core/lib/zksync_core/src/consensus/proto/mod.proto b/core/lib/zksync_core/src/consensus/proto/mod.proto new file mode 100644 index 00000000000..6199585899d --- /dev/null +++ b/core/lib/zksync_core/src/consensus/proto/mod.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package zksync.core.consensus; + +message Transaction { + // Default derive(serde::Serialize) encoding of the zksync_types::Transaction. + // TODO(gprusak): it is neither efficient, unique, nor suitable for version control. + // replace with a more robust encoding. 
+ optional string json = 1; // required +} + +message Payload { + optional bytes hash = 1; // required; H256 + optional uint32 l1_batch_number = 2; // required + optional uint64 timestamp = 3; // required; seconds since UNIX epoch + optional uint64 l1_gas_price = 4; // required; gwei + optional uint64 l2_fair_gas_price = 5; // required; gwei + optional uint32 virtual_blocks = 6; // required + optional bytes operator_address = 7; // required; H160 + repeated Transaction transactions = 8; +} diff --git a/core/lib/zksync_core/src/consensus/proto/mod.rs b/core/lib/zksync_core/src/consensus/proto/mod.rs new file mode 100644 index 00000000000..e6ac37696c2 --- /dev/null +++ b/core/lib/zksync_core/src/consensus/proto/mod.rs @@ -0,0 +1,2 @@ +#![allow(warnings)] +include!(concat!(env!("OUT_DIR"), "/src/consensus/proto/gen.rs")); diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 89b8ce86e73..576cb56dd7d 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -73,6 +73,7 @@ impl MockMainNodeClient { virtual_blocks: Some(!is_fictive as u32), hash: Some(H256::repeat_byte(1)), protocol_version: ProtocolVersionId::latest(), + consensus: None, } }); From 0ac4a4ddb87d7728a99a29df9adeded5822e1def Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Thu, 16 Nov 2023 15:22:28 +0100 Subject: [PATCH 021/115] feat(core): adds a get proof endpoint in zks namespace to http endpoint on main node (#504) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Support for `zks_getProof` was accedentally only implemented for WS apis in #455, this PR deploys them also to HTTP endpoints. 
--- core/lib/web3_decl/src/namespaces/zks.rs | 10 +++++++++- .../web3/backend_jsonrpsee/namespaces/zks.rs | 13 ++++++++++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/core/lib/web3_decl/src/namespaces/zks.rs b/core/lib/web3_decl/src/namespaces/zks.rs index b56368f116c..d3bf43b9a97 100644 --- a/core/lib/web3_decl/src/namespaces/zks.rs +++ b/core/lib/web3_decl/src/namespaces/zks.rs @@ -5,7 +5,7 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use zksync_types::{ api::{ - BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, ProtocolVersion, + BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, Proof, ProtocolVersion, TransactionDetails, }, fee::Fee, @@ -110,4 +110,12 @@ pub trait ZksNamespace { #[method(name = "getLogsWithVirtualBlocks")] async fn get_logs_with_virtual_blocks(&self, filter: Filter) -> RpcResult>; + + #[method(name = "getProof")] + async fn get_proof( + &self, + address: Address, + keys: Vec, + l1_batch_number: L1BatchNumber, + ) -> RpcResult; } diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs index ee675cc9e2c..6b6ed67c3c6 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs @@ -4,7 +4,7 @@ use std::collections::HashMap; use zksync_types::{ api::{ - BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, ProtocolVersion, + BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, Proof, ProtocolVersion, TransactionDetails, }, fee::Fee, @@ -158,4 +158,15 @@ impl ZksNamespaceServer for ZksNa .await .map_err(into_jsrpc_error) } + + async fn get_proof( + &self, + address: Address, + keys: Vec, + l1_batch_number: L1BatchNumber, + ) -> RpcResult { + self.get_proofs_impl(address, keys, l1_batch_number) + .await + .map_err(into_jsrpc_error) + } } From 
0ff922dadd0d8686c9487471a6f70fd2837bb659 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 16 Nov 2023 20:15:37 +0200 Subject: [PATCH 022/115] chore(upgrade-tool): Extend upgrade tool to work with new governance (#507) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Extends upgrade tool to be able to work with new governance. Tool was tested locally. ## Why ❔ New governance mechanism is being introduced. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- infrastructure/protocol-upgrade/README.md | 6 +- .../protocol-upgrade/src/transaction.ts | 207 +++++++++++++----- 2 files changed, 163 insertions(+), 50 deletions(-) diff --git a/infrastructure/protocol-upgrade/README.md b/infrastructure/protocol-upgrade/README.md index 595bea8a84f..6636dff8b0f 100644 --- a/infrastructure/protocol-upgrade/README.md +++ b/infrastructure/protocol-upgrade/README.md @@ -213,7 +213,8 @@ $ zk f yarn start transactions build-default \ --l2-upgrader-address \ --diamond-upgrade-proposal-id \ --l1rpc \ ---zksync-address +--zksync-address \ +--use-new-governance ``` To execute the `proposeTransparentUpgrade` transaction on L1, use the following command: @@ -225,6 +226,7 @@ $ zk f yarn start transactions propose-upgrade \ --gas-price \ --nonce \ --zksync-address \ +--new-governance \ --environment ``` @@ -237,6 +239,7 @@ $ zk f yarn start transactions execute-upgrade \ --gas-price \ --nonce \ --zksync-address \ +--new-governance \ --environment ``` @@ -249,5 +252,6 @@ $ zk f yarn start transactions cancel-upgrade \ --zksync-address \ --gas-price \ --nonce \ +--new-governance \ --environment ``` diff --git a/infrastructure/protocol-upgrade/src/transaction.ts 
b/infrastructure/protocol-upgrade/src/transaction.ts index 3d92127a8b4..9162da1c46f 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -1,7 +1,11 @@ import { BigNumberish } from '@ethersproject/bignumber'; import { BytesLike, ethers } from 'ethers'; import { ForceDeployUpgraderFactory as ForceDeployUpgraderFactoryL2 } from 'l2-zksync-contracts/typechain'; -import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1 } from 'l1-zksync-contracts/typechain'; +import { + DefaultUpgradeFactory as DefaultUpgradeFactoryL1, + AdminFacetFactory, + GovernanceFactory +} from 'l1-zksync-contracts/typechain'; import { FacetCut } from 'l1-zksync-contracts/src.ts/diamondCut'; import { IZkSyncFactory } from '../pre-boojum/IZkSyncFactory'; import { ComplexUpgrader__factory } from '../../../etc/system-contracts/typechain-types'; @@ -163,7 +167,7 @@ export function prepareDefaultCalldataForL2upgrade(forcedDeployments: ForceDeplo return complexUpgraderCalldata; } -export function prepareproposeTransparentUpgradeCalldata( +export function prepareProposeTransparentUpgradeCalldata( initCalldata, upgradeAddress: string, facetCuts: FacetCut[], @@ -185,7 +189,56 @@ export function prepareproposeTransparentUpgradeCalldata( transparentUpgrade, ethers.constants.HashZero ]); - return [proposeTransparentUpgradeCalldata, executeUpgradeCalldata, transparentUpgrade]; + return { + transparentUpgrade, + proposeTransparentUpgradeCalldata, + executeUpgradeCalldata + }; +} + +export function prepareTransparentUpgradeCalldataForNewGovernance( + initCalldata, + upgradeAddress: string, + facetCuts: FacetCut[], + zksyncAddress: string +) { + let transparentUpgrade: TransparentUpgrade = { + facetCuts, + initAddress: upgradeAddress, + initCalldata + }; + + // Prepare calldata for upgrading diamond proxy + let adminFacet = new AdminFacetFactory(); + const diamondProxyUpgradeCalldata = adminFacet.interface.encodeFunctionData('executeUpgrade', 
[transparentUpgrade]); + + const call = { + target: zksyncAddress, + value: 0, + data: diamondProxyUpgradeCalldata + }; + const governanceOperation = { + calls: [call], + predecessor: ethers.constants.HashZero, + salt: ethers.constants.HashZero + }; + + const governance = new GovernanceFactory(); + // Get transaction data of the `scheduleTransparent` + const scheduleTransparentOperation = governance.interface.encodeFunctionData('scheduleTransparent', [ + governanceOperation, + 0 // delay + ]); + + // Get transaction data of the `execute` + const executeOperation = governance.interface.encodeFunctionData('execute', [governanceOperation]); + + return { + scheduleTransparentOperation, + executeOperation, + governanceOperation, + transparentUpgrade + }; } export function buildDefaultUpgradeTx( @@ -194,7 +247,9 @@ export function buildDefaultUpgradeTx( upgradeAddress, l2UpgraderAddress, upgradeTimestamp, - newAllowList + newAllowList, + zksyncAddress, + useNewGovernance ) { const commonData = JSON.parse(fs.readFileSync(getCommonDataFileName(), { encoding: 'utf-8' })); const protocolVersion = commonData.protocolVersion; @@ -261,13 +316,22 @@ export function buildDefaultUpgradeTx( let l1upgradeCalldata = prepareDefaultCalldataForL1upgrade(proposeUpgradeTx); - let [proposeTransparentUpgradeCalldata, executeUpgradeCalldata, transparentUpgrade] = - prepareproposeTransparentUpgradeCalldata( + let upgradeData; + if (useNewGovernance) { + upgradeData = prepareTransparentUpgradeCalldataForNewGovernance( + l1upgradeCalldata, + upgradeAddress, + facetCuts, + zksyncAddress + ); + } else { + upgradeData = prepareProposeTransparentUpgradeCalldata( l1upgradeCalldata, upgradeAddress, facetCuts, diamondUpgradeProposalId ); + } const transactions = { proposeUpgradeTx, l1upgradeCalldata, @@ -275,9 +339,7 @@ export function buildDefaultUpgradeTx( protocolVersion, diamondUpgradeProposalId, upgradeTimestamp, - proposeTransparentUpgradeCalldata, - transparentUpgrade, - executeUpgradeCalldata 
+ ...upgradeData }; fs.writeFileSync(getL2TransactionsFileName(environment), JSON.stringify(transactions, null, 2)); @@ -288,17 +350,16 @@ async function sendTransaction( calldata: BytesLike, privateKey: string, l1rpc: string, - zksyncAddress: string, + to: string, environment: string, gasPrice: ethers.BigNumber, nonce: number ) { const wallet = getWallet(l1rpc, privateKey); - zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; gasPrice = gasPrice ?? (await wallet.provider.getGasPrice()); nonce = nonce ?? (await wallet.getTransactionCount()); const tx = await wallet.sendTransaction({ - to: zksyncAddress, + to, data: calldata, value: 0, gasLimit: 10_000_000, @@ -330,20 +391,21 @@ async function proposeUpgrade( zksyncAddress: string, environment: string, gasPrice: ethers.BigNumber, - nonce: number + nonce: number, + newGovernanceAddress: string ) { const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - const proposeTransparentUpgradeCalldata = transactions.proposeTransparentUpgradeCalldata; + let to; + let calldata; + if (newGovernanceAddress != null) { + to = newGovernanceAddress; + calldata = transactions.scheduleTransparentOperation; + } else { + to = zksyncAddress ?? 
process.env.CONTRACTS_DIAMOND_PROXY_ADDR; + calldata = transactions.proposeTransparentUpgradeCalldata; + } console.log(`Proposing upgrade for protocolVersion ${transactions.protocolVersion}`); - await sendTransaction( - proposeTransparentUpgradeCalldata, - privateKey, - l1rpc, - zksyncAddress, - environment, - gasPrice, - nonce - ); + await sendTransaction(calldata, privateKey, l1rpc, to, environment, gasPrice, nonce); } async function executeUpgrade( @@ -352,12 +414,21 @@ async function executeUpgrade( zksyncAddress: string, environment: string, gasPrice: ethers.BigNumber, - nonce: number + nonce: number, + newGovernanceAddress: string ) { const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - const executeUpgradeCalldata = transactions.executeUpgradeCalldata; + let to; + let calldata; + if (newGovernanceAddress != null) { + to = newGovernanceAddress; + calldata = transactions.executeOperation; + } else { + to = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; + calldata = transactions.executeUpgradeCalldata; + } console.log(`Execute upgrade for protocolVersion ${transactions.protocolVersion}`); - await sendTransaction(executeUpgradeCalldata, privateKey, l1rpc, zksyncAddress, environment, gasPrice, nonce); + await sendTransaction(calldata, privateKey, l1rpc, to, environment, gasPrice, nonce); } async function cancelUpgrade( @@ -367,28 +438,57 @@ async function cancelUpgrade( environment: string, gasPrice: ethers.BigNumber, nonce: number, - execute: boolean + execute: boolean, + newGovernanceAddress: string ) { - zksyncAddress = zksyncAddress ?? 
process.env.CONTRACTS_DIAMOND_PROXY_ADDR; - let wallet = getWallet(l1rpc, privateKey); - let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); + if (newGovernanceAddress != null) { + let wallet = getWallet(l1rpc, privateKey); + const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); + + let governance = GovernanceFactory.connect(newGovernanceAddress, wallet); + const operation = transactions.governanceOperation; + + const operationId = await governance.hashOperation(operation); + + console.log(`Cancel upgrade operation with id: ${operationId}`); + if (execute) { + const tx = await governance.cancel(operationId); + await tx.wait(); + console.log('Operation canceled'); + } else { + const calldata = governance.interface.encodeFunctionData('cancel', [operationId]); + console.log(`Cancel upgrade calldata: ${calldata}`); + } + } else { + zksyncAddress = zksyncAddress ?? 
process.env.CONTRACTS_DIAMOND_PROXY_ADDR; + let wallet = getWallet(l1rpc, privateKey); + let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); + const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - const transparentUpgrade = transactions.transparentUpgrade; - const diamondUpgradeProposalId = transactions.diamondUpgradeProposalId; + const transparentUpgrade = transactions.transparentUpgrade; + const diamondUpgradeProposalId = transactions.diamondUpgradeProposalId; - const proposalHash = await zkSync.upgradeProposalHash( - transparentUpgrade, - diamondUpgradeProposalId, - ethers.constants.HashZero - ); + const proposalHash = await zkSync.upgradeProposalHash( + transparentUpgrade, + diamondUpgradeProposalId, + ethers.constants.HashZero + ); - console.log(`Cancel upgrade with hash: ${proposalHash}`); - let cancelUpgradeCalldata = zkSync.interface.encodeFunctionData('cancelUpgradeProposal', [proposalHash]); - if (execute) { - await sendTransaction(cancelUpgradeCalldata, privateKey, l1rpc, zksyncAddress, environment, gasPrice, nonce); - } else { - console.log(`Cancel upgrade calldata: ${cancelUpgradeCalldata}`); + console.log(`Cancel upgrade with hash: ${proposalHash}`); + let cancelUpgradeCalldata = zkSync.interface.encodeFunctionData('cancelUpgradeProposal', [proposalHash]); + if (execute) { + await sendTransaction( + cancelUpgradeCalldata, + privateKey, + l1rpc, + zksyncAddress, + environment, + gasPrice, + nonce + ); + } else { + console.log(`Cancel upgrade calldata: ${cancelUpgradeCalldata}`); + } } } @@ -421,9 +521,10 @@ command .option('--diamond-upgrade-proposal-id ') .option('--l1rpc ') .option('--zksync-address ') + .option('--use-new-governance') .action(async (options) => { let diamondUpgradeProposalId = options.diamondUpgradeProposalId; - if (!diamondUpgradeProposalId) { + if (!diamondUpgradeProposalId && !options.useNewGovernance) { diamondUpgradeProposalId = await 
getNewDiamondUpgradeProposalId(options.l1rpc, options.zksyncAddress); } @@ -433,7 +534,9 @@ command options.upgradeAddress, options.l2UpgraderAddress, options.upgradeTimestamp, - options.newAllowList + options.newAllowList, + options.zksyncAddress, + options.useNewGovernance ); }); @@ -445,6 +548,7 @@ command .option('--gas-price ') .option('--nonce ') .option('--l1rpc ') + .option('--new-governance ') .action(async (options) => { await proposeUpgrade( options.privateKey, @@ -452,7 +556,8 @@ command options.zksyncAddress, options.environment, options.gasPrice, - options.nonce + options.nonce, + options.newGovernance ); }); @@ -464,6 +569,7 @@ command .option('--gas-price ') .option('--nonce ') .option('--l1rpc ') + .option('--new-governance ') .action(async (options) => { await executeUpgrade( options.privateKey, @@ -471,7 +577,8 @@ command options.zksyncAddress, options.environment, options.gasPrice, - options.nonce + options.nonce, + options.newGovernance ); }); @@ -484,6 +591,7 @@ command .option('--nonce ') .option('--l1rpc ') .option('--execute') + .option('--new-governance ') .action(async (options) => { await cancelUpgrade( options.privateKey, @@ -492,6 +600,7 @@ command options.environment, options.gasPrice, options.nonce, - options.execute + options.execute, + options.newGovernance ); }); From 851e800721a627742d6781d6162009d61f83c1af Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Fri, 17 Nov 2023 10:51:55 +0100 Subject: [PATCH 023/115] feat: Adds `prover_group_id` label into `fri_prover_prover_job` metric (#503) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adds `group_id` label into `fri_prover_prover_job` metric. Adds `FriProverGroupConfig::get_group_id_for_circuit_id_and_aggregation_round` function. Fixes .githooks/pre-commit syntax. Formats the repo. ## Why ❔ This is needed to get usable metrics for auto scaling fri_prover jobs. 
## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- .githooks/pre-commit | 2 +- core/bin/zksync_server/src/main.rs | 2 + .../config/src/configs/fri_prover_group.rs | 29 +++++++ core/lib/env_config/src/fri_prover_group.rs | 86 +++++++++++++++++++ core/lib/types/src/event.rs | 13 ++- .../house_keeper/fri_prover_queue_monitor.rs | 20 ++++- core/lib/zksync_core/src/lib.rs | 5 ++ core/lib/zksync_core/src/temp_config_store.rs | 2 + 8 files changed, 147 insertions(+), 12 deletions(-) diff --git a/.githooks/pre-commit b/.githooks/pre-commit index c7f7de77cf4..1f0c6b945b6 100755 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -7,7 +7,7 @@ RED='\033[0;31m' NC='\033[0m' # No Color # Check that Rust formatting rules are not violated. -function check_fmt { +check_fmt () { if ! 
cargo fmt -- --check; then echo -e "${RED}Commit error!${NC}" echo "Please format the code via 'cargo fmt', cannot commit unformatted code" diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 75b44660e6e..f2aed9c75c2 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -10,6 +10,7 @@ use zksync_config::{ CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig, StateKeeperConfig, }, + fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, FriProofCompressorConfig, FriProverConfig, FriWitnessGeneratorConfig, PrometheusConfig, ProofDataHandlerConfig, ProverGroupConfig, WitnessGeneratorConfig, @@ -108,6 +109,7 @@ async fn main() -> anyhow::Result<()> { house_keeper_config: HouseKeeperConfig::from_env().ok(), fri_proof_compressor_config: FriProofCompressorConfig::from_env().ok(), fri_prover_config: FriProverConfig::from_env().ok(), + fri_prover_group_config: FriProverGroupConfig::from_env().ok(), fri_witness_generator_config: FriWitnessGeneratorConfig::from_env().ok(), prometheus_config: PrometheusConfig::from_env().ok(), proof_data_handler_config: ProofDataHandlerConfig::from_env().ok(), diff --git a/core/lib/config/src/configs/fri_prover_group.rs b/core/lib/config/src/configs/fri_prover_group.rs index 966a4b324db..71ed5d1f7d9 100644 --- a/core/lib/config/src/configs/fri_prover_group.rs +++ b/core/lib/config/src/configs/fri_prover_group.rs @@ -40,6 +40,35 @@ impl FriProverGroupConfig { } } + pub fn get_group_id_for_circuit_id_and_aggregation_round( + &self, + circuit_id: u8, + aggregation_round: u8, + ) -> Option { + let configs = [ + &self.group_0, + &self.group_1, + &self.group_2, + &self.group_3, + &self.group_4, + &self.group_5, + &self.group_6, + &self.group_7, + &self.group_8, + &self.group_9, + &self.group_10, + &self.group_11, + &self.group_12, + ]; + configs + .iter() + .enumerate() + .find(|(_, group)| { + 
group.contains(&CircuitIdRoundTuple::new(circuit_id, aggregation_round)) + }) + .map(|(group_id, _)| group_id as u8) + } + pub fn get_all_circuit_ids(&self) -> Vec { (0..13) .filter_map(|group_id| self.get_circuit_ids_for_group_id(group_id)) diff --git a/core/lib/env_config/src/fri_prover_group.rs b/core/lib/env_config/src/fri_prover_group.rs index 373febb6abb..32c2b453158 100644 --- a/core/lib/env_config/src/fri_prover_group.rs +++ b/core/lib/env_config/src/fri_prover_group.rs @@ -244,4 +244,90 @@ mod tests { let actual = FriProverGroupConfig::from_env().unwrap(); assert_eq!(actual, expected_config()); } + + #[test] + fn get_group_id_for_circuit_id_and_aggregation_round() { + let fri_prover_group_config = expected_config(); + + assert_eq!( + Some(0), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(1, 3) + ); + assert_eq!( + Some(0), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(2, 2) + ); + + assert_eq!( + Some(1), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(1, 0) + ); + + assert_eq!( + Some(2), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(2, 0) + ); + assert_eq!( + Some(2), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(4, 0) + ); + + assert_eq!( + Some(3), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(3, 0) + ); + + assert_eq!( + Some(4), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(11, 0) + ); + assert_eq!( + Some(4), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(12, 0) + ); + + assert_eq!( + Some(5), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(5, 0) + ); + + assert_eq!( + Some(6), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(3, 1) + ); + + assert_eq!( + Some(7), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(7, 0) + ); + + 
assert_eq!( + Some(8), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(8, 0) + ); + + assert_eq!( + Some(9), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(12, 1) + ); + + assert_eq!( + Some(10), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(10, 0) + ); + + assert_eq!( + Some(11), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(7, 1) + ); + + assert_eq!( + Some(12), + fri_prover_group_config.get_group_id_for_circuit_id_and_aggregation_round(4, 1) + ); + + assert!(fri_prover_group_config + .get_group_id_for_circuit_id_and_aggregation_round(19, 0) + .is_none()); + } } diff --git a/core/lib/types/src/event.rs b/core/lib/types/src/event.rs index c901ac8b0f4..285567c8911 100644 --- a/core/lib/types/src/event.rs +++ b/core/lib/types/src/event.rs @@ -249,14 +249,11 @@ pub fn extract_l2tol1logs_from_l1_messenger( && event.indexed_topics[0] == l1_messenger_l2_to_l1_log_event_signature }) .map(|event| { - let tuple = ethabi::decode( - params, - &event.value, - ) - .expect("Failed to decode L2ToL1LogSent message") - .first() - .unwrap() - .clone(); + let tuple = ethabi::decode(params, &event.value) + .expect("Failed to decode L2ToL1LogSent message") + .first() + .unwrap() + .clone(); let Token::Tuple(tokens) = tuple else { panic!("Tuple was expected, got: {}", tuple); }; diff --git a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs index 58beb9a3b34..e0763377177 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs @@ -1,4 +1,5 @@ use async_trait::async_trait; +use zksync_config::configs::fri_prover_group::FriProverGroupConfig; use zksync_dal::ConnectionPool; use zksync_prover_utils::periodic_job::PeriodicJob; @@ -6,13 +7,19 @@ use zksync_prover_utils::periodic_job::PeriodicJob; pub 
struct FriProverStatsReporter { reporting_interval_ms: u64, prover_connection_pool: ConnectionPool, + config: FriProverGroupConfig, } impl FriProverStatsReporter { - pub fn new(reporting_interval_ms: u64, prover_connection_pool: ConnectionPool) -> Self { + pub fn new( + reporting_interval_ms: u64, + prover_connection_pool: ConnectionPool, + config: FriProverGroupConfig, + ) -> Self { Self { reporting_interval_ms, prover_connection_pool, + config, } } } @@ -27,12 +34,18 @@ impl PeriodicJob for FriProverStatsReporter { let stats = conn.fri_prover_jobs_dal().get_prover_jobs_stats().await; for ((circuit_id, aggregation_round), stats) in stats.into_iter() { + let group_id = self + .config + .get_group_id_for_circuit_id_and_aggregation_round(circuit_id, aggregation_round) + .unwrap(); + metrics::gauge!( "fri_prover.prover.jobs", stats.queued as f64, "type" => "queued", "circuit_id" => circuit_id.to_string(), - "aggregation_round" => aggregation_round.to_string() + "aggregation_round" => aggregation_round.to_string(), + "prover_group_id" => group_id.to_string(), ); metrics::gauge!( @@ -40,7 +53,8 @@ impl PeriodicJob for FriProverStatsReporter { stats.in_progress as f64, "type" => "in_progress", "circuit_id" => circuit_id.to_string(), - "aggregation_round" => aggregation_round.to_string() + "aggregation_round" => aggregation_round.to_string(), + "prover_group_id" => group_id.to_string(), ); } diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index be7e75dbc1e..c12ea4625c4 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -1184,9 +1184,14 @@ async fn add_house_keeper_to_task_futures( ); task_futures.push(tokio::spawn(fri_witness_generator_stats_reporter.run())); + let fri_prover_group_config = configs + .fri_prover_group_config + .clone() + .context("fri_prover_group_config")?; let fri_prover_stats_reporter = FriProverStatsReporter::new( house_keeper_config.fri_prover_stats_reporting_interval_ms, 
prover_connection_pool.clone(), + fri_prover_group_config, ); task_futures.push(tokio::spawn(fri_prover_stats_reporter.run())); diff --git a/core/lib/zksync_core/src/temp_config_store.rs b/core/lib/zksync_core/src/temp_config_store.rs index e2c243b1d3a..d1cd9f32670 100644 --- a/core/lib/zksync_core/src/temp_config_store.rs +++ b/core/lib/zksync_core/src/temp_config_store.rs @@ -5,6 +5,7 @@ use zksync_config::{ CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig, StateKeeperConfig, }, + fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, FriProofCompressorConfig, FriProverConfig, FriWitnessGeneratorConfig, PrometheusConfig, ProofDataHandlerConfig, ProverGroupConfig, WitnessGeneratorConfig, @@ -30,6 +31,7 @@ pub struct TempConfigStore { pub house_keeper_config: Option, pub fri_proof_compressor_config: Option, pub fri_prover_config: Option, + pub fri_prover_group_config: Option, pub fri_witness_generator_config: Option, pub prometheus_config: Option, pub proof_data_handler_config: Option, From e8bbf767bacc0acc1e8ad4a571a0d185f91173d8 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 17 Nov 2023 13:10:23 +0200 Subject: [PATCH 024/115] test(merkle tree): Fix flaky `MerkleTree` consistency test (#475) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fix duplicate_leaf_index_error test, which [sometimes fails in CI](https://github.com/matter-labs/zksync-era/actions/runs/6848926146/job/18620092974?pr=432). ## Why ❔ Failing CI costs time and money. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- core/lib/merkle_tree/src/consistency.rs | 34 ++++++++++++------- .../src/metadata_calculator/tests.rs | 2 +- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/core/lib/merkle_tree/src/consistency.rs b/core/lib/merkle_tree/src/consistency.rs index afe0111f9a3..85896bad1ae 100644 --- a/core/lib/merkle_tree/src/consistency.rs +++ b/core/lib/merkle_tree/src/consistency.rs @@ -256,6 +256,7 @@ impl AtomicBitSet { #[cfg(test)] mod tests { use assert_matches::assert_matches; + use rayon::ThreadPoolBuilder; use std::num::NonZeroU64; @@ -290,7 +291,16 @@ mod tests { #[test] fn basic_consistency_checks() { let db = prepare_database(); - MerkleTree::new(db).verify_consistency(0).unwrap(); + deterministic_verify_consistency(db).unwrap(); + } + + /// Limits the number of `rayon` threads to 1 in order to get deterministic test execution. + fn deterministic_verify_consistency(db: PatchSet) -> Result<(), ConsistencyError> { + let thread_pool = ThreadPoolBuilder::new() + .num_threads(1) + .build() + .expect("failed initializing `rayon` thread pool"); + thread_pool.install(|| MerkleTree::new(db).verify_consistency(0)) } #[test] @@ -298,7 +308,7 @@ mod tests { let mut db = prepare_database(); db.manifest_mut().version_count = 0; - let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + let err = deterministic_verify_consistency(db).unwrap_err(); assert_matches!(err, ConsistencyError::MissingVersion(0)); } @@ -307,7 +317,7 @@ mod tests { let mut db = prepare_database(); db.remove_root(0); - let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + let err = deterministic_verify_consistency(db).unwrap_err(); assert_matches!(err, ConsistencyError::MissingRoot(0)); } @@ -321,7 +331,7 @@ mod tests { let leaf_key = leaf_key.unwrap(); db.remove_node(&leaf_key); - let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + let err = deterministic_verify_consistency(db).unwrap_err(); assert_matches!( err, ConsistencyError::MissingNode { key, 
is_leaf: true } if key == leaf_key @@ -338,7 +348,7 @@ mod tests { }; *leaf_count = NonZeroU64::new(42).unwrap(); - let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + let err = deterministic_verify_consistency(db).unwrap_err(); assert_matches!( err, ConsistencyError::LeafCountMismatch { @@ -363,7 +373,7 @@ mod tests { let child_ref = node.child_ref_mut(0xd).unwrap(); child_ref.hash = ValueHash::zero(); - let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + let err = deterministic_verify_consistency(db).unwrap_err(); assert_matches!( err, ConsistencyError::HashMismatch { @@ -388,7 +398,7 @@ mod tests { }); let leaf_key = leaf_key.unwrap(); - let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + let err = deterministic_verify_consistency(db).unwrap_err(); assert_matches!( err, ConsistencyError::FullKeyMismatch { key, full_key } @@ -409,7 +419,7 @@ mod tests { }); leaf_key.unwrap(); - let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + let err = deterministic_verify_consistency(db).unwrap_err(); assert_matches!( err, ConsistencyError::LeafIndexOverflow { @@ -430,7 +440,7 @@ mod tests { } } - let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + let err = deterministic_verify_consistency(db).unwrap_err(); assert_matches!(err, ConsistencyError::DuplicateLeafIndex { index: 1, .. 
}); } @@ -446,7 +456,7 @@ mod tests { }); let node_key = node_key.unwrap(); - let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + let err = deterministic_verify_consistency(db).unwrap_err(); assert_matches!(err, ConsistencyError::EmptyInternalNode { key } if key == node_key); } @@ -463,7 +473,7 @@ mod tests { }); let node_key = node_key.unwrap(); - let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + let err = deterministic_verify_consistency(db).unwrap_err(); assert_matches!( err, ConsistencyError::KeyVersionMismatch { key, expected_version: 1 } if key == node_key @@ -483,7 +493,7 @@ mod tests { let (nibble, _) = node.children().next().unwrap(); node.child_ref_mut(nibble).unwrap().version = 42; - let err = MerkleTree::new(db).verify_consistency(0).unwrap_err(); + let err = deterministic_verify_consistency(db).unwrap_err(); assert_matches!( err, ConsistencyError::RootVersionMismatch { diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index dd72410f950..5e86db6087b 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -24,7 +24,7 @@ use super::{ }; use crate::genesis::{ensure_genesis_state, GenesisParams}; -const RUN_TIMEOUT: Duration = Duration::from_secs(15); +const RUN_TIMEOUT: Duration = Duration::from_secs(30); async fn run_with_timeout(timeout: Duration, action: F) -> T where From 146e4cf2f8d890ff0a8d33229e224442e14be437 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 17 Nov 2023 13:46:19 +0200 Subject: [PATCH 025/115] feat(merkle tree): Allow random-order tree recovery (#485) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Allow recovery the Merkle tree from entries provided in an arbitrary order. ## Why ❔ This is necessary to implement the snapshot recovery PoC and could be beneficial in the long run. 
## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- core/lib/merkle_tree/examples/recovery.rs | 29 ++++++-- core/lib/merkle_tree/src/recovery.rs | 31 +++++++-- core/lib/merkle_tree/src/storage/mod.rs | 28 +++++++- core/lib/merkle_tree/src/storage/tests.rs | 66 +++++++++++++++---- .../merkle_tree/tests/integration/recovery.rs | 43 +++++++++--- prover/Cargo.lock | 2 - 6 files changed, 162 insertions(+), 37 deletions(-) diff --git a/core/lib/merkle_tree/examples/recovery.rs b/core/lib/merkle_tree/examples/recovery.rs index 207499da8b4..af16ed05baf 100644 --- a/core/lib/merkle_tree/examples/recovery.rs +++ b/core/lib/merkle_tree/examples/recovery.rs @@ -23,6 +23,9 @@ struct Cli { /// Number of entries per update. #[arg(name = "ops")] writes_per_update: usize, + /// Perform random recovery instead of linear recovery. + #[arg(name = "random", long)] + random: bool, /// Use a no-op hashing function. #[arg(name = "no-hash", long)] no_hashing: bool, @@ -89,17 +92,29 @@ impl Cli { let started_at = Instant::now(); let recovery_entries = (0..self.writes_per_update) .map(|_| { - last_key += key_step - Key::from(rng.gen::()); - // ^ Increases the key by a random increment close to `key` step with some randomness. last_leaf_index += 1; - RecoveryEntry { - key: last_key, - value: ValueHash::zero(), - leaf_index: last_leaf_index, + if self.random { + RecoveryEntry { + key: Key::from(rng.gen::<[u8; 32]>()), + value: ValueHash::zero(), + leaf_index: last_leaf_index, + } + } else { + last_key += key_step - Key::from(rng.gen::()); + // ^ Increases the key by a random increment close to `key` step with some randomness. 
+ RecoveryEntry { + key: last_key, + value: ValueHash::zero(), + leaf_index: last_leaf_index, + } } }) .collect(); - recovery.extend(recovery_entries); + if self.random { + recovery.extend_random(recovery_entries); + } else { + recovery.extend_linear(recovery_entries); + } tracing::info!( "Updated tree with recovery chunk #{updated_idx} in {:?}", started_at.elapsed() diff --git a/core/lib/merkle_tree/src/recovery.rs b/core/lib/merkle_tree/src/recovery.rs index 9700e401fa2..6f57b64ee81 100644 --- a/core/lib/merkle_tree/src/recovery.rs +++ b/core/lib/merkle_tree/src/recovery.rs @@ -137,7 +137,7 @@ impl MerkleTreeRecovery { storage.greatest_key() } - /// Extends a tree with a chunk of entries. + /// Extends a tree with a chunk of linearly ordered entries. /// /// Entries must be ordered by increasing `key`, and the key of the first entry must be greater /// than [`Self::last_processed_key()`]. @@ -154,12 +154,35 @@ impl MerkleTreeRecovery { %entries.key_range = entries_key_range(&entries), ), )] - pub fn extend(&mut self, entries: Vec) { + pub fn extend_linear(&mut self, entries: Vec) { tracing::debug!("Started extending tree"); let started_at = Instant::now(); let storage = Storage::new(&self.db, &self.hasher, self.recovered_version, false); - let patch = storage.extend_during_recovery(entries); + let patch = storage.extend_during_linear_recovery(entries); + tracing::debug!("Finished processing keys; took {:?}", started_at.elapsed()); + + let started_at = Instant::now(); + self.db.apply_patch(patch); + tracing::debug!("Finished persisting to DB; took {:?}", started_at.elapsed()); + } + + /// Extends a tree with a chunk of entries. Unlike [`Self::extend_linear()`], entries may be + /// ordered in any way you like. 
+ #[tracing::instrument( + level = "debug", + skip_all, + fields( + recovered_version = self.recovered_version, + entries.len = entries.len(), + ), + )] + pub fn extend_random(&mut self, entries: Vec) { + tracing::debug!("Started extending tree"); + + let started_at = Instant::now(); + let storage = Storage::new(&self.db, &self.hasher, self.recovered_version, false); + let patch = storage.extend_during_random_recovery(entries); tracing::debug!("Finished processing keys; took {:?}", started_at.elapsed()); let started_at = Instant::now(); @@ -262,7 +285,7 @@ mod tests { value: ValueHash::repeat_byte(1), leaf_index: 1, }; - recovery.extend(vec![recovery_entry]); + recovery.extend_linear(vec![recovery_entry]); let tree = recovery.finalize(); assert_eq!(tree.latest_version(), Some(42)); diff --git a/core/lib/merkle_tree/src/storage/mod.rs b/core/lib/merkle_tree/src/storage/mod.rs index baea778cf93..c5a56abfca9 100644 --- a/core/lib/merkle_tree/src/storage/mod.rs +++ b/core/lib/merkle_tree/src/storage/mod.rs @@ -321,7 +321,10 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { Some(self.updater.load_greatest_key(self.db)?.0.full_key) } - pub fn extend_during_recovery(mut self, recovery_entries: Vec) -> PatchSet { + pub fn extend_during_linear_recovery( + mut self, + recovery_entries: Vec, + ) -> PatchSet { let (mut prev_key, mut prev_nibbles) = match self.updater.load_greatest_key(self.db) { Some((leaf, nibbles)) => (Some(leaf.full_key), nibbles), None => (None, Nibbles::EMPTY), @@ -353,6 +356,29 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { patch } + pub fn extend_during_random_recovery( + mut self, + recovery_entries: Vec, + ) -> PatchSet { + let load_nodes_latency = BLOCK_TIMINGS.load_nodes.start(); + let sorted_keys = SortedKeys::new(recovery_entries.iter().map(|entry| entry.key)); + let parent_nibbles = self.updater.load_ancestors(&sorted_keys, self.db); + let load_nodes_latency = load_nodes_latency.observe(); + tracing::debug!("Load stage took 
{load_nodes_latency:?}"); + + let extend_patch_latency = BLOCK_TIMINGS.extend_patch.start(); + for (entry, parent_nibbles) in recovery_entries.into_iter().zip(parent_nibbles) { + self.updater + .insert(entry.key, entry.value, &parent_nibbles, || entry.leaf_index); + self.leaf_count += 1; + } + let extend_patch_latency = extend_patch_latency.observe(); + tracing::debug!("Tree traversal stage took {extend_patch_latency:?}"); + + let (_, patch) = self.finalize(); + patch + } + fn finalize(self) -> (ValueHash, PatchSet) { tracing::debug!( "Finished updating tree; total leaf count: {}, stats: {:?}", diff --git a/core/lib/merkle_tree/src/storage/tests.rs b/core/lib/merkle_tree/src/storage/tests.rs index d00ed4d3e05..3ed0cbada52 100644 --- a/core/lib/merkle_tree/src/storage/tests.rs +++ b/core/lib/merkle_tree/src/storage/tests.rs @@ -510,7 +510,7 @@ fn recovery_flattens_node_versions() { leaf_index: i + 1, }); let patch = Storage::new(&PatchSet::default(), &(), recovery_version, false) - .extend_during_recovery(recovery_entries.collect()); + .extend_during_linear_recovery(recovery_entries.collect()); assert_eq!(patch.patches_by_version.len(), 1); let (updated_version, patch) = patch.patches_by_version.into_iter().next().unwrap(); assert_eq!(updated_version, recovery_version); @@ -546,7 +546,7 @@ fn test_recovery_with_node_hierarchy(chunk_size: usize) { let mut db = PatchSet::default(); for recovery_chunk in recovery_entries.chunks(chunk_size) { let patch = Storage::new(&db, &(), recovery_version, false) - .extend_during_recovery(recovery_chunk.to_vec()); + .extend_during_linear_recovery(recovery_chunk.to_vec()); db.apply_patch(patch); } assert_eq!(db.updated_version, Some(recovery_version)); @@ -605,7 +605,7 @@ fn test_recovery_with_deep_node_hierarchy(chunk_size: usize) { let mut db = PatchSet::default(); for recovery_chunk in recovery_entries.chunks(chunk_size) { let patch = Storage::new(&db, &(), recovery_version, false) - 
.extend_during_recovery(recovery_chunk.to_vec()); + .extend_during_linear_recovery(recovery_chunk.to_vec()); db.apply_patch(patch); } let mut patch = db.patches_by_version.remove(&recovery_version).unwrap(); @@ -673,7 +673,7 @@ fn recovery_workflow_with_multiple_stages() { leaf_index: i, }); let patch = Storage::new(&db, &(), recovery_version, false) - .extend_during_recovery(recovery_entries.collect()); + .extend_during_linear_recovery(recovery_entries.collect()); assert_eq!(patch.root(recovery_version).unwrap().leaf_count(), 100); db.apply_patch(patch); @@ -684,7 +684,7 @@ fn recovery_workflow_with_multiple_stages() { }); let patch = Storage::new(&db, &(), recovery_version, false) - .extend_during_recovery(more_recovery_entries.collect()); + .extend_during_linear_recovery(more_recovery_entries.collect()); assert_eq!(patch.root(recovery_version).unwrap().leaf_count(), 200); db.apply_patch(patch); @@ -701,6 +701,7 @@ fn recovery_workflow_with_multiple_stages() { } fn test_recovery_pruning_equivalence( + is_linear: bool, chunk_size: usize, recovery_chunk_size: usize, hasher: &dyn HashTree, @@ -752,13 +753,21 @@ fn test_recovery_pruning_equivalence( }); let mut recovery_entries: Vec<_> = recovery_entries.collect(); assert_eq!(recovery_entries.len(), 100); - recovery_entries.sort_unstable_by_key(|entry| entry.key); + if is_linear { + recovery_entries.sort_unstable_by_key(|entry| entry.key); + } else { + recovery_entries.shuffle(&mut rng); + } // Recover the tree. 
let mut recovered_db = PatchSet::default(); for recovery_chunk in recovery_entries.chunks(recovery_chunk_size) { - let patch = Storage::new(&recovered_db, hasher, recovered_version, false) - .extend_during_recovery(recovery_chunk.to_vec()); + let storage = Storage::new(&recovered_db, hasher, recovered_version, false); + let patch = if is_linear { + storage.extend_during_linear_recovery(recovery_chunk.to_vec()) + } else { + storage.extend_during_random_recovery(recovery_chunk.to_vec()) + }; recovered_db.apply_patch(patch); } let sub_patch = recovered_db @@ -798,25 +807,54 @@ fn test_recovery_pruning_equivalence( } #[test] -fn recovery_pruning_equivalence() { +fn linear_recovery_pruning_equivalence() { + for chunk_size in [3, 5, 7, 11, 21, 42, 99, 100] { + // No chunking during recovery (simple case). + test_recovery_pruning_equivalence(true, chunk_size, 100, &()); + // Recovery is chunked (more complex case). + for recovery_chunk_size in [chunk_size, 1, 6, 19, 50, 73] { + test_recovery_pruning_equivalence(true, chunk_size, recovery_chunk_size, &()); + } + } +} + +#[test] +fn random_recovery_pruning_equivalence() { for chunk_size in [3, 5, 7, 11, 21, 42, 99, 100] { // No chunking during recovery (simple case). - test_recovery_pruning_equivalence(chunk_size, 100, &()); + test_recovery_pruning_equivalence(false, chunk_size, 100, &()); // Recovery is chunked (more complex case). for recovery_chunk_size in [chunk_size, 1, 6, 19, 50, 73] { - test_recovery_pruning_equivalence(chunk_size, recovery_chunk_size, &()); + test_recovery_pruning_equivalence(false, chunk_size, recovery_chunk_size, &()); } } } #[test] -fn recovery_pruning_equivalence_with_hashing() { +fn linear_recovery_pruning_equivalence_with_hashing() { for chunk_size in [3, 7, 21, 42, 100] { // No chunking during recovery (simple case). 
- test_recovery_pruning_equivalence(chunk_size, 100, &Blake2Hasher); + test_recovery_pruning_equivalence(true, chunk_size, 100, &Blake2Hasher); // Recovery is chunked (more complex case). for recovery_chunk_size in [chunk_size, 1, 19, 73] { - test_recovery_pruning_equivalence(chunk_size, recovery_chunk_size, &Blake2Hasher); + test_recovery_pruning_equivalence(true, chunk_size, recovery_chunk_size, &Blake2Hasher); + } + } +} + +#[test] +fn random_recovery_pruning_equivalence_with_hashing() { + for chunk_size in [3, 7, 21, 42, 100] { + // No chunking during recovery (simple case). + test_recovery_pruning_equivalence(false, chunk_size, 100, &Blake2Hasher); + // Recovery is chunked (more complex case). + for recovery_chunk_size in [chunk_size, 1, 19, 73] { + test_recovery_pruning_equivalence( + false, + chunk_size, + recovery_chunk_size, + &Blake2Hasher, + ); } } } diff --git a/core/lib/merkle_tree/tests/integration/recovery.rs b/core/lib/merkle_tree/tests/integration/recovery.rs index fe89dded5c3..9a1cfee9591 100644 --- a/core/lib/merkle_tree/tests/integration/recovery.rs +++ b/core/lib/merkle_tree/tests/integration/recovery.rs @@ -27,7 +27,7 @@ fn recovery_basics() { let recovered_version = 123; let mut recovery = MerkleTreeRecovery::new(PatchSet::default(), recovered_version); - recovery.extend(recovery_entries); + recovery.extend_linear(recovery_entries); assert_eq!(recovery.last_processed_key(), Some(greatest_key)); assert_eq!(recovery.root_hash(), *expected_hash); @@ -36,7 +36,7 @@ fn recovery_basics() { tree.verify_consistency(recovered_version).unwrap(); } -fn test_recovery_in_chunks(mut create_db: impl FnMut() -> DB) { +fn test_recovery_in_chunks(is_linear: bool, mut create_db: impl FnMut() -> DB) { let (kvs, expected_hash) = &*KVS_AND_HASH; let recovery_entries = kvs .iter() @@ -47,15 +47,25 @@ fn test_recovery_in_chunks(mut create_db: impl FnMut() -> DB) leaf_index: i as u64 + 1, }); let mut recovery_entries: Vec<_> = recovery_entries.collect(); - 
recovery_entries.sort_unstable_by_key(|entry| entry.key); - let greatest_key = recovery_entries[99].key; + if is_linear { + recovery_entries.sort_unstable_by_key(|entry| entry.key); + } + let greatest_key = recovery_entries + .iter() + .map(|entry| entry.key) + .max() + .unwrap(); let recovered_version = 123; for chunk_size in [6, 10, 17, 42] { let mut db = create_db(); let mut recovery = MerkleTreeRecovery::new(&mut db, recovered_version); for (i, chunk) in recovery_entries.chunks(chunk_size).enumerate() { - recovery.extend(chunk.to_vec()); + if is_linear { + recovery.extend_linear(chunk.to_vec()); + } else { + recovery.extend_random(chunk.to_vec()); + } if i % 3 == 1 { recovery = MerkleTreeRecovery::new(&mut db, recovered_version); // ^ Simulate recovery interruption and restart @@ -119,8 +129,13 @@ fn test_tree_after_recovery( } #[test] -fn recovery_in_chunks() { - test_recovery_in_chunks(PatchSet::default); +fn linear_recovery_in_chunks() { + test_recovery_in_chunks(true, PatchSet::default); +} + +#[test] +fn random_recovery_in_chunks() { + test_recovery_in_chunks(false, PatchSet::default); } mod rocksdb { @@ -130,10 +145,20 @@ mod rocksdb { use zksync_merkle_tree::RocksDBWrapper; #[test] - fn recovery_in_chunks() { + fn linear_recovery_in_chunks() { + let temp_dir = TempDir::new().unwrap(); + let mut counter = 0; + test_recovery_in_chunks(true, || { + counter += 1; + RocksDBWrapper::new(&temp_dir.path().join(counter.to_string())) + }); + } + + #[test] + fn random_recovery_in_chunks() { let temp_dir = TempDir::new().unwrap(); let mut counter = 0; - test_recovery_in_chunks(|| { + test_recovery_in_chunks(false, || { counter += 1; RocksDBWrapper::new(&temp_dir.path().join(counter.to_string())) }); diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 38ea58ac436..d100677d746 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -6702,10 +6702,8 @@ name = "zksync_config" version = "0.1.0" dependencies = [ "anyhow", - "envy", "serde", "zksync_basic_types", - 
"zksync_contracts", ] [[package]] From 7c137b72b16a8671a27c24b378d33320877d6557 Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Fri, 17 Nov 2023 16:54:31 +0100 Subject: [PATCH 026/115] fix(core): add tree url to jsonrpsee server on main node (#512) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- core/lib/zksync_core/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index c12ea4625c4..94d050f36ad 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -1315,6 +1315,7 @@ async fn run_http_api( .with_last_miniblock_pool(last_miniblock_pool) .with_filter_limit(api_config.web3_json_rpc.filters_limit()) .with_threads(api_config.web3_json_rpc.http_server_threads()) + .with_tree_api(api_config.web3_json_rpc.tree_api_url()) .with_batch_request_size_limit(api_config.web3_json_rpc.max_batch_request_size()) .with_response_body_size_limit(api_config.web3_json_rpc.max_response_body_size()) .with_tx_sender(tx_sender, vm_barrier) From 1c315f3245be770ad729c19977861b79d9e438a5 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Mon, 20 Nov 2023 15:06:09 +0100 Subject: [PATCH 027/115] fix: Fixes panic on unknown FRI prover group id (#522) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixes panic on unknown FRI prover group id. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. 
- [x] Code has been formatted via `zk fmt` and `zk lint`. --- .../zksync_core/src/house_keeper/fri_prover_queue_monitor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs index e0763377177..f962cf94a2e 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs @@ -37,7 +37,7 @@ impl PeriodicJob for FriProverStatsReporter { let group_id = self .config .get_group_id_for_circuit_id_and_aggregation_round(circuit_id, aggregation_round) - .unwrap(); + .unwrap_or(u8::MAX); metrics::gauge!( "fri_prover.prover.jobs", From c88c4360af75da6c4adfd1d9cc1996a9c8f88104 Mon Sep 17 00:00:00 2001 From: Danil Date: Mon, 20 Nov 2023 16:05:30 +0100 Subject: [PATCH 028/115] fix(deps): Update zk evm abstraction (#524) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Update zk evm abstarction ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
Signed-off-by: Danil --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index d98b5207003..ff4520eeed5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8330,7 +8330,7 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#7502a661d7d38906d849dcd3e7a15e5848af6581" +source = "git+https://github.com/matter-labs/era-zk_evm_abstractions.git#15a2af404902d5f10352e3d1fac693cc395fcff9" dependencies = [ "anyhow", "serde", From 89af5a41d77fbcc35161319e21fe38eaa62d5d52 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Tue, 21 Nov 2023 07:34:08 +0100 Subject: [PATCH 029/115] chore(main): release core 18.1.0 (#506) :robot: I have created a release *beep* *boop* --- ## [18.1.0](https://github.com/matter-labs/zksync-era/compare/core-v18.0.3...core-v18.1.0) (2023-11-20) ### Features * added consensus types and consensus column to miniblocks table ([#490](https://github.com/matter-labs/zksync-era/issues/490)) ([f9ae0ad](https://github.com/matter-labs/zksync-era/commit/f9ae0ad56b17fffa4b400ec2376517a2b630b862)) * Adds `prover_group_id` label into `fri_prover_prover_job` metric ([#503](https://github.com/matter-labs/zksync-era/issues/503)) ([851e800](https://github.com/matter-labs/zksync-era/commit/851e800721a627742d6781d6162009d61f83c1af)) * **core:** adds a get proof endpoint in zks namespace to http endpoint on main node ([#504](https://github.com/matter-labs/zksync-era/issues/504)) ([0ac4a4d](https://github.com/matter-labs/zksync-era/commit/0ac4a4ddb87d7728a99a29df9adeded5822e1def)) * **merkle tree:** Allow random-order tree recovery ([#485](https://github.com/matter-labs/zksync-era/issues/485)) ([146e4cf](https://github.com/matter-labs/zksync-era/commit/146e4cf2f8d890ff0a8d33229e224442e14be437)) ### Bug Fixes * **core:** add tree url to jsonrpsee server on main node 
([#512](https://github.com/matter-labs/zksync-era/issues/512)) ([7c137b7](https://github.com/matter-labs/zksync-era/commit/7c137b72b16a8671a27c24b378d33320877d6557)) * Fixes panic on unknown FRI prover group id ([#522](https://github.com/matter-labs/zksync-era/issues/522)) ([1c315f3](https://github.com/matter-labs/zksync-era/commit/1c315f3245be770ad729c19977861b79d9e438a5)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index d34f7f5b5f7..be5d4e35e28 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "18.0.3", + "core": "18.1.0", "prover": "9.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 92c48c29573..f64bd80a330 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,21 @@ # Changelog +## [18.1.0](https://github.com/matter-labs/zksync-era/compare/core-v18.0.3...core-v18.1.0) (2023-11-20) + + +### Features + +* added consensus types and consensus column to miniblocks table ([#490](https://github.com/matter-labs/zksync-era/issues/490)) ([f9ae0ad](https://github.com/matter-labs/zksync-era/commit/f9ae0ad56b17fffa4b400ec2376517a2b630b862)) +* Adds `prover_group_id` label into `fri_prover_prover_job` metric ([#503](https://github.com/matter-labs/zksync-era/issues/503)) ([851e800](https://github.com/matter-labs/zksync-era/commit/851e800721a627742d6781d6162009d61f83c1af)) +* **core:** adds a get proof endpoint in zks namespace to http endpoint on main node ([#504](https://github.com/matter-labs/zksync-era/issues/504)) 
([0ac4a4d](https://github.com/matter-labs/zksync-era/commit/0ac4a4ddb87d7728a99a29df9adeded5822e1def)) +* **merkle tree:** Allow random-order tree recovery ([#485](https://github.com/matter-labs/zksync-era/issues/485)) ([146e4cf](https://github.com/matter-labs/zksync-era/commit/146e4cf2f8d890ff0a8d33229e224442e14be437)) + + +### Bug Fixes + +* **core:** add tree url to jsonrpsee server on main node ([#512](https://github.com/matter-labs/zksync-era/issues/512)) ([7c137b7](https://github.com/matter-labs/zksync-era/commit/7c137b72b16a8671a27c24b378d33320877d6557)) +* Fixes panic on unknown FRI prover group id ([#522](https://github.com/matter-labs/zksync-era/issues/522)) ([1c315f3](https://github.com/matter-labs/zksync-era/commit/1c315f3245be770ad729c19977861b79d9e438a5)) + ## [18.0.3](https://github.com/matter-labs/zksync-era/compare/core-v18.0.2...core-v18.0.3) (2023-11-16) From 2cddf3c0fa786394161060445aa8a085173e3f71 Mon Sep 17 00:00:00 2001 From: Roman Brodetski Date: Tue, 21 Nov 2023 09:39:52 +0300 Subject: [PATCH 030/115] fix(prover): use a more performant query to get next job for FRI prover (#527) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Change the query we use to get the next job for FRI prover. The new query should be equivalent ## Why ❔ The old one was evidently slow. Our DBA did some research and came up with the new version. 
--- core/lib/dal/sqlx-data.json | 118 ++++++++++++++--------------- core/lib/dal/src/fri_prover_dal.rs | 18 +++-- 2 files changed, 69 insertions(+), 67 deletions(-) diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index e031fe7d671..18f0d8f198e 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -5197,65 +5197,6 @@ }, "query": "INSERT INTO events_queue (l1_batch_number, serialized_events_queue) VALUES ($1, $2)" }, - "62aaa047e3da5bd966608fec421ddad1b8afa04aaf35e946219d703bbe6ac9c5": { - "describe": { - "columns": [ - { - "name": "id", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "l1_batch_number", - "ordinal": 1, - "type_info": "Int8" - }, - { - "name": "circuit_id", - "ordinal": 2, - "type_info": "Int2" - }, - { - "name": "aggregation_round", - "ordinal": 3, - "type_info": "Int2" - }, - { - "name": "sequence_number", - "ordinal": 4, - "type_info": "Int4" - }, - { - "name": "depth", - "ordinal": 5, - "type_info": "Int4" - }, - { - "name": "is_node_final_proof", - "ordinal": 6, - "type_info": "Bool" - } - ], - "nullable": [ - false, - false, - false, - false, - false, - false, - false - ], - "parameters": { - "Left": [ - "Int2Array", - "Int2Array", - "Int4Array", - "Text" - ] - } - }, - "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now(),\n picked_by = $4\n WHERE id = (\n SELECT pj.id\n FROM prover_jobs_fri AS pj\n JOIN (\n SELECT * FROM unnest($1::smallint[], $2::smallint[])\n )\n AS tuple (circuit_id, round)\n ON tuple.circuit_id = pj.circuit_id AND tuple.round = pj.aggregation_round\n WHERE pj.status = 'queued'\n AND pj.protocol_version = ANY($3)\n ORDER BY pj.l1_batch_number ASC, pj.aggregation_round DESC, pj.id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, 
prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " - }, "6317155050a5dae24ea202cfd54d1e58cc7aeb0bfd4d95aa351f85cff04d3bff": { "describe": { "columns": [ @@ -8994,6 +8935,65 @@ }, "query": "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords\n FROM prover_jobs\n INNER JOIN scheduler_witness_jobs\n ON prover_jobs.l1_batch_number = scheduler_witness_jobs.l1_batch_number\n WHERE prover_jobs.l1_batch_number >= $1 AND prover_jobs.l1_batch_number <= $2\n AND prover_jobs.aggregation_round = 3\n AND prover_jobs.status = 'successful'\n " }, + "a9fc7d587aff79ecb78c1a56b8299d5cb39e7fb0b10cb82b9abb1691f87422e6": { + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "l1_batch_number", + "ordinal": 1, + "type_info": "Int8" + }, + { + "name": "circuit_id", + "ordinal": 2, + "type_info": "Int2" + }, + { + "name": "aggregation_round", + "ordinal": 3, + "type_info": "Int2" + }, + { + "name": "sequence_number", + "ordinal": 4, + "type_info": "Int4" + }, + { + "name": "depth", + "ordinal": 5, + "type_info": "Int4" + }, + { + "name": "is_node_final_proof", + "ordinal": 6, + "type_info": "Bool" + } + ], + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ], + "parameters": { + "Left": [ + "Int2Array", + "Int2Array", + "Int4Array", + "Text" + ] + } + }, + "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now(),\n picked_by = $4\n WHERE id = (\n SELECT pj.id\n FROM ( SELECT * FROM unnest($1::smallint[], $2::smallint[]) ) AS tuple (circuit_id, round)\n JOIN LATERAL\n (\n SELECT * FROM prover_jobs_fri AS pj\n WHERE pj.status = 'queued'\n AND pj.protocol_version = ANY($3)\n AND pj.circuit_id = tuple.circuit_id AND pj.aggregation_round = tuple.round\n ORDER BY pj.l1_batch_number ASC, pj.id ASC\n LIMIT 1\n ) AS pj ON true\n ORDER BY 
pj.l1_batch_number ASC, pj.aggregation_round DESC, pj.id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " + }, "aa279ce3351b30788711be6c65cb99cb14304ac38f8fed6d332237ffafc7c86b": { "describe": { "columns": [], diff --git a/core/lib/dal/src/fri_prover_dal.rs b/core/lib/dal/src/fri_prover_dal.rs index d3cb1364455..0787e818f6d 100644 --- a/core/lib/dal/src/fri_prover_dal.rs +++ b/core/lib/dal/src/fri_prover_dal.rs @@ -113,14 +113,16 @@ impl FriProverDal<'_, '_> { picked_by = $4 WHERE id = ( SELECT pj.id - FROM prover_jobs_fri AS pj - JOIN ( - SELECT * FROM unnest($1::smallint[], $2::smallint[]) - ) - AS tuple (circuit_id, round) - ON tuple.circuit_id = pj.circuit_id AND tuple.round = pj.aggregation_round - WHERE pj.status = 'queued' - AND pj.protocol_version = ANY($3) + FROM ( SELECT * FROM unnest($1::smallint[], $2::smallint[]) ) AS tuple (circuit_id, round) + JOIN LATERAL + ( + SELECT * FROM prover_jobs_fri AS pj + WHERE pj.status = 'queued' + AND pj.protocol_version = ANY($3) + AND pj.circuit_id = tuple.circuit_id AND pj.aggregation_round = tuple.round + ORDER BY pj.l1_batch_number ASC, pj.id ASC + LIMIT 1 + ) AS pj ON true ORDER BY pj.l1_batch_number ASC, pj.aggregation_round DESC, pj.id ASC LIMIT 1 FOR UPDATE From cebe2333e2320aafb9408a7b87023adb6b8947f4 Mon Sep 17 00:00:00 2001 From: Danil Date: Tue, 21 Nov 2023 09:54:13 +0100 Subject: [PATCH 031/115] chore(ci): rebuild provers if we have changes in library (#529) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. 
- [ ] Code has been formatted via `zk fmt` and `zk lint`. Signed-off-by: Danil --- .github/workflows/release-test-stage.yml | 2 + core/lib/dal/sqlx-data.json | 106 +++++++++++------------ core/lib/dal/src/fri_prover_dal.rs | 2 +- 3 files changed, 56 insertions(+), 54 deletions(-) diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index ae3294ee62e..c8f7a1e9eee 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -31,6 +31,8 @@ jobs: # We don't want to be rebuilding and redeploying all the Docker images when eg. only document have changed prover: - prover/** + - core/lib/** + - '!core/lib/zksync_core/**' core: - core/** all: diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index 18f0d8f198e..f22482183f8 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -8935,7 +8935,58 @@ }, "query": "SELECT prover_jobs.result as proof, scheduler_witness_jobs.aggregation_result_coords\n FROM prover_jobs\n INNER JOIN scheduler_witness_jobs\n ON prover_jobs.l1_batch_number = scheduler_witness_jobs.l1_batch_number\n WHERE prover_jobs.l1_batch_number >= $1 AND prover_jobs.l1_batch_number <= $2\n AND prover_jobs.aggregation_round = 3\n AND prover_jobs.status = 'successful'\n " }, - "a9fc7d587aff79ecb78c1a56b8299d5cb39e7fb0b10cb82b9abb1691f87422e6": { + "aa279ce3351b30788711be6c65cb99cb14304ac38f8fed6d332237ffafc7c86b": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Text", + "Time", + "Text", + "Int8" + ] + } + }, + "query": "UPDATE proof_compression_jobs_fri SET status = $1, updated_at = now(), time_taken = $2, l1_proof_blob_url = $3WHERE l1_batch_number = $4" + }, + "aa7ae476aed5979227887891e9be995924588aa10ccba7424d6ce58f811eaa02": { + "describe": { + "columns": [ + { + "name": "number!", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + null + ], + "parameters": { + "Left": [] + } + }, + 
"query": "SELECT COALESCE(MAX(number), 0) AS \"number!\" FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL" + }, + "aacaeff95b9a2988167dde78200d7139ba99edfa30dbcd8a7a57f72efc676477": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history AS commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) WHERE commit_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" + }, + "ac179b3a4eca421f3151f4f1eb844f2cee16fa1d2a47c910feb8e07d8f8ace6c": { "describe": { "columns": [ { @@ -8992,58 +9043,7 @@ ] } }, - "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n updated_at = now(), processing_started_at = now(),\n picked_by = $4\n WHERE id = (\n SELECT pj.id\n FROM ( SELECT * FROM unnest($1::smallint[], $2::smallint[]) ) AS tuple (circuit_id, round)\n JOIN LATERAL\n (\n SELECT * FROM prover_jobs_fri AS pj\n WHERE pj.status = 'queued'\n AND pj.protocol_version = ANY($3)\n AND pj.circuit_id = tuple.circuit_id AND pj.aggregation_round = tuple.round\n ORDER BY pj.l1_batch_number ASC, pj.id ASC\n LIMIT 1\n ) AS pj ON true\n ORDER BY pj.l1_batch_number ASC, pj.aggregation_round DESC, pj.id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " - }, - "aa279ce3351b30788711be6c65cb99cb14304ac38f8fed6d332237ffafc7c86b": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Text", - "Time", - "Text", - "Int8" - ] - } - }, - "query": "UPDATE proof_compression_jobs_fri SET status = $1, updated_at = now(), time_taken = $2, l1_proof_blob_url = $3WHERE l1_batch_number = $4" - }, - 
"aa7ae476aed5979227887891e9be995924588aa10ccba7424d6ce58f811eaa02": { - "describe": { - "columns": [ - { - "name": "number!", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - null - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT COALESCE(MAX(number), 0) AS \"number!\" FROM l1_batches WHERE eth_prove_tx_id IS NOT NULL" - }, - "aacaeff95b9a2988167dde78200d7139ba99edfa30dbcd8a7a57f72efc676477": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [] - } - }, - "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history AS commit_tx ON (l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id) WHERE commit_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" + "query": "\n UPDATE prover_jobs_fri\n SET status = 'in_progress', attempts = attempts + 1,\n processing_started_at = now(), updated_at = now(), \n picked_by = $4\n WHERE id = (\n SELECT pj.id\n FROM ( SELECT * FROM unnest($1::smallint[], $2::smallint[]) ) AS tuple (circuit_id, round)\n JOIN LATERAL\n (\n SELECT * FROM prover_jobs_fri AS pj\n WHERE pj.status = 'queued'\n AND pj.protocol_version = ANY($3)\n AND pj.circuit_id = tuple.circuit_id AND pj.aggregation_round = tuple.round\n ORDER BY pj.l1_batch_number ASC, pj.id ASC\n LIMIT 1\n ) AS pj ON true\n ORDER BY pj.l1_batch_number ASC, pj.aggregation_round DESC, pj.id ASC\n LIMIT 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING prover_jobs_fri.id, prover_jobs_fri.l1_batch_number, prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round, prover_jobs_fri.sequence_number, prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n " }, "ac35fb205c83d82d78983f4c9b47f56d3c91fbb2c95046555c7d60a9a2ebb446": { "describe": { diff --git a/core/lib/dal/src/fri_prover_dal.rs b/core/lib/dal/src/fri_prover_dal.rs index 0787e818f6d..026cb783dd3 100644 --- a/core/lib/dal/src/fri_prover_dal.rs +++ b/core/lib/dal/src/fri_prover_dal.rs 
@@ -109,7 +109,7 @@ impl FriProverDal<'_, '_> { " UPDATE prover_jobs_fri SET status = 'in_progress', attempts = attempts + 1, - updated_at = now(), processing_started_at = now(), + processing_started_at = now(), updated_at = now(), picked_by = $4 WHERE id = ( SELECT pj.id From 67ef1339d42786efbeb83c22fac99f3bf5dd4380 Mon Sep 17 00:00:00 2001 From: Danil Date: Tue, 21 Nov 2023 13:03:49 +0100 Subject: [PATCH 032/115] fix(ci): Use the same nightly rust (#530) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. Signed-off-by: Danil --- .github/workflows/build-core-template.yml | 2 +- docker/circuit-synthesizer/Dockerfile | 4 ++-- docker/contract-verifier/Dockerfile | 4 ++-- docker/cross-external-nodes-checker/Dockerfile | 4 ++-- docker/external-node/Dockerfile | 4 ++-- docker/proof-fri-compressor/Dockerfile | 4 ++-- docker/prover-fri-gateway/Dockerfile | 4 ++-- docker/prover-fri/Dockerfile | 4 ++-- docker/prover-gpu-fri/Dockerfile | 4 ++-- docker/prover/Dockerfile | 4 ++-- docker/server-v2/Dockerfile | 4 ++-- docker/witness-generator/Dockerfile | 4 ++-- docker/witness-vector-generator/Dockerfile | 4 ++-- docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile | 2 +- docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile | 2 +- docker/zk-environment/Dockerfile | 4 ++-- prover/proof_fri_compressor/README.md | 2 +- prover/prover_fri/README.md | 8 ++++---- prover/vk_setup_data_generator_server_fri/README.md | 8 ++++---- prover/witness_vector_generator/README.md | 2 +- 20 files changed, 39 insertions(+), 39 deletions(-) diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index 2894f8fdb89..c4ae27faf9c 100644 
--- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -82,7 +82,7 @@ jobs: DOCKER_ACTION: ${{ inputs.action }} COMPONENT: ${{ matrix.component }} run: | - ci_run rustup default nightly-2023-07-21 + ci_run rustup default nightly-2023-08-21 ci_run zk docker $DOCKER_ACTION $COMPONENT -- --public - name: Show sccache stats if: always() diff --git a/docker/circuit-synthesizer/Dockerfile b/docker/circuit-synthesizer/Dockerfile index e64ada1d1a8..831fe167223 100644 --- a/docker/circuit-synthesizer/Dockerfile +++ b/docker/circuit-synthesizer/Dockerfile @@ -11,8 +11,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 WORKDIR /usr/src/zksync COPY . . diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index f0aa565b1e1..618a9ba2fc1 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -10,8 +10,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 WORKDIR /usr/src/zksync COPY . . 
diff --git a/docker/cross-external-nodes-checker/Dockerfile b/docker/cross-external-nodes-checker/Dockerfile index 40837169402..87b5d67d719 100644 --- a/docker/cross-external-nodes-checker/Dockerfile +++ b/docker/cross-external-nodes-checker/Dockerfile @@ -10,8 +10,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 WORKDIR /usr/src/zksync COPY . . diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index 997a6e089e7..7e1e7c36395 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -11,8 +11,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 WORKDIR /usr/src/zksync COPY . . diff --git a/docker/proof-fri-compressor/Dockerfile b/docker/proof-fri-compressor/Dockerfile index e60998fac70..a4654701311 100644 --- a/docker/proof-fri-compressor/Dockerfile +++ b/docker/proof-fri-compressor/Dockerfile @@ -13,8 +13,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 WORKDIR /usr/src/zksync COPY . . 
diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index 6a7b27637ab..b0f11949551 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -11,8 +11,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 WORKDIR /usr/src/zksync COPY . . diff --git a/docker/prover-fri/Dockerfile b/docker/prover-fri/Dockerfile index 1fda048ca33..a4406b34ec7 100644 --- a/docker/prover-fri/Dockerfile +++ b/docker/prover-fri/Dockerfile @@ -11,8 +11,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 WORKDIR /usr/src/zksync COPY . . 
diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index 5e37c089ed9..5b7787332cd 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -14,8 +14,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ ENV CUDAARCHS=89 RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \ chmod +x cmake-3.24.2-linux-x86_64.sh && \ diff --git a/docker/prover/Dockerfile b/docker/prover/Dockerfile index 7c96273a06a..a883aa02797 100644 --- a/docker/prover/Dockerfile +++ b/docker/prover/Dockerfile @@ -13,8 +13,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 WORKDIR /usr/src/zksync diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index ff441ab4170..1b79fb32854 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -14,8 +14,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 RUN cargo build --release --features=rocksdb/io-uring diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index 984d8520313..f2fe7926a92 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -11,8 +11,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | 
bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 WORKDIR /usr/src/zksync COPY . . diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index b7e1c320cfb..b366006009e 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -11,8 +11,8 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ - rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 + rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 WORKDIR /usr/src/zksync COPY . . diff --git a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile index 18dc07b88d0..9aa7a2b0067 100644 --- a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile @@ -70,7 +70,7 @@ RUN echo "deb http://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/so gcloud config set metrics/environment github_docker_image RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y -RUN rustup install nightly-2023-07-21 +RUN rustup install nightly-2023-08-21 RUN rustup default stable RUN cargo install --version=0.5.13 sqlx-cli RUN cargo install cargo-nextest diff --git a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile index 2b817806678..ed10b252974 100644 --- a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile +++ b/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile @@ -68,7 +68,7 @@ RUN echo "deb http://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/so gcloud config set metrics/environment github_docker_image RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y -RUN rustup install nightly-2023-07-21 +RUN rustup 
install nightly-2023-08-21 RUN rustup default stable RUN cargo install --version=0.5.13 sqlx-cli RUN cargo install cargo-nextest diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 5616874c8b9..f86aeaddb11 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -133,5 +133,5 @@ ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache FROM rust-lightweight as rust-lightweight-nightly -RUN rustup install nightly-2023-07-21 && \ - rustup default nightly-2023-07-21 +RUN rustup install nightly-2023-08-21 && \ + rustup default nightly-2023-08-21 diff --git a/prover/proof_fri_compressor/README.md b/prover/proof_fri_compressor/README.md index bea0c9ac390..3da29b08e7c 100644 --- a/prover/proof_fri_compressor/README.md +++ b/prover/proof_fri_compressor/README.md @@ -4,4 +4,4 @@ Used to compress FRI proof to Bellman proof that gets sent to L1. ## running -`zk f cargo +nightly-2023-07-21 run --release --bin zksync_proof_fri_compressor` +`zk f cargo +nightly-2023-08-21 run --release --bin zksync_proof_fri_compressor` diff --git a/prover/prover_fri/README.md b/prover/prover_fri/README.md index cec915494b6..240bd13d238 100644 --- a/prover/prover_fri/README.md +++ b/prover/prover_fri/README.md @@ -2,11 +2,11 @@ ## running cpu prover -`zk f cargo +nightly-2023-07-21 run --release --bin zksync_prover_fri` +`zk f cargo +nightly-2023-08-21 run --release --bin zksync_prover_fri` ## running gpu prover(requires CUDA 12.0+) -`zk f cargo +nightly-2023-07-21 run --release --features "gpu" --bin zksync_prover_fri` +`zk f cargo +nightly-2023-08-21 run --release --features "gpu" --bin zksync_prover_fri` ## Proving a block using CPU prover locally @@ -17,7 +17,7 @@ Machine specs: - RAM: 60GB of RAM(if you have lower RAM machine enable swap) - Disk: 400GB of free disk -1. Install the correct nightly version using command: `rustup install nightly-2023-07-21` +1. 
Install the correct nightly version using command: `rustup install nightly-2023-08-21` 2. Generate the cpu setup data (no need to regenerate if it's already there). This will consume around 300Gb of disk. Use these commands: @@ -67,7 +67,7 @@ pre-requisite. This is useful for debugging and testing Machine specs: - Disk: 30GB of free disk - GPU: 1x Nvidia L4/T4 with 16GB of GPU RAM -1. Install the correct nightly version using command: `rustup install nightly-2023-07-21` +1. Install the correct nightly version using command: `rustup install nightly-2023-08-21` 2. Generate the gpu setup data (no need to regenerate if it's already there). This will consume around 300Gb of disk. Use these commands: diff --git a/prover/vk_setup_data_generator_server_fri/README.md b/prover/vk_setup_data_generator_server_fri/README.md index 1dc8b5c0fa2..87e448f3359 100644 --- a/prover/vk_setup_data_generator_server_fri/README.md +++ b/prover/vk_setup_data_generator_server_fri/README.md @@ -7,16 +7,16 @@ zk init keys/setup/setup_2^26.key ## generating setup-data for specific circuit type -`zk f cargo +nightly-2023-07-21 run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer` +`zk f cargo +nightly-2023-08-21 run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer` ## generating GPU setup-data for specific circuit type -`zk f cargo +nightly-2023-07-21 run --features "gpu" --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer` +`zk f cargo +nightly-2023-08-21 run --features "gpu" --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 --is_base_layer` ## Generating VK's -`cargo +nightly-2023-07-21 run --release --bin zksync_vk_generator_fri` +`cargo +nightly-2023-08-21 run --release --bin zksync_vk_generator_fri` ## generating VK commitment for existing VK's -`cargo +nightly-2023-07-21 run --release --bin zksync_commitment_generator_fri` +`cargo +nightly-2023-08-21 run 
--release --bin zksync_commitment_generator_fri` diff --git a/prover/witness_vector_generator/README.md b/prover/witness_vector_generator/README.md index 8c4328afe8c..e287e4d53b2 100644 --- a/prover/witness_vector_generator/README.md +++ b/prover/witness_vector_generator/README.md @@ -4,4 +4,4 @@ Used to generate witness vectors using circuit and sending them to prover over T ## running -`zk f cargo +nightly-2023-07-21 run --release --bin zksync_witness_vector_generator` +`zk f cargo +nightly-2023-08-21 run --release --bin zksync_witness_vector_generator` From 6e4d2282046a98dcc864cb3bd3541fba7e26aacf Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 22 Nov 2023 15:18:27 +0200 Subject: [PATCH 033/115] chore: Add metrics on types of filters in API (#526) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add metrics on types of filters, especially: * Count of the current amount of filters of specific type * Frequency of requests to specific filter type * Lifetime of filters of specific type * Request count for a specific filter ## Why ❔ We receive a lot of requests in our WebSocket API to getFilterChanges method. We want to understand types of filters and distribution of load among them. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--------- Co-authored-by: Alex Ostrovski --- core/lib/web3_decl/src/types.rs | 9 - .../src/api_server/web3/metrics.rs | 42 ++++- .../zksync_core/src/api_server/web3/mod.rs | 17 +- .../src/api_server/web3/namespaces/eth.rs | 157 +++++++++--------- .../zksync_core/src/api_server/web3/state.rs | 81 +++++++-- 5 files changed, 197 insertions(+), 109 deletions(-) diff --git a/core/lib/web3_decl/src/types.rs b/core/lib/web3_decl/src/types.rs index 326d7936653..46033bc4118 100644 --- a/core/lib/web3_decl/src/types.rs +++ b/core/lib/web3_decl/src/types.rs @@ -9,7 +9,6 @@ use core::convert::{TryFrom, TryInto}; use core::fmt; use core::marker::PhantomData; -use chrono::NaiveDateTime; use itertools::unfold; use rlp::Rlp; use serde::{de, Deserialize, Serialize, Serializer}; @@ -105,14 +104,6 @@ pub enum FilterChanges { Empty([u8; 0]), } -/// Represents all kinds of `Filter`. -#[derive(Debug, Clone)] -pub enum TypedFilter { - Events(Filter, zksync_types::MiniblockNumber), - Blocks(zksync_types::MiniblockNumber), - PendingTransactions(NaiveDateTime), -} - /// Either value or array of values. 
#[derive(Default, Debug, PartialEq, Clone)] pub struct ValueOrArray(pub Vec); diff --git a/core/lib/zksync_core/src/api_server/web3/metrics.rs b/core/lib/zksync_core/src/api_server/web3/metrics.rs index 9185ba89c58..2df24f9dd60 100644 --- a/core/lib/zksync_core/src/api_server/web3/metrics.rs +++ b/core/lib/zksync_core/src/api_server/web3/metrics.rs @@ -2,7 +2,7 @@ use vise::{ Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LabeledFamily, - LatencyObserver, Metrics, + LatencyObserver, Metrics, Unit, }; use std::{ @@ -10,10 +10,9 @@ use std::{ time::{Duration, Instant}, }; +use super::{ApiTransport, TypedFilter}; use zksync_types::api; -use super::ApiTransport; - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "scheme", rename_all = "UPPERCASE")] pub(super) enum ApiTransportLabel { @@ -195,3 +194,40 @@ pub(super) struct PubSubMetrics { #[vise::register] pub(super) static PUB_SUB_METRICS: vise::Global = vise::Global::new(); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "type", rename_all = "snake_case")] +pub(super) enum FilterType { + Events, + Blocks, + PendingTransactions, +} + +impl From<&TypedFilter> for FilterType { + fn from(value: &TypedFilter) -> Self { + match value { + TypedFilter::Events(_, _) => FilterType::Events, + TypedFilter::Blocks(_) => FilterType::Blocks, + TypedFilter::PendingTransactions(_) => FilterType::PendingTransactions, + } + } +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "api_web3_filter")] +pub(super) struct FilterMetrics { + /// Number of currently active filters grouped by the filter type + pub metrics_count: Family, + /// Time in seconds between consecutive requests to the filter grouped by the filter type + #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + pub request_frequency: Family>, + /// Lifetime of a filter in seconds grouped by the filter type + 
#[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + pub filter_lifetime: Family>, + /// Number of requests to the filter grouped by the filter type + #[metrics(buckets = Buckets::exponential(1.0..=1048576.0, 2.0))] + pub filter_count: Family>, +} + +#[vise::register] +pub(super) static FILTER_METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs index 57da568ea9a..918bf5f67dc 100644 --- a/core/lib/zksync_core/src/api_server/web3/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/mod.rs @@ -4,9 +4,10 @@ use jsonrpc_core::MetaIoHandler; use jsonrpc_http_server::hyper; use jsonrpc_pubsub::PubSubHandler; use serde::Deserialize; -use tokio::sync::{oneshot, watch, RwLock}; +use tokio::sync::{oneshot, watch, Mutex}; use tower_http::{cors::CorsLayer, metrics::InFlightRequestsLayer}; +use chrono::NaiveDateTime; use std::{net::SocketAddr, sync::Arc, time::Duration}; use tokio::task::JoinHandle; @@ -23,6 +24,7 @@ use zksync_web3_decl::{ DebugNamespaceServer, EnNamespaceServer, EthNamespaceServer, NetNamespaceServer, Web3NamespaceServer, ZksNamespaceServer, }, + types::Filter, }; use crate::{ @@ -63,6 +65,17 @@ use self::state::{Filters, InternalApiConfig, RpcState, SealedMiniblockNumber}; /// Timeout for graceful shutdown logic within API servers. const GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(5); +/// Represents all kinds of `Filter`. 
+#[derive(Debug, Clone)] +pub(crate) enum TypedFilter { + // Events from some block with additional filters + Events(Filter, MiniblockNumber), + // Blocks from some block + Blocks(MiniblockNumber), + // Pending transactions from some timestamp + PendingTransactions(NaiveDateTime), +} + #[derive(Debug, Clone, Copy)] enum ApiBackend { Jsonrpsee, @@ -278,7 +291,7 @@ impl ApiBuilder { tokio::spawn(update_task); RpcState { - installed_filters: Arc::new(RwLock::new(Filters::new( + installed_filters: Arc::new(Mutex::new(Filters::new( self.filters_limit.unwrap_or(usize::MAX), ))), connection_pool: self.pool, diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index e2ba192ad1d..4cabb8e15da 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -14,7 +14,7 @@ use zksync_types::{ use zksync_utils::u256_to_h256; use zksync_web3_decl::{ error::Web3Error, - types::{Address, Block, Filter, FilterChanges, Log, TypedFilter, U64}, + types::{Address, Block, Filter, FilterChanges, Log, U64}, }; use crate::{ @@ -25,6 +25,7 @@ use crate::{ metrics::{BlockCallObserver, API_METRICS}, resolve_block, state::RpcState, + TypedFilter, }, }, l1_gas_price::L1GasPriceProvider, @@ -228,8 +229,8 @@ impl EthNamespace { let (from_block, to_block) = self.state.resolve_filter_block_range(&filter).await?; filter.to_block = Some(BlockNumber::Number(to_block.0.into())); - let (changes, _) = self - .filter_changes(TypedFilter::Events(filter, from_block)) + let changes = self + .filter_changes(&mut TypedFilter::Events(filter, from_block)) .await?; method_latency.observe(); Ok(match changes { @@ -242,22 +243,29 @@ impl EthNamespace { const METHOD_NAME: &str = "get_filter_logs"; let method_latency = API_METRICS.start_call(METHOD_NAME); - // Note: We have to keep this as a separate variable, since otherwise the lock guard would exist - // for 
duration of the whole `match` block, and this guard is not `Send`. This would make the whole future - // not `Send`, since `match` has an `await` point. - let maybe_filter = self.state.installed_filters.read().await.get(idx).cloned(); - let filter = match maybe_filter { - Some(TypedFilter::Events(filter, _)) => { - let from_block = self - .state - .resolve_filter_block_number(filter.from_block) - .await?; - TypedFilter::Events(filter, from_block) - } - _ => return Err(Web3Error::FilterNotFound), + // We clone the filter to not hold the filter lock for an extended period of time. + let maybe_filter = self + .state + .installed_filters + .lock() + .await + .get_and_update_stats(idx); + + let Some(TypedFilter::Events(filter, _)) = maybe_filter else { + return Err(Web3Error::FilterNotFound); }; - let logs = self.filter_changes(filter).await?.0; + let from_block = self + .state + .resolve_filter_block_number(filter.from_block) + .await?; + let logs = self + .filter_changes(&mut TypedFilter::Events(filter, from_block)) + .await?; + + // We are not updating the filter, since that is the purpose of `get_filter_changes` method, + // which is getting changes happened from the last poll and moving the cursor forward. 
+ method_latency.observe(); Ok(logs) } @@ -538,24 +546,25 @@ impl EthNamespace { const METHOD_NAME: &str = "new_block_filter"; let method_latency = API_METRICS.start_call(METHOD_NAME); - let last_block_number = self + let mut conn = self .state .connection_pool .access_storage_tagged("api") .await - .unwrap() + .map_err(|err| internal_error(METHOD_NAME, err))?; + let last_block_number = conn .blocks_web3_dal() .get_sealed_miniblock_number() .await .map_err(|err| internal_error(METHOD_NAME, err))?; + drop(conn); let idx = self .state .installed_filters - .write() + .lock() .await .add(TypedFilter::Blocks(last_block_number)); - method_latency.observe(); Ok(idx) } @@ -570,15 +579,15 @@ impl EthNamespace { return Err(Web3Error::TooManyTopics); } } + self.state.resolve_filter_block_hash(&mut filter).await?; let from_block = self.state.get_filter_from_block(&filter).await?; let idx = self .state .installed_filters - .write() + .lock() .await .add(TypedFilter::Events(filter, from_block)); - method_latency.observe(); Ok(idx) } @@ -591,7 +600,7 @@ impl EthNamespace { let idx = self .state .installed_filters - .write() + .lock() .await .add(TypedFilter::PendingTransactions( chrono::Utc::now().naive_utc(), @@ -605,27 +614,26 @@ impl EthNamespace { const METHOD_NAME: &str = "get_filter_changes"; let method_latency = API_METRICS.start_call(METHOD_NAME); - let filter = self + let mut filter = self .state .installed_filters - .read() + .lock() .await - .get(idx) - .cloned() + .get_and_update_stats(idx) .ok_or(Web3Error::FilterNotFound)?; - let result = match self.filter_changes(filter).await { - Ok((changes, updated_filter)) => { + let result = match self.filter_changes(&mut filter).await { + Ok(changes) => { self.state .installed_filters - .write() + .lock() .await - .update(idx, updated_filter); + .update(idx, filter); Ok(changes) } - Err(Web3Error::LogsLimitExceeded(_, _, _)) => { + Err(Web3Error::LogsLimitExceeded(..)) => { // The filter was not being polled for a long time, 
so we remove it. - self.state.installed_filters.write().await.remove(idx); + self.state.installed_filters.lock().await.remove(idx); Err(Web3Error::FilterNotFound) } Err(err) => Err(err), @@ -639,7 +647,7 @@ impl EthNamespace { const METHOD_NAME: &str = "uninstall_filter"; let method_latency = API_METRICS.start_call(METHOD_NAME); - let removed = self.state.installed_filters.write().await.remove(idx); + let removed = self.state.installed_filters.lock().await.remove(idx); method_latency.observe(); removed } @@ -751,68 +759,65 @@ impl EthNamespace { #[tracing::instrument(skip(self, typed_filter))] async fn filter_changes( &self, - typed_filter: TypedFilter, - ) -> Result<(FilterChanges, TypedFilter), Web3Error> { + typed_filter: &mut TypedFilter, + ) -> Result { const METHOD_NAME: &str = "filter_changes"; let res = match typed_filter { TypedFilter::Blocks(from_block) => { - let (block_hashes, last_block_number) = self + let mut conn = self .state .connection_pool .access_storage_tagged("api") .await - .unwrap() + .map_err(|err| internal_error(METHOD_NAME, err))?; + let (block_hashes, last_block_number) = conn .blocks_web3_dal() - .get_block_hashes_after(from_block, self.state.api_config.req_entities_limit) + .get_block_hashes_after(*from_block, self.state.api_config.req_entities_limit) .await .map_err(|err| internal_error(METHOD_NAME, err))?; - ( - FilterChanges::Hashes(block_hashes), - TypedFilter::Blocks(last_block_number.unwrap_or(from_block)), - ) + *from_block = last_block_number.unwrap_or(*from_block); + FilterChanges::Hashes(block_hashes) } + TypedFilter::PendingTransactions(from_timestamp) => { - let (tx_hashes, last_timestamp) = self + let mut conn = self .state .connection_pool .access_storage_tagged("api") .await - .unwrap() + .map_err(|err| internal_error(METHOD_NAME, err))?; + let (tx_hashes, last_timestamp) = conn .transactions_web3_dal() .get_pending_txs_hashes_after( - from_timestamp, + *from_timestamp, Some(self.state.api_config.req_entities_limit), ) 
.await .map_err(|err| internal_error(METHOD_NAME, err))?; - ( - FilterChanges::Hashes(tx_hashes), - TypedFilter::PendingTransactions(last_timestamp.unwrap_or(from_timestamp)), - ) + *from_timestamp = last_timestamp.unwrap_or(*from_timestamp); + FilterChanges::Hashes(tx_hashes) } + TypedFilter::Events(filter, from_block) => { - let addresses: Vec<_> = filter - .address - .clone() - .into_iter() - .flat_map(|v| v.0) - .collect(); - if let Some(topics) = filter.topics.as_ref() { + let addresses = if let Some(addresses) = &filter.address { + addresses.0.clone() + } else { + vec![] + }; + let topics = if let Some(topics) = &filter.topics { if topics.len() > EVENT_TOPIC_NUMBER_LIMIT { return Err(Web3Error::TooManyTopics); } - } - let topics: Vec<_> = filter - .topics - .clone() - .into_iter() - .flatten() - .enumerate() - .filter_map(|(idx, topics)| topics.map(|topics| (idx as u32 + 1, topics.0))) - .collect(); + let topics_by_idx = topics.iter().enumerate().filter_map(|(idx, topics)| { + Some((idx as u32 + 1, topics.as_ref()?.0.clone())) + }); + topics_by_idx.collect::>() + } else { + vec![] + }; let get_logs_filter = GetLogsFilter { - from_block, + from_block: *from_block, to_block: filter.to_block, addresses, topics, @@ -827,11 +832,11 @@ impl EthNamespace { .connection_pool .access_storage_tagged("api") .await - .unwrap(); + .map_err(|err| internal_error(METHOD_NAME, err))?; // Check if there is more than one block in range and there are more than `req_entities_limit` logs that satisfies filter. // In this case we should return error and suggest requesting logs with smaller block range. 
- if from_block != to_block { + if *from_block != to_block { if let Some(miniblock_number) = storage .events_web3_dal() .get_log_block_number( @@ -854,14 +859,12 @@ impl EthNamespace { .get_logs(get_logs_filter, i32::MAX as usize) .await .map_err(|err| internal_error(METHOD_NAME, err))?; - let new_from_block = logs + *from_block = logs .last() .map(|log| MiniblockNumber(log.block_number.unwrap().as_u32())) - .unwrap_or(from_block); - ( - FilterChanges::Logs(logs), - TypedFilter::Events(filter, new_from_block), - ) + .unwrap_or(*from_block); + // FIXME: why is `from_block` not updated? + FilterChanges::Logs(logs) } }; @@ -872,7 +875,7 @@ impl EthNamespace { // Bogus methods. // They are moved into a separate `impl` block so they don't make the actual implementation noisy. // This `impl` block contains methods that we *have* to implement for compliance, but don't really -// make sense in terms in L2. +// make sense in terms of L2. impl EthNamespace { pub fn coinbase_impl(&self) -> Address { // There is no coinbase account. 
diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index b143b6ccfc2..0463d482320 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -1,4 +1,3 @@ -use tokio::sync::RwLock; use zksync_utils::h256_to_u256; use std::{ @@ -9,8 +8,10 @@ use std::{ atomic::{AtomicU32, Ordering}, Arc, }, - time::Duration, + time::{Duration, Instant}, }; +use tokio::sync::Mutex; +use vise::GaugeGuard; use zksync_config::configs::{api::Web3JsonRpcConfig, chain::NetworkConfig, ContractsConfig}; use zksync_dal::ConnectionPool; @@ -24,10 +25,10 @@ use zksync_types::{ }; use zksync_web3_decl::{ error::Web3Error, - types::{Filter, Log, TypedFilter}, + types::{Filter, Log}, }; -use super::metrics::API_METRICS; +use super::metrics::{FilterType, API_METRICS, FILTER_METRICS}; use crate::{ api_server::{ execution_sandbox::BlockArgs, @@ -35,7 +36,7 @@ use crate::{ tx_sender::TxSender, web3::{ backend_jsonrpc::error::internal_error, namespaces::eth::EVENT_TOPIC_NUMBER_LIMIT, - resolve_block, + resolve_block, TypedFilter, }, }, sync_layer::SyncState, @@ -166,7 +167,7 @@ impl SealedMiniblockNumber { /// Holder for the data required for the API to be functional. #[derive(Debug)] pub struct RpcState { - pub installed_filters: Arc>, + pub(crate) installed_filters: Arc>, pub connection_pool: ConnectionPool, pub tree_api: Option, pub tx_sender: TxSender, @@ -541,12 +542,54 @@ impl RpcState { } /// Contains mapping from index to `Filter` with optional location. 
-#[derive(Default, Debug, Clone)] -pub struct Filters { - state: HashMap, +#[derive(Default, Debug)] +pub(crate) struct Filters { + state: HashMap, max_cap: usize, } +#[derive(Debug)] +struct InstalledFilter { + pub filter: TypedFilter, + _guard: GaugeGuard, + created_at: Instant, + last_request: Instant, + request_count: usize, +} + +impl InstalledFilter { + pub fn new(filter: TypedFilter) -> Self { + let guard = FILTER_METRICS.metrics_count[&FilterType::from(&filter)].inc_guard(1); + Self { + filter, + _guard: guard, + created_at: Instant::now(), + last_request: Instant::now(), + request_count: 0, + } + } + + pub fn update_stats(&mut self) { + let previous_request_timestamp = self.last_request; + let now = Instant::now(); + + self.last_request = now; + self.request_count += 1; + + let filter_type = FilterType::from(&self.filter); + FILTER_METRICS.request_frequency[&filter_type].observe(now - previous_request_timestamp); + } +} + +impl Drop for InstalledFilter { + fn drop(&mut self) { + let filter_type = FilterType::from(&self.filter); + + FILTER_METRICS.filter_count[&filter_type].observe(self.request_count); + FILTER_METRICS.filter_lifetime[&filter_type].observe(self.created_at.elapsed()); + } +} + impl Filters { /// Instantiates `Filters` with given max capacity. pub fn new(max_cap: usize) -> Self { @@ -564,7 +607,8 @@ impl Filters { break val; } }; - self.state.insert(idx, filter); + + self.state.insert(idx, InstalledFilter::new(filter)); // Check if we reached max capacity if self.state.len() > self.max_cap { @@ -577,17 +621,18 @@ impl Filters { } /// Retrieves filter from the state. - pub fn get(&self, index: U256) -> Option<&TypedFilter> { - self.state.get(&index) + pub fn get_and_update_stats(&mut self, index: U256) -> Option { + let installed_filter = self.state.get_mut(&index)?; + + installed_filter.update_stats(); + + Some(installed_filter.filter.clone()) } /// Updates filter in the state. 
- pub fn update(&mut self, index: U256, new_filter: TypedFilter) -> bool { - if let Some(typed_filter) = self.state.get_mut(&index) { - *typed_filter = new_filter; - true - } else { - false + pub fn update(&mut self, index: U256, new_filter: TypedFilter) { + if let Some(installed_filter) = self.state.get_mut(&index) { + installed_filter.filter = new_filter; } } From 9aec8ef61ad73bcf14f75496507a64ef93548345 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 22 Nov 2023 16:05:21 +0200 Subject: [PATCH 034/115] chore: remove `recalculate_miniblock_hashes` method (#494) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Remove `recalculate_miniblock_hashes` method and other functions, that are used only by it ## Why ❔ It's not needed ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- core/bin/external_node/src/main.rs | 1 - core/lib/dal/sqlx-data.json | 88 ----------------- core/lib/dal/src/blocks_dal.rs | 94 ------------------- .../zksync_core/src/sync_layer/external_io.rs | 64 +------------ 4 files changed, 3 insertions(+), 244 deletions(-) diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index f9bfbebedbb..4e12315d930 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -83,7 +83,6 @@ async fn build_state_keeper( chain_id, ) .await; - io.recalculate_miniblock_hashes().await; ZkSyncStateKeeper::without_sealer(stop_receiver, Box::new(io), batch_executor_base) } diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index f22482183f8..3776b4f84b3 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -744,26 +744,6 @@ }, "query": "SELECT number FROM l1_batches LEFT JOIN eth_txs_history AS prove_tx ON (l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id) WHERE prove_tx.confirmed_at IS NOT NULL ORDER BY number DESC LIMIT 1" }, - "15135331e56e3e4e3eeae3aac609d8e8c7146d190dfe26c1a24f92d21cd34858": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - } - ], - "nullable": [ - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT number from miniblocks where timestamp > $1 ORDER BY number ASC LIMIT 1" - }, "157fc4ef4f5fd831399219850bc59ec0bd32d938ec8685dacaf913efdccfe7fe": { "describe": { "columns": [ @@ -3861,33 +3841,6 @@ }, "query": "UPDATE storage SET value = u.value FROM UNNEST($1::bytea[], $2::bytea[]) AS u(key, value) WHERE u.key = hashed_key" }, - "400bb5f012b95f5b327a65bf8a55e61a9e41a8040f546d75b9b8aa6be45e78d5": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "hash", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "Int4", - 
"Int8" - ] - } - }, - "query": "SELECT number, hash FROM miniblocks WHERE protocol_version = $1 ORDER BY number DESC LIMIT $2" - }, "4029dd84cde963ed8541426a659b10ccdbacbf4392664e34bfc29737aa630b28": { "describe": { "columns": [], @@ -9378,19 +9331,6 @@ }, "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET status ='failed', error= $1, updated_at = now()\n WHERE id = $2\n " }, - "b14d9a82e6b0a4174dde61642d3abc001cd8cb80d988eb81a685255e3ce920de": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8Array", - "ByteaArray" - ] - } - }, - "query": "UPDATE miniblocks SET hash = u.hash FROM UNNEST($1::bigint[], $2::bytea[]) AS u(number, hash) WHERE miniblocks.number = u.number\n " - }, "b250f4cb646081c8c0296a286d3fd921a1aefb310951a1ea25ec0fc533ed32ab": { "describe": { "columns": [ @@ -10850,34 +10790,6 @@ }, "query": "INSERT INTO contract_verification_requests ( contract_address, source_code, contract_name, zk_compiler_version, compiler_version, optimization_used, optimizer_mode, constructor_arguments, is_system, status, created_at, updated_at )\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, 'queued', now(), now()) RETURNING id" }, - "e409b39a5e62a3a4ec5d3b6aae4935c13b93129a22ffe6a0b68b5ade1f6082c8": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "hash", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int8" - ] - } - }, - "query": "SELECT number, hash FROM miniblocks WHERE number >= $1 and protocol_version = $2 ORDER BY number LIMIT $3" - }, "e429061bd0f67910ad8676a34f2b89a051a6df3097c8afde81a491c342a10e3a": { "describe": { "columns": [ diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index cd894212c3c..ca5018ae51e 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -36,75 +36,6 @@ impl BlocksDal<'_, '_> { 
Ok(count == 0) } - pub async fn get_miniblock_hashes_from_date( - &mut self, - timestamp: u64, - limit: u32, - version: ProtocolVersionId, - ) -> sqlx::Result> { - let number = sqlx::query!( - "SELECT number from miniblocks where timestamp > $1 ORDER BY number ASC LIMIT 1", - timestamp as i64 - ) - .fetch_one(self.storage.conn()) - .await? - .number; - self.storage - .blocks_dal() - .get_miniblocks_since_block(number, limit, version) - .await - } - - pub async fn get_last_miniblocks_for_version( - &mut self, - limit: u32, - version: ProtocolVersionId, - ) -> sqlx::Result> { - let minibloks = sqlx::query!( - "SELECT number, hash FROM miniblocks WHERE protocol_version = $1 ORDER BY number DESC LIMIT $2", - version as i32, - limit as i32 - ) - .fetch_all(self.storage.conn()) - .await? - .iter() - .map(|block| { - ( - MiniblockNumber(block.number as u32), - H256::from_slice(&block.hash), - ) - }) - .collect(); - - Ok(minibloks) - } - - pub async fn get_miniblocks_since_block( - &mut self, - number: i64, - limit: u32, - version: ProtocolVersionId, - ) -> sqlx::Result> { - let minibloks = sqlx::query!( - "SELECT number, hash FROM miniblocks WHERE number >= $1 and protocol_version = $2 ORDER BY number LIMIT $3", - number, - version as i32, - limit as i32 - ) - .fetch_all(self.storage.conn()) - .await? - .iter() - .map(|block| { - ( - MiniblockNumber(block.number as u32), - H256::from_slice(&block.hash), - ) - }) - .collect(); - - Ok(minibloks) - } - pub async fn get_sealed_l1_batch_number(&mut self) -> anyhow::Result { let number = sqlx::query!( "SELECT MAX(number) as \"number\" FROM l1_batches WHERE is_finished = TRUE" @@ -465,7 +396,6 @@ impl BlocksDal<'_, '_> { .await?; Ok(()) } - /// Sets consensus-related fields for the specified miniblock. 
pub async fn set_miniblock_consensus_fields( &mut self, @@ -487,30 +417,6 @@ impl BlocksDal<'_, '_> { Ok(()) } - pub async fn update_hashes( - &mut self, - number_and_hashes: &[(MiniblockNumber, H256)], - ) -> sqlx::Result<()> { - let mut numbers = vec![]; - let mut hashes = vec![]; - for (number, hash) in number_and_hashes { - numbers.push(number.0 as i64); - hashes.push(hash.as_bytes().to_vec()); - } - - sqlx::query!( - "UPDATE miniblocks SET hash = u.hash \ - FROM UNNEST($1::bigint[], $2::bytea[]) AS u(number, hash) \ - WHERE miniblocks.number = u.number - ", - &numbers, - &hashes - ) - .execute(self.storage.conn()) - .await?; - Ok(()) - } - pub async fn get_last_sealed_miniblock_header( &mut self, ) -> sqlx::Result> { diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index e7b49fedddc..3742c92fca1 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -11,9 +11,9 @@ use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::ConnectionPool; use zksync_types::{ - block::legacy_miniblock_hash, ethabi::Address, l1::L1Tx, l2::L2Tx, - protocol_version::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, L1BatchNumber, - L1BlockNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, U256, + ethabi::Address, l1::L1Tx, l2::L2Tx, protocol_version::ProtocolUpgradeTx, + witness_block_state::WitnessBlockState, L1BatchNumber, L1BlockNumber, L2ChainId, + MiniblockNumber, ProtocolVersionId, Transaction, H256, U256, }; use zksync_utils::{be_words_to_bytes, bytes_to_be_words}; @@ -106,64 +106,6 @@ impl ExternalIO { } } - pub async fn recalculate_miniblock_hashes(&self) { - let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); - let last_blocks: Vec<_> = storage - .blocks_dal() - 
.get_last_miniblocks_for_version(5, ProtocolVersionId::Version12) - .await - .unwrap(); - - // All last miniblocks are good that means we have already applied this migrations - if last_blocks - .into_iter() - .all(|(number, hash)| legacy_miniblock_hash(number) == hash) - { - return; - } - - // August 29 2023 - let timestamp = 1693267200; - let mut miniblock_and_hashes = storage - .blocks_dal() - .get_miniblock_hashes_from_date(timestamp, 1000, ProtocolVersionId::Version12) - .await - .unwrap(); - - let mut updated_hashes = vec![]; - - let mut last_miniblock_number = 0; - while !miniblock_and_hashes.is_empty() { - for (number, hash) in miniblock_and_hashes { - if hash != legacy_miniblock_hash(number) { - updated_hashes.push((number, legacy_miniblock_hash(number))) - } - last_miniblock_number = number.0; - } - if !updated_hashes.is_empty() { - storage - .blocks_dal() - .update_hashes(&updated_hashes) - .await - .unwrap(); - updated_hashes = vec![]; - } - - miniblock_and_hashes = storage - .blocks_dal() - .get_miniblocks_since_block( - last_miniblock_number as i64 + 1, - 1000, - ProtocolVersionId::Version12, - ) - .await - .unwrap(); - tracing::info!("Last updated miniblock {}", last_miniblock_number); - } - - tracing::info!("Finish the hash recalculation") - } - async fn load_previous_l1_batch_hash(&self) -> U256 { let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); From e59a7c6552a9c99e56f0d37103386acac6a9c1b5 Mon Sep 17 00:00:00 2001 From: Roman Brodetski Date: Thu, 23 Nov 2023 12:01:08 +0300 Subject: [PATCH 035/115] feat(state-keeper): Remove computational gas limit from boojum protocol version (#536) **What** Removes `ComputationalGasCriterion` from post-Boojum versions **Why** * There is no limit on total number of circuits in post-Boojum * We want to test proving logic when we spend a lot of comp. 
gas in one l1 batch (this will result in tens of thousands of prover jobs - we want to test the system under that circumstances) Safety: we do need to properly test this on stage before we upgrade mainnet. We should also discuss if want to introduce _some_ limit - even though there is no hard limit --- .../criteria/geometry_seal_criteria.rs | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs b/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs index 1ec0c66e4d7..a102796d12a 100644 --- a/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs +++ b/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs @@ -119,15 +119,20 @@ impl MetricExtractor for MaxCyclesCriterion { impl MetricExtractor for ComputationalGasCriterion { const PROM_METRIC_CRITERION_NAME: &'static str = "computational_gas"; - fn limit_per_block(_protocol_version_id: ProtocolVersionId) -> usize { - // We subtract constant to take into account that circuits may be not fully filled. - // This constant should be greater than number of circuits types - // but we keep it larger to be on the safe side. - const MARGIN_NUMBER_OF_CIRCUITS: usize = 100; - const MAX_NUMBER_OF_MUTLIINSTANCE_CIRCUITS: usize = - SCHEDULER_UPPER_BOUND as usize - MARGIN_NUMBER_OF_CIRCUITS; - - MAX_NUMBER_OF_MUTLIINSTANCE_CIRCUITS * ERGS_PER_CIRCUIT as usize + fn limit_per_block(protocol_version_id: ProtocolVersionId) -> usize { + if protocol_version_id.is_pre_boojum() { + // We subtract constant to take into account that circuits may be not fully filled. + // This constant should be greater than number of circuits types + // but we keep it larger to be on the safe side. 
+ const MARGIN_NUMBER_OF_CIRCUITS: usize = 100; + const MAX_NUMBER_OF_MUTLIINSTANCE_CIRCUITS: usize = + SCHEDULER_UPPER_BOUND as usize - MARGIN_NUMBER_OF_CIRCUITS; + + MAX_NUMBER_OF_MUTLIINSTANCE_CIRCUITS * ERGS_PER_CIRCUIT as usize + } else { + // In boojum there is no limit for computational gas. + usize::MAX + } } fn extract(metrics: &ExecutionMetrics, _writes: &DeduplicatedWritesMetrics) -> usize { From a49b61d7769f9dd7b4cbc4905f8f8a23abfb541c Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 23 Nov 2023 11:21:01 +0200 Subject: [PATCH 036/115] feat(en): Implement gossip fetcher (#371) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ ...i.e., a fetcher component that would use gossip network instead of JSON-RPC API. Fixes BFT-326 and BFT-368. ## Why ❔ This can be used by external nodes to sync with the main node. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- Cargo.lock | 33 +- core/bin/external_node/src/main.rs | 7 +- core/lib/types/Cargo.toml | 6 +- core/lib/zksync_core/Cargo.toml | 12 +- core/lib/zksync_core/src/consensus/mod.rs | 8 +- core/lib/zksync_core/src/consensus/payload.rs | 39 +- core/lib/zksync_core/src/lib.rs | 1 + .../zksync_core/src/sync_layer/external_io.rs | 47 +- .../lib/zksync_core/src/sync_layer/fetcher.rs | 214 ++++---- .../src/sync_layer/gossip/buffered/mod.rs | 340 +++++++++++++ .../src/sync_layer/gossip/buffered/tests.rs | 287 +++++++++++ .../src/sync_layer/gossip/conversions.rs | 57 +++ .../src/sync_layer/gossip/metrics.rs | 29 ++ .../zksync_core/src/sync_layer/gossip/mod.rs | 93 ++++ .../src/sync_layer/gossip/storage/mod.rs | 219 +++++++++ .../src/sync_layer/gossip/storage/tests.rs | 127 +++++ .../src/sync_layer/gossip/tests.rs | 339 +++++++++++++ .../src/sync_layer/gossip/utils.rs | 48 ++ core/lib/zksync_core/src/sync_layer/mod.rs | 5 +- .../zksync_core/src/sync_layer/sync_action.rs | 35 +- core/lib/zksync_core/src/sync_layer/tests.rs | 225 ++++----- prover/Cargo.lock | 464 +++++++++++++++++- 22 files changed, 2364 insertions(+), 271 deletions(-) create mode 100644 core/lib/zksync_core/src/sync_layer/gossip/buffered/mod.rs create mode 100644 core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs create mode 100644 core/lib/zksync_core/src/sync_layer/gossip/conversions.rs create mode 100644 core/lib/zksync_core/src/sync_layer/gossip/metrics.rs create mode 100644 core/lib/zksync_core/src/sync_layer/gossip/mod.rs create mode 100644 core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs create mode 100644 core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs create mode 100644 core/lib/zksync_core/src/sync_layer/gossip/tests.rs create mode 100644 core/lib/zksync_core/src/sync_layer/gossip/utils.rs diff --git a/Cargo.lock b/Cargo.lock index ff4520eeed5..2527bb55022 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3320,9 +3320,9 @@ dependencies = [ [[package]] name = "indexmap" 
-version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", "hashbrown 0.14.2", @@ -4971,7 +4971,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.0.2", + "indexmap 2.1.0", ] [[package]] @@ -7363,7 +7363,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.2", + "indexmap 2.1.0", "toml_datetime", "winnow", ] @@ -8514,7 +8514,7 @@ dependencies = [ [[package]] name = "zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" dependencies = [ "anyhow", "once_cell", @@ -8541,7 +8541,7 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" dependencies = [ "anyhow", "once_cell", @@ -8561,7 +8561,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" dependencies = [ "anyhow", "blst", @@ -8579,7 +8579,7 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" dependencies = [ "anyhow", "prost", @@ -8601,7 +8601,7 @@ dependencies = [ [[package]] name = "zksync_consensus_network" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" dependencies = [ "anyhow", "async-trait", @@ -8625,7 +8625,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" dependencies = [ "anyhow", "bit-vec", @@ -8644,7 +8644,7 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" dependencies = [ "anyhow", "async-trait", @@ -8661,7 +8661,7 @@ dependencies = [ 
[[package]] name = "zksync_consensus_sync_blocks" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" dependencies = [ "anyhow", "thiserror", @@ -8676,7 +8676,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" dependencies = [ "thiserror", "zksync_concurrency", @@ -9001,7 +9001,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" dependencies = [ "anyhow", "bit-vec", @@ -9012,9 +9012,6 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "tokio", - "tracing", - "tracing-subscriber", "zksync_concurrency", "zksync_protobuf_build", ] @@ -9022,7 +9019,7 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=bdf9ed0af965cc7fa32d6c46a35ea065779ede8b#bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" dependencies = [ "anyhow", "heck 0.4.1", diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs 
index 4e12315d930..52f3353dc07 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -24,9 +24,8 @@ use zksync_core::{ setup_sigint_handler, state_keeper::{L1BatchExecutorBuilder, MainBatchExecutorBuilder, ZkSyncStateKeeper}, sync_layer::{ - batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, - fetcher::MainNodeFetcherCursor, genesis::perform_genesis_if_needed, ActionQueue, - MainNodeClient, SyncState, + batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, fetcher::FetcherCursor, + genesis::perform_genesis_if_needed, ActionQueue, MainNodeClient, SyncState, }, }; use zksync_dal::{healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; @@ -128,7 +127,7 @@ async fn init_tasks( .await .context("failed to build a connection pool for `MainNodeFetcher`")?; let mut storage = pool.access_storage_tagged("sync_layer").await?; - MainNodeFetcherCursor::new(&mut storage) + FetcherCursor::new(&mut storage) .await .context("failed to load `MainNodeFetcher` cursor from Postgres")? 
}; diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 117cbdcec8d..6bf130bc70c 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -23,8 +23,8 @@ codegen = { git = "https://github.com/matter-labs/solidity_plonk_verifier.git", zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" } zk_evm_1_4_0 = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.0", package = "zk_evm" } zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } anyhow = "1.0.75" chrono = { version = "0.4", features = ["serde"] } @@ -55,4 +55,4 @@ tokio = { version = "1", features = ["rt", "macros"] } serde_with = { version = "1", features = ["hex"] } [build-dependencies] -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } diff --git a/core/lib/zksync_core/Cargo.toml b/core/lib/zksync_core/Cargo.toml index 6c8e43763fd..2bccff98ae9 100644 --- a/core/lib/zksync_core/Cargo.toml +++ b/core/lib/zksync_core/Cargo.toml @@ -40,11 +40,11 @@ vlog = { path = "../vlog" } multivm = { path = "../multivm" } # Consensus 
dependenices -zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } -zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } -zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } +zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } +zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } +zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } prost = "0.12.1" serde = { version = "1.0", features = ["derive"] } @@ -98,4 +98,4 @@ tempfile = "3.0.2" test-casing = "0.1.2" [build-dependencies] -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "bdf9ed0af965cc7fa32d6c46a35ea065779ede8b" } +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = 
"ed71b2e817c980a2daffef6a01885219e1dc6fa0" } diff --git a/core/lib/zksync_core/src/consensus/mod.rs b/core/lib/zksync_core/src/consensus/mod.rs index 08a02e1dd2a..a229666e76c 100644 --- a/core/lib/zksync_core/src/consensus/mod.rs +++ b/core/lib/zksync_core/src/consensus/mod.rs @@ -1,10 +1,6 @@ -use anyhow::Context as _; -use zksync_concurrency::{ctx, time}; -use zksync_consensus_roles::validator; -use zksync_types::block::ConsensusBlockFields; -use zksync_types::{Address, MiniblockNumber}; +//! Consensus-related functionality. mod payload; mod proto; -pub(crate) use payload::Payload; +pub(crate) use self::payload::Payload; diff --git a/core/lib/zksync_core/src/consensus/payload.rs b/core/lib/zksync_core/src/consensus/payload.rs index 818d63d7414..8d53fdf21f3 100644 --- a/core/lib/zksync_core/src/consensus/payload.rs +++ b/core/lib/zksync_core/src/consensus/payload.rs @@ -1,9 +1,12 @@ use anyhow::Context as _; + use zksync_consensus_roles::validator; use zksync_protobuf::{required, ProtoFmt}; use zksync_types::api::en::SyncBlock; use zksync_types::{Address, L1BatchNumber, Transaction, H256}; +/// L2 block (= miniblock) payload. 
+#[derive(Debug)] pub(crate) struct Payload { pub hash: H256, pub l1_batch_number: L1BatchNumber, @@ -17,28 +20,31 @@ pub(crate) struct Payload { impl ProtoFmt for Payload { type Proto = super::proto::Payload; - fn read(r: &Self::Proto) -> anyhow::Result { - let mut transactions = vec![]; - for (i, t) in r.transactions.iter().enumerate() { + + fn read(message: &Self::Proto) -> anyhow::Result { + let mut transactions = Vec::with_capacity(message.transactions.len()); + for (i, tx) in message.transactions.iter().enumerate() { transactions.push( - required(&t.json) - .and_then(|s| Ok(serde_json::from_str(&*s)?)) + required(&tx.json) + .and_then(|json_str| Ok(serde_json::from_str(json_str)?)) .with_context(|| format!("transaction[{i}]"))?, ); } + Ok(Self { - hash: required(&r.hash) - .and_then(|h| Ok(<[u8; 32]>::try_from(h.as_slice())?.into())) + hash: required(&message.hash) + .and_then(|bytes| Ok(<[u8; 32]>::try_from(bytes.as_slice())?.into())) .context("hash")?, l1_batch_number: L1BatchNumber( - *required(&r.l1_batch_number).context("l1_batch_number")?, + *required(&message.l1_batch_number).context("l1_batch_number")?, ), - timestamp: *required(&r.timestamp).context("timestamp")?, - l1_gas_price: *required(&r.l1_gas_price).context("l1_gas_price")?, - l2_fair_gas_price: *required(&r.l2_fair_gas_price).context("l2_fair_gas_price")?, - virtual_blocks: *required(&r.virtual_blocks).context("virtual_blocks")?, - operator_address: required(&r.operator_address) - .and_then(|a| Ok(<[u8; 20]>::try_from(a.as_slice())?.into())) + timestamp: *required(&message.timestamp).context("timestamp")?, + l1_gas_price: *required(&message.l1_gas_price).context("l1_gas_price")?, + l2_fair_gas_price: *required(&message.l2_fair_gas_price) + .context("l2_fair_gas_price")?, + virtual_blocks: *required(&message.virtual_blocks).context("virtual_blocks")?, + operator_address: required(&message.operator_address) + .and_then(|bytes| Ok(<[u8; 20]>::try_from(bytes.as_slice())?.into())) 
.context("operator_address")?, transactions, }) @@ -67,6 +73,7 @@ impl ProtoFmt for Payload { impl TryFrom for Payload { type Error = anyhow::Error; + fn try_from(block: SyncBlock) -> anyhow::Result { Ok(Self { hash: block.hash.unwrap_or_default(), @@ -82,8 +89,8 @@ impl TryFrom for Payload { } impl Payload { - pub fn decode(p: &validator::Payload) -> anyhow::Result { - zksync_protobuf::decode(&p.0) + pub fn decode(payload: &validator::Payload) -> anyhow::Result { + zksync_protobuf::decode(&payload.0) } pub fn encode(&self) -> validator::Payload { diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 94d050f36ad..d52fd76661f 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -45,6 +45,7 @@ use zksync_verification_key_server::get_cached_commitments; pub mod api_server; pub mod basic_witness_input_producer; pub mod block_reverter; +mod consensus; pub mod consistency_checker; pub mod data_fetchers; pub mod eth_sender; diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index 3742c92fca1..dcc38334a99 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -215,7 +215,10 @@ impl IoSealCriteria for ExternalIO { } fn should_seal_miniblock(&mut self, _manager: &UpdatesManager) -> bool { - matches!(self.actions.peek_action(), Some(SyncAction::SealMiniblock)) + matches!( + self.actions.peek_action(), + Some(SyncAction::SealMiniblock(_)) + ) } } @@ -368,7 +371,7 @@ impl StateKeeperIO for ExternalIO { virtual_blocks, }); } - Some(SyncAction::SealBatch { virtual_blocks }) => { + Some(SyncAction::SealBatch { virtual_blocks, .. }) => { // We've reached the next batch, so this situation would be handled by the batch sealer. // No need to pop the action from the queue. 
// It also doesn't matter which timestamp we return, since there will be no more miniblocks in this @@ -434,12 +437,9 @@ impl StateKeeperIO for ExternalIO { } async fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) { - match self.actions.pop_action() { - Some(SyncAction::SealMiniblock) => {} - other => panic!( - "State keeper requested to seal miniblock, but the next action is {:?}", - other - ), + let action = self.actions.pop_action(); + let Some(SyncAction::SealMiniblock(consensus)) = action else { + panic!("State keeper requested to seal miniblock, but the next action is {action:?}"); }; let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); @@ -481,6 +481,16 @@ impl StateKeeperIO for ExternalIO { self.l2_erc20_bridge_addr, ); command.seal(&mut transaction).await; + + // We want to add miniblock consensus fields atomically with the miniblock data so that we + // don't need to deal with corner cases (e.g., a miniblock w/o consensus fields). + if let Some(consensus) = &consensus { + transaction + .blocks_dal() + .set_miniblock_consensus_fields(self.current_miniblock_number, consensus) + .await + .unwrap(); + } transaction.commit().await.unwrap(); self.sync_state @@ -497,23 +507,32 @@ impl StateKeeperIO for ExternalIO { l1_batch_env: &L1BatchEnv, finished_batch: FinishedL1Batch, ) -> anyhow::Result<()> { - match self.actions.pop_action() { - Some(SyncAction::SealBatch { .. }) => {} - other => anyhow::bail!( - "State keeper requested to seal the batch, but the next action is {other:?}" - ), + let action = self.actions.pop_action(); + let Some(SyncAction::SealBatch { consensus, .. 
}) = action else { + anyhow::bail!( + "State keeper requested to seal the batch, but the next action is {action:?}" + ); }; let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); + let mut transaction = storage.start_transaction().await.unwrap(); updates_manager .seal_l1_batch( - &mut storage, + &mut transaction, self.current_miniblock_number, l1_batch_env, finished_batch, self.l2_erc20_bridge_addr, ) .await; + if let Some(consensus) = &consensus { + transaction + .blocks_dal() + .set_miniblock_consensus_fields(self.current_miniblock_number, consensus) + .await + .unwrap(); + } + transaction.commit().await.unwrap(); tracing::info!("Batch {} is sealed", self.current_l1_batch_number); diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index 02d8d3b1137..4aabd163f21 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -4,7 +4,10 @@ use tokio::sync::watch; use std::time::Duration; use zksync_dal::StorageProcessor; -use zksync_types::{L1BatchNumber, MiniblockNumber, H256}; +use zksync_types::{ + api::en::SyncBlock, block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber, + ProtocolVersionId, H256, +}; use zksync_web3_decl::jsonrpsee::core::Error as RpcError; use super::{ @@ -18,27 +21,67 @@ use crate::metrics::{TxStage, APP_METRICS}; const DELAY_INTERVAL: Duration = Duration::from_millis(500); const RETRY_DELAY_INTERVAL: Duration = Duration::from_secs(5); +/// Common denominator for blocks fetched by an external node. 
+#[derive(Debug)] +pub(super) struct FetchedBlock { + pub number: MiniblockNumber, + pub l1_batch_number: L1BatchNumber, + pub last_in_batch: bool, + pub protocol_version: ProtocolVersionId, + pub timestamp: u64, + pub hash: H256, + pub l1_gas_price: u64, + pub l2_fair_gas_price: u64, + pub virtual_blocks: u32, + pub operator_address: Address, + pub transactions: Vec, + pub consensus: Option, +} + +impl FetchedBlock { + fn from_sync_block(block: SyncBlock) -> Self { + Self { + number: block.number, + l1_batch_number: block.l1_batch_number, + last_in_batch: block.last_in_batch, + protocol_version: block.protocol_version, + timestamp: block.timestamp, + hash: block.hash.unwrap_or_default(), + l1_gas_price: block.l1_gas_price, + l2_fair_gas_price: block.l2_fair_gas_price, + virtual_blocks: block.virtual_blocks.unwrap_or(0), + operator_address: block.operator_address, + transactions: block + .transactions + .expect("Transactions are always requested"), + consensus: block.consensus, + } + } +} + /// Cursor of [`MainNodeFetcher`]. #[derive(Debug)] -pub struct MainNodeFetcherCursor { +pub struct FetcherCursor { // Fields are public for testing purposes. - pub(super) miniblock: MiniblockNumber, + pub(super) next_miniblock: MiniblockNumber, + pub(super) prev_miniblock_hash: H256, pub(super) l1_batch: L1BatchNumber, } -impl MainNodeFetcherCursor { - /// Loads the cursor +impl FetcherCursor { + /// Loads the cursor from Postgres. pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { let last_sealed_l1_batch_header = storage .blocks_dal() .get_newest_l1_batch_header() .await .context("Failed getting newest L1 batch header")?; - let last_miniblock_number = storage + let last_miniblock_header = storage .blocks_dal() - .get_sealed_miniblock_number() + .get_last_sealed_miniblock_header() .await - .context("Failed getting sealed miniblock number")?; + .context("Failed getting sealed miniblock header")? 
+ .context("No miniblocks sealed")?; // It's important to know whether we have opened a new batch already or just sealed the previous one. // Depending on it, we must either insert `OpenBatch` item into the queue, or not. @@ -49,7 +92,8 @@ impl MainNodeFetcherCursor { .context("Failed checking whether pending L1 batch exists")?; // Miniblocks are always fully processed. - let miniblock = last_miniblock_number + 1; + let next_miniblock = last_miniblock_header.number + 1; + let prev_miniblock_hash = last_miniblock_header.hash; // Decide whether the next batch should be explicitly opened or not. let l1_batch = if was_new_batch_open { // No `OpenBatch` action needed. @@ -60,11 +104,75 @@ impl MainNodeFetcherCursor { }; Ok(Self { - miniblock, + next_miniblock, l1_batch, + prev_miniblock_hash, }) } + pub(super) fn advance(&mut self, block: FetchedBlock) -> Vec { + assert_eq!(block.number, self.next_miniblock); + + let mut new_actions = Vec::new(); + if block.l1_batch_number != self.l1_batch { + assert_eq!( + block.l1_batch_number, + self.l1_batch.next(), + "Unexpected batch number in the next received miniblock" + ); + + tracing::info!( + "New L1 batch: {}. Timestamp: {}", + block.l1_batch_number, + block.timestamp + ); + + new_actions.push(SyncAction::OpenBatch { + number: block.l1_batch_number, + timestamp: block.timestamp, + l1_gas_price: block.l1_gas_price, + l2_fair_gas_price: block.l2_fair_gas_price, + operator_address: block.operator_address, + protocol_version: block.protocol_version, + // `block.virtual_blocks` can be `None` only for old VM versions where it's not used, so it's fine to provide any number. + first_miniblock_info: (block.number, block.virtual_blocks), + prev_miniblock_hash: self.prev_miniblock_hash, + }); + FETCHER_METRICS.l1_batch[&L1BatchStage::Open].set(block.l1_batch_number.0.into()); + self.l1_batch += 1; + } else { + // New batch implicitly means a new miniblock, so we only need to push the miniblock action + // if it's not a new batch. 
+ new_actions.push(SyncAction::Miniblock { + number: block.number, + timestamp: block.timestamp, + // `block.virtual_blocks` can be `None` only for old VM versions where it's not used, so it's fine to provide any number. + virtual_blocks: block.virtual_blocks, + }); + FETCHER_METRICS.miniblock.set(block.number.0.into()); + } + + APP_METRICS.processed_txs[&TxStage::added_to_mempool()] + .inc_by(block.transactions.len() as u64); + new_actions.extend(block.transactions.into_iter().map(SyncAction::from)); + + // Last miniblock of the batch is a "fictive" miniblock and would be replicated locally. + // We don't need to seal it explicitly, so we only put the seal miniblock command if it's not the last miniblock. + if block.last_in_batch { + new_actions.push(SyncAction::SealBatch { + // `block.virtual_blocks` can be `None` only for old VM versions where it's not used, so it's fine to provide any number. + virtual_blocks: block.virtual_blocks, + consensus: block.consensus, + }); + } else { + new_actions.push(SyncAction::SealMiniblock(block.consensus)); + } + self.next_miniblock += 1; + self.prev_miniblock_hash = block.hash; + + new_actions + } + /// Builds a fetcher from this cursor. pub fn into_fetcher( self, @@ -87,7 +195,7 @@ impl MainNodeFetcherCursor { #[derive(Debug)] pub struct MainNodeFetcher { client: CachingMainNodeClient, - cursor: MainNodeFetcherCursor, + cursor: FetcherCursor, actions: ActionQueueSender, sync_state: SyncState, stop_receiver: watch::Receiver, @@ -97,7 +205,7 @@ impl MainNodeFetcher { pub async fn run(mut self) -> anyhow::Result<()> { tracing::info!( "Starting the fetcher routine. Initial miniblock: {}, initial l1 batch: {}", - self.cursor.miniblock, + self.cursor.next_miniblock, self.cursor.l1_batch ); // Run the main routine and reconnect upon the network errors. 
@@ -137,7 +245,7 @@ impl MainNodeFetcher { self.sync_state.set_main_node_block(last_main_node_block); self.client - .populate_miniblocks_cache(self.cursor.miniblock, last_main_node_block) + .populate_miniblocks_cache(self.cursor.next_miniblock, last_main_node_block) .await; let has_action_capacity = self.actions.has_action_capacity(); if has_action_capacity { @@ -162,84 +270,26 @@ impl MainNodeFetcher { async fn fetch_next_miniblock(&mut self) -> anyhow::Result { let total_latency = FETCHER_METRICS.fetch_next_miniblock.start(); let request_latency = FETCHER_METRICS.requests[&FetchStage::SyncL2Block].start(); - let Some(block) = self.client.fetch_l2_block(self.cursor.miniblock).await? else { - return Ok(false); - }; - - // This will be fetched from cache. - let prev_block = self + let Some(block) = self .client - .fetch_l2_block(self.cursor.miniblock - 1) + .fetch_l2_block(self.cursor.next_miniblock) .await? - .expect("Previous block must exist"); + else { + return Ok(false); + }; request_latency.observe(); - let mut new_actions = Vec::new(); - if block.l1_batch_number != self.cursor.l1_batch { - assert_eq!( - block.l1_batch_number, - self.cursor.l1_batch.next(), - "Unexpected batch number in the next received miniblock" - ); - - tracing::info!( - "New batch: {}. Timestamp: {}", - block.l1_batch_number, - block.timestamp - ); - - new_actions.push(SyncAction::OpenBatch { - number: block.l1_batch_number, - timestamp: block.timestamp, - l1_gas_price: block.l1_gas_price, - l2_fair_gas_price: block.l2_fair_gas_price, - operator_address: block.operator_address, - protocol_version: block.protocol_version, - // `block.virtual_blocks` can be `None` only for old VM versions where it's not used, so it's fine to provide any number. - first_miniblock_info: (block.number, block.virtual_blocks.unwrap_or(0)), - // Same for `prev_block.hash` as above. 
- prev_miniblock_hash: prev_block.hash.unwrap_or_else(H256::zero), - }); - FETCHER_METRICS.l1_batch[&L1BatchStage::Open].set(block.l1_batch_number.0.into()); - self.cursor.l1_batch += 1; - } else { - // New batch implicitly means a new miniblock, so we only need to push the miniblock action - // if it's not a new batch. - new_actions.push(SyncAction::Miniblock { - number: block.number, - timestamp: block.timestamp, - // `block.virtual_blocks` can be `None` only for old VM versions where it's not used, so it's fine to provide any number. - virtual_blocks: block.virtual_blocks.unwrap_or(0), - }); - FETCHER_METRICS.miniblock.set(block.number.0.into()); - } - - let txs: Vec = block - .transactions - .expect("Transactions are always requested"); - APP_METRICS.processed_txs[&TxStage::added_to_mempool()].inc_by(txs.len() as u64); - new_actions.extend(txs.into_iter().map(SyncAction::from)); - - // Last miniblock of the batch is a "fictive" miniblock and would be replicated locally. - // We don't need to seal it explicitly, so we only put the seal miniblock command if it's not the last miniblock. - if block.last_in_batch { - new_actions.push(SyncAction::SealBatch { - // `block.virtual_blocks` can be `None` only for old VM versions where it's not used, so it's fine to provide any number. - virtual_blocks: block.virtual_blocks.unwrap_or(0), - }); - } else { - new_actions.push(SyncAction::SealMiniblock); - } + let block_number = block.number; + let fetched_block = FetchedBlock::from_sync_block(block); + let new_actions = self.cursor.advance(fetched_block); tracing::info!( - "New miniblock: {} / {}", - block.number, - self.sync_state.get_main_node_block().max(block.number) + "New miniblock: {block_number} / {}", + self.sync_state.get_main_node_block().max(block_number) ); // Forgetting only the previous one because we still need the current one in cache for the next iteration. 
- let prev_miniblock_number = MiniblockNumber(self.cursor.miniblock.0.saturating_sub(1)); + let prev_miniblock_number = MiniblockNumber(block_number.0.saturating_sub(1)); self.client.forget_miniblock(prev_miniblock_number); - self.cursor.miniblock += 1; self.actions.push_actions(new_actions).await; total_latency.observe(); diff --git a/core/lib/zksync_core/src/sync_layer/gossip/buffered/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/buffered/mod.rs new file mode 100644 index 00000000000..41ca50e1cf2 --- /dev/null +++ b/core/lib/zksync_core/src/sync_layer/gossip/buffered/mod.rs @@ -0,0 +1,340 @@ +//! Buffered [`BlockStore`] implementation. + +use async_trait::async_trait; + +use std::{collections::BTreeMap, ops, time::Instant}; + +#[cfg(test)] +use zksync_concurrency::ctx::channel; +use zksync_concurrency::{ + ctx, scope, + sync::{self, watch, Mutex}, +}; +use zksync_consensus_roles::validator::{BlockNumber, FinalBlock}; +use zksync_consensus_storage::{BlockStore, StorageError, StorageResult, WriteBlockStore}; + +#[cfg(test)] +mod tests; + +use super::{ + metrics::{BlockResponseKind, METRICS}, + utils::MissingBlockNumbers, +}; + +/// [`BlockStore`] variation that upholds additional invariants as to how blocks are processed. +/// +/// The invariants are as follows: +/// +/// - Stored blocks always have contiguous numbers; there are no gaps. +/// - Blocks can be scheduled to be added using [`Self::schedule_next_block()`] only. New blocks do not +/// appear in the store otherwise. +#[async_trait] +pub(super) trait ContiguousBlockStore: BlockStore { + /// Schedules a block to be added to the store. Unlike [`WriteBlockStore::put_block()`], + /// there is no expectation that the block is added to the store *immediately*. It's + /// expected that it will be added to the store eventually, which will be signaled via + /// a subscriber returned from [`BlockStore::subscribe_to_block_writes()`]. 
+ /// + /// [`Buffered`] guarantees that this method will only ever be called: + /// + /// - with the next block (i.e., one immediately after [`BlockStore::head_block()`]) + /// - sequentially (i.e., multiple blocks cannot be scheduled at once) + async fn schedule_next_block(&self, ctx: &ctx::Ctx, block: &FinalBlock) -> StorageResult<()>; +} + +/// In-memory buffer or [`FinalBlock`]s received from peers, but not executed and persisted locally yet. +/// +/// Unlike with executed / persisted blocks, there may be gaps between blocks in the buffer. +/// These blocks are shared with peers using the gossip network, but are not persisted and lost +/// on the node restart. +#[derive(Debug)] +struct BlockBuffer { + store_block_number: BlockNumber, + blocks: BTreeMap, +} + +impl BlockBuffer { + fn new(store_block_number: BlockNumber) -> Self { + Self { + store_block_number, + blocks: BTreeMap::new(), + } + } + + fn head_block(&self) -> Option { + self.blocks.values().next_back().cloned() + } + + #[tracing::instrument(level = "trace", skip(self))] + fn set_store_block(&mut self, store_block_number: BlockNumber) { + assert!( + store_block_number > self.store_block_number, + "`ContiguousBlockStore` invariant broken: unexpected new head block number" + ); + + self.store_block_number = store_block_number; + let old_len = self.blocks.len(); + self.blocks = self.blocks.split_off(&store_block_number.next()); + // ^ Removes all entries up to and including `store_block_number` + tracing::debug!("Removed {} blocks from buffer", old_len - self.blocks.len()); + METRICS.buffer_size.set(self.blocks.len()); + } + + fn last_contiguous_block_number(&self) -> BlockNumber { + // By design, blocks in the underlying store are always contiguous. 
+ let mut last_number = self.store_block_number; + for &number in self.blocks.keys() { + if number > last_number.next() { + return last_number; + } + last_number = number; + } + last_number + } + + fn missing_block_numbers(&self, mut range: ops::Range) -> Vec { + // Clamp the range start so we don't produce extra missing blocks. + range.start = range.start.max(self.store_block_number.next()); + if range.is_empty() { + return vec![]; // Return early to not trigger panic in `BTreeMap::range()` + } + + let keys = self.blocks.range(range.clone()).map(|(&num, _)| num); + MissingBlockNumbers::new(range, keys).collect() + } + + fn put_block(&mut self, block: FinalBlock) { + let block_number = block.header.number; + assert!(block_number > self.store_block_number); + // ^ Must be checked previously + self.blocks.insert(block_number, block); + tracing::debug!(%block_number, "Inserted block in buffer"); + METRICS.buffer_size.set(self.blocks.len()); + } +} + +/// Events emitted by [`Buffered`] storage. +#[cfg(test)] +#[derive(Debug)] +pub(super) enum BufferedStorageEvent { + /// Update was received from the underlying storage. + UpdateReceived(BlockNumber), +} + +/// [`BlockStore`] with an in-memory buffer for pending blocks. +/// +/// # Data flow +/// +/// The store is plugged into the `SyncBlocks` actor, so that it can receive new blocks +/// from peers over the gossip network and to share blocks with peers. Received blocks are stored +/// in a [`BlockBuffer`]. The `SyncBlocks` actor doesn't guarantee that blocks are received in order, +/// so we have a background task that waits for successive blocks and feeds them to +/// the underlying storage ([`ContiguousBlockStore`]). The underlying storage executes and persists +/// blocks using the state keeper; see [`PostgresBlockStorage`](super::PostgresBlockStorage) for more details. +/// This logic is largely shared with the old syncing logic using JSON-RPC; the only differing part +/// is producing block data. 
+/// +/// Once a block is processed and persisted by the state keeper, it can be removed from the [`BlockBuffer`]; +/// we do this in another background task. Removing blocks from the buffer ensures that it doesn't +/// grow infinitely; it also allows to track syncing progress via metrics. +#[derive(Debug)] +pub(super) struct Buffered { + inner: T, + inner_subscriber: watch::Receiver, + block_writes_sender: watch::Sender, + buffer: Mutex, + #[cfg(test)] + events_sender: channel::UnboundedSender, +} + +impl Buffered { + /// Creates a new buffered storage. The buffer is initially empty. + pub fn new(store: T) -> Self { + let inner_subscriber = store.subscribe_to_block_writes(); + let store_block_number = *inner_subscriber.borrow(); + tracing::debug!( + store_block_number = store_block_number.0, + "Initialized buffer storage" + ); + Self { + inner: store, + inner_subscriber, + block_writes_sender: watch::channel(store_block_number).0, + buffer: Mutex::new(BlockBuffer::new(store_block_number)), + #[cfg(test)] + events_sender: channel::unbounded().0, + } + } + + #[cfg(test)] + fn set_events_sender(&mut self, sender: channel::UnboundedSender) { + self.events_sender = sender; + } + + pub(super) fn inner(&self) -> &T { + &self.inner + } + + #[cfg(test)] + async fn buffer_len(&self) -> usize { + self.buffer.lock().await.blocks.len() + } + + /// Listens to the updates in the underlying storage. 
+ #[tracing::instrument(level = "trace", skip_all)] + async fn listen_to_updates(&self, ctx: &ctx::Ctx) { + let mut subscriber = self.inner_subscriber.clone(); + loop { + let store_block_number = { + let Ok(number) = sync::changed(ctx, &mut subscriber).await else { + return; // Do not propagate cancellation errors + }; + *number + }; + tracing::debug!( + store_block_number = store_block_number.0, + "Underlying block number updated" + ); + + let Ok(mut buffer) = sync::lock(ctx, &self.buffer).await else { + return; // Do not propagate cancellation errors + }; + buffer.set_store_block(store_block_number); + #[cfg(test)] + self.events_sender + .send(BufferedStorageEvent::UpdateReceived(store_block_number)); + } + } + + /// Schedules blocks in the underlying store as they are pushed to this store. + #[tracing::instrument(level = "trace", skip_all, err)] + async fn schedule_blocks(&self, ctx: &ctx::Ctx) -> StorageResult<()> { + let mut blocks_subscriber = self.block_writes_sender.subscribe(); + + let mut next_scheduled_block_number = { + let Ok(buffer) = sync::lock(ctx, &self.buffer).await else { + return Ok(()); // Do not propagate cancellation errors + }; + buffer.store_block_number.next() + }; + loop { + loop { + let block = match self.buffered_block(ctx, next_scheduled_block_number).await { + Err(ctx::Canceled) => return Ok(()), // Do not propagate cancellation errors + Ok(None) => break, + Ok(Some(block)) => block, + }; + self.inner.schedule_next_block(ctx, &block).await?; + next_scheduled_block_number = next_scheduled_block_number.next(); + } + // Wait until some more blocks are pushed into the buffer. + let Ok(number) = sync::changed(ctx, &mut blocks_subscriber).await else { + return Ok(()); // Do not propagate cancellation errors + }; + tracing::debug!(block_number = number.0, "Received new block"); + } + } + + async fn buffered_block( + &self, + ctx: &ctx::Ctx, + number: BlockNumber, + ) -> ctx::OrCanceled> { + Ok(sync::lock(ctx, &self.buffer) + .await? 
+ .blocks + .get(&number) + .cloned()) + } + + /// Runs background tasks for this store. This method **must** be spawned as a background task + /// which should be running as long at the [`Buffered`] is in use; otherwise, it will function incorrectly. + pub async fn run_background_tasks(&self, ctx: &ctx::Ctx) -> StorageResult<()> { + scope::run!(ctx, |ctx, s| { + s.spawn(async { + self.listen_to_updates(ctx).await; + Ok(()) + }); + self.schedule_blocks(ctx) + }) + .await + } +} + +#[async_trait] +impl BlockStore for Buffered { + async fn head_block(&self, ctx: &ctx::Ctx) -> StorageResult { + let buffered_head_block = sync::lock(ctx, &self.buffer).await?.head_block(); + if let Some(block) = buffered_head_block { + return Ok(block); + } + self.inner.head_block(ctx).await + } + + async fn first_block(&self, ctx: &ctx::Ctx) -> StorageResult { + // First block is always situated in the underlying store + self.inner.first_block(ctx).await + } + + async fn last_contiguous_block_number(&self, ctx: &ctx::Ctx) -> StorageResult { + Ok(sync::lock(ctx, &self.buffer) + .await? + .last_contiguous_block_number()) + } + + async fn block( + &self, + ctx: &ctx::Ctx, + number: BlockNumber, + ) -> StorageResult> { + let started_at = Instant::now(); + { + let buffer = sync::lock(ctx, &self.buffer).await?; + if number > buffer.store_block_number { + let block = buffer.blocks.get(&number).cloned(); + METRICS.get_block_latency[&BlockResponseKind::InMemory] + .observe(started_at.elapsed()); + return Ok(block); + } + } + let block = self.inner.block(ctx, number).await?; + METRICS.get_block_latency[&BlockResponseKind::Persisted].observe(started_at.elapsed()); + Ok(block) + } + + async fn missing_block_numbers( + &self, + ctx: &ctx::Ctx, + range: ops::Range, + ) -> StorageResult> { + // By design, the underlying store has no missing blocks. + Ok(sync::lock(ctx, &self.buffer) + .await? 
+ .missing_block_numbers(range)) + } + + fn subscribe_to_block_writes(&self) -> watch::Receiver { + self.block_writes_sender.subscribe() + } +} + +#[async_trait] +impl WriteBlockStore for Buffered { + async fn put_block(&self, ctx: &ctx::Ctx, block: &FinalBlock) -> StorageResult<()> { + let buffer_block_latency = METRICS.buffer_block_latency.start(); + { + let mut buffer = sync::lock(ctx, &self.buffer).await?; + let block_number = block.header.number; + if block_number <= buffer.store_block_number { + let err = anyhow::anyhow!( + "Cannot replace a block #{block_number} since it is already present in the underlying storage", + ); + return Err(StorageError::Database(err)); + } + buffer.put_block(block.clone()); + } + self.block_writes_sender.send_replace(block.header.number); + buffer_block_latency.observe(); + Ok(()) + } +} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs new file mode 100644 index 00000000000..de5ef8a88cb --- /dev/null +++ b/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs @@ -0,0 +1,287 @@ +//! Tests for buffered storage. 
+ +use assert_matches::assert_matches; +use async_trait::async_trait; +use rand::{rngs::StdRng, seq::SliceRandom, Rng}; +use test_casing::test_casing; + +use std::{iter, ops}; + +use zksync_concurrency::{ + ctx::{self, channel}, + scope, + sync::{self, watch}, + time, +}; +use zksync_consensus_roles::validator::{BlockHeader, BlockNumber, FinalBlock, Payload}; +use zksync_consensus_storage::{BlockStore, InMemoryStorage, StorageResult, WriteBlockStore}; + +use super::*; + +fn init_store(rng: &mut impl Rng) -> (FinalBlock, InMemoryStorage) { + let payload = Payload(vec![]); + let genesis_block = FinalBlock { + header: BlockHeader::genesis(payload.hash()), + payload, + justification: rng.gen(), + }; + let block_store = InMemoryStorage::new(genesis_block.clone()); + (genesis_block, block_store) +} + +fn gen_blocks(rng: &mut impl Rng, genesis_block: FinalBlock, count: usize) -> Vec { + let blocks = iter::successors(Some(genesis_block), |parent| { + let payload = Payload(vec![]); + let header = BlockHeader { + parent: parent.header.hash(), + number: parent.header.number.next(), + payload: payload.hash(), + }; + Some(FinalBlock { + header, + payload, + justification: rng.gen(), + }) + }); + blocks.skip(1).take(count).collect() +} + +#[derive(Debug)] +struct MockContiguousStore { + inner: InMemoryStorage, + block_sender: channel::UnboundedSender, +} + +impl MockContiguousStore { + fn new(inner: InMemoryStorage) -> (Self, channel::UnboundedReceiver) { + let (block_sender, block_receiver) = channel::unbounded(); + let this = Self { + inner, + block_sender, + }; + (this, block_receiver) + } + + async fn run_updates( + &self, + ctx: &ctx::Ctx, + mut block_receiver: channel::UnboundedReceiver, + ) -> StorageResult<()> { + let rng = &mut ctx.rng(); + while let Ok(block) = block_receiver.recv(ctx).await { + let head_block_number = self.head_block(ctx).await?.header.number; + assert_eq!(block.header.number, head_block_number.next()); + + let sleep_duration = 
time::Duration::milliseconds(rng.gen_range(0..5)); + ctx.sleep(sleep_duration).await?; + self.inner.put_block(ctx, &block).await?; + } + Ok(()) + } +} + +#[async_trait] +impl BlockStore for MockContiguousStore { + async fn head_block(&self, ctx: &ctx::Ctx) -> StorageResult { + self.inner.head_block(ctx).await + } + + async fn first_block(&self, ctx: &ctx::Ctx) -> StorageResult { + self.inner.first_block(ctx).await + } + + async fn last_contiguous_block_number(&self, ctx: &ctx::Ctx) -> StorageResult { + self.inner.last_contiguous_block_number(ctx).await + } + + async fn block( + &self, + ctx: &ctx::Ctx, + number: BlockNumber, + ) -> StorageResult> { + self.inner.block(ctx, number).await + } + + async fn missing_block_numbers( + &self, + ctx: &ctx::Ctx, + range: ops::Range, + ) -> StorageResult> { + self.inner.missing_block_numbers(ctx, range).await + } + + fn subscribe_to_block_writes(&self) -> watch::Receiver { + self.inner.subscribe_to_block_writes() + } +} + +#[async_trait] +impl ContiguousBlockStore for MockContiguousStore { + async fn schedule_next_block(&self, _ctx: &ctx::Ctx, block: &FinalBlock) -> StorageResult<()> { + tracing::trace!(block_number = block.header.number.0, "Scheduled next block"); + self.block_sender.send(block.clone()); + Ok(()) + } +} + +#[tracing::instrument(level = "trace", skip(shuffle_blocks))] +async fn test_buffered_storage( + initial_block_count: usize, + block_count: usize, + block_interval: time::Duration, + shuffle_blocks: impl FnOnce(&mut StdRng, &mut [FinalBlock]), +) { + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + + let (genesis_block, block_store) = init_store(rng); + let mut initial_blocks = gen_blocks(rng, genesis_block.clone(), initial_block_count); + for block in &initial_blocks { + block_store.put_block(ctx, block).await.unwrap(); + } + initial_blocks.insert(0, genesis_block.clone()); + + let (block_store, block_receiver) = MockContiguousStore::new(block_store); + let mut buffered_store = 
Buffered::new(block_store); + let (events_sender, mut events_receiver) = channel::unbounded(); + buffered_store.set_events_sender(events_sender); + + // Check initial values returned by the store. + let last_initial_block = initial_blocks.last().unwrap().clone(); + assert_eq!( + buffered_store.head_block(ctx).await.unwrap(), + last_initial_block + ); + for block in &initial_blocks { + let block_result = buffered_store.block(ctx, block.header.number).await; + assert_eq!(block_result.unwrap().as_ref(), Some(block)); + } + let mut subscriber = buffered_store.subscribe_to_block_writes(); + assert_eq!( + *subscriber.borrow(), + BlockNumber(initial_block_count as u64) + ); + + let mut blocks = gen_blocks(rng, last_initial_block, block_count); + shuffle_blocks(rng, &mut blocks); + let last_block_number = BlockNumber((block_count + initial_block_count) as u64); + + scope::run!(ctx, |ctx, s| async { + s.spawn_bg(buffered_store.inner().run_updates(ctx, block_receiver)); + s.spawn_bg(buffered_store.run_background_tasks(ctx)); + + for (idx, block) in blocks.iter().enumerate() { + buffered_store.put_block(ctx, block).await?; + let new_block_number = *sync::changed(ctx, &mut subscriber).await?; + assert_eq!(new_block_number, block.header.number); + + // Check that all written blocks are immediately accessible. + for existing_block in initial_blocks.iter().chain(&blocks[0..=idx]) { + let number = existing_block.header.number; + assert_eq!( + buffered_store.block(ctx, number).await?.as_ref(), + Some(existing_block) + ); + } + assert_eq!(buffered_store.first_block(ctx).await?, genesis_block); + + let expected_head_block = blocks[0..=idx] + .iter() + .max_by_key(|block| block.header.number) + .unwrap(); + assert_eq!(buffered_store.head_block(ctx).await?, *expected_head_block); + + let expected_last_contiguous_block = blocks[(idx + 1)..] 
+ .iter() + .map(|block| block.header.number) + .min() + .map_or(last_block_number, BlockNumber::prev); + assert_eq!( + buffered_store.last_contiguous_block_number(ctx).await?, + expected_last_contiguous_block + ); + + ctx.sleep(block_interval).await?; + } + + let mut inner_subscriber = buffered_store.inner().subscribe_to_block_writes(); + while buffered_store + .inner() + .last_contiguous_block_number(ctx) + .await? + < last_block_number + { + sync::changed(ctx, &mut inner_subscriber).await?; + } + + // Check events emitted by the buffered storage. This also ensures that all underlying storage + // updates are processed before proceeding to the following checks. + let expected_numbers = (initial_block_count as u64 + 1)..=last_block_number.0; + for expected_number in expected_numbers.map(BlockNumber) { + assert_matches!( + events_receiver.recv(ctx).await?, + BufferedStorageEvent::UpdateReceived(number) if number == expected_number + ); + } + + assert_eq!(buffered_store.buffer_len().await, 0); + Ok(()) + }) + .await + .unwrap(); +} + +// Choose intervals so that they are both smaller and larger than the sleep duration in +// `MockContiguousStore::run_updates()`. 
+const BLOCK_INTERVALS: [time::Duration; 4] = [ + time::Duration::ZERO, + time::Duration::milliseconds(3), + time::Duration::milliseconds(5), + time::Duration::milliseconds(10), +]; + +#[test_casing(4, BLOCK_INTERVALS)] +#[tokio::test] +async fn buffered_storage_with_sequential_blocks(block_interval: time::Duration) { + test_buffered_storage(0, 30, block_interval, |_, _| { + // Do not perform shuffling + }) + .await; +} + +#[test_casing(4, BLOCK_INTERVALS)] +#[tokio::test] +async fn buffered_storage_with_random_blocks(block_interval: time::Duration) { + test_buffered_storage(0, 30, block_interval, |rng, blocks| blocks.shuffle(rng)).await; +} + +#[test_casing(4, BLOCK_INTERVALS)] +#[tokio::test] +async fn buffered_storage_with_slightly_shuffled_blocks(block_interval: time::Duration) { + test_buffered_storage(0, 30, block_interval, |rng, blocks| { + for chunk in blocks.chunks_mut(4) { + chunk.shuffle(rng); + } + }) + .await; +} + +#[test_casing(4, BLOCK_INTERVALS)] +#[tokio::test] +async fn buffered_storage_with_initial_blocks(block_interval: time::Duration) { + test_buffered_storage(10, 20, block_interval, |_, _| { + // Do not perform shuffling + }) + .await; +} + +#[test_casing(4, BLOCK_INTERVALS)] +#[tokio::test] +async fn buffered_storage_with_initial_blocks_and_slight_shuffling(block_interval: time::Duration) { + test_buffered_storage(10, 20, block_interval, |rng, blocks| { + for chunk in blocks.chunks_mut(5) { + chunk.shuffle(rng); + } + }) + .await; +} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs new file mode 100644 index 00000000000..8face4e6942 --- /dev/null +++ b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs @@ -0,0 +1,57 @@ +//! Conversion logic between server and consensus types. 
+ +use anyhow::Context as _; + +use zksync_consensus_roles::validator::{BlockHeader, BlockNumber, FinalBlock}; +use zksync_types::{ + api::en::SyncBlock, block::ConsensusBlockFields, MiniblockNumber, ProtocolVersionId, +}; + +use crate::{consensus, sync_layer::fetcher::FetchedBlock}; + +pub(super) fn sync_block_to_consensus_block(mut block: SyncBlock) -> anyhow::Result { + let number = BlockNumber(block.number.0.into()); + let consensus = block.consensus.take().context("Missing consensus fields")?; + let payload: consensus::Payload = block.try_into().context("Missing `SyncBlock` data")?; + let payload = payload.encode(); + let header = BlockHeader { + parent: consensus.parent, + number, + payload: payload.hash(), + }; + Ok(FinalBlock { + header, + payload, + justification: consensus.justification, + }) +} + +impl FetchedBlock { + pub(super) fn from_gossip_block( + block: &FinalBlock, + last_in_batch: bool, + ) -> anyhow::Result { + let number = u32::try_from(block.header.number.0) + .context("Integer overflow converting block number")?; + let payload = consensus::Payload::decode(&block.payload) + .context("Failed deserializing block payload")?; + + Ok(Self { + number: MiniblockNumber(number), + l1_batch_number: payload.l1_batch_number, + last_in_batch, + protocol_version: ProtocolVersionId::latest(), // FIXME + timestamp: payload.timestamp, + hash: payload.hash, + l1_gas_price: payload.l1_gas_price, + l2_fair_gas_price: payload.l2_fair_gas_price, + virtual_blocks: payload.virtual_blocks, + operator_address: payload.operator_address, + transactions: payload.transactions, + consensus: Some(ConsensusBlockFields { + parent: block.header.parent, + justification: block.justification.clone(), + }), + }) + } +} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/metrics.rs b/core/lib/zksync_core/src/sync_layer/gossip/metrics.rs new file mode 100644 index 00000000000..f67c150b99c --- /dev/null +++ b/core/lib/zksync_core/src/sync_layer/gossip/metrics.rs @@ -0,0 +1,29 @@ 
+//! Metrics for gossip-powered syncing. + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit}; + +use std::time::Duration; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "kind", rename_all = "snake_case")] +pub(super) enum BlockResponseKind { + Persisted, + InMemory, +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "external_node_gossip_fetcher")] +pub(super) struct GossipFetcherMetrics { + /// Number of currently buffered unexecuted blocks. + pub buffer_size: Gauge, + /// Latency of a `get_block` call. + #[metrics(unit = Unit::Seconds, buckets = Buckets::LATENCIES)] + pub get_block_latency: Family>, + /// Latency of putting a block into the buffered storage. This may include the time to queue + /// block actions, but does not include block execution. + #[metrics(unit = Unit::Seconds, buckets = Buckets::LATENCIES)] + pub buffer_block_latency: Histogram, +} + +#[vise::register] +pub(super) static METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/zksync_core/src/sync_layer/gossip/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/mod.rs new file mode 100644 index 00000000000..630ded95345 --- /dev/null +++ b/core/lib/zksync_core/src/sync_layer/gossip/mod.rs @@ -0,0 +1,93 @@ +//! Consensus adapter for EN synchronization logic. + +use anyhow::Context as _; +use tokio::sync::watch; + +use std::sync::Arc; + +use zksync_concurrency::{ctx, scope}; +use zksync_consensus_executor::{Executor, ExecutorConfig}; +use zksync_consensus_roles::node; +use zksync_dal::ConnectionPool; + +mod buffered; +mod conversions; +mod metrics; +mod storage; +#[cfg(test)] +mod tests; +mod utils; + +use self::{buffered::Buffered, storage::PostgresBlockStorage}; +use super::{fetcher::FetcherCursor, sync_action::ActionQueueSender}; + +/// Starts fetching L2 blocks using peer-to-peer gossip network. 
+pub async fn run_gossip_fetcher( + pool: ConnectionPool, + actions: ActionQueueSender, + executor_config: ExecutorConfig, + node_key: node::SecretKey, + mut stop_receiver: watch::Receiver, +) -> anyhow::Result<()> { + scope::run!(&ctx::root(), |ctx, s| async { + s.spawn_bg(run_gossip_fetcher_inner( + ctx, + pool, + actions, + executor_config, + node_key, + )); + if stop_receiver.changed().await.is_err() { + tracing::warn!( + "Stop signal sender for gossip fetcher was dropped without sending a signal" + ); + } + tracing::info!("Stop signal received, gossip fetcher is shutting down"); + Ok(()) + }) + .await +} + +async fn run_gossip_fetcher_inner( + ctx: &ctx::Ctx, + pool: ConnectionPool, + actions: ActionQueueSender, + executor_config: ExecutorConfig, + node_key: node::SecretKey, +) -> anyhow::Result<()> { + tracing::info!( + "Starting gossip fetcher with {executor_config:?} and node key {:?}", + node_key.public() + ); + + let mut storage = pool + .access_storage_tagged("sync_layer") + .await + .context("Failed acquiring Postgres connection for cursor")?; + let cursor = FetcherCursor::new(&mut storage).await?; + drop(storage); + + let store = PostgresBlockStorage::new(pool, actions, cursor); + let buffered = Arc::new(Buffered::new(store)); + let store = buffered.inner(); + let executor = Executor::new(executor_config, node_key, buffered.clone()) + .context("Node executor misconfiguration")?; + + scope::run!(ctx, |ctx, s| async { + s.spawn_bg(async { + store + .run_background_tasks(ctx) + .await + .context("`PostgresBlockStorage` background tasks failed") + }); + s.spawn_bg(async { + buffered + .run_background_tasks(ctx) + .await + .context("`Buffered` storage background tasks failed") + }); + + executor.run(ctx).await.context("Node executor terminated") + }) + .await +} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs new file mode 100644 index 00000000000..d4e95c9e2d4 --- /dev/null 
+++ b/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs @@ -0,0 +1,219 @@ +//! Storage implementation based on DAL. + +use anyhow::Context as _; +use async_trait::async_trait; + +use std::ops; + +use zksync_concurrency::{ + ctx, + sync::{self, watch, Mutex}, + time, +}; +use zksync_consensus_roles::validator::{BlockNumber, FinalBlock}; +use zksync_consensus_storage::{BlockStore, StorageError, StorageResult}; +use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_types::{Address, MiniblockNumber}; + +#[cfg(test)] +mod tests; + +use super::{buffered::ContiguousBlockStore, conversions::sync_block_to_consensus_block}; +use crate::sync_layer::{ + fetcher::{FetchedBlock, FetcherCursor}, + sync_action::{ActionQueueSender, SyncAction}, +}; + +#[derive(Debug)] +struct CursorWithCachedBlock { + inner: FetcherCursor, + maybe_last_block_in_batch: Option, +} + +impl From for CursorWithCachedBlock { + fn from(inner: FetcherCursor) -> Self { + Self { + inner, + maybe_last_block_in_batch: None, + } + } +} + +impl CursorWithCachedBlock { + fn advance(&mut self, block: FetchedBlock) -> Vec> { + let mut actions = Vec::with_capacity(2); + if let Some(mut prev_block) = self.maybe_last_block_in_batch.take() { + prev_block.last_in_batch = prev_block.l1_batch_number != block.l1_batch_number; + actions.push(self.inner.advance(prev_block)); + } + + // We take advantage of the fact that the last block in a batch is a *fictive* block that + // does not contain transactions. Thus, any block with transactions cannot be last in an L1 batch. + let can_be_last_in_batch = block.transactions.is_empty(); + if can_be_last_in_batch { + self.maybe_last_block_in_batch = Some(block); + // We cannot convert the block into actions yet, since we don't know whether it seals an L1 batch. + } else { + actions.push(self.inner.advance(block)); + } + actions + } +} + +/// Postgres-based [`BlockStore`] implementation. 
New blocks are scheduled to be written via +/// [`ContiguousBlockStore`] trait, which internally uses an [`ActionQueueSender`] to queue +/// block data (miniblock and L1 batch parameters, transactions) for the state keeper. Block data processing +/// is shared with JSON-RPC-based syncing. +#[derive(Debug)] +pub(super) struct PostgresBlockStorage { + pool: ConnectionPool, + actions: ActionQueueSender, + block_sender: watch::Sender, + cursor: Mutex, +} + +impl PostgresBlockStorage { + /// Creates a new storage handle. `pool` should have multiple connections to work efficiently. + pub fn new(pool: ConnectionPool, actions: ActionQueueSender, cursor: FetcherCursor) -> Self { + let current_block_number = cursor.next_miniblock.0.saturating_sub(1).into(); + Self { + pool, + actions, + block_sender: watch::channel(BlockNumber(current_block_number)).0, + cursor: Mutex::new(cursor.into()), + } + } + + /// Runs background tasks for this store. This method **must** be spawned as a background task + /// which should be running as long at the [`PostgresBlockStorage`] is in use; otherwise, + /// it will function incorrectly. + pub async fn run_background_tasks(&self, ctx: &ctx::Ctx) -> StorageResult<()> { + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); + loop { + let sealed_miniblock_number = match self.sealed_miniblock_number(ctx).await { + Ok(number) => number, + Err(err @ StorageError::Database(_)) => return Err(err), + Err(StorageError::Canceled(_)) => return Ok(()), // Do not propagate cancellation errors + }; + self.block_sender.send_if_modified(|number| { + if *number != sealed_miniblock_number { + *number = sealed_miniblock_number; + true + } else { + false + } + }); + if let Err(ctx::Canceled) = ctx.sleep(POLL_INTERVAL).await { + return Ok(()); // Do not propagate cancellation errors + } + } + } + + async fn storage(&self, ctx: &ctx::Ctx) -> StorageResult> { + ctx.wait(self.pool.access_storage_tagged("sync_layer")) + .await? 
+ .context("Failed to connect to Postgres") + .map_err(StorageError::Database) + } + + async fn block( + ctx: &ctx::Ctx, + storage: &mut StorageProcessor<'_>, + number: MiniblockNumber, + ) -> StorageResult> { + let operator_address = Address::default(); // FIXME: where to get this address from? + let Some(block) = ctx + .wait( + storage + .sync_dal() + .sync_block(number, operator_address, true), + ) + .await? + .with_context(|| format!("Failed getting miniblock #{number} from Postgres")) + .map_err(StorageError::Database)? + else { + return Ok(None); + }; + let block = sync_block_to_consensus_block(block).map_err(StorageError::Database)?; + Ok(Some(block)) + } + + async fn sealed_miniblock_number(&self, ctx: &ctx::Ctx) -> StorageResult { + let mut storage = self.storage(ctx).await?; + let number = ctx + .wait(storage.blocks_dal().get_sealed_miniblock_number()) + .await? + .context("Failed getting sealed miniblock number") + .map_err(StorageError::Database)?; + Ok(BlockNumber(number.0.into())) + } +} + +#[async_trait] +impl BlockStore for PostgresBlockStorage { + async fn head_block(&self, ctx: &ctx::Ctx) -> StorageResult { + let mut storage = self.storage(ctx).await?; + let miniblock_number = ctx + .wait(storage.blocks_dal().get_sealed_miniblock_number()) + .await? + .context("Failed getting sealed miniblock number") + .map_err(StorageError::Database)?; + // ^ The number can get stale, but it's OK for our purposes + Ok(Self::block(ctx, &mut storage, miniblock_number) + .await? + .with_context(|| format!("Miniblock #{miniblock_number} disappeared from Postgres")) + .map_err(StorageError::Database)?) + } + + async fn first_block(&self, ctx: &ctx::Ctx) -> StorageResult { + let mut storage = self.storage(ctx).await?; + Self::block(ctx, &mut storage, MiniblockNumber(0)) + .await? 
+ .context("Genesis miniblock not present in Postgres") + .map_err(StorageError::Database) + } + + async fn last_contiguous_block_number(&self, ctx: &ctx::Ctx) -> StorageResult { + self.sealed_miniblock_number(ctx).await + } + + async fn block( + &self, + ctx: &ctx::Ctx, + number: BlockNumber, + ) -> StorageResult> { + let number = u32::try_from(number.0) + .context("block number is too large") + .map_err(StorageError::Database)?; + let mut storage = self.storage(ctx).await?; + Self::block(ctx, &mut storage, MiniblockNumber(number)).await + } + + async fn missing_block_numbers( + &self, + _ctx: &ctx::Ctx, + _range: ops::Range, + ) -> StorageResult> { + Ok(vec![]) // The storage never has missing blocks by construction + } + + fn subscribe_to_block_writes(&self) -> watch::Receiver { + self.block_sender.subscribe() + } +} + +#[async_trait] +impl ContiguousBlockStore for PostgresBlockStorage { + async fn schedule_next_block(&self, ctx: &ctx::Ctx, block: &FinalBlock) -> StorageResult<()> { + // last_in_batch` is always set to `false` by this call; it is properly set by `CursorWithCachedBlock`. + let fetched_block = + FetchedBlock::from_gossip_block(block, false).map_err(StorageError::Database)?; + let actions = sync::lock(ctx, &self.cursor).await?.advance(fetched_block); + for actions_chunk in actions { + // We don't wrap this in `ctx.wait()` because `PostgresBlockStorage` will get broken + // if it gets reused after context cancellation. + self.actions.push_actions(actions_chunk).await; + } + Ok(()) + } +} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs new file mode 100644 index 00000000000..437c5188330 --- /dev/null +++ b/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs @@ -0,0 +1,127 @@ +//! Tests for Postgres storage implementation. 
+ +use rand::{thread_rng, Rng}; + +use zksync_concurrency::scope; +use zksync_types::L2ChainId; + +use super::*; +use crate::{ + genesis::{ensure_genesis_state, GenesisParams}, + sync_layer::{ + gossip::tests::{ + add_consensus_fields, assert_first_block_actions, assert_second_block_actions, + load_final_block, + }, + tests::run_state_keeper_with_multiple_miniblocks, + ActionQueue, + }, +}; + +const TEST_TIMEOUT: time::Duration = time::Duration::seconds(10); + +#[tokio::test] +async fn block_store_basics_for_postgres() { + let pool = ConnectionPool::test_pool().await; + run_state_keeper_with_multiple_miniblocks(pool.clone()).await; + + let mut storage = pool.access_storage().await.unwrap(); + add_consensus_fields(&mut storage, &thread_rng().gen(), 3).await; + let cursor = FetcherCursor::new(&mut storage).await.unwrap(); + drop(storage); + let (actions_sender, _) = ActionQueue::new(); + let storage = PostgresBlockStorage::new(pool.clone(), actions_sender, cursor); + + let ctx = &ctx::test_root(&ctx::RealClock); + let genesis_block = BlockStore::first_block(&storage, ctx).await.unwrap(); + assert_eq!(genesis_block.header.number, BlockNumber(0)); + let head_block = BlockStore::head_block(&storage, ctx).await.unwrap(); + assert_eq!(head_block.header.number, BlockNumber(2)); + let last_contiguous_block_number = storage.last_contiguous_block_number(ctx).await.unwrap(); + assert_eq!(last_contiguous_block_number, BlockNumber(2)); + + let block = storage + .block(ctx, BlockNumber(1)) + .await + .unwrap() + .expect("no block #1"); + assert_eq!(block.header.number, BlockNumber(1)); + let missing_block = storage.block(ctx, BlockNumber(3)).await.unwrap(); + assert!(missing_block.is_none(), "{missing_block:?}"); +} + +#[tokio::test] +async fn subscribing_to_block_updates_for_postgres() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + if storage.blocks_dal().is_genesis_needed().await.unwrap() { + 
ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + } + let cursor = FetcherCursor::new(&mut storage).await.unwrap(); + // ^ This is logically incorrect (the storage should not be updated other than using + // `ContiguousBlockStore`), but for testing subscriptions this is fine. + drop(storage); + let (actions_sender, _) = ActionQueue::new(); + let storage = PostgresBlockStorage::new(pool.clone(), actions_sender, cursor); + let mut subscriber = storage.subscribe_to_block_writes(); + + let ctx = &ctx::test_root(&ctx::RealClock); + scope::run!(&ctx.with_timeout(TEST_TIMEOUT), |ctx, s| async { + s.spawn_bg(storage.run_background_tasks(ctx)); + s.spawn(async { + run_state_keeper_with_multiple_miniblocks(pool.clone()).await; + Ok(()) + }); + + loop { + let block = *sync::changed(ctx, &mut subscriber).await?; + if block == BlockNumber(2) { + // We should receive at least the last update. + break; + } + } + Ok(()) + }) + .await + .unwrap(); +} + +#[tokio::test] +async fn processing_new_blocks() { + let pool = ConnectionPool::test_pool().await; + run_state_keeper_with_multiple_miniblocks(pool.clone()).await; + + let mut storage = pool.access_storage().await.unwrap(); + add_consensus_fields(&mut storage, &thread_rng().gen(), 3).await; + let first_block = load_final_block(&mut storage, 1).await; + let second_block = load_final_block(&mut storage, 2).await; + storage + .transactions_dal() + .reset_transactions_state(MiniblockNumber(0)) + .await; + storage + .blocks_dal() + .delete_miniblocks(MiniblockNumber(0)) + .await + .unwrap(); + let cursor = FetcherCursor::new(&mut storage).await.unwrap(); + drop(storage); + + let (actions_sender, mut actions) = ActionQueue::new(); + let storage = PostgresBlockStorage::new(pool.clone(), actions_sender, cursor); + let ctx = &ctx::test_root(&ctx::RealClock); + let ctx = &ctx.with_timeout(TEST_TIMEOUT); + storage + .schedule_next_block(ctx, &first_block) + .await + .unwrap(); + 
assert_first_block_actions(&mut actions).await; + + storage + .schedule_next_block(ctx, &second_block) + .await + .unwrap(); + assert_second_block_actions(&mut actions).await; +} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/tests.rs new file mode 100644 index 00000000000..ca3ce29f4d3 --- /dev/null +++ b/core/lib/zksync_core/src/sync_layer/gossip/tests.rs @@ -0,0 +1,339 @@ +//! Tests for consensus adapters for EN synchronization logic. + +use assert_matches::assert_matches; +use test_casing::{test_casing, Product}; + +use zksync_concurrency::{ctx, scope, time}; +use zksync_consensus_executor::testonly::FullValidatorConfig; +use zksync_consensus_roles::validator::{self, FinalBlock}; +use zksync_consensus_storage::{InMemoryStorage, WriteBlockStore}; +use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_types::{block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber}; + +use super::*; +use crate::{ + consensus, + sync_layer::{ + sync_action::SyncAction, + tests::{ + mock_l1_batch_hash_computation, run_state_keeper_with_multiple_l1_batches, + run_state_keeper_with_multiple_miniblocks, StateKeeperHandles, + }, + ActionQueue, + }, +}; + +const CLOCK_SPEEDUP: i64 = 20; +const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50 * CLOCK_SPEEDUP); + +/// Loads a block from the storage and converts it to a `FinalBlock`. 
+pub(super) async fn load_final_block( + storage: &mut StorageProcessor<'_>, + number: u32, +) -> FinalBlock { + let sync_block = storage + .sync_dal() + .sync_block(MiniblockNumber(number), Address::repeat_byte(1), true) + .await + .unwrap() + .unwrap_or_else(|| panic!("no sync block #{number}")); + conversions::sync_block_to_consensus_block(sync_block).unwrap() +} + +pub async fn block_payload(storage: &mut StorageProcessor<'_>, number: u32) -> validator::Payload { + let sync_block = storage + .sync_dal() + .sync_block(MiniblockNumber(number), Address::repeat_byte(1), true) + .await + .unwrap() + .unwrap_or_else(|| panic!("no sync block #{number}")); + consensus::Payload::try_from(sync_block).unwrap().encode() +} + +/// Adds consensus information for the specified `count` of miniblocks, starting from the genesis. +pub(super) async fn add_consensus_fields( + storage: &mut StorageProcessor<'_>, + validator_key: &validator::SecretKey, + count: u32, +) { + let mut prev_block_hash = validator::BlockHeaderHash::from_bytes([0; 32]); + let validator_set = validator::ValidatorSet::new([validator_key.public()]).unwrap(); + for number in 0..count { + let payload = block_payload(storage, number).await; + let block_header = validator::BlockHeader { + parent: prev_block_hash, + number: validator::BlockNumber(number.into()), + payload: payload.hash(), + }; + let replica_commit = validator::ReplicaCommit { + protocol_version: validator::CURRENT_VERSION, + view: validator::ViewNumber(number.into()), + proposal: block_header, + }; + let replica_commit = validator_key.sign_msg(replica_commit); + let justification = validator::CommitQC::from(&[replica_commit], &validator_set) + .expect("Failed creating QC"); + + let consensus = ConsensusBlockFields { + parent: prev_block_hash, + justification, + }; + storage + .blocks_dal() + .set_miniblock_consensus_fields(MiniblockNumber(number), &consensus) + .await + .unwrap(); + prev_block_hash = block_header.hash(); + } +} + +pub(super) async 
fn assert_first_block_actions(actions: &mut ActionQueue) -> Vec<SyncAction> {
+ let mut received_actions = vec![];
+ while !matches!(received_actions.last(), Some(SyncAction::SealMiniblock(_))) {
+ received_actions.push(actions.recv_action().await);
+ }
+ assert_matches!(
+ received_actions.as_slice(),
+ [
+ SyncAction::OpenBatch {
+ number: L1BatchNumber(1),
+ timestamp: 1,
+ first_miniblock_info: (MiniblockNumber(1), 1),
+ ..
+ },
+ SyncAction::Tx(_),
+ SyncAction::Tx(_),
+ SyncAction::Tx(_),
+ SyncAction::Tx(_),
+ SyncAction::Tx(_),
+ SyncAction::SealMiniblock(_),
+ ]
+ );
+ received_actions
+}
+
+pub(super) async fn assert_second_block_actions(actions: &mut ActionQueue) -> Vec<SyncAction> {
+ let mut received_actions = vec![];
+ while !matches!(received_actions.last(), Some(SyncAction::SealMiniblock(_))) {
+ received_actions.push(actions.recv_action().await);
+ }
+ assert_matches!(
+ received_actions.as_slice(),
+ [
+ SyncAction::Miniblock {
+ number: MiniblockNumber(2),
+ timestamp: 2,
+ virtual_blocks: 1,
+ },
+ SyncAction::Tx(_),
+ SyncAction::Tx(_),
+ SyncAction::Tx(_),
+ SyncAction::SealMiniblock(_),
+ ]
+ );
+ received_actions
+}
+
+#[test_casing(4, Product(([false, true], [false, true])))]
+#[tokio::test]
+async fn syncing_via_gossip_fetcher(delay_first_block: bool, delay_second_block: bool) {
+ zksync_concurrency::testonly::abort_on_panic();
+ let pool = ConnectionPool::test_pool().await;
+ let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await;
+
+ let mut storage = pool.access_storage().await.unwrap();
+ let genesis_block_payload = block_payload(&mut storage, 0).await;
+ let ctx = &ctx::test_root(&ctx::AffineClock::new(CLOCK_SPEEDUP as f64));
+ let rng = &mut ctx.rng();
+ let mut validator = FullValidatorConfig::for_single_validator(rng, genesis_block_payload);
+ let validator_set = validator.node_config.validators.clone();
+ let external_node = validator.connect_full_node(rng);
+
+ let (genesis_block, blocks) =
+ get_blocks_and_reset_storage(storage,
&validator.validator_key).await; + let [first_block, second_block] = blocks.as_slice() else { + unreachable!("Unexpected blocks in storage: {blocks:?}"); + }; + tracing::trace!("Node storage reset"); + + let validator_storage = Arc::new(InMemoryStorage::new(genesis_block)); + if !delay_first_block { + validator_storage.put_block(ctx, first_block).await.unwrap(); + if !delay_second_block { + validator_storage + .put_block(ctx, second_block) + .await + .unwrap(); + } + } + let validator = Executor::new( + validator.node_config, + validator.node_key, + validator_storage.clone(), + ) + .unwrap(); + // ^ We intentionally do not run consensus on the validator node, since it'll produce blocks + // with payloads that cannot be parsed by the external node. + + let (actions_sender, mut actions) = ActionQueue::new(); + let (keeper_actions_sender, keeper_actions) = ActionQueue::new(); + let state_keeper = StateKeeperHandles::new(pool.clone(), keeper_actions, &[&tx_hashes]).await; + scope::run!(ctx, |ctx, s| async { + s.spawn_bg(validator.run(ctx)); + s.spawn_bg(run_gossip_fetcher_inner( + ctx, + pool.clone(), + actions_sender, + external_node.node_config, + external_node.node_key, + )); + + if delay_first_block { + ctx.sleep(POLL_INTERVAL).await?; + validator_storage.put_block(ctx, first_block).await.unwrap(); + if !delay_second_block { + validator_storage + .put_block(ctx, second_block) + .await + .unwrap(); + } + } + + let received_actions = assert_first_block_actions(&mut actions).await; + // Manually replicate actions to the state keeper. 
+ keeper_actions_sender.push_actions(received_actions).await;
+
+ if delay_second_block {
+ validator_storage
+ .put_block(ctx, second_block)
+ .await
+ .unwrap();
+ }
+
+ let received_actions = assert_second_block_actions(&mut actions).await;
+ keeper_actions_sender.push_actions(received_actions).await;
+ state_keeper
+ .wait(|state| state.get_local_block() == MiniblockNumber(2))
+ .await;
+ Ok(())
+ })
+ .await
+ .unwrap();
+
+ // Check that received blocks have consensus fields persisted.
+ let mut storage = pool.access_storage().await.unwrap();
+ for number in [1, 2] {
+ let block = load_final_block(&mut storage, number).await;
+ block.justification.verify(&validator_set, 1).unwrap();
+ }
+}
+
+async fn get_blocks_and_reset_storage(
+ mut storage: StorageProcessor<'_>,
+ validator_key: &validator::SecretKey,
+) -> (FinalBlock, Vec<FinalBlock>) {
+ let sealed_miniblock_number = storage
+ .blocks_dal()
+ .get_sealed_miniblock_number()
+ .await
+ .unwrap();
+ add_consensus_fields(&mut storage, validator_key, sealed_miniblock_number.0 + 1).await;
+ let genesis_block = load_final_block(&mut storage, 0).await;
+
+ let mut blocks = Vec::with_capacity(sealed_miniblock_number.0 as usize);
+ for number in 1..=sealed_miniblock_number.0 {
+ blocks.push(load_final_block(&mut storage, number).await);
+ }
+
+ storage
+ .transactions_dal()
+ .reset_transactions_state(MiniblockNumber(0))
+ .await;
+ storage
+ .blocks_dal()
+ .delete_miniblocks(MiniblockNumber(0))
+ .await
+ .unwrap();
+ storage
+ .blocks_dal()
+ .delete_l1_batches(L1BatchNumber(0))
+ .await
+ .unwrap();
+ (genesis_block, blocks)
+}
+
+#[test_casing(4, [3, 2, 1, 0])]
+#[tokio::test]
+async fn syncing_via_gossip_fetcher_with_multiple_l1_batches(initial_block_count: usize) {
+ assert!(initial_block_count <= 3);
+ zksync_concurrency::testonly::abort_on_panic();
+
+ let pool = ConnectionPool::test_pool().await;
+ let tx_hashes = run_state_keeper_with_multiple_l1_batches(pool.clone()).await;
+ let tx_hashes: Vec<_> =
tx_hashes.iter().map(Vec::as_slice).collect(); + + let mut storage = pool.access_storage().await.unwrap(); + let genesis_block_payload = block_payload(&mut storage, 0).await; + let ctx = &ctx::test_root(&ctx::AffineClock::new(CLOCK_SPEEDUP as f64)); + let rng = &mut ctx.rng(); + let mut validator = FullValidatorConfig::for_single_validator(rng, genesis_block_payload); + let validator_set = validator.node_config.validators.clone(); + let external_node = validator.connect_full_node(rng); + + let (genesis_block, blocks) = + get_blocks_and_reset_storage(storage, &validator.validator_key).await; + assert_eq!(blocks.len(), 3); // 2 real + 1 fictive blocks + tracing::trace!("Node storage reset"); + let (initial_blocks, delayed_blocks) = blocks.split_at(initial_block_count); + + let validator_storage = Arc::new(InMemoryStorage::new(genesis_block)); + for block in initial_blocks { + validator_storage.put_block(ctx, block).await.unwrap(); + } + let validator = Executor::new( + validator.node_config, + validator.node_key, + validator_storage.clone(), + ) + .unwrap(); + + let (actions_sender, actions) = ActionQueue::new(); + let state_keeper = StateKeeperHandles::new(pool.clone(), actions, &tx_hashes).await; + scope::run!(ctx, |ctx, s| async { + s.spawn_bg(validator.run(ctx)); + s.spawn_bg(async { + for block in delayed_blocks { + ctx.sleep(POLL_INTERVAL).await?; + validator_storage.put_block(ctx, block).await?; + } + Ok(()) + }); + + let cloned_pool = pool.clone(); + s.spawn_bg(async { + mock_l1_batch_hash_computation(cloned_pool, 1).await; + Ok(()) + }); + s.spawn_bg(run_gossip_fetcher_inner( + ctx, + pool.clone(), + actions_sender, + external_node.node_config, + external_node.node_key, + )); + + state_keeper + .wait(|state| state.get_local_block() == MiniblockNumber(3)) + .await; + Ok(()) + }) + .await + .unwrap(); + + // Check that received blocks have consensus fields persisted. 
+ let mut storage = pool.access_storage().await.unwrap();
+ for number in [1, 2, 3] {
+ let block = load_final_block(&mut storage, number).await;
+ block.justification.verify(&validator_set, 1).unwrap();
+ }
+}
diff --git a/core/lib/zksync_core/src/sync_layer/gossip/utils.rs b/core/lib/zksync_core/src/sync_layer/gossip/utils.rs
new file mode 100644
index 00000000000..8407821a2ec
--- /dev/null
+++ b/core/lib/zksync_core/src/sync_layer/gossip/utils.rs
@@ -0,0 +1,48 @@
+use std::{iter, ops};
+
+use zksync_consensus_roles::validator::BlockNumber;
+
+/// Iterator over missing block numbers.
+pub(crate) struct MissingBlockNumbers<I: Iterator> {
+ range: ops::Range<BlockNumber>,
+ existing_numbers: iter::Peekable<I>,
+}
+
+impl<I> MissingBlockNumbers<I>
+where
+ I: Iterator<Item = BlockNumber>,
+{
+ /// Creates a new iterator based on the provided params.
+ pub(crate) fn new(range: ops::Range<BlockNumber>, existing_numbers: I) -> Self {
+ Self {
+ range,
+ existing_numbers: existing_numbers.peekable(),
+ }
+ }
+}
+
+impl<I> Iterator for MissingBlockNumbers<I>
+where
+ I: Iterator<Item = BlockNumber>,
+{
+ type Item = BlockNumber;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // Loop while existing numbers match the starting numbers from the range. The check
+ // that the range is non-empty is redundant given how `existing_numbers` are constructed
+ // (they are guaranteed to be lesser than the upper range bound); we add it just to be safe.
+ while !self.range.is_empty() + && matches!(self.existing_numbers.peek(), Some(&num) if num == self.range.start) + { + self.range.start = self.range.start.next(); + self.existing_numbers.next(); // Advance to the next number + } + + if self.range.is_empty() { + return None; + } + let next_number = self.range.start; + self.range.start = self.range.start.next(); + Some(next_number) + } +} diff --git a/core/lib/zksync_core/src/sync_layer/mod.rs b/core/lib/zksync_core/src/sync_layer/mod.rs index e216ef4f8c5..df059947e3e 100644 --- a/core/lib/zksync_core/src/sync_layer/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/mod.rs @@ -3,6 +3,7 @@ mod client; pub mod external_io; pub mod fetcher; pub mod genesis; +mod gossip; mod metrics; pub(crate) mod sync_action; mod sync_state; @@ -10,6 +11,6 @@ mod sync_state; mod tests; pub use self::{ - client::MainNodeClient, external_io::ExternalIO, sync_action::ActionQueue, - sync_state::SyncState, + client::MainNodeClient, external_io::ExternalIO, gossip::run_gossip_fetcher, + sync_action::ActionQueue, sync_state::SyncState, }; diff --git a/core/lib/zksync_core/src/sync_layer/sync_action.rs b/core/lib/zksync_core/src/sync_layer/sync_action.rs index 977d03dd532..b4f56999d4f 100644 --- a/core/lib/zksync_core/src/sync_layer/sync_action.rs +++ b/core/lib/zksync_core/src/sync_layer/sync_action.rs @@ -1,6 +1,9 @@ use tokio::sync::mpsc; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction, H256}; +use zksync_types::{ + block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, + Transaction, H256, +}; use super::metrics::QUEUE_METRICS; @@ -52,7 +55,7 @@ impl ActionQueueSender { return Err(format!("Unexpected Tx: {:?}", actions)); } } - SyncAction::SealMiniblock | SyncAction::SealBatch { .. } => { + SyncAction::SealMiniblock(_) | SyncAction::SealBatch { .. 
} => { if !opened || miniblock_sealed { return Err(format!("Unexpected SealMiniblock/SealBatch: {:?}", actions)); } @@ -89,7 +92,7 @@ impl ActionQueue { } /// Removes the first action from the queue. - pub(crate) fn pop_action(&mut self) -> Option { + pub(super) fn pop_action(&mut self) -> Option { if let Some(peeked) = self.peeked.take() { QUEUE_METRICS.action_queue_size.dec_by(1); return Some(peeked); @@ -101,8 +104,19 @@ impl ActionQueue { action } + #[cfg(test)] + pub(super) async fn recv_action(&mut self) -> SyncAction { + if let Some(peeked) = self.peeked.take() { + return peeked; + } + self.receiver + .recv() + .await + .expect("actions sender was dropped prematurely") + } + /// Returns the first action from the queue without removing it. - pub(crate) fn peek_action(&mut self) -> Option { + pub(super) fn peek_action(&mut self) -> Option { if let Some(action) = &self.peeked { return Some(action.clone()); } @@ -135,11 +149,13 @@ pub(crate) enum SyncAction { /// that they are sealed, but at the same time the next miniblock may not exist yet. /// By having a dedicated action for that we prevent a situation where the miniblock is kept open on the EN until /// the next one is sealed on the main node. - SealMiniblock, + SealMiniblock(Option), /// Similarly to `SealMiniblock` we must be able to seal the batch even if there is no next miniblock yet. SealBatch { - // Virtual blocks count for the fictive miniblock. + /// Virtual blocks count for the fictive miniblock. virtual_blocks: u32, + /// Consensus-related fields for the fictive miniblock. 
+ consensus: Option, }, } @@ -193,11 +209,14 @@ mod tests { } fn seal_miniblock() -> SyncAction { - SyncAction::SealMiniblock + SyncAction::SealMiniblock(None) } fn seal_batch() -> SyncAction { - SyncAction::SealBatch { virtual_blocks: 1 } + SyncAction::SealBatch { + virtual_blocks: 1, + consensus: None, + } } #[test] diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 576cb56dd7d..4a337bbf5dc 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -16,11 +16,7 @@ use zksync_types::{ api, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, }; -use super::{ - fetcher::MainNodeFetcherCursor, - sync_action::{ActionQueueSender, SyncAction}, - *, -}; +use super::{fetcher::FetcherCursor, sync_action::SyncAction, *}; use crate::{ api_server::web3::tests::spawn_http_server, genesis::{ensure_genesis_state, GenesisParams}, @@ -146,15 +142,52 @@ fn open_l1_batch(number: u32, timestamp: u64, first_miniblock_number: u32) -> Sy } #[derive(Debug)] -struct StateKeeperHandles { - actions_sender: ActionQueueSender, - stop_sender: watch::Sender, - sync_state: SyncState, - task: JoinHandle>, +pub(super) struct StateKeeperHandles { + pub stop_sender: watch::Sender, + pub sync_state: SyncState, + pub task: JoinHandle>, } impl StateKeeperHandles { - async fn wait(self, mut condition: impl FnMut(&SyncState) -> bool) { + /// `tx_hashes` are grouped by the L1 batch. 
+ pub async fn new(pool: ConnectionPool, actions: ActionQueue, tx_hashes: &[&[H256]]) -> Self { + assert!(!tx_hashes.is_empty()); + assert!(tx_hashes.iter().all(|tx_hashes| !tx_hashes.is_empty())); + + ensure_genesis(&mut pool.access_storage().await.unwrap()).await; + + let sync_state = SyncState::new(); + let io = ExternalIO::new( + pool, + actions, + sync_state.clone(), + Box::::default(), + Address::repeat_byte(1), + u32::MAX, + L2ChainId::default(), + ) + .await; + + let (stop_sender, stop_receiver) = watch::channel(false); + let mut batch_executor_base = TestBatchExecutorBuilder::default(); + for &tx_hashes_in_l1_batch in tx_hashes { + batch_executor_base.push_successful_transactions(tx_hashes_in_l1_batch); + } + + let state_keeper = ZkSyncStateKeeper::without_sealer( + stop_receiver, + Box::new(io), + Box::new(batch_executor_base), + ); + Self { + stop_sender, + sync_state, + task: tokio::spawn(state_keeper.run()), + } + } + + /// Waits for the given condition. + pub async fn wait(self, mut condition: impl FnMut(&SyncState) -> bool) { let started_at = Instant::now(); loop { assert!( @@ -187,45 +220,6 @@ async fn ensure_genesis(storage: &mut StorageProcessor<'_>) { } } -/// `tx_hashes` are grouped by the L1 batch. 
-async fn run_state_keeper(pool: ConnectionPool, tx_hashes: &[&[H256]]) -> StateKeeperHandles { - assert!(!tx_hashes.is_empty()); - assert!(tx_hashes.iter().all(|tx_hashes| !tx_hashes.is_empty())); - - ensure_genesis(&mut pool.access_storage().await.unwrap()).await; - - let (actions_sender, actions) = ActionQueue::new(); - let sync_state = SyncState::new(); - let io = ExternalIO::new( - pool, - actions, - sync_state.clone(), - Box::::default(), - Address::repeat_byte(1), - u32::MAX, - L2ChainId::default(), - ) - .await; - - let (stop_sender, stop_receiver) = watch::channel(false); - let mut batch_executor_base = TestBatchExecutorBuilder::default(); - for &tx_hashes_in_l1_batch in tx_hashes { - batch_executor_base.push_successful_transactions(tx_hashes_in_l1_batch); - } - - let state_keeper = ZkSyncStateKeeper::without_sealer( - stop_receiver, - Box::new(io), - Box::new(batch_executor_base), - ); - StateKeeperHandles { - actions_sender, - stop_sender, - sync_state, - task: tokio::spawn(state_keeper.run()), - } -} - fn extract_tx_hashes<'a>(actions: impl IntoIterator) -> Vec { actions .into_iter() @@ -246,10 +240,12 @@ async fn external_io_basics() { let tx = create_l2_transaction(10, 100); let tx_hash = tx.hash(); let tx = SyncAction::Tx(Box::new(tx.into())); - let actions = vec![open_l1_batch, tx, SyncAction::SealMiniblock]; + let actions = vec![open_l1_batch, tx, SyncAction::SealMiniblock(None)]; - let state_keeper = run_state_keeper(pool.clone(), &[&extract_tx_hashes(&actions)]).await; - state_keeper.actions_sender.push_actions(actions).await; + let (actions_sender, action_queue) = ActionQueue::new(); + let state_keeper = + StateKeeperHandles::new(pool.clone(), action_queue, &[&extract_tx_hashes(&actions)]).await; + actions_sender.push_actions(actions).await; // Wait until the miniblock is sealed. 
state_keeper .wait(|state| state.get_local_block() == MiniblockNumber(1)) @@ -279,7 +275,7 @@ async fn external_io_basics() { assert_eq!(tx_receipt.transaction_index, 0.into()); } -async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPool) -> Vec { +pub(super) async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPool) -> Vec { let open_l1_batch = open_l1_batch(1, 1, 1); let txs = (0..5).map(|_| { let tx = create_l2_transaction(10, 100); @@ -287,7 +283,7 @@ async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPool) -> Vec< }); let first_miniblock_actions: Vec<_> = iter::once(open_l1_batch) .chain(txs) - .chain([SyncAction::SealMiniblock]) + .chain([SyncAction::SealMiniblock(None)]) .collect(); let open_miniblock = SyncAction::Miniblock { @@ -301,7 +297,7 @@ async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPool) -> Vec< }); let second_miniblock_actions: Vec<_> = iter::once(open_miniblock) .chain(more_txs) - .chain([SyncAction::SealMiniblock]) + .chain([SyncAction::SealMiniblock(None)]) .collect(); let tx_hashes = extract_tx_hashes( @@ -309,15 +305,10 @@ async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPool) -> Vec< .iter() .chain(&second_miniblock_actions), ); - let state_keeper = run_state_keeper(pool, &[&tx_hashes]).await; - state_keeper - .actions_sender - .push_actions(first_miniblock_actions) - .await; - state_keeper - .actions_sender - .push_actions(second_miniblock_actions) - .await; + let (actions_sender, action_queue) = ActionQueue::new(); + let state_keeper = StateKeeperHandles::new(pool, action_queue, &[&tx_hashes]).await; + actions_sender.push_actions(first_miniblock_actions).await; + actions_sender.push_actions(second_miniblock_actions).await; // Wait until both miniblocks are sealed. 
state_keeper .wait(|state| state.get_local_block() == MiniblockNumber(2)) @@ -366,7 +357,8 @@ async fn test_external_io_recovery(pool: ConnectionPool, mut tx_hashes: Vec Vec> { let l1_batch = open_l1_batch(1, 1, 1); let first_tx = create_l2_transaction(10, 100); let first_tx_hash = first_tx.hash(); let first_tx = SyncAction::Tx(Box::new(first_tx.into())); - let first_l1_batch_actions = vec![l1_batch, first_tx, SyncAction::SealMiniblock]; + let first_l1_batch_actions = vec![l1_batch, first_tx, SyncAction::SealMiniblock(None)]; let fictive_miniblock = SyncAction::Miniblock { number: MiniblockNumber(2), timestamp: 2, virtual_blocks: 0, }; - let seal_l1_batch = SyncAction::SealBatch { virtual_blocks: 0 }; + let seal_l1_batch = SyncAction::SealBatch { + virtual_blocks: 0, + consensus: None, + }; let fictive_miniblock_actions = vec![fictive_miniblock, seal_l1_batch]; let l1_batch = open_l1_batch(2, 3, 3); let second_tx = create_l2_transaction(10, 100); let second_tx_hash = second_tx.hash(); let second_tx = SyncAction::Tx(Box::new(second_tx.into())); - let second_l1_batch_actions = vec![l1_batch, second_tx, SyncAction::SealMiniblock]; + let second_l1_batch_actions = vec![l1_batch, second_tx, SyncAction::SealMiniblock(None)]; - let state_keeper = run_state_keeper(pool.clone(), &[&[first_tx_hash], &[second_tx_hash]]).await; - state_keeper - .actions_sender - .push_actions(first_l1_batch_actions) - .await; - state_keeper - .actions_sender - .push_actions(fictive_miniblock_actions) - .await; - state_keeper - .actions_sender - .push_actions(second_l1_batch_actions) - .await; + let (actions_sender, action_queue) = ActionQueue::new(); + let state_keeper = StateKeeperHandles::new( + pool.clone(), + action_queue, + &[&[first_tx_hash], &[second_tx_hash]], + ) + .await; + actions_sender.push_actions(first_l1_batch_actions).await; + actions_sender.push_actions(fictive_miniblock_actions).await; + actions_sender.push_actions(second_l1_batch_actions).await; let hash_task = 
tokio::spawn(mock_l1_batch_hash_computation(pool.clone(), 1)); // Wait until the miniblocks are sealed. @@ -463,6 +456,14 @@ async fn external_io_with_multiple_l1_batches() { .await; hash_task.await.unwrap(); + vec![vec![first_tx_hash], vec![second_tx_hash]] +} + +#[tokio::test] +async fn external_io_with_multiple_l1_batches() { + let pool = ConnectionPool::test_pool().await; + run_state_keeper_with_multiple_l1_batches(pool.clone()).await; + let mut storage = pool.access_storage().await.unwrap(); let l1_batch_header = storage .blocks_dal() @@ -497,9 +498,9 @@ async fn fetcher_basics() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis(&mut storage).await; - let fetcher_cursor = MainNodeFetcherCursor::new(&mut storage).await.unwrap(); + let fetcher_cursor = FetcherCursor::new(&mut storage).await.unwrap(); assert_eq!(fetcher_cursor.l1_batch, L1BatchNumber(0)); - assert_eq!(fetcher_cursor.miniblock, MiniblockNumber(1)); + assert_eq!(fetcher_cursor.next_miniblock, MiniblockNumber(1)); drop(storage); let mut mock_client = MockMainNodeClient::default(); @@ -529,15 +530,11 @@ async fn fetcher_basics() { let mut current_miniblock_number = MiniblockNumber(0); let mut tx_count_in_miniblock = 0; let started_at = Instant::now(); + let deadline = started_at + TEST_TIMEOUT; loop { - assert!( - started_at.elapsed() <= TEST_TIMEOUT, - "Timed out waiting for fetcher" - ); - let Some(action) = actions.pop_action() else { - tokio::time::sleep(POLL_INTERVAL).await; - continue; - }; + let action = tokio::time::timeout_at(deadline.into(), actions.recv_action()) + .await + .unwrap(); match action { SyncAction::OpenBatch { number, .. } => { current_l1_batch_number += 1; @@ -550,7 +547,7 @@ async fn fetcher_basics() { tx_count_in_miniblock = 0; assert_eq!(number, current_miniblock_number); } - SyncAction::SealBatch { virtual_blocks } => { + SyncAction::SealBatch { virtual_blocks, .. 
} => { assert_eq!(virtual_blocks, 0); assert_eq!(tx_count_in_miniblock, 0); if current_miniblock_number == MiniblockNumber(5) { @@ -561,7 +558,7 @@ async fn fetcher_basics() { assert_eq!(tx.hash(), tx_hashes.pop_front().unwrap()); tx_count_in_miniblock += 1; } - SyncAction::SealMiniblock => { + SyncAction::SealMiniblock(_) => { assert_eq!(tx_count_in_miniblock, 1); } } @@ -577,6 +574,15 @@ async fn fetcher_with_real_server() { // Fill in transactions grouped in multiple miniblocks in the storage. let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await; let mut tx_hashes = VecDeque::from(tx_hashes); + let mut connection = pool.access_storage().await.unwrap(); + let genesis_miniblock_hash = connection + .blocks_dal() + .get_miniblock_header(MiniblockNumber(0)) + .await + .unwrap() + .expect("No genesis miniblock") + .hash; + drop(connection); // Start the API server. let network_config = NetworkConfig::for_tests(); @@ -590,8 +596,9 @@ async fn fetcher_with_real_server() { let sync_state = SyncState::default(); let (actions_sender, mut actions) = ActionQueue::new(); let client = ::json_rpc(&format!("http://{server_addr}/")).unwrap(); - let fetcher_cursor = MainNodeFetcherCursor { - miniblock: MiniblockNumber(1), + let fetcher_cursor = FetcherCursor { + next_miniblock: MiniblockNumber(1), + prev_miniblock_hash: genesis_miniblock_hash, l1_batch: L1BatchNumber(0), }; let fetcher = fetcher_cursor.into_fetcher( @@ -607,15 +614,11 @@ async fn fetcher_with_real_server() { let mut tx_count_in_miniblock = 0; let miniblock_number_to_tx_count = HashMap::from([(1, 5), (2, 3)]); let started_at = Instant::now(); + let deadline = started_at + TEST_TIMEOUT; loop { - assert!( - started_at.elapsed() <= TEST_TIMEOUT, - "Timed out waiting for fetcher actions" - ); - let Some(action) = actions.pop_action() else { - tokio::time::sleep(POLL_INTERVAL).await; - continue; - }; + let action = tokio::time::timeout_at(deadline.into(), actions.recv_action()) + .await + 
.unwrap(); match action { SyncAction::OpenBatch { number, @@ -637,7 +640,7 @@ async fn fetcher_with_real_server() { assert_eq!(tx.hash(), tx_hashes.pop_front().unwrap()); tx_count_in_miniblock += 1; } - SyncAction::SealMiniblock => { + SyncAction::SealMiniblock(_) => { assert_eq!( tx_count_in_miniblock, miniblock_number_to_tx_count[¤t_miniblock_number] diff --git a/prover/Cargo.lock b/prover/Cargo.lock index d100677d746..d27b787084f 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -324,6 +324,12 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "beef" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" + [[package]] name = "bellman_ce" version = "0.3.2" @@ -617,6 +623,18 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" +[[package]] +name = "blst" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c94087b935a822949d3291a9989ad2b2051ea141eda0fd4e478a75f6aa3e604b" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + [[package]] name = "boojum" version = "0.1.0" @@ -1350,6 +1368,34 @@ dependencies = [ "serde_json", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "platforms", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.39", +] + [[package]] name = "darling" version = "0.13.4" @@ -1429,6 +1475,16 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +dependencies = [ + "const-oid 0.9.5", + "zeroize", +] + [[package]] name = "deranged" version = "0.3.9" @@ -1524,7 +1580,32 @@ dependencies = [ "der 0.6.1", "elliptic-curve", "rfc6979", - "signature", + "signature 1.6.4", +] + +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8 0.10.2", + "signature 2.2.0", +] + +[[package]] +name = "ed25519-dalek" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f628eaec48bfd21b865dc2950cfa014450c01d2fa2b69a86c2fd5844ec523c0" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core 0.6.4", + "serde", + "sha2 0.10.8", + "subtle", + "zeroize", ] [[package]] @@ -1764,6 +1845,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "fiat-crypto" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" + [[package]] name = "findshlibs" version = "0.10.2" @@ -1812,6 +1899,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + [[package]] name = "fnv" version = "1.0.7" @@ -2840,6 +2933,38 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +[[package]] +name = "logos" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c000ca4d908ff18ac99b93a062cb8958d331c3220719c52e77cb19cc6ac5d2c1" +dependencies = [ + "logos-derive", +] + +[[package]] +name = "logos-codegen" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc487311295e0002e452025d6b580b77bb17286de87b57138f3b5db711cded68" +dependencies = [ + "beef", + "fnv", + "proc-macro2 1.0.69", + "quote 1.0.33", + "regex-syntax 0.6.29", + "syn 2.0.39", +] + +[[package]] +name = "logos-derive" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbfc0d229f1f42d790440136d941afd806bc9e949e2bcb8faa813b0f00d1267e" +dependencies = [ + "logos-codegen", +] + [[package]] name = "mach2" version = "0.4.1" @@ -2959,6 +3084,29 @@ dependencies = [ "sketches-ddsketch", ] +[[package]] +name = "miette" +version = "5.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59bb584eaeeab6bd0226ccf3509a69d7936d148cf3d036ad350abe35e8c6856e" +dependencies = [ + "miette-derive", + "once_cell", + "thiserror", + "unicode-width", +] + +[[package]] +name = "miette-derive" +version = "5.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.39", +] + [[package]] name = "mime" version = "0.3.17" @@ -3016,6 +3164,12 @@ dependencies = [ "windows-sys", ] +[[package]] +name = "multimap" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + [[package]] name = "multivm" version = "0.1.0" @@ -3395,6 +3549,15 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "ordered-float" +version = 
"2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + [[package]] name = "os_info" version = "3.7.0" @@ -3435,6 +3598,18 @@ dependencies = [ "serde", ] +[[package]] +name = "pairing_ce" +version = "0.28.5" +source = "git+https://github.com/matter-labs/pairing.git?rev=f55393f#f55393fd366596eac792d78525d26e9c4d6ed1ca" +dependencies = [ + "byteorder", + "cfg-if 1.0.0", + "ff_ce", + "rand 0.4.6", + "serde", +] + [[package]] name = "pairing_ce" version = "0.28.5" @@ -3684,6 +3859,16 @@ dependencies = [ "sha2 0.10.8", ] +[[package]] +name = "petgraph" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +dependencies = [ + "fixedbitset", + "indexmap 2.1.0", +] + [[package]] name = "pin-project" version = "1.1.3" @@ -3748,12 +3933,28 @@ dependencies = [ "spki 0.6.0", ] +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der 0.7.8", + "spki 0.7.2", +] + [[package]] name = "pkg-config" version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +[[package]] +name = "platforms" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14e6ab3f592e6fb464fc9712d8d6e6912de6473954635fd76a589d832cffcbb0" + [[package]] name = "plotters" version = "0.3.5" @@ -3949,6 +4150,103 @@ dependencies = [ "unarray", ] +[[package]] +name = "prost" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5a410fc7882af66deb8d01d01737353cf3ad6204c408177ba494291a626312" +dependencies = [ + "bytes", + 
"prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa3d084c8704911bfefb2771be2f9b6c5c0da7343a71e0021ee3c665cada738" +dependencies = [ + "bytes", + "heck 0.4.1", + "itertools 0.11.0", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 2.0.39", + "tempfile", + "which", +] + +[[package]] +name = "prost-derive" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "065717a5dfaca4a83d2fe57db3487b311365200000551d7a364e715dbf4346bc" +dependencies = [ + "anyhow", + "itertools 0.11.0", + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.39", +] + +[[package]] +name = "prost-reflect" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "057237efdb71cf4b3f9396302a3d6599a92fa94063ba537b66130980ea9909f3" +dependencies = [ + "base64 0.21.5", + "logos", + "miette", + "once_cell", + "prost", + "prost-types", + "serde", + "serde-value", +] + +[[package]] +name = "prost-types" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8339f32236f590281e2f6368276441394fcd1b2133b549cc895d0ae80f2f9a52" +dependencies = [ + "prost", +] + +[[package]] +name = "protox" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00bb76c5f6221de491fe2c8f39b106330bbd9762c6511119c07940e10eb9ff11" +dependencies = [ + "bytes", + "miette", + "prost", + "prost-reflect", + "prost-types", + "protox-parse", + "thiserror", +] + +[[package]] +name = "protox-parse" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4581f441c58863525a3e6bec7b8de98188cf75239a56c725a3e7288450a33f" +dependencies = [ + "logos", + "miette", + "prost-types", + "thiserror", +] + [[package]] name = "prover-service" version = "0.1.0" @@ -4004,6 
+4302,15 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + [[package]] name = "quote" version = "0.6.13" @@ -4881,6 +5188,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + [[package]] name = "serde_derive" version = "1.0.192" @@ -5096,6 +5413,15 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "simple_asn1" version = "0.6.2" @@ -5209,6 +5535,16 @@ dependencies = [ "der 0.6.1", ] +[[package]] +name = "spki" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +dependencies = [ + "base64ct", + "der 0.7.8", +] + [[package]] name = "splitmut" version = "0.2.1" @@ -5571,6 +5907,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + [[package]] name = "time" version = "0.3.30" @@ -6448,6 +6793,20 @@ name = "zeroize" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" +dependencies = [ + "proc-macro2 1.0.69", + "quote 1.0.33", + "syn 2.0.39", +] [[package]] name = "zk_evm" @@ -6697,6 +7056,24 @@ dependencies = [ "zksync_verification_key_generator_and_server", ] +[[package]] +name = "zksync_concurrency" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +dependencies = [ + "anyhow", + "once_cell", + "pin-project", + "rand 0.8.5", + "sha3 0.10.8", + "thiserror", + "time", + "tokio", + "tracing", + "tracing-subscriber", + "vise", +] + [[package]] name = "zksync_config" version = "0.1.0" @@ -6706,6 +7083,52 @@ dependencies = [ "zksync_basic_types", ] +[[package]] +name = "zksync_consensus_crypto" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +dependencies = [ + "anyhow", + "blst", + "ed25519-dalek", + "ff_ce", + "hex", + "pairing_ce 0.28.5 (git+https://github.com/matter-labs/pairing.git?rev=f55393f)", + "rand 0.4.6", + "rand 0.8.5", + "sha3 0.10.8", + "thiserror", + "tracing", +] + +[[package]] +name = "zksync_consensus_roles" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +dependencies = [ + "anyhow", + "bit-vec", + "hex", + "prost", + "rand 0.8.5", + "serde", + "tracing", + "zksync_concurrency", + "zksync_consensus_crypto", + "zksync_consensus_utils", + "zksync_protobuf", + "zksync_protobuf_build", +] + +[[package]] +name = "zksync_consensus_utils" +version = 
"0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +dependencies = [ + "thiserror", + "zksync_concurrency", +] + [[package]] name = "zksync_contracts" version = "0.1.0" @@ -6875,6 +7298,40 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_protobuf" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +dependencies = [ + "anyhow", + "bit-vec", + "once_cell", + "prost", + "prost-reflect", + "quick-protobuf", + "rand 0.8.5", + "serde", + "serde_json", + "zksync_concurrency", + "zksync_protobuf_build", +] + +[[package]] +name = "zksync_protobuf_build" +version = "0.1.0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +dependencies = [ + "anyhow", + "heck 0.4.1", + "prettyplease", + "proc-macro2 1.0.69", + "prost-build", + "prost-reflect", + "protox", + "quote 1.0.33", + "syn 2.0.39", +] + [[package]] name = "zksync_prover" version = "0.1.0" @@ -7071,6 +7528,7 @@ dependencies = [ name = "zksync_types" version = "0.1.0" dependencies = [ + "anyhow", "blake2 0.10.6 (registry+https://github.com/rust-lang/crates.io-index)", "chrono", "codegen 0.1.0", @@ -7080,6 +7538,7 @@ dependencies = [ "num_enum", "once_cell", "parity-crypto", + "prost", "rlp", "serde", "serde_json", @@ -7090,8 +7549,11 @@ dependencies = [ "zk_evm 1.4.0", "zkevm_test_harness 1.3.3", "zksync_basic_types", + "zksync_consensus_roles", "zksync_contracts", "zksync_mini_merkle_tree", + "zksync_protobuf", + "zksync_protobuf_build", "zksync_system_constants", "zksync_utils", ] From 1e9e55651a95b509b5dfd644b8f9f3c718e41804 Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Thu, 23 Nov 2023 15:51:19 +0100 Subject: [PATCH 037/115] fix(core): differentiate l2 to l1 logs tree 
size for pre and post boojum batches (#538) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixes proofs L2ToL1Logs for pre-boojum blocks. --- .../src/api_server/web3/namespaces/zks.rs | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 849f88615b9..7f38c6afc52 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -344,8 +344,27 @@ impl ZksNamespace { return Ok(None); }; + let Some(batch) = storage + .blocks_dal() + .get_l1_batch_header(l1_batch_number) + .await + .map_err(|err| internal_error(method_name, err))? + else { + return Ok(None); + }; + let merkle_tree_leaves = all_l1_logs_in_batch.iter().map(L2ToL1Log::to_bytes); - let min_tree_size = Some(L2ToL1Log::MIN_L2_L1_LOGS_TREE_SIZE); + + let min_tree_size = if batch + .protocol_version + .map(|v| v.is_pre_boojum()) + .unwrap_or(true) + { + Some(L2ToL1Log::PRE_BOOJUM_MIN_L2_L1_LOGS_TREE_SIZE) + } else { + Some(L2ToL1Log::MIN_L2_L1_LOGS_TREE_SIZE) + }; + let (root, proof) = MiniMerkleTree::new(merkle_tree_leaves, min_tree_size) .merkle_root_and_path(l1_log_index); Ok(Some(L2ToL1LogProof { From 9ced921611fd11e61578a88c673674d5450886af Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 24 Nov 2023 14:34:48 +0200 Subject: [PATCH 038/115] test(merkle tree): Use `test-casing` for Merkle tree tests (#540) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Structure tests in the Merkle tree crate using `test-casing` crate, since it's already used in the server codebase. ## Why ❔ Makes tests more concise and makes it easier to cover more cases. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). 
- [x] Tests for the changes have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- Cargo.lock | 1 + core/lib/merkle_tree/Cargo.toml | 1 + core/lib/merkle_tree/src/storage/tests.rs | 142 ++----- .../tests/integration/merkle_tree.rs | 364 ++++++++---------- .../merkle_tree/tests/integration/recovery.rs | 85 ++-- ...cksdb__db-snapshot-21-chunked-commits.snap | 193 +--------- ...ocksdb__db-snapshot-8-chunked-commits.snap | 115 +----- 7 files changed, 236 insertions(+), 665 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2527bb55022..073f25b71bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8959,6 +8959,7 @@ dependencies = [ "serde_json", "serde_with", "tempfile", + "test-casing", "thiserror", "tracing", "tracing-subscriber", diff --git a/core/lib/merkle_tree/Cargo.toml b/core/lib/merkle_tree/Cargo.toml index 0fc3777d3ed..9a6c4a6b65d 100644 --- a/core/lib/merkle_tree/Cargo.toml +++ b/core/lib/merkle_tree/Cargo.toml @@ -33,4 +33,5 @@ serde = { version = "1", features = ["derive"] } serde_json = "1" serde_with = { version = "1", features = ["hex"] } tempfile = "3.0.2" +test-casing = "0.1.2" tracing-subscriber = { version = "0.3", features = ["env-filter"] } diff --git a/core/lib/merkle_tree/src/storage/tests.rs b/core/lib/merkle_tree/src/storage/tests.rs index 3ed0cbada52..958c906289e 100644 --- a/core/lib/merkle_tree/src/storage/tests.rs +++ b/core/lib/merkle_tree/src/storage/tests.rs @@ -4,6 +4,7 @@ use rand::{ seq::{IteratorRandom, SliceRandom}, Rng, SeedableRng, }; +use test_casing::test_casing; use std::collections::{HashMap, HashSet}; @@ -341,7 +342,8 @@ fn big_endian_key(index: u64) -> U256 { U256([0, 0, 0, index.swap_bytes()]) } -fn test_read_instructions_do_not_lead_to_copied_nodes(writes_per_block: u64) { +#[test_casing(3, [10, 20, 50])] +fn read_instructions_do_not_lead_to_copied_nodes(writes_per_block: u64) { const RNG_SEED: u64 = 12; // Write some keys into the database. 
@@ -390,15 +392,8 @@ fn assert_no_copied_nodes(database: &PatchSet, patch: &PatchSet) { } } -#[test] -fn read_instructions_do_not_lead_to_copied_nodes() { - for writes_per_block in [10, 20, 50] { - println!("Testing {writes_per_block} writes / block"); - test_read_instructions_do_not_lead_to_copied_nodes(writes_per_block); - } -} - -fn test_replaced_keys_are_correctly_tracked(writes_per_block: usize, with_proofs: bool) { +#[test_casing(12, test_casing::Product(([1, 3, 5, 10, 20, 50], [false, true])))] +fn replaced_keys_are_correctly_tracked(writes_per_block: usize, with_proofs: bool) { const RNG_SEED: u64 = 12; // Write some keys into the database. @@ -431,22 +426,6 @@ fn test_replaced_keys_are_correctly_tracked(writes_per_block: usize, with_proofs } } -#[test] -fn replaced_keys_are_correctly_tracked() { - for writes_per_block in [1, 3, 5, 10, 20, 50] { - println!("Testing {writes_per_block} writes / block"); - test_replaced_keys_are_correctly_tracked(writes_per_block, false); - } -} - -#[test] -fn replaced_keys_are_correctly_tracked_with_proofs() { - for writes_per_block in [1, 3, 5, 10, 20, 50] { - println!("Testing {writes_per_block} writes / block"); - test_replaced_keys_are_correctly_tracked(writes_per_block, true); - } -} - fn assert_replaced_keys(db: &PatchSet, patch: &PatchSet) { assert_eq!(patch.patches_by_version.len(), 1); let (&version, sub_patch) = patch.patches_by_version.iter().next().unwrap(); @@ -534,7 +513,8 @@ fn recovery_flattens_node_versions() { } } -fn test_recovery_with_node_hierarchy(chunk_size: usize) { +#[test_casing(7, [256, 4, 5, 20, 69, 127, 128])] +fn recovery_with_node_hierarchy(chunk_size: usize) { let recovery_version = 100; let recovery_entries = (0_u64..256).map(|i| RecoveryEntry { key: Key::from(i) << 248, // the first two key nibbles are distinct @@ -584,16 +564,8 @@ fn test_recovery_with_node_hierarchy(chunk_size: usize) { } } -#[test] -fn recovery_with_node_hierarchy() { - test_recovery_with_node_hierarchy(256); // single 
chunk - for chunk_size in [4, 5, 20, 69, 127, 128] { - println!("Testing recovery with chunk size {chunk_size}"); - test_recovery_with_node_hierarchy(chunk_size); - } -} - -fn test_recovery_with_deep_node_hierarchy(chunk_size: usize) { +#[test_casing(7, [256, 5, 7, 20, 59, 127, 128])] +fn recovery_with_deep_node_hierarchy(chunk_size: usize) { let recovery_version = 1_000; let recovery_entries = (0_u64..256).map(|i| RecoveryEntry { key: Key::from(i), // the last two key nibbles are distinct @@ -654,15 +626,6 @@ fn test_recovery_with_deep_node_hierarchy(chunk_size: usize) { } } -#[test] -fn recovery_with_deep_node_hierarchy() { - test_recovery_with_deep_node_hierarchy(256); - for chunk_size in [5, 7, 20, 59, 127, 128] { - println!("Testing recovery with chunk size {chunk_size}"); - test_recovery_with_deep_node_hierarchy(chunk_size); - } -} - #[test] fn recovery_workflow_with_multiple_stages() { let mut db = PatchSet::default(); @@ -700,8 +663,18 @@ fn recovery_workflow_with_multiple_stages() { .all(|log| matches!(log.base, TreeLogEntry::Read { .. }))); } +#[derive(Debug, Clone, Copy)] +enum RecoveryKind { + Linear, + Random, +} + +impl RecoveryKind { + const ALL: [Self; 2] = [Self::Linear, Self::Random]; +} + fn test_recovery_pruning_equivalence( - is_linear: bool, + kind: RecoveryKind, chunk_size: usize, recovery_chunk_size: usize, hasher: &dyn HashTree, @@ -753,20 +726,18 @@ fn test_recovery_pruning_equivalence( }); let mut recovery_entries: Vec<_> = recovery_entries.collect(); assert_eq!(recovery_entries.len(), 100); - if is_linear { - recovery_entries.sort_unstable_by_key(|entry| entry.key); - } else { - recovery_entries.shuffle(&mut rng); + match kind { + RecoveryKind::Linear => recovery_entries.sort_unstable_by_key(|entry| entry.key), + RecoveryKind::Random => recovery_entries.shuffle(&mut rng), } // Recover the tree. 
let mut recovered_db = PatchSet::default(); for recovery_chunk in recovery_entries.chunks(recovery_chunk_size) { let storage = Storage::new(&recovered_db, hasher, recovered_version, false); - let patch = if is_linear { - storage.extend_during_linear_recovery(recovery_chunk.to_vec()) - } else { - storage.extend_during_random_recovery(recovery_chunk.to_vec()) + let patch = match kind { + RecoveryKind::Linear => storage.extend_during_linear_recovery(recovery_chunk.to_vec()), + RecoveryKind::Random => storage.extend_during_random_recovery(recovery_chunk.to_vec()), }; recovered_db.apply_patch(patch); } @@ -806,55 +777,20 @@ fn test_recovery_pruning_equivalence( assert_eq!(all_recovered_nodes, flattened_version_nodes); } -#[test] -fn linear_recovery_pruning_equivalence() { - for chunk_size in [3, 5, 7, 11, 21, 42, 99, 100] { - // No chunking during recovery (simple case). - test_recovery_pruning_equivalence(true, chunk_size, 100, &()); - // Recovery is chunked (more complex case). - for recovery_chunk_size in [chunk_size, 1, 6, 19, 50, 73] { - test_recovery_pruning_equivalence(true, chunk_size, recovery_chunk_size, &()); - } - } -} +const HASHERS: [&'static dyn HashTree; 2] = [&(), &Blake2Hasher]; +const CHUNK_SIZES: [usize; 8] = [3, 5, 7, 11, 21, 42, 99, 100]; +#[test_casing(32, test_casing::Product((RecoveryKind::ALL, HASHERS, CHUNK_SIZES)))] #[test] -fn random_recovery_pruning_equivalence() { - for chunk_size in [3, 5, 7, 11, 21, 42, 99, 100] { - // No chunking during recovery (simple case). - test_recovery_pruning_equivalence(false, chunk_size, 100, &()); - // Recovery is chunked (more complex case). - for recovery_chunk_size in [chunk_size, 1, 6, 19, 50, 73] { - test_recovery_pruning_equivalence(false, chunk_size, recovery_chunk_size, &()); - } - } -} - -#[test] -fn linear_recovery_pruning_equivalence_with_hashing() { - for chunk_size in [3, 7, 21, 42, 100] { - // No chunking during recovery (simple case). 
- test_recovery_pruning_equivalence(true, chunk_size, 100, &Blake2Hasher); - // Recovery is chunked (more complex case). - for recovery_chunk_size in [chunk_size, 1, 19, 73] { - test_recovery_pruning_equivalence(true, chunk_size, recovery_chunk_size, &Blake2Hasher); - } - } -} - -#[test] -fn random_recovery_pruning_equivalence_with_hashing() { - for chunk_size in [3, 7, 21, 42, 100] { - // No chunking during recovery (simple case). - test_recovery_pruning_equivalence(false, chunk_size, 100, &Blake2Hasher); - // Recovery is chunked (more complex case). - for recovery_chunk_size in [chunk_size, 1, 19, 73] { - test_recovery_pruning_equivalence( - false, - chunk_size, - recovery_chunk_size, - &Blake2Hasher, - ); - } +fn recovery_pruning_equivalence( + kind: RecoveryKind, + hasher: &'static dyn HashTree, + chunk_size: usize, +) { + // No chunking during recovery (simple case). + test_recovery_pruning_equivalence(kind, chunk_size, 100, hasher); + // Recovery is chunked (more complex case). + for recovery_chunk_size in [chunk_size, 1, 19, 73] { + test_recovery_pruning_equivalence(kind, chunk_size, recovery_chunk_size, hasher); } } diff --git a/core/lib/merkle_tree/tests/integration/merkle_tree.rs b/core/lib/merkle_tree/tests/integration/merkle_tree.rs index eb84bb7248e..9f3eb970cd3 100644 --- a/core/lib/merkle_tree/tests/integration/merkle_tree.rs +++ b/core/lib/merkle_tree/tests/integration/merkle_tree.rs @@ -1,6 +1,7 @@ //! Tests not tied to the zksync domain. 
use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use test_casing::test_casing; use std::{cmp, mem}; @@ -28,91 +29,81 @@ fn compute_tree_hash_works_correctly() { assert_eq!(hash, EXPECTED_HASH); } -#[test] -fn root_hash_is_computed_correctly_on_empty_tree() { - for kv_count in [1, 2, 3, 5, 8, 13, 21, 100] { - println!("Inserting {kv_count} key-value pairs"); - - let mut tree = MerkleTree::new(PatchSet::default()); - let kvs = generate_key_value_pairs(0..kv_count); - let expected_hash = compute_tree_hash(kvs.iter().copied()); - let output = tree.extend(kvs); - assert_eq!(output.root_hash, expected_hash); - } +const KV_COUNTS: [u64; 8] = [1, 2, 3, 5, 8, 13, 21, 100]; + +#[test_casing(8, KV_COUNTS)] +fn root_hash_is_computed_correctly_on_empty_tree(kv_count: u64) { + let mut tree = MerkleTree::new(PatchSet::default()); + let kvs = generate_key_value_pairs(0..kv_count); + let expected_hash = compute_tree_hash(kvs.iter().copied()); + let output = tree.extend(kvs); + assert_eq!(output.root_hash, expected_hash); } -#[test] -fn output_proofs_are_computed_correctly_on_empty_tree() { +#[test_casing(8, KV_COUNTS)] +fn output_proofs_are_computed_correctly_on_empty_tree(kv_count: u64) { const RNG_SEED: u64 = 123; let mut rng = StdRng::seed_from_u64(RNG_SEED); let empty_tree_hash = Blake2Hasher.empty_subtree_hash(256); - for kv_count in [1, 2, 3, 5, 8, 13, 21, 100] { - println!("Inserting {kv_count} key-value pairs"); - - let mut tree = MerkleTree::new(PatchSet::default()); - let kvs = generate_key_value_pairs(0..kv_count); - let expected_hash = compute_tree_hash(kvs.iter().copied()); - let instructions = convert_to_writes(&kvs); - let output = tree.extend_with_proofs(instructions.clone()); + let mut tree = MerkleTree::new(PatchSet::default()); + let kvs = generate_key_value_pairs(0..kv_count); + let expected_hash = compute_tree_hash(kvs.iter().copied()); + let instructions = convert_to_writes(&kvs); + let output = tree.extend_with_proofs(instructions.clone()); - 
assert_eq!(output.root_hash(), Some(expected_hash)); - assert_eq!(output.logs.len(), instructions.len()); - output.verify_proofs(&Blake2Hasher, empty_tree_hash, &instructions); - let root_hash = output.root_hash().unwrap(); + assert_eq!(output.root_hash(), Some(expected_hash)); + assert_eq!(output.logs.len(), instructions.len()); + output.verify_proofs(&Blake2Hasher, empty_tree_hash, &instructions); + let root_hash = output.root_hash().unwrap(); - let reads = instructions - .iter() - .map(|(key, _)| (*key, TreeInstruction::Read)); - let mut reads: Vec<_> = reads.collect(); - reads.shuffle(&mut rng); - let output = tree.extend_with_proofs(reads.clone()); - output.verify_proofs(&Blake2Hasher, root_hash, &reads); - assert_eq!(output.root_hash(), Some(root_hash)); - } + let reads = instructions + .iter() + .map(|(key, _)| (*key, TreeInstruction::Read)); + let mut reads: Vec<_> = reads.collect(); + reads.shuffle(&mut rng); + let output = tree.extend_with_proofs(reads.clone()); + output.verify_proofs(&Blake2Hasher, root_hash, &reads); + assert_eq!(output.root_hash(), Some(root_hash)); } -#[test] -fn entry_proofs_are_computed_correctly_on_empty_tree() { +#[test_casing(8, KV_COUNTS)] +fn entry_proofs_are_computed_correctly_on_empty_tree(kv_count: u64) { const RNG_SEED: u64 = 123; let mut rng = StdRng::seed_from_u64(RNG_SEED); - for kv_count in [1, 2, 3, 5, 8, 13, 21, 100] { - println!("Inserting {kv_count} key-value pairs"); - - let mut tree = MerkleTree::new(PatchSet::default()); - let kvs = generate_key_value_pairs(0..kv_count); - let expected_hash = compute_tree_hash(kvs.iter().copied()); - tree.extend(kvs.clone()); - - let existing_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); - let entries = tree.entries_with_proofs(0, &existing_keys).unwrap(); - assert_eq!(entries.len(), existing_keys.len()); - for ((key, value), entry) in kvs.iter().zip(entries) { - entry.verify(&Blake2Hasher, *key, expected_hash); - assert_eq!(entry.base.value_hash, *value); - } + let 
mut tree = MerkleTree::new(PatchSet::default()); + let kvs = generate_key_value_pairs(0..kv_count); + let expected_hash = compute_tree_hash(kvs.iter().copied()); + tree.extend(kvs.clone()); - // Test some keys adjacent to existing ones. - let adjacent_keys = kvs.iter().flat_map(|(key, _)| { - [ - *key ^ (U256::one() << rng.gen_range(0..256)), - *key ^ (U256::one() << rng.gen_range(0..256)), - *key ^ (U256::one() << rng.gen_range(0..256)), - ] - }); - let random_keys = generate_key_value_pairs(kv_count..(kv_count * 2)) - .into_iter() - .map(|(key, _)| key); - let mut missing_keys: Vec<_> = adjacent_keys.chain(random_keys).collect(); - missing_keys.shuffle(&mut rng); - - let entries = tree.entries_with_proofs(0, &missing_keys).unwrap(); - assert_eq!(entries.len(), missing_keys.len()); - for (key, entry) in missing_keys.iter().zip(entries) { - assert!(entry.base.is_empty()); - entry.verify(&Blake2Hasher, *key, expected_hash); - } + let existing_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + let entries = tree.entries_with_proofs(0, &existing_keys).unwrap(); + assert_eq!(entries.len(), existing_keys.len()); + for ((key, value), entry) in kvs.iter().zip(entries) { + entry.verify(&Blake2Hasher, *key, expected_hash); + assert_eq!(entry.base.value_hash, *value); + } + + // Test some keys adjacent to existing ones. 
+ let adjacent_keys = kvs.iter().flat_map(|(key, _)| { + [ + *key ^ (U256::one() << rng.gen_range(0..256)), + *key ^ (U256::one() << rng.gen_range(0..256)), + *key ^ (U256::one() << rng.gen_range(0..256)), + ] + }); + let random_keys = generate_key_value_pairs(kv_count..(kv_count * 2)) + .into_iter() + .map(|(key, _)| key); + let mut missing_keys: Vec<_> = adjacent_keys.chain(random_keys).collect(); + missing_keys.shuffle(&mut rng); + + let entries = tree.entries_with_proofs(0, &missing_keys).unwrap(); + assert_eq!(entries.len(), missing_keys.len()); + for (key, entry) in missing_keys.iter().zip(entries) { + assert!(entry.base.is_empty()); + entry.verify(&Blake2Hasher, *key, expected_hash); } } @@ -185,62 +176,52 @@ fn test_intermediate_commits(db: &mut impl Database, chunk_size: usize) { } } -#[test] -fn root_hash_is_computed_correctly_with_intermediate_commits() { - for chunk_size in [3, 5, 10, 17, 28, 42] { - println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); - test_intermediate_commits(&mut PatchSet::default(), chunk_size); - } +#[test_casing(6, [3, 5, 10, 17, 28, 42])] +fn root_hash_is_computed_correctly_with_intermediate_commits(chunk_size: usize) { + test_intermediate_commits(&mut PatchSet::default(), chunk_size); } -#[test] -fn output_proofs_are_computed_correctly_with_intermediate_commits() { +#[test_casing(6, [3, 5, 10, 17, 28, 42])] +fn output_proofs_are_computed_correctly_with_intermediate_commits(chunk_size: usize) { let (kvs, expected_hash) = &*KVS_AND_HASH; - for chunk_size in [3, 5, 10, 17, 28, 42] { - println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); - - let mut tree = MerkleTree::new(PatchSet::default()); - let mut root_hash = Blake2Hasher.empty_subtree_hash(256); - for chunk in kvs.chunks(chunk_size) { - let instructions = convert_to_writes(chunk); - let output = tree.extend_with_proofs(instructions.clone()); - output.verify_proofs(&Blake2Hasher, root_hash, &instructions); - root_hash = 
output.root_hash().unwrap(); - } - assert_eq!(root_hash, *expected_hash); + + let mut tree = MerkleTree::new(PatchSet::default()); + let mut root_hash = Blake2Hasher.empty_subtree_hash(256); + for chunk in kvs.chunks(chunk_size) { + let instructions = convert_to_writes(chunk); + let output = tree.extend_with_proofs(instructions.clone()); + output.verify_proofs(&Blake2Hasher, root_hash, &instructions); + root_hash = output.root_hash().unwrap(); } + assert_eq!(root_hash, *expected_hash); } -#[test] -fn entry_proofs_are_computed_correctly_with_intermediate_commits() { +#[test_casing(4, [10, 17, 28, 42])] +fn entry_proofs_are_computed_correctly_with_intermediate_commits(chunk_size: usize) { let (kvs, _) = &*KVS_AND_HASH; let all_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); - for chunk_size in [10, 17, 28, 42] { - println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); - - let mut tree = MerkleTree::new(PatchSet::default()); - let mut root_hashes = vec![]; - for chunk in kvs.chunks(chunk_size) { - let output = tree.extend(chunk.to_vec()); - root_hashes.push(output.root_hash); - - let version = root_hashes.len() - 1; - let entries = tree.entries_with_proofs(version as u64, &all_keys).unwrap(); - assert_eq!(entries.len(), all_keys.len()); - for (i, (key, entry)) in all_keys.iter().zip(entries).enumerate() { - assert_eq!(entry.base.is_empty(), i >= (version + 1) * chunk_size); - entry.verify(&Blake2Hasher, *key, output.root_hash); - } + let mut tree = MerkleTree::new(PatchSet::default()); + let mut root_hashes = vec![]; + for chunk in kvs.chunks(chunk_size) { + let output = tree.extend(chunk.to_vec()); + root_hashes.push(output.root_hash); + + let version = root_hashes.len() - 1; + let entries = tree.entries_with_proofs(version as u64, &all_keys).unwrap(); + assert_eq!(entries.len(), all_keys.len()); + for (i, (key, entry)) in all_keys.iter().zip(entries).enumerate() { + assert_eq!(entry.base.is_empty(), i >= (version + 1) * chunk_size); + 
entry.verify(&Blake2Hasher, *key, output.root_hash); } + } - // Check all tree versions. - for (version, root_hash) in root_hashes.into_iter().enumerate() { - let entries = tree.entries_with_proofs(version as u64, &all_keys).unwrap(); - assert_eq!(entries.len(), all_keys.len()); - for (i, (key, entry)) in all_keys.iter().zip(entries).enumerate() { - assert_eq!(entry.base.is_empty(), i >= (version + 1) * chunk_size); - entry.verify(&Blake2Hasher, *key, root_hash); - } + // Check all tree versions. + for (version, root_hash) in root_hashes.into_iter().enumerate() { + let entries = tree.entries_with_proofs(version as u64, &all_keys).unwrap(); + assert_eq!(entries.len(), all_keys.len()); + for (i, (key, entry)) in all_keys.iter().zip(entries).enumerate() { + assert_eq!(entry.base.is_empty(), i >= (version + 1) * chunk_size); + entry.verify(&Blake2Hasher, *key, root_hash); } } } @@ -266,12 +247,9 @@ fn test_accumulated_commits(db: DB, chunk_size: usize) -> DB { db } -#[test] -fn accumulating_commits() { - for chunk_size in [3, 5, 10, 17, 28, 42] { - println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); - test_accumulated_commits(PatchSet::default(), chunk_size); - } +#[test_casing(6, [3, 5, 10, 17, 28, 42])] +fn accumulating_commits(chunk_size: usize) { + test_accumulated_commits(PatchSet::default(), chunk_size); } fn test_root_hash_computing_with_reverts(db: &mut impl Database) { @@ -379,44 +357,40 @@ fn root_hash_is_computed_correctly_with_key_updates() { test_root_hash_computing_with_key_updates(PatchSet::default()); } -#[test] -fn proofs_are_computed_correctly_with_key_updates() { +#[test_casing(5, [5, 10, 17, 28, 42])] +fn proofs_are_computed_correctly_with_key_updates(updated_keys: usize) { const RNG_SEED: u64 = 1_234; let (kvs, expected_hash) = &*KVS_AND_HASH; let mut rng = StdRng::seed_from_u64(RNG_SEED); - for updated_keys in [5, 10, 17, 28, 42] { - println!("Inserting 100 key-value pairs with {updated_keys} updates"); - - let 
old_instructions: Vec<_> = kvs[..updated_keys] - .iter() - .map(|(key, _)| (*key, TreeInstruction::Write(H256([255; 32])))) - .collect(); - // Move the updated keys to the random places in the `kvs` vector. - let mut writes = convert_to_writes(kvs); - let mut instructions = writes.split_off(updated_keys); - for updated_kv in writes { - let idx = rng.gen_range(0..=instructions.len()); - instructions.insert(idx, updated_kv); - } - - let mut tree = MerkleTree::new(PatchSet::default()); - let output = tree.extend_with_proofs(old_instructions.clone()); - let empty_tree_hash = Blake2Hasher.empty_subtree_hash(256); - output.verify_proofs(&Blake2Hasher, empty_tree_hash, &old_instructions); + let old_instructions: Vec<_> = kvs[..updated_keys] + .iter() + .map(|(key, _)| (*key, TreeInstruction::Write(H256([255; 32])))) + .collect(); + // Move the updated keys to the random places in the `kvs` vector. + let mut writes = convert_to_writes(kvs); + let mut instructions = writes.split_off(updated_keys); + for updated_kv in writes { + let idx = rng.gen_range(0..=instructions.len()); + instructions.insert(idx, updated_kv); + } - let root_hash = output.root_hash().unwrap(); - let output = tree.extend_with_proofs(instructions.clone()); - assert_eq!(output.root_hash(), Some(*expected_hash)); - output.verify_proofs(&Blake2Hasher, root_hash, &instructions); + let mut tree = MerkleTree::new(PatchSet::default()); + let output = tree.extend_with_proofs(old_instructions.clone()); + let empty_tree_hash = Blake2Hasher.empty_subtree_hash(256); + output.verify_proofs(&Blake2Hasher, empty_tree_hash, &old_instructions); - let keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); - let proofs = tree.entries_with_proofs(1, &keys).unwrap(); - for ((key, value), proof) in kvs.iter().zip(proofs) { - assert_eq!(proof.base.value_hash, *value); - proof.verify(&Blake2Hasher, *key, *expected_hash); - } + let root_hash = output.root_hash().unwrap(); + let output = 
tree.extend_with_proofs(instructions.clone()); + assert_eq!(output.root_hash(), Some(*expected_hash)); + output.verify_proofs(&Blake2Hasher, root_hash, &instructions); + + let keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + let proofs = tree.entries_with_proofs(1, &keys).unwrap(); + for ((key, value), proof) in kvs.iter().zip(proofs) { + assert_eq!(proof.base.value_hash, *value); + proof.verify(&Blake2Hasher, *key, *expected_hash); } } @@ -461,7 +435,8 @@ fn root_hash_equals_to_previous_implementation() { test_root_hash_equals_to_previous_implementation(&mut PatchSet::default()); } -fn test_range_proofs_simple(range_size: usize) { +#[test_casing(7, [2, 3, 5, 10, 17, 28, 42])] +fn range_proofs_with_multiple_existing_items(range_size: usize) { let (kvs, expected_hash) = &*KVS_AND_HASH; assert!(range_size >= 2 && range_size <= kvs.len()); @@ -493,20 +468,9 @@ fn test_range_proofs_simple(range_size: usize) { } } -#[test] -fn range_proofs_with_multiple_existing_items() { - for range_size in [2, 3, 5, 10, 17, 28, 42] { - println!("Testing range proofs with {range_size} items"); - test_range_proofs_simple(range_size); - } -} - -#[test] -fn range_proofs_for_almost_full_range() { - for range_size in 95..=100 { - println!("Testing range proofs with {range_size} items"); - test_range_proofs_simple(range_size); - } +#[test_casing(6, 95..=100)] +fn range_proofs_for_almost_full_range(range_size: usize) { + range_proofs_with_multiple_existing_items(range_size); } #[test] @@ -615,44 +579,28 @@ mod rocksdb { test_root_hash_computing_with_key_updates(harness.db); } - #[test] - fn root_hash_is_computed_correctly_with_intermediate_commits() { + #[test_casing(3, [3, 8, 21])] + fn root_hash_is_computed_correctly_with_intermediate_commits(chunk_size: usize) { let Harness { mut db, dir: _dir } = Harness::new(); - for chunk_size in [3, 8, 21] { - test_intermediate_commits(&mut db, chunk_size); + test_intermediate_commits(&mut db, chunk_size); - let raw_db = db.into_inner(); - 
let snapshot_name = format!("db-snapshot-{chunk_size}-chunked-commits"); - insta::assert_yaml_snapshot!(snapshot_name, DatabaseSnapshot::new(&raw_db)); - db = clean_db(raw_db); - } - } - - fn clean_db(raw_db: RocksDB) -> RocksDBWrapper { - // Clear the entire database instead of using `MerkleTree::truncate_versions()` - // so that it doesn't contain any junk that can influence snapshots. - let mut batch = raw_db.new_write_batch(); - let cf = MerkleTreeColumnFamily::Tree; - batch.delete_range_cf(cf, (&[] as &[_])..&u64::MAX.to_be_bytes()); - raw_db.write(batch).unwrap(); - RocksDBWrapper::from(raw_db) + let raw_db = db.into_inner(); + let snapshot_name = format!("db-snapshot-{chunk_size}-chunked-commits"); + insta::assert_yaml_snapshot!(snapshot_name, DatabaseSnapshot::new(&raw_db)); } - #[test] - fn snapshot_for_pruned_tree() { + #[test_casing(3, [3, 8, 21])] + fn snapshot_for_pruned_tree(chunk_size: usize) { let Harness { mut db, dir: _dir } = Harness::new(); - for chunk_size in [3, 8, 21] { - test_intermediate_commits(&mut db, chunk_size); - let (mut pruner, _) = MerkleTreePruner::new(&mut db, 0); - pruner.run_once(); - - let raw_db = db.into_inner(); - let snapshot_name = format!("db-snapshot-{chunk_size}-chunked-commits-pruned"); - let db_snapshot = DatabaseSnapshot::new(&raw_db); - assert!(db_snapshot.stale_keys.is_empty()); - insta::assert_yaml_snapshot!(snapshot_name, db_snapshot); - db = clean_db(raw_db); - } + test_intermediate_commits(&mut db, chunk_size); + let (mut pruner, _) = MerkleTreePruner::new(&mut db, 0); + pruner.run_once(); + + let raw_db = db.into_inner(); + let snapshot_name = format!("db-snapshot-{chunk_size}-chunked-commits-pruned"); + let db_snapshot = DatabaseSnapshot::new(&raw_db); + assert!(db_snapshot.stale_keys.is_empty()); + insta::assert_yaml_snapshot!(snapshot_name, db_snapshot); } #[test] @@ -674,14 +622,10 @@ mod rocksdb { assert_eq!(latest_kvs.len(), 1, "{latest_kvs:?}"); } - #[test] - fn accumulating_commits() { - let Harness { 
mut db, dir: _dir } = Harness::new(); - for chunk_size in [3, 5, 10, 17, 28, 42] { - println!("Inserting 100 key-value pairs in {chunk_size}-sized chunks"); - db = test_accumulated_commits(db, chunk_size); - MerkleTree::new(&mut db).truncate_recent_versions(0); - } + #[test_casing(6, [3, 5, 10, 17, 28, 42])] + fn accumulating_commits(chunk_size: usize) { + let harness = Harness::new(); + test_accumulated_commits(harness.db, chunk_size); } #[test] diff --git a/core/lib/merkle_tree/tests/integration/recovery.rs b/core/lib/merkle_tree/tests/integration/recovery.rs index 9a1cfee9591..fda57f78851 100644 --- a/core/lib/merkle_tree/tests/integration/recovery.rs +++ b/core/lib/merkle_tree/tests/integration/recovery.rs @@ -1,8 +1,9 @@ //! Tests for tree recovery. use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng}; -use zksync_crypto::hasher::blake2::Blake2Hasher; +use test_casing::test_casing; +use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ recovery::{MerkleTreeRecovery, RecoveryEntry}, Database, MerkleTree, PatchSet, PruneDatabase, ValueHash, @@ -10,6 +11,16 @@ use zksync_merkle_tree::{ use crate::common::{convert_to_writes, generate_key_value_pairs, TreeMap, KVS_AND_HASH}; +#[derive(Debug, Clone, Copy)] +enum RecoveryKind { + Linear, + Random, +} + +impl RecoveryKind { + const ALL: [Self; 2] = [Self::Linear, Self::Random]; +} + #[test] fn recovery_basics() { let (kvs, expected_hash) = &*KVS_AND_HASH; @@ -36,7 +47,7 @@ fn recovery_basics() { tree.verify_consistency(recovered_version).unwrap(); } -fn test_recovery_in_chunks(is_linear: bool, mut create_db: impl FnMut() -> DB) { +fn test_recovery_in_chunks(mut db: impl PruneDatabase, kind: RecoveryKind, chunk_size: usize) { let (kvs, expected_hash) = &*KVS_AND_HASH; let recovery_entries = kvs .iter() @@ -47,7 +58,7 @@ fn test_recovery_in_chunks(is_linear: bool, mut create_db: im leaf_index: i as u64 + 1, }); let mut recovery_entries: Vec<_> = recovery_entries.collect(); - if is_linear { + if 
matches!(kind, RecoveryKind::Linear) { recovery_entries.sort_unstable_by_key(|entry| entry.key); } let greatest_key = recovery_entries @@ -57,29 +68,25 @@ fn test_recovery_in_chunks(is_linear: bool, mut create_db: im .unwrap(); let recovered_version = 123; - for chunk_size in [6, 10, 17, 42] { - let mut db = create_db(); - let mut recovery = MerkleTreeRecovery::new(&mut db, recovered_version); - for (i, chunk) in recovery_entries.chunks(chunk_size).enumerate() { - if is_linear { - recovery.extend_linear(chunk.to_vec()); - } else { - recovery.extend_random(chunk.to_vec()); - } - if i % 3 == 1 { - recovery = MerkleTreeRecovery::new(&mut db, recovered_version); - // ^ Simulate recovery interruption and restart - } + let mut recovery = MerkleTreeRecovery::new(&mut db, recovered_version); + for (i, chunk) in recovery_entries.chunks(chunk_size).enumerate() { + match kind { + RecoveryKind::Linear => recovery.extend_linear(chunk.to_vec()), + RecoveryKind::Random => recovery.extend_random(chunk.to_vec()), } + if i % 3 == 1 { + recovery = MerkleTreeRecovery::new(&mut db, recovered_version); + // ^ Simulate recovery interruption and restart + } + } - assert_eq!(recovery.last_processed_key(), Some(greatest_key)); - assert_eq!(recovery.root_hash(), *expected_hash); + assert_eq!(recovery.last_processed_key(), Some(greatest_key)); + assert_eq!(recovery.root_hash(), *expected_hash); - let mut tree = recovery.finalize(); - tree.verify_consistency(recovered_version).unwrap(); - // Check that new tree versions can be built and function as expected. - test_tree_after_recovery(&mut tree, recovered_version, *expected_hash); - } + let mut tree = recovery.finalize(); + tree.verify_consistency(recovered_version).unwrap(); + // Check that new tree versions can be built and function as expected. 
+ test_tree_after_recovery(&mut tree, recovered_version, *expected_hash); } fn test_tree_after_recovery( @@ -128,14 +135,9 @@ fn test_tree_after_recovery( } } -#[test] -fn linear_recovery_in_chunks() { - test_recovery_in_chunks(true, PatchSet::default); -} - -#[test] -fn random_recovery_in_chunks() { - test_recovery_in_chunks(false, PatchSet::default); +#[test_casing(8, test_casing::Product((RecoveryKind::ALL, [6, 10, 17, 42])))] +fn recovery_in_chunks(kind: RecoveryKind, chunk_size: usize) { + test_recovery_in_chunks(PatchSet::default(), kind, chunk_size); } mod rocksdb { @@ -144,23 +146,10 @@ mod rocksdb { use super::*; use zksync_merkle_tree::RocksDBWrapper; - #[test] - fn linear_recovery_in_chunks() { + #[test_casing(8, test_casing::Product((RecoveryKind::ALL, [6, 10, 17, 42])))] + fn recovery_in_chunks(kind: RecoveryKind, chunk_size: usize) { let temp_dir = TempDir::new().unwrap(); - let mut counter = 0; - test_recovery_in_chunks(true, || { - counter += 1; - RocksDBWrapper::new(&temp_dir.path().join(counter.to_string())) - }); - } - - #[test] - fn random_recovery_in_chunks() { - let temp_dir = TempDir::new().unwrap(); - let mut counter = 0; - test_recovery_in_chunks(false, || { - counter += 1; - RocksDBWrapper::new(&temp_dir.path().join(counter.to_string())) - }); + let db = RocksDBWrapper::new(temp_dir.path()); + test_recovery_in_chunks(db, kind, chunk_size); } } diff --git a/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-21-chunked-commits.snap b/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-21-chunked-commits.snap index 69bec4fbe84..3f5eb579a28 100644 --- a/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-21-chunked-commits.snap +++ b/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-21-chunked-commits.snap @@ -1,6 +1,6 @@ --- -source: 
core/lib/merkle_tree2/tests/integration/merkle_tree.rs -assertion_line: 508 +source: core/lib/merkle_tree/tests/integration/merkle_tree.rs +assertion_line: 589 expression: "DatabaseSnapshot::new(&raw_db)" --- tree: @@ -193,15 +193,12 @@ stale_keys: "000000000000000100000000000000000140": "" "000000000000000100000000000000000150": "" "000000000000000100000000000000000160": "" - "000000000000000100000000000000000170": "" 0000000000000001000000000000000001b0: "" 0000000000000001000000000000000001d0: "" "0000000000000001000000000000000001e0": "" 0000000000000001000000000000000002b9: "" "0000000000000001000000000000000002e8": "" - "000000000000000200000000000000000100": "" "000000000000000200000000000000000110": "" - "000000000000000200000000000000000170": "" 0000000000000002000000000000000001a0: "" "0000000000000002000000000000000100": "" "000000000000000200000000000000010100": "" @@ -209,7 +206,6 @@ stale_keys: "000000000000000200000000000000010150": "" "000000000000000200000000000000010160": "" "000000000000000200000000000000010190": "" - 0000000000000002000000000000000101a0: "" 0000000000000002000000000000000101b0: "" 0000000000000002000000000000000101c0: "" 0000000000000002000000000000000101d0: "" @@ -217,11 +213,8 @@ stale_keys: 0000000000000002000000000000000101f0: "" "000000000000000200000000000000010260": "" "000000000000000300000000000000000170": "" - 0000000000000003000000000000000001d0: "" "000000000000000300000000000000010130": "" - 0000000000000003000000000000000101b0: "" "000000000000000300000000000000010294": "" - 0000000000000003000000000000000102b9: "" "0000000000000003000000000000000200": "" "000000000000000300000000000000020100": "" "000000000000000300000000000000020160": "" @@ -231,17 +224,12 @@ stale_keys: 0000000000000003000000000000000201c0: "" 0000000000000003000000000000000201d0: "" 0000000000000003000000000000000202d8: "" - "000000000000000400000000000000000150": "" "000000000000000400000000000000010140": "" - 
0000000000000004000000000000000101b0: "" "000000000000000400000000000000010248": "" "000000000000000400000000000000010295": "" - "0000000000000004000000000000000102e8": "" - "000000000000000400000000000000020100": "" "000000000000000400000000000000020110": "" "000000000000000400000000000000020120": "" "000000000000000400000000000000020150": "" - "000000000000000400000000000000020160": "" "0000000000000004000000000000000201e0": "" 0000000000000004000000000000000201f0: "" "000000000000000400000000000000020225": "" @@ -249,183 +237,6 @@ stale_keys: "000000000000000400000000000000030160": "" "000000000000000400000000000000030170": "" "000000000000000400000000000000030190": "" - 0000000000000004000000000000000301b0: "" 0000000000000004000000000000000301c0: "" 0000000000000004000000000000000301d0: "" - 0000000000000004000000000000000302dd: "" - "000000000000000500000000000000010130": "" - "0000000000000005000000000000000101e0": "" - "000000000000000500000000000000020120": "" - 0000000000000005000000000000000201c0: "" - "0000000000000005000000000000000400": "" - "000000000000000500000000000000040100": "" - "000000000000000500000000000000040150": "" - "000000000000000500000000000000040160": "" - 0000000000000005000000000000000401d0: "" - "000000000000000500000000000000040260": "" - "000000000000000600000000000000000110": "" - 0000000000000006000000000000000201a0: "" - "000000000000000600000000000000030120": "" - 0000000000000006000000000000000401a0: "" - 0000000000000006000000000000000401b0: "" - "0000000000000006000000000000000500": "" - "000000000000000600000000000000050100": "" - "000000000000000600000000000000050120": "" - 0000000000000006000000000000000501c0: "" - 0000000000000006000000000000000501d0: "" - "000000000000000600000000000000050225": "" - 0000000000000007000000000000000201f0: "" - "000000000000000700000000000000030100": "" - "000000000000000700000000000000040190": "" - "0000000000000007000000000000000401e0": "" - "000000000000000700000000000000050150": "" 
- "000000000000000700000000000000050160": "" - "0000000000000007000000000000000600": "" - "000000000000000700000000000000060100": "" - "000000000000000700000000000000060120": "" - 0000000000000007000000000000000601d0: "" - 0000000000000007000000000000000602d8: "" - 0000000000000008000000000000000401b0: "" - 0000000000000008000000000000000402b9: "" - 0000000000000008000000000000000601b0: "" - 0000000000000008000000000000000601c0: "" - "0000000000000008000000000000000700": "" - "000000000000000800000000000000070160": "" - "000000000000000800000000000000070190": "" - 0000000000000008000000000000000701d0: "" - 0000000000000008000000000000000702d8: "" - "000000000000000900000000000000010170": "" - 0000000000000009000000000000000101d0: "" - "000000000000000900000000000000030294": "" - "000000000000000900000000000000050130": "" - 0000000000000009000000000000000601a0: "" - "000000000000000900000000000000070100": "" - "0000000000000009000000000000000800": "" - "000000000000000900000000000000080190": "" - 0000000000000009000000000000000801b0: "" - 0000000000000009000000000000000801c0: "" - 000000000000000a00000000000000040140: "" - 000000000000000a00000000000000040248: "" - 000000000000000a00000000000000060160: "" - 000000000000000a000000000000000701e0: "" - 000000000000000a000000000000000900: "" - 000000000000000a00000000000000090130: "" - 000000000000000a00000000000000090170: "" - 000000000000000a00000000000000090190: "" - 000000000000000a000000000000000901a0: "" - 000000000000000a000000000000000901b0: "" - 000000000000000a000000000000000901c0: "" - 000000000000000b00000000000000060110: "" - 000000000000000b00000000000000060225: "" - 000000000000000b00000000000000070100: "" - 000000000000000b00000000000000070120: "" - 000000000000000b00000000000000070150: "" - 000000000000000b000000000000000701f0: "" - 000000000000000b000000000000000801d0: "" - 000000000000000b000000000000000a00: "" - 000000000000000b000000000000000a0140: "" - 000000000000000b000000000000000a0170: "" - 
000000000000000b000000000000000a0190: "" - 000000000000000b000000000000000a01b0: "" - 000000000000000b000000000000000a01e0: "" - 000000000000000c00000000000000030140: "" - 000000000000000c00000000000000030295: "" - 000000000000000c000000000000000501e0: "" - 000000000000000c000000000000000502e8: "" - 000000000000000c00000000000000080160: "" - 000000000000000c000000000000000901d0: "" - 000000000000000c000000000000000902dd: "" - 000000000000000c000000000000000a0190: "" - 000000000000000c000000000000000b00: "" - 000000000000000c000000000000000b0150: "" - 000000000000000c000000000000000b01f0: "" - 000000000000000d00000000000000000150: "" - 000000000000000d00000000000000040130: "" - 000000000000000d00000000000000060120: "" - 000000000000000d000000000000000c00: "" - 000000000000000e000000000000000b0100: "" - 000000000000000e000000000000000c01d0: "" - 000000000000000e000000000000000d00: "" - 000000000000000e000000000000000d0150: "" - 000000000000000f000000000000000701c0: "" - 000000000000000f000000000000000a0160: "" - 000000000000000f000000000000000a0260: "" - 000000000000000f000000000000000d0120: "" - 000000000000000f000000000000000e00: "" - 0000000000000010000000000000000601a0: "" - 0000000000000010000000000000000e01d0: "" - 0000000000000010000000000000000f00: "" - 0000000000000010000000000000000f01c0: "" - "000000000000001100000000000000020110": "" - "0000000000000011000000000000000e0100": "" - "0000000000000011000000000000001000": "" - 0000000000000012000000000000000b01b0: "" - "0000000000000012000000000000000e0150": "" - 0000000000000012000000000000000f0120: "" - 0000000000000012000000000000000f0225: "" - "0000000000000012000000000000001100": "" - 0000000000000013000000000000000c01e0: "" - 0000000000000013000000000000000f0160: "" - 0000000000000013000000000000001001d0: "" - 0000000000000013000000000000001002d8: "" - "0000000000000013000000000000001200": "" - 0000000000000014000000000000000701f0: "" - 0000000000000014000000000000000b0190: "" - 
"000000000000001400000000000000120120": "" - "0000000000000014000000000000001300": "" - 0000000000000015000000000000001001c0: "" - "000000000000001500000000000000110100": "" - 0000000000000015000000000000001201b0: "" - "0000000000000015000000000000001400": "" - 0000000000000016000000000000001301d0: "" - "000000000000001600000000000000140190": "" - "0000000000000016000000000000001500": "" - 0000000000000016000000000000001501b0: "" - "000000000000001700000000000000130160": "" - 0000000000000017000000000000001302d8: "" - "0000000000000017000000000000001600": "" - 0000000000000017000000000000001601b0: "" - 0000000000000017000000000000001601d0: "" - 0000000000000017000000000000001602b5: "" - "000000000000001800000000000000020170": "" - 0000000000000018000000000000001001a0: "" - "000000000000001800000000000000150100": "" - "0000000000000018000000000000001700": "" - 0000000000000019000000000000001501c0: "" - "0000000000000019000000000000001800": "" - "000000000000001900000000000000180100": "" - 000000000000001a00000000000000090294: "" - 000000000000001a000000000000000d0130: "" - 000000000000001a00000000000000160190: "" - 000000000000001a000000000000001801a0: "" - 000000000000001a000000000000001900: "" - 000000000000001b00000000000000180170: "" - 000000000000001b000000000000001a00: "" - 000000000000001b000000000000001a0130: "" - 000000000000001b000000000000001a0190: "" - 000000000000001c000000000000000c0140: "" - 000000000000001c000000000000000c0248: "" - 000000000000001c000000000000001301e0: "" - 000000000000001c000000000000001901c0: "" - 000000000000001c000000000000001b00: "" - 000000000000001d000000000000001701d0: "" - 000000000000001d000000000000001c00: "" - 000000000000001d000000000000001c0140: "" - 000000000000001d000000000000001c01e0: "" - "000000000000001e00000000000000120150": "" - "000000000000001e00000000000000120225": "" - "000000000000001e00000000000000140120": "" - 000000000000001e000000000000001b0170: "" - 000000000000001e000000000000001d00: "" - 
000000000000001f00000000000000110110: "" - 000000000000001f000000000000001401f0: "" - 000000000000001f000000000000001d0140: "" - 000000000000001f000000000000001e00: "" - "000000000000002000000000000000080295": "" - 0000000000000020000000000000001b0190: "" - "0000000000000020000000000000001e0150": "" - 0000000000000020000000000000001f00: "" - 0000000000000020000000000000001f01f0: "" - "000000000000002100000000000000170160": "" - "0000000000000021000000000000002000": "" diff --git a/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-8-chunked-commits.snap b/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-8-chunked-commits.snap index 954b4e75739..e2a98847269 100644 --- a/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-8-chunked-commits.snap +++ b/core/lib/merkle_tree/tests/integration/snapshots/integration__merkle_tree__rocksdb__db-snapshot-8-chunked-commits.snap @@ -1,6 +1,6 @@ --- -source: core/lib/merkle_tree2/tests/integration/merkle_tree.rs -assertion_line: 508 +source: core/lib/merkle_tree/tests/integration/merkle_tree.rs +assertion_line: 589 expression: "DatabaseSnapshot::new(&raw_db)" --- tree: @@ -231,8 +231,6 @@ stale_keys: "000000000000000100000000000000000170": "" 0000000000000001000000000000000001b0: "" "0000000000000001000000000000000001e0": "" - "000000000000000200000000000000000100": "" - "000000000000000200000000000000000170": "" "0000000000000002000000000000000100": "" "000000000000000200000000000000010100": "" "000000000000000200000000000000010120": "" @@ -242,10 +240,8 @@ stale_keys: 0000000000000003000000000000000101b0: "" 0000000000000003000000000000000102b9: "" "0000000000000003000000000000000200": "" - "000000000000000300000000000000020100": "" "000000000000000400000000000000000150": "" "000000000000000400000000000000010140": "" - 0000000000000004000000000000000101b0: "" 
"0000000000000004000000000000000102e8": "" "000000000000000400000000000000020100": "" "000000000000000400000000000000020160": "" @@ -256,7 +252,6 @@ stale_keys: 0000000000000004000000000000000301d0: "" 0000000000000004000000000000000302dd: "" "000000000000000500000000000000010130": "" - "0000000000000005000000000000000101e0": "" "000000000000000500000000000000020120": "" 0000000000000005000000000000000201c0: "" "0000000000000005000000000000000400": "" @@ -267,8 +262,6 @@ stale_keys: "000000000000000500000000000000040260": "" "000000000000000600000000000000000110": "" 0000000000000006000000000000000201a0: "" - "000000000000000600000000000000030120": "" - 0000000000000006000000000000000401a0: "" 0000000000000006000000000000000401b0: "" "0000000000000006000000000000000500": "" "000000000000000600000000000000050100": "" @@ -277,7 +270,6 @@ stale_keys: 0000000000000006000000000000000501d0: "" "000000000000000600000000000000050225": "" 0000000000000007000000000000000201f0: "" - "000000000000000700000000000000030100": "" "000000000000000700000000000000040190": "" "0000000000000007000000000000000401e0": "" "000000000000000700000000000000050150": "" @@ -287,8 +279,6 @@ stale_keys: "000000000000000700000000000000060120": "" 0000000000000007000000000000000601d0: "" 0000000000000007000000000000000602d8: "" - 0000000000000008000000000000000401b0: "" - 0000000000000008000000000000000402b9: "" 0000000000000008000000000000000601b0: "" 0000000000000008000000000000000601c0: "" "0000000000000008000000000000000700": "" @@ -297,29 +287,24 @@ stale_keys: 0000000000000008000000000000000701d0: "" 0000000000000008000000000000000702d8: "" "000000000000000900000000000000010170": "" - 0000000000000009000000000000000101d0: "" "000000000000000900000000000000030294": "" "000000000000000900000000000000050130": "" 0000000000000009000000000000000601a0: "" "000000000000000900000000000000070100": "" "0000000000000009000000000000000800": "" "000000000000000900000000000000080190": "" - 
0000000000000009000000000000000801b0: "" 0000000000000009000000000000000801c0: "" 000000000000000a00000000000000040140: "" 000000000000000a00000000000000040248: "" - 000000000000000a00000000000000060160: "" 000000000000000a000000000000000701e0: "" 000000000000000a000000000000000900: "" 000000000000000a00000000000000090130: "" 000000000000000a00000000000000090170: "" 000000000000000a00000000000000090190: "" 000000000000000a000000000000000901a0: "" - 000000000000000a000000000000000901b0: "" 000000000000000a000000000000000901c0: "" 000000000000000b00000000000000060110: "" 000000000000000b00000000000000060225: "" - 000000000000000b00000000000000070100: "" 000000000000000b00000000000000070120: "" 000000000000000b00000000000000070150: "" 000000000000000b000000000000000701f0: "" @@ -327,107 +312,11 @@ stale_keys: 000000000000000b000000000000000a00: "" 000000000000000b000000000000000a0140: "" 000000000000000b000000000000000a0170: "" - 000000000000000b000000000000000a0190: "" - 000000000000000b000000000000000a01b0: "" 000000000000000b000000000000000a01e0: "" - 000000000000000c00000000000000030140: "" 000000000000000c00000000000000030295: "" - 000000000000000c000000000000000501e0: "" - 000000000000000c000000000000000502e8: "" 000000000000000c00000000000000080160: "" - 000000000000000c000000000000000901d0: "" - 000000000000000c000000000000000902dd: "" 000000000000000c000000000000000a0190: "" 000000000000000c000000000000000b00: "" 000000000000000c000000000000000b0150: "" 000000000000000c000000000000000b01f0: "" - 000000000000000d00000000000000000150: "" - 000000000000000d00000000000000040130: "" - 000000000000000d00000000000000060120: "" - 000000000000000d000000000000000c00: "" - 000000000000000e000000000000000b0100: "" - 000000000000000e000000000000000c01d0: "" - 000000000000000e000000000000000d00: "" - 000000000000000e000000000000000d0150: "" - 000000000000000f000000000000000701c0: "" - 000000000000000f000000000000000a0160: "" - 000000000000000f000000000000000a0260: "" - 
000000000000000f000000000000000d0120: "" - 000000000000000f000000000000000e00: "" - 0000000000000010000000000000000601a0: "" - 0000000000000010000000000000000e01d0: "" - 0000000000000010000000000000000f00: "" - 0000000000000010000000000000000f01c0: "" - "000000000000001100000000000000020110": "" - "0000000000000011000000000000000e0100": "" - "0000000000000011000000000000001000": "" - 0000000000000012000000000000000b01b0: "" - "0000000000000012000000000000000e0150": "" - 0000000000000012000000000000000f0120: "" - 0000000000000012000000000000000f0225: "" - "0000000000000012000000000000001100": "" - 0000000000000013000000000000000c01e0: "" - 0000000000000013000000000000000f0160: "" - 0000000000000013000000000000001001d0: "" - 0000000000000013000000000000001002d8: "" - "0000000000000013000000000000001200": "" - 0000000000000014000000000000000701f0: "" - 0000000000000014000000000000000b0190: "" - "000000000000001400000000000000120120": "" - "0000000000000014000000000000001300": "" - 0000000000000015000000000000001001c0: "" - "000000000000001500000000000000110100": "" - 0000000000000015000000000000001201b0: "" - "0000000000000015000000000000001400": "" - 0000000000000016000000000000001301d0: "" - "000000000000001600000000000000140190": "" - "0000000000000016000000000000001500": "" - 0000000000000016000000000000001501b0: "" - "000000000000001700000000000000130160": "" - 0000000000000017000000000000001302d8: "" - "0000000000000017000000000000001600": "" - 0000000000000017000000000000001601b0: "" - 0000000000000017000000000000001601d0: "" - 0000000000000017000000000000001602b5: "" - "000000000000001800000000000000020170": "" - 0000000000000018000000000000001001a0: "" - "000000000000001800000000000000150100": "" - "0000000000000018000000000000001700": "" - 0000000000000019000000000000001501c0: "" - "0000000000000019000000000000001800": "" - "000000000000001900000000000000180100": "" - 000000000000001a00000000000000090294: "" - 000000000000001a000000000000000d0130: "" - 
000000000000001a00000000000000160190: "" - 000000000000001a000000000000001801a0: "" - 000000000000001a000000000000001900: "" - 000000000000001b00000000000000180170: "" - 000000000000001b000000000000001a00: "" - 000000000000001b000000000000001a0130: "" - 000000000000001b000000000000001a0190: "" - 000000000000001c000000000000000c0140: "" - 000000000000001c000000000000000c0248: "" - 000000000000001c000000000000001301e0: "" - 000000000000001c000000000000001901c0: "" - 000000000000001c000000000000001b00: "" - 000000000000001d000000000000001701d0: "" - 000000000000001d000000000000001c00: "" - 000000000000001d000000000000001c0140: "" - 000000000000001d000000000000001c01e0: "" - "000000000000001e00000000000000120150": "" - "000000000000001e00000000000000120225": "" - "000000000000001e00000000000000140120": "" - 000000000000001e000000000000001b0170: "" - 000000000000001e000000000000001d00: "" - 000000000000001f00000000000000110110: "" - 000000000000001f000000000000001401f0: "" - 000000000000001f000000000000001d0140: "" - 000000000000001f000000000000001e00: "" - "000000000000002000000000000000080295": "" - 0000000000000020000000000000001b0190: "" - "0000000000000020000000000000001e0150": "" - 0000000000000020000000000000001f00: "" - 0000000000000020000000000000001f01f0: "" - "000000000000002100000000000000170160": "" - "0000000000000021000000000000002000": "" From 7e95a3a66ea48be7b6059d34630e22c503399bdf Mon Sep 17 00:00:00 2001 From: Roman Brodetski Date: Sat, 25 Nov 2023 14:00:08 +0400 Subject: [PATCH 039/115] feat(witness-generator): add logs to leaf aggregation job (#542) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add some log lines to leaf aggreagtion prover job ## Why ❔ It doesn't save anything to the database and I want to understand why --- .../witness_generator/src/leaf_aggregation.rs | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/prover/witness_generator/src/leaf_aggregation.rs 
b/prover/witness_generator/src/leaf_aggregation.rs index 084a7bd2d4a..af742775c84 100644 --- a/prover/witness_generator/src/leaf_aggregation.rs +++ b/prover/witness_generator/src/leaf_aggregation.rs @@ -150,6 +150,11 @@ impl JobProcessor for LeafAggregationWitnessGenerator { let block_number = artifacts.block_number; let circuit_id = artifacts.circuit_id; let blob_urls = save_artifacts(artifacts, &*self.object_store).await; + tracing::info!( + "Saved leaf aggregation artifacts for block {} with circuit {}", + block_number.0, + circuit_id, + ); update_database( &self.prover_connection_pool, started_at, @@ -266,6 +271,12 @@ async fn update_database( blob_urls: BlobUrls, circuit_id: u8, ) { + tracing::info!( + "Updating database for job_id {}, block {} with circuit id {}", + job_id, + block_number.0, + circuit_id, + ); let mut prover_connection = prover_connection_pool.access_storage().await.unwrap(); let mut transaction = prover_connection.start_transaction().await.unwrap(); let number_of_dependent_jobs = blob_urls.circuit_ids_and_urls.len(); @@ -273,6 +284,13 @@ async fn update_database( .fri_witness_generator_dal() .protocol_version_for_l1_batch(block_number) .await; + tracing::info!( + "Inserting {} prover jobs for job_id {}, block {} with circuit id {}", + blob_urls.circuit_ids_and_urls.len(), + job_id, + block_number.0, + circuit_id, + ); transaction .fri_prover_jobs_dal() .insert_prover_jobs( @@ -283,6 +301,12 @@ async fn update_database( protocol_version_id, ) .await; + tracing::info!( + "Updating node aggregation jobs url for job_id {}, block {} with circuit id {}", + job_id, + block_number.0, + circuit_id, + ); transaction .fri_witness_generator_dal() .update_node_aggregation_jobs_url( @@ -293,11 +317,23 @@ async fn update_database( blob_urls.aggregations_urls, ) .await; + tracing::info!( + "Marking leaf aggregation job as successful for job id {}, block {} with circuit id {}", + job_id, + block_number.0, + circuit_id, + ); transaction 
.fri_witness_generator_dal() .mark_leaf_aggregation_as_successful(job_id, started_at.elapsed()) .await; + tracing::info!( + "Committing transaction for job_id {}, block {} with circuit id {}", + job_id, + block_number.0, + circuit_id, + ); transaction.commit().await.unwrap(); } From 925da43c3b85b4818cb910f286b0f69ae118d692 Mon Sep 17 00:00:00 2001 From: Roman Brodetski Date: Sun, 26 Nov 2023 11:51:39 +0400 Subject: [PATCH 040/115] chore(witness-generation): more logs (#543) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Additional logs in witness generation --- prover/witness_generator/src/leaf_aggregation.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/prover/witness_generator/src/leaf_aggregation.rs b/prover/witness_generator/src/leaf_aggregation.rs index af742775c84..e31a44c42aa 100644 --- a/prover/witness_generator/src/leaf_aggregation.rs +++ b/prover/witness_generator/src/leaf_aggregation.rs @@ -149,11 +149,17 @@ impl JobProcessor for LeafAggregationWitnessGenerator { ) -> anyhow::Result<()> { let block_number = artifacts.block_number; let circuit_id = artifacts.circuit_id; + tracing::info!( + "Saving leaf aggregation artifacts for block {} with circuit {}", + block_number.0, + circuit_id, + ); let blob_urls = save_artifacts(artifacts, &*self.object_store).await; tracing::info!( - "Saved leaf aggregation artifacts for block {} with circuit {}", + "Saved leaf aggregation artifacts for block {} with circuit {} (count: {})", block_number.0, circuit_id, + blob_urls.circuit_ids_and_urls.len(), ); update_database( &self.prover_connection_pool, From 0cab378886d4408a28bb1b71afb3e46c0b82a7c6 Mon Sep 17 00:00:00 2001 From: EmilLuta Date: Mon, 27 Nov 2023 12:56:30 +0100 Subject: [PATCH 041/115] fix(proof_data_handler): Feature flag state_diff_hash check (#545) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Feature flag `state_diff_hash` 
check to boojum only. ## Why ❔ This work is needed only on boojum capable deployments. Outside that, check `.expect("No state diff hash key")` will always fail. Discussion [here](https://matter-labs-workspace.slack.com/archives/C05EB746E8G/p1701080840186819). ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- .../proof_data_handler/request_processor.rs | 38 ++++++++++--------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs index d82e66f0cf5..f091993812b 100644 --- a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs +++ b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs @@ -192,24 +192,26 @@ impl RequestProcessor { let system_logs = serialize_commitments(&l1_batch.header.system_logs); let system_logs_hash = H256(keccak256(&system_logs)); - let state_diff_hash = l1_batch - .header - .system_logs - .into_iter() - .find(|elem| elem.0.key == u256_to_h256(2.into())) - .expect("No state diff hash key") - .0 - .value; - - if state_diff_hash != state_diff_hash_from_prover - || system_logs_hash != system_logs_hash_from_prover - { - let server_values = format!("system_logs_hash = {system_logs_hash}, state_diff_hash = {state_diff_hash}"); - let prover_values = format!("system_logs_hash = {system_logs_hash_from_prover}, state_diff_hash = {state_diff_hash_from_prover}"); - panic!( - "Auxilary output doesn't match, server values: {} prover values: {}", - server_values, prover_values - ); + if !is_pre_boojum { + let state_diff_hash = l1_batch + .header + .system_logs + .into_iter() + .find(|elem| elem.0.key == u256_to_h256(2.into())) + .expect("No state diff hash key") + .0 + .value; + + 
if state_diff_hash != state_diff_hash_from_prover + || system_logs_hash != system_logs_hash_from_prover + { + let server_values = format!("system_logs_hash = {system_logs_hash}, state_diff_hash = {state_diff_hash}"); + let prover_values = format!("system_logs_hash = {system_logs_hash_from_prover}, state_diff_hash = {state_diff_hash_from_prover}"); + panic!( + "Auxilary output doesn't match, server values: {} prover values: {}", + server_values, prover_values + ); + } } storage .proof_generation_dal() From 698dbc355af3220bbcb608ced2559362e96204de Mon Sep 17 00:00:00 2001 From: Roman Brodetski Date: Mon, 27 Nov 2023 16:17:05 +0400 Subject: [PATCH 042/115] feat(state-keeper): reapply computational gas limit (#544) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ for safety reason we want to rllback the removal of comp. gas limit --------- Signed-off-by: Danil Co-authored-by: Danil --- .../criteria/geometry_seal_criteria.rs | 23 ++++++++----------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs b/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs index a102796d12a..1ec0c66e4d7 100644 --- a/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs +++ b/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs @@ -119,20 +119,15 @@ impl MetricExtractor for MaxCyclesCriterion { impl MetricExtractor for ComputationalGasCriterion { const PROM_METRIC_CRITERION_NAME: &'static str = "computational_gas"; - fn limit_per_block(protocol_version_id: ProtocolVersionId) -> usize { - if protocol_version_id.is_pre_boojum() { - // We subtract constant to take into account that circuits may be not fully filled. - // This constant should be greater than number of circuits types - // but we keep it larger to be on the safe side. 
- const MARGIN_NUMBER_OF_CIRCUITS: usize = 100; - const MAX_NUMBER_OF_MUTLIINSTANCE_CIRCUITS: usize = - SCHEDULER_UPPER_BOUND as usize - MARGIN_NUMBER_OF_CIRCUITS; - - MAX_NUMBER_OF_MUTLIINSTANCE_CIRCUITS * ERGS_PER_CIRCUIT as usize - } else { - // In boojum there is no limit for computational gas. - usize::MAX - } + fn limit_per_block(_protocol_version_id: ProtocolVersionId) -> usize { + // We subtract constant to take into account that circuits may be not fully filled. + // This constant should be greater than number of circuits types + // but we keep it larger to be on the safe side. + const MARGIN_NUMBER_OF_CIRCUITS: usize = 100; + const MAX_NUMBER_OF_MUTLIINSTANCE_CIRCUITS: usize = + SCHEDULER_UPPER_BOUND as usize - MARGIN_NUMBER_OF_CIRCUITS; + + MAX_NUMBER_OF_MUTLIINSTANCE_CIRCUITS * ERGS_PER_CIRCUIT as usize } fn extract(metrics: &ExecutionMetrics, _writes: &DeduplicatedWritesMetrics) -> usize { From 0e45dea27671d3b8ccb27c94ef95677c27aacb09 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Mon, 27 Nov 2023 13:45:42 +0100 Subject: [PATCH 043/115] chore(main): release core 18.2.0 (#528) :robot: I have created a release *beep* *boop* --- ## [18.2.0](https://github.com/matter-labs/zksync-era/compare/core-v18.1.0...core-v18.2.0) (2023-11-27) ### Features * **en:** Implement gossip fetcher ([#371](https://github.com/matter-labs/zksync-era/issues/371)) ([a49b61d](https://github.com/matter-labs/zksync-era/commit/a49b61d7769f9dd7b4cbc4905f8f8a23abfb541c)) * **state-keeper:** reapply computational gas limit ([#544](https://github.com/matter-labs/zksync-era/issues/544)) ([698dbc3](https://github.com/matter-labs/zksync-era/commit/698dbc355af3220bbcb608ced2559362e96204de)) * **state-keeper:** Remove computational gas limit from boojum protocol version ([#536](https://github.com/matter-labs/zksync-era/issues/536)) ([e59a7c6](https://github.com/matter-labs/zksync-era/commit/e59a7c6552a9c99e56f0d37103386acac6a9c1b5)) ### 
Bug Fixes * **core:** differentiate l2 to l1 logs tree size for pre and post boojum batches ([#538](https://github.com/matter-labs/zksync-era/issues/538)) ([1e9e556](https://github.com/matter-labs/zksync-era/commit/1e9e55651a95b509b5dfd644b8f9f3c718e41804)) * **proof_data_handler:** Feature flag state_diff_hash check ([#545](https://github.com/matter-labs/zksync-era/issues/545)) ([0cab378](https://github.com/matter-labs/zksync-era/commit/0cab378886d4408a28bb1b71afb3e46c0b82a7c6)) * **prover:** use a more performant query to get next job for FRI prover ([#527](https://github.com/matter-labs/zksync-era/issues/527)) ([2cddf3c](https://github.com/matter-labs/zksync-era/commit/2cddf3c0fa786394161060445aa8a085173e3f71)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index be5d4e35e28..d3d97bf7edd 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "18.1.0", + "core": "18.2.0", "prover": "9.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index f64bd80a330..2132581d3ac 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,21 @@ # Changelog +## [18.2.0](https://github.com/matter-labs/zksync-era/compare/core-v18.1.0...core-v18.2.0) (2023-11-27) + + +### Features + +* **en:** Implement gossip fetcher ([#371](https://github.com/matter-labs/zksync-era/issues/371)) ([a49b61d](https://github.com/matter-labs/zksync-era/commit/a49b61d7769f9dd7b4cbc4905f8f8a23abfb541c)) +* **state-keeper:** reapply computational gas limit ([#544](https://github.com/matter-labs/zksync-era/issues/544)) 
([698dbc3](https://github.com/matter-labs/zksync-era/commit/698dbc355af3220bbcb608ced2559362e96204de)) +* **state-keeper:** Remove computational gas limit from boojum protocol version ([#536](https://github.com/matter-labs/zksync-era/issues/536)) ([e59a7c6](https://github.com/matter-labs/zksync-era/commit/e59a7c6552a9c99e56f0d37103386acac6a9c1b5)) + + +### Bug Fixes + +* **core:** differentiate l2 to l1 logs tree size for pre and post boojum batches ([#538](https://github.com/matter-labs/zksync-era/issues/538)) ([1e9e556](https://github.com/matter-labs/zksync-era/commit/1e9e55651a95b509b5dfd644b8f9f3c718e41804)) +* **proof_data_handler:** Feature flag state_diff_hash check ([#545](https://github.com/matter-labs/zksync-era/issues/545)) ([0cab378](https://github.com/matter-labs/zksync-era/commit/0cab378886d4408a28bb1b71afb3e46c0b82a7c6)) +* **prover:** use a more performant query to get next job for FRI prover ([#527](https://github.com/matter-labs/zksync-era/issues/527)) ([2cddf3c](https://github.com/matter-labs/zksync-era/commit/2cddf3c0fa786394161060445aa8a085173e3f71)) + ## [18.1.0](https://github.com/matter-labs/zksync-era/compare/core-v18.0.3...core-v18.1.0) (2023-11-20) From 9cada1a17469e876cbf82d923e3d59f21576ec94 Mon Sep 17 00:00:00 2001 From: EmilLuta Date: Mon, 27 Nov 2023 18:49:04 +0100 Subject: [PATCH 044/115] fix(house_keeper): Emit the correct circuit_id for aggregation round 2 (#547) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This is us hardcoding the circuit_id to 2 for aggregation_round 2. This fix is useless as soon as we change the config (not expected anytime soon). A real fix will follow where we address what is saved in database layer and this patch will be removed altogether. ## Why ❔ This is necessary to enable autoscaler to work (emits the correct data for prover groups, which in turn can be picked by autoscaler). 
## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- .../src/house_keeper/fri_prover_queue_monitor.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs index f962cf94a2e..ba731ede944 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs @@ -34,6 +34,19 @@ impl PeriodicJob for FriProverStatsReporter { let stats = conn.fri_prover_jobs_dal().get_prover_jobs_stats().await; for ((circuit_id, aggregation_round), stats) in stats.into_iter() { + // BEWARE, HERE BE DRAGONS. + // In database, the circuit_id stored is the circuit for which the aggregation is done, + // not the circuit which is running. + // There is a single node level aggregation circuit, which is circuit 2. + // This can aggregate multiple leaf nodes (which may belong to different circuits). + // This reporting is a hacky forced way to use circuit_id 2 which will solve autoscalers. + // A proper fix will be later provided to solve this at database level. + let circuit_id = if aggregation_round == 2 { + 2 + } else { + circuit_id + }; + let group_id = self .config .get_group_id_for_circuit_id_and_aggregation_round(circuit_id, aggregation_round) From 198deda685a5bf44dc41911fe7b7797a219aa29c Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Tue, 28 Nov 2023 14:46:49 +0400 Subject: [PATCH 045/115] feat(contract-verifier): Add zkvyper 1.3.13 (#552) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adds zkvyper 1.3.13 to contract verifier ## Why ❔ People need it! 
## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/ci-core-reusable.yml | 8 ++++---- core/tests/ts-integration/hardhat.config.ts | 2 +- .../tests/api/contract-verification.test.ts | 2 +- docker/contract-verifier/Dockerfile | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 91ecac350db..7ad0e54074c 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -128,10 +128,10 @@ jobs: mv vyper0.3.3 $(pwd)/etc/vyper-bin/0.3.3/vyper chmod +x $(pwd)/etc/vyper-bin/0.3.3/vyper - mkdir -p $(pwd)/etc/zkvyper-bin/v1.3.11 - wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-v1.3.11 - mv zkvyper-linux-amd64-musl-v1.3.11 $(pwd)/etc/zkvyper-bin/v1.3.11/zkvyper - chmod +x $(pwd)/etc/zkvyper-bin/v1.3.11/zkvyper + mkdir -p $(pwd)/etc/zkvyper-bin/v1.3.13 + wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-v1.3.13 + mv zkvyper-linux-amd64-musl-v1.3.13 $(pwd)/etc/zkvyper-bin/v1.3.13/zkvyper + chmod +x $(pwd)/etc/zkvyper-bin/v1.3.13/zkvyper - name: Start services run: | diff --git a/core/tests/ts-integration/hardhat.config.ts b/core/tests/ts-integration/hardhat.config.ts index 3619a67732c..166feea91d9 100644 --- a/core/tests/ts-integration/hardhat.config.ts +++ b/core/tests/ts-integration/hardhat.config.ts @@ -11,7 +11,7 @@ export default { } }, zkvyper: { - version: '1.3.11', + version: '1.3.13', compilerSource: 'binary' }, networks: { diff --git a/core/tests/ts-integration/tests/api/contract-verification.test.ts b/core/tests/ts-integration/tests/api/contract-verification.test.ts index e2da33ff5a6..cfda8a81074 100644 --- 
a/core/tests/ts-integration/tests/api/contract-verification.test.ts +++ b/core/tests/ts-integration/tests/api/contract-verification.test.ts @@ -12,7 +12,7 @@ const DATE_REGEX = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?/; const ZKSOLC_VERSION = 'v1.3.16'; const SOLC_VERSION = '0.8.21'; -const ZKVYPER_VERSION = 'v1.3.11'; +const ZKVYPER_VERSION = 'v1.3.13'; const VYPER_VERSION = '0.3.3'; type HttpMethod = 'POST' | 'GET'; diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 618a9ba2fc1..1f244b38906 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -34,7 +34,7 @@ RUN skip_versions="v1.3.12 v1.3.15" && \ done # install zkvyper 1.3.x -RUN for VERSION in $(seq -f "v1.3.%g" 9 11); do \ +RUN for VERSION in $(seq -f "v1.3.%g" 9 13); do \ mkdir -p /etc/zkvyper-bin/$VERSION && \ wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ From c12902ba93387790e08b0a819f41949b810d7856 Mon Sep 17 00:00:00 2001 From: EmilLuta Date: Tue, 28 Nov 2023 12:49:04 +0100 Subject: [PATCH 046/115] fix(prover_fri): Add index to speed up getting jobs (#509) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adds index `idx_prover_jobs_fri_queued_order2`. ## Why ❔ Database CPU consumption went super high, given enough such queries. Making these queries faster will help with database utilization and give us more headroom to add more provers in the future. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- ...0231117091340_add-index-for-prover-jobs-fri-queueing.down.sql | 1 + .../20231117091340_add-index-for-prover-jobs-fri-queueing.up.sql | 1 + 2 files changed, 2 insertions(+) create mode 100644 core/lib/dal/migrations/20231117091340_add-index-for-prover-jobs-fri-queueing.down.sql create mode 100644 core/lib/dal/migrations/20231117091340_add-index-for-prover-jobs-fri-queueing.up.sql diff --git a/core/lib/dal/migrations/20231117091340_add-index-for-prover-jobs-fri-queueing.down.sql b/core/lib/dal/migrations/20231117091340_add-index-for-prover-jobs-fri-queueing.down.sql new file mode 100644 index 00000000000..c5001f7d2a8 --- /dev/null +++ b/core/lib/dal/migrations/20231117091340_add-index-for-prover-jobs-fri-queueing.down.sql @@ -0,0 +1 @@ +DROP INDEX idx_prover_jobs_fri_queued_order2; diff --git a/core/lib/dal/migrations/20231117091340_add-index-for-prover-jobs-fri-queueing.up.sql b/core/lib/dal/migrations/20231117091340_add-index-for-prover-jobs-fri-queueing.up.sql new file mode 100644 index 00000000000..7571d604e38 --- /dev/null +++ b/core/lib/dal/migrations/20231117091340_add-index-for-prover-jobs-fri-queueing.up.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS idx_prover_jobs_fri_queued_order2 ON public.prover_jobs_fri USING btree (l1_batch_number, aggregation_round DESC, id) WHERE (status = 'queued'::text) From a0688141f22cd98ab1fc17a715ccca46ac2a6d17 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Tue, 28 Nov 2023 12:51:21 +0100 Subject: [PATCH 047/115] chore(main): release core 18.3.0 (#549) :robot: I have created a release *beep* *boop* --- ## [18.3.0](https://github.com/matter-labs/zksync-era/compare/core-v18.2.0...core-v18.3.0) (2023-11-28) ### Features * **contract-verifier:** Add zkvyper 1.3.13 ([#552](https://github.com/matter-labs/zksync-era/issues/552)) ([198deda](https://github.com/matter-labs/zksync-era/commit/198deda685a5bf44dc41911fe7b7797a219aa29c)) ### Bug Fixes * **house_keeper:** Emit the 
correct circuit_id for aggregation round 2 ([#547](https://github.com/matter-labs/zksync-era/issues/547)) ([9cada1a](https://github.com/matter-labs/zksync-era/commit/9cada1a17469e876cbf82d923e3d59f21576ec94)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index d3d97bf7edd..7deaec6a597 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "18.2.0", + "core": "18.3.0", "prover": "9.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 2132581d3ac..1b182d12dfc 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## [18.3.0](https://github.com/matter-labs/zksync-era/compare/core-v18.2.0...core-v18.3.0) (2023-11-28) + + +### Features + +* **contract-verifier:** Add zkvyper 1.3.13 ([#552](https://github.com/matter-labs/zksync-era/issues/552)) ([198deda](https://github.com/matter-labs/zksync-era/commit/198deda685a5bf44dc41911fe7b7797a219aa29c)) + + +### Bug Fixes + +* **house_keeper:** Emit the correct circuit_id for aggregation round 2 ([#547](https://github.com/matter-labs/zksync-era/issues/547)) ([9cada1a](https://github.com/matter-labs/zksync-era/commit/9cada1a17469e876cbf82d923e3d59f21576ec94)) + ## [18.2.0](https://github.com/matter-labs/zksync-era/compare/core-v18.1.0...core-v18.2.0) (2023-11-27) From 9b701e70a4046cf9a84ce264d71e4ae0e8835f88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Rodr=C3=ADguez=20Chatruc?= <49622509+jrchatruc@users.noreply.github.com> Date: Tue, 28 Nov 2023 15:56:04 +0300 Subject: [PATCH 048/115] docs: Improve CPU prover setup documentation (#258) 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ This PR expands the documentation on how to run the stack with the CPU prover, also adding a small script that will automatically generate the setup data needed so it's more streamlined. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: juan.mv Co-authored-by: igamigo Co-authored-by: Nacho Co-authored-by: Juan-M-V <102986292+Juan-M-V@users.noreply.github.com> Co-authored-by: IAvecilla --- etc/env/base/fri_prover.toml | 2 +- prover/prover_fri/README.md | 205 +++++++++++++++++++------- prover/setup.sh | 44 ++++++ prover/witness_generator/src/main.rs | 206 +++++++++++++++------------ 4 files changed, 318 insertions(+), 139 deletions(-) create mode 100755 prover/setup.sh diff --git a/etc/env/base/fri_prover.toml b/etc/env/base/fri_prover.toml index 4e725673ab0..e714f66ed99 100644 --- a/etc/env/base/fri_prover.toml +++ b/etc/env/base/fri_prover.toml @@ -10,4 +10,4 @@ specialized_group_id=100 witness_vector_generator_thread_count=5 queue_capacity=10 witness_vector_receiver_port=4000 -shall_save_to_public_bucket=true \ No newline at end of file +shall_save_to_public_bucket=true diff --git a/prover/prover_fri/README.md b/prover/prover_fri/README.md index 240bd13d238..f46a89ae498 100644 --- a/prover/prover_fri/README.md +++ b/prover/prover_fri/README.md @@ -10,41 +10,90 @@ ## Proving a block using CPU prover locally +### Overview of the pipeline + +These are the main components to this process: + +- Sequencer +- Prover gateway +- Witness +- Prover +- Compressor + +All of them will be sharing information through a SQL database. 
The general idea is that the sequencer will produce +blocks and the gateway will place them into the database to be proven. Then, the rest of the components will pull jobs +from the database and do their part of the pipeline. + +```mermaid +flowchart LR + A["Operator"] --> |Produces block| F[Prover Gateway] + F --> |Inserts into DB| B["Postgress DB"] + B --> |Retrieves proven block \nafter compression| F + B --> C["Witness"] + C --- C1["Basic Circuits"] + C --- C2["Leaf Aggregation"] + C --- C3["Node Aggregation"] + C --- C4["Scheduler"] + C --> B + B --> D["Prover"] + D --> |Proven Block| B + B --> E["Compressor"] + E --> |Compressed block| B +``` + +### Prerequisites + +Make sure these dependencies are installed and available on your machine: [Installing dependencies](./setup-dev.md) Once +that is done, before starting, make sure you go into the root of the repository, then run + +``` +export ZKSYNC_HOME=$(pwd) +``` + +The whole setup below will NOT work if you don't have this environment variable properly set, as the entirety of the +`zk` CLI tool depends on it. + +### Block proving with CPU + Below steps can be used to prove a block on local machine using CPU prover. This is useful for debugging and testing Machine specs: - CPU: At least 8 physical cores -- RAM: 60GB of RAM(if you have lower RAM machine enable swap) +- RAM: 60GB of RAM (if you have lower RAM machine enable swap) - Disk: 400GB of free disk -1. Install the correct nightly version using command: `rustup install nightly-2023-08-21` -2. Generate the cpu setup data (no need to regenerate if it's already there). This will consume around 300Gb of disk. - Use these commands: +1. Install the correct nightly version using command: `rustup install nightly-2023-07-21` +2. Initialize DB and run migrations. 
Go into the root of the repository, then run + + ``` + zk init + ``` - ```markdown - for i in {1..13}; do zk f cargo run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i - --is_base_layer; done +3. Generate the cpu setup data (no need to regenerate if it's already there). This will consume around 300Gb of disk. + For this, move to the `prover` directory, and run - for i in {1..15}; do zk f cargo run --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i; done + ``` + ./setup.sh ``` -3. Initialize DB and run migrations: `zk init` + For the following steps, we recommend using `tmux` to run every command on a separate session, so you can attach to + and monitor logs for each one. -4. Override the following configuration in your `dev.env`: +4. Run the sequencer/operator. In the root of the repository: ``` - ETH_SENDER_SENDER_PROOF_SENDING_MODE=OnlyRealProofs - ETH_SENDER_SENDER_PROOF_LOADING_MODE=FriProofFromGcs - OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/server/artifacts - PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/prover/artifacts - FRI_PROVER_SETUP_DATA_PATH=/path/to/above-generated/cpu-setup-data + zk server --components=api,eth,tree,state_keeper,housekeeper,proof_data_handler ``` -5. Run server `zk server --components=api,eth,tree,state_keeper,housekeeper,proof_data_handler` to produce blocks to be - proven -6. Run prover gateway to fetch blocks to be proven from server : - `zk f cargo run --release --bin zksync_prover_fri_gateway` -7. Run 4 witness generators to generate witness for each round: + to produce blocks to be proven + +5. Run prover gateway to fetch blocks to be proven from server: + + ``` + zk f cargo run --release --bin zksync_prover_fri_gateway + ``` + +6. 
Run 4 witness generators to generate witness for each round: ``` API_PROMETHEUS_LISTENER_PORT=3116 zk f cargo run --release --bin zksync_witness_generator -- --round=basic_circuits @@ -53,49 +102,61 @@ Machine specs: API_PROMETHEUS_LISTENER_PORT=3119 zk f cargo run --release --bin zksync_witness_generator -- --round=scheduler ``` -8. Run prover to perform actual proving: `zk f cargo run --release --bin zksync_prover_fri` -9. Finally, run proof compressor to compress the proof to be sent on L1: - `zk f cargo run --release --bin zksync_proof_fri_compressor` + These 4 steps can be reduced to a single command + + ``` + API_PROMETHEUS_LISTENER_PORT=3116 zk f cargo run --release --bin zksync_witness_generator -- --all_rounds + ``` + + Note that this will automatically open the three ports after the one specified in enviromental variable, in this case + 3117, 3118 and 3119. + +7. Run prover to perform actual proving: + + ``` + zk f cargo run --release --bin zksync_prover_fri + ``` + +8. Finally, run proof compressor to compress the proof to be sent on L1: + + ``` + zk f cargo run --release --bin zksync_proof_fri_compressor + ``` ## Proving a block using GPU prover locally -Below steps can be used to prove a block on local machine using GPU prover, It requires Cuda 12.0 installation as -pre-requisite. This is useful for debugging and testing Machine specs: +Below steps can be used to prove a block on local machine using GPU prover. Running a GPU prover requires a Cuda 12.0 +installation as a pre-requisite, alongside these machine specs: - CPU: At least 8 physical cores - RAM: 16GB of RAM(if you have lower RAM machine enable swap) - Disk: 30GB of free disk - GPU: 1x Nvidia L4/T4 with 16GB of GPU RAM -1. Install the correct nightly version using command: `rustup install nightly-2023-08-21` -2. Generate the gpu setup data (no need to regenerate if it's already there). This will consume around 300Gb of disk. - Use these commands: +1. 
Install the correct nightly version using command: `rustup install nightly-2023-07-21` +2. Initialize DB and run migrations: `zk init` +3. Generate the GPU setup data (no need to regenerate if it's already there). This will consume around 300Gb of disk. + For this, move to the `prover` directory, and run + + ``` + ./setup.sh gpu + ``` - ```markdown - for i in {1..13}; do zk f cargo run --features "gpu" --release --bin zksync_setup_data_generator_fri -- - --numeric-circuit $i --is_base_layer done +4. Run the sequencer/operator. In the root of the repository: - for i in {1..15}; do zk f cargo run --features "gpu" --release --bin zksync_setup_data_generator_fri -- - --numeric-circuit $i done + ``` + zk server --components=api,eth,tree,state_keeper,housekeeper,proof_data_handler ``` -3. Initialize DB and run migrations: `zk init` + to produce blocks to be proven -4. Override the following configuration in your `dev.env`: +5. Run prover gateway to fetch blocks to be proven from server: ``` - ETH_SENDER_SENDER_PROOF_SENDING_MODE=OnlyRealProofs - ETH_SENDER_SENDER_PROOF_LOADING_MODE=FriProofFromGcs - OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/server/artifacts - PROVER_OBJECT_STORE_FILE_BACKED_BASE_PATH=/path/to/prover/artifacts - FRI_PROVER_SETUP_DATA_PATH=/path/to/above-generated/gpu-setup-data + zk f cargo run --release --bin zksync_prover_fri_gateway ``` -5. Run server `zk server --components=api,eth,tree,state_keeper,housekeeper,proof_data_handler` to produce blocks to be - proven -6. Run prover gateway to fetch blocks to be proven from server : - `zk f cargo run --release --bin zksync_prover_fri_gateway` -7. Run 4 witness generators to generate witness for each round: +6. Run 4 witness generators to generate witness for each round: ``` API_PROMETHEUS_LISTENER_PORT=3116 zk f cargo run --release --bin zksync_witness_generator -- --round=basic_circuits @@ -104,8 +165,9 @@ pre-requisite. 
This is useful for debugging and testing Machine specs: API_PROMETHEUS_LISTENER_PORT=3119 zk f cargo run --release --bin zksync_witness_generator -- --round=scheduler ``` -8. Run prover to perform actual proving: `zk f cargo run --features "gpu" --release --bin zksync_prover_fri` -9. Run 5 witness vector generators to feed jobs to GPU prover: +7. Run prover to perform actual proving: `zk f cargo run --features "gpu" --release --bin zksync_prover_fri` + +8. Run 5 witness vector generators to feed jobs to GPU prover: ``` FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3416 zk f cargo run --release --bin zksync_witness_vector_generator @@ -115,8 +177,53 @@ pre-requisite. This is useful for debugging and testing Machine specs: FRI_WITNESS_VECTOR_GENERATOR_PROMETHEUS_LISTENER_PORT=3420 zk f cargo run --release --bin zksync_witness_vector_generator ``` -10. Finally, run proof compressor to compress the proof to be sent on L1: - `zk f cargo run --release --bin zksync_proof_fri_compressor` +9. Finally, run proof compressor to compress the proof to be sent on L1: + `zk f cargo run --release --bin zksync_proof_fri_compressor` + +## Checking the status of the prover + +Once everything is running (either with the CPU or GPU prover), the server should have at least three blocks, and you +can see the first one by running + +``` +curl -X POST -H 'content-type: application/json' localhost:3050 -d '{"jsonrpc": "2.0", "id": 1, "method": "zks_getBlockDetails", "params": [0]}' +``` + +and then similarly for blocks number `1` and `2` by changing the parameters. + +The prover gateway will then fetch block number 1 to prove and start the entire proving pipeline, which starts out by +generating the witness, then passing it to the prover, then to the compressor to wrap it inside a SNARK to send to L1. 
+ +You can follow the status of this pipeline by running + +``` +zk status prover +``` + +This might take a while (around an hour and a half on my machine using the CPU prover), you can check on it once in a +while. A successful flow should output something like + +``` +==== FRI Prover status ==== +State keeper: First batch: 0, recent batch: 1 +L1 state: block verified: 1, block committed: 1 +Verification key hash on contract is 0x4be443afd605a782b6e56d199df2460a025c81b3dea144e135bece83612563f2 +Verification key in database is 0x4be443afd605a782b6e56d199df2460a025c81b3dea144e135bece83612563f2 +Verifier hash matches. +Verifier params on contract are 0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080, 0x72167c43a46cf38875b267d67716edc4563861364a3c03ab7aee73498421e828, 0x0000000000000000000000000000000000000000000000000000000000000000 +Verifcation params match. +Next block that should be verified is: 2 +Checking status of the proofs... +Proof progress for 1 : 111 successful, 0 failed, 0 in progress, 0 queued. Compression job status: successful +``` + +The most important thing here is the following line + +``` +L1 state: block verified: 1, block committed: 1 +``` + +which means the proof for the block was verified on L1. ## Performing circuit upgrade diff --git a/prover/setup.sh b/prover/setup.sh new file mode 100755 index 00000000000..af18548111f --- /dev/null +++ b/prover/setup.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# This script sets up the necessary data needed by the CPU/GPU FRI prover to be used locally. + +GPU_FLAG="" +if [ "$1" = "gpu" ]; then + GPU_FLAG='--features gpu' +fi + +if [[ -z "${ZKSYNC_HOME}" ]]; then + echo "Environment variable ZKSYNC_HOME is not set.
Make sure it's set and pointing to the root of this repository" + exit 1 +fi + +sed -i.backup 's/^proof_sending_mode=.*$/proof_sending_mode="OnlyRealProofs"/' ../etc/env/base/eth_sender.toml +sed -i.backup 's/^proof_loading_mode=.*$/proof_loading_mode="FriProofFromGcs"/' ../etc/env/base/eth_sender.toml +rm ../etc/env/base/eth_sender.toml.backup +sed -i.backup 's/^setup_data_path=.*$/setup_data_path="vk_setup_data_generator_server_fri\/data\/"/' ../etc/env/base/fri_prover.toml +rm ../etc/env/base/fri_prover.toml.backup +sed -i.backup 's/^universal_setup_path=.*$/universal_setup_path="..\/keys\/setup\/setup_2^26.key"/' ../etc/env/base/fri_proof_compressor.toml +rm ../etc/env/base/fri_proof_compressor.toml.backup + +zk config compile dev + +for i in {1..13} +do + if ! [ -f vk_setup_data_generator_server_fri/data/setup_basic_${i}_data.bin ]; then + zk f cargo run $GPU_FLAG --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i --is_base_layer + fi +done + +if ! [ -f vk_setup_data_generator_server_fri/data/setup_scheduler_data.bin ]; then + zk f cargo run $GPU_FLAG --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 1 +fi + +if ! [ -f vk_setup_data_generator_server_fri/data/setup_node_data.bin ]; then + zk f cargo run $GPU_FLAG --release --bin zksync_setup_data_generator_fri -- --numeric-circuit 2 +fi + +for i in {3..15} +do + if ! 
[ -f vk_setup_data_generator_server_fri/data/setup_leaf_${i}_data.bin ]; then + zk f cargo run $GPU_FLAG --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i + fi +done diff --git a/prover/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs index d5c48cc13e3..e6226c40131 100644 --- a/prover/witness_generator/src/main.rs +++ b/prover/witness_generator/src/main.rs @@ -1,16 +1,14 @@ #![feature(generic_const_exprs)] -use anyhow::Context as _; +use anyhow::{anyhow, Context as _}; use prometheus_exporter::PrometheusExporterConfig; use std::time::Instant; use structopt::StructOpt; use tokio::sync::watch; use zksync_config::configs::{FriWitnessGeneratorConfig, PostgresConfig, PrometheusConfig}; +use zksync_config::ObjectStoreConfig; use zksync_dal::ConnectionPool; -use zksync_env_config::{ - object_store::{ProverObjectStoreConfig, PublicObjectStoreConfig}, - FromEnv, -}; +use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; use zksync_prover_utils::get_stop_signal_receiver; use zksync_queued_job_processor::JobProcessor; @@ -41,9 +39,14 @@ struct Opt { /// Number of times witness generator should be run. #[structopt(short = "b", long = "batch_size")] batch_size: Option, - /// aggregation round for the witness generator. + /// Aggregation rounds options, they can be run individually or together. + /// + /// Single aggregation round for the witness generator. #[structopt(short = "r", long = "round")] - round: AggregationRound, + round: Option, + /// Start all aggregation rounds for the witness generator. 
+ #[structopt(short = "a", long = "all_rounds")] + all_rounds: bool, } #[tokio::main] @@ -106,13 +109,6 @@ async fn main() -> anyhow::Result<()> { .protocol_version_for(&vk_commitments) .await; - tracing::info!( - "initializing the {:?} witness generator, batch size: {:?} with protocol_versions: {:?}", - opt.round, - opt.batch_size, - protocol_versions - ); - // If batch_size is none, it means that the job is 'looping forever' (this is the usual setup in local network). // At the same time, we're reading the protocol_version only once at startup - so if there is no protocol version // read (this is often due to the fact, that the gateway was started too late, and it didn't put the updated protocol @@ -124,85 +120,117 @@ async fn main() -> anyhow::Result<()> { ); } - let prometheus_config = if use_push_gateway { - PrometheusExporterConfig::push( - prometheus_config.gateway_endpoint(), - prometheus_config.push_interval(), - ) - } else { - PrometheusExporterConfig::pull(prometheus_config.listener_port) - }; - let prometheus_task = prometheus_config.run(stop_receiver.clone()); - - let public_object_store_config = - PublicObjectStoreConfig::from_env().context("PublicObjectStoreConfig::from_env()")?; - let witness_generator_task = match opt.round { - AggregationRound::BasicCircuits => { - let public_blob_store = match config.shall_save_to_public_bucket { - false => None, - true => Some( - ObjectStoreFactory::new(public_object_store_config.0) - .create_store() - .await, - ), - }; - let generator = BasicWitnessGenerator::new( - config, - &store_factory, - public_blob_store, - connection_pool, - prover_connection_pool, - protocol_versions.clone(), - ) - .await; - generator.run(stop_receiver, opt.batch_size) - } - AggregationRound::LeafAggregation => { - let generator = LeafAggregationWitnessGenerator::new( - config, - &store_factory, - prover_connection_pool, - protocol_versions.clone(), - ) - .await; - generator.run(stop_receiver, opt.batch_size) - } - 
AggregationRound::NodeAggregation => { - let generator = NodeAggregationWitnessGenerator::new( - config, - &store_factory, - prover_connection_pool, - protocol_versions.clone(), - ) - .await; - generator.run(stop_receiver, opt.batch_size) + let rounds = match (opt.round, opt.all_rounds) { + (Some(round), false) => vec![round], + (None, true) => vec![ + AggregationRound::BasicCircuits, + AggregationRound::LeafAggregation, + AggregationRound::NodeAggregation, + AggregationRound::Scheduler, + ], + (Some(_), true) => { + return Err(anyhow!( + "Cannot set both the --all_rounds and --round flags. Choose one or the other." + )); } - AggregationRound::Scheduler => { - let generator = SchedulerWitnessGenerator::new( - config, - &store_factory, - prover_connection_pool, - protocol_versions, - ) - .await; - generator.run(stop_receiver, opt.batch_size) + (None, false) => { + return Err(anyhow!( + "Expected --all_rounds flag with no --round flag present" + )); } }; - let tasks = vec![ - tokio::spawn(prometheus_task), - tokio::spawn(witness_generator_task), - ]; - tracing::info!( - "initialized {:?} witness generator in {:?}", - opt.round, - started_at.elapsed() - ); - metrics::gauge!( - "server.init.latency", - started_at.elapsed(), - "stage" => format!("fri_witness_generator_{:?}", opt.round) - ); + let mut tasks = Vec::new(); + + for (i, round) in rounds.iter().enumerate() { + tracing::info!( + "initializing the {:?} witness generator, batch size: {:?} with protocol_versions: {:?}", + round, + opt.batch_size, + &protocol_versions + ); + + let prometheus_config = if use_push_gateway { + PrometheusExporterConfig::push( + prometheus_config.gateway_endpoint(), + prometheus_config.push_interval(), + ) + } else { + // u16 cast is safe since i is in range [0, 4) + PrometheusExporterConfig::pull(prometheus_config.listener_port + i as u16) + }; + let prometheus_task = prometheus_config.run(stop_receiver.clone()); + + let witness_generator_task = match round { + 
AggregationRound::BasicCircuits => { + let public_blob_store = match config.shall_save_to_public_bucket { + false => None, + true => Some( + ObjectStoreFactory::new( + ObjectStoreConfig::from_env() + .context("ObjectStoreConfig::from_env()")?, + ) + .create_store() + .await, + ), + }; + let generator = BasicWitnessGenerator::new( + config.clone(), + &store_factory, + public_blob_store, + connection_pool.clone(), + prover_connection_pool.clone(), + protocol_versions.clone(), + ) + .await; + generator.run(stop_receiver.clone(), opt.batch_size) + } + AggregationRound::LeafAggregation => { + let generator = LeafAggregationWitnessGenerator::new( + config.clone(), + &store_factory, + prover_connection_pool.clone(), + protocol_versions.clone(), + ) + .await; + generator.run(stop_receiver.clone(), opt.batch_size) + } + AggregationRound::NodeAggregation => { + let generator = NodeAggregationWitnessGenerator::new( + config.clone(), + &store_factory, + prover_connection_pool.clone(), + protocol_versions.clone(), + ) + .await; + generator.run(stop_receiver.clone(), opt.batch_size) + } + AggregationRound::Scheduler => { + let generator = SchedulerWitnessGenerator::new( + config.clone(), + &store_factory, + prover_connection_pool.clone(), + protocol_versions.clone(), + ) + .await; + generator.run(stop_receiver.clone(), opt.batch_size) + } + }; + + tasks.push(tokio::spawn(prometheus_task)); + tasks.push(tokio::spawn(witness_generator_task)); + + tracing::info!( + "initialized {:?} witness generator in {:?}", + round, + started_at.elapsed() + ); + metrics::gauge!( + "server.init.latency", + started_at.elapsed(), + "stage" => format!("fri_witness_generator_{:?}", round) + ); + } let mut stop_signal_receiver = get_stop_signal_receiver(); let graceful_shutdown = None::>; From 65a2cd91bbc71cf0d81b543f94c3123da93177d6 Mon Sep 17 00:00:00 2001 From: Igor Borodin Date: Tue, 28 Nov 2023 14:06:46 +0100 Subject: [PATCH 049/115] chore: Ability to override default CUDA arch for prover-gpu-fri 
(#553) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Adds the ability to build prover-gpu-fri Docker image for running on Nvidia GPUs that are not L4 (arch 89) - Uses lightweight CUDA runtime image for running the binary ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- docker/prover-gpu-fri/Dockerfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index 5b7787332cd..b3edd5758ab 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -2,6 +2,9 @@ FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder ARG DEBIAN_FRONTEND=noninteractive +ARG CUDA_ARCH=89 +ENV CUDAARCHS=${CUDA_ARCH} + RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ pkg-config build-essential libclang-dev && \ rm -rf /var/lib/apt/lists/* @@ -10,9 +13,6 @@ ENV RUSTUP_HOME=/usr/local/rustup \ CARGO_HOME=/usr/local/cargo \ PATH=/usr/local/cargo/bin:$PATH -# Building for Nvidia L4 -ENV CUDAARCHS=89 - RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ rustup install nightly-2023-08-21 && \ rustup default nightly-2023-08-21 @@ -26,7 +26,7 @@ COPY . . 
RUN cargo build --release --features "gpu" -FROM nvidia/cuda:12.0.0-devel-ubuntu22.04 +FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* From 9ea02a1b2e7c861882f10c8cbe1997f6bb96d9cf Mon Sep 17 00:00:00 2001 From: EmilLuta Date: Tue, 28 Nov 2023 18:05:35 +0100 Subject: [PATCH 050/115] fix(external-node): Check txs at insert time instead of read time (#555) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ENs are broken due to tx mismatch. This PR will allow ENs to restart and hopefully update the state. More information [here](https://www.notion.so/matterlabs/EN-is-broken-on-testnet-due-to-mismatched-txs-11c68bca2a4047cb8730e21e21f4ac41). This was discussed with @danil. ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .../src/sync_layer/batch_status_updater.rs | 50 ++++++++++++------- 1 file changed, 32 insertions(+), 18 deletions(-) diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs index c7a14134113..bbf1e45c310 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs @@ -139,24 +139,10 @@ impl BatchStatusUpdater { .unwrap() .number; - // We don't want to change the internal state until we actually persist the changes. 
let mut last_committed_l1_batch = self.last_committed_l1_batch; let mut last_proven_l1_batch = self.last_proven_l1_batch; let mut last_executed_l1_batch = self.last_executed_l1_batch; - assert!( - last_executed_l1_batch <= last_proven_l1_batch, - "Incorrect local state: executed batch must be proven" - ); - assert!( - last_proven_l1_batch <= last_committed_l1_batch, - "Incorrect local state: proven batch must be committed" - ); - assert!( - last_committed_l1_batch <= last_sealed_batch, - "Incorrect local state: unkonwn batch marked as committed" - ); - let mut batch = last_executed_l1_batch.next(); // In this loop we try to progress on the batch statuses, utilizing the same request to the node to potentially // update all three statuses (e.g. if the node is still syncing), but also skipping the gaps in the statuses @@ -289,7 +275,16 @@ impl BatchStatusUpdater { /// tables to be ever accessed by the `eth_sender` module. async fn apply_status_changes(&mut self, changes: StatusChanges) { let total_latency = EN_METRICS.batch_status_updater_loop_iteration.start(); - let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); + let mut connection = self.pool.access_storage_tagged("sync_layer").await.unwrap(); + + let mut transaction = connection.start_transaction().await.unwrap(); + + let last_sealed_batch = transaction + .blocks_dal() + .get_newest_l1_batch_header() + .await + .unwrap() + .number; for change in changes.commit.into_iter() { tracing::info!( @@ -298,7 +293,13 @@ impl BatchStatusUpdater { change.l1_tx_hash, change.happened_at ); - storage + + assert!( + change.number <= last_sealed_batch, + "Incorrect update state: unknown batch marked as committed" + ); + + transaction .eth_sender_dal() .insert_bogus_confirmed_eth_tx( change.number, @@ -317,7 +318,13 @@ impl BatchStatusUpdater { change.l1_tx_hash, change.happened_at ); - storage + + assert!( + change.number <= self.last_committed_l1_batch, + "Incorrect update state: proven batch must be 
committed" + ); + + transaction .eth_sender_dal() .insert_bogus_confirmed_eth_tx( change.number, @@ -337,7 +344,12 @@ impl BatchStatusUpdater { change.happened_at ); - storage + assert!( + change.number <= self.last_proven_l1_batch, + "Incorrect update state: executed batch must be proven" + ); + + transaction .eth_sender_dal() .insert_bogus_confirmed_eth_tx( change.number, @@ -350,6 +362,8 @@ impl BatchStatusUpdater { self.last_executed_l1_batch = change.number; } + transaction.commit().await.unwrap(); + total_latency.observe(); } } From ac400b14a72669bc214a37a5b79ba8518c7a063d Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Tue, 28 Nov 2023 18:21:23 +0100 Subject: [PATCH 051/115] ci: Adds manual trigger for release-please (#557) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adds manual trigger for release-please. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/release-please.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index 266a4db8158..c6f1fa5454f 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -2,6 +2,7 @@ on: push: branches: - main + workflow_dispatch: permissions: contents: write From 339e45035e85eba7d60b533221be92ce78643705 Mon Sep 17 00:00:00 2001 From: EmilLuta Date: Tue, 28 Nov 2023 19:53:30 +0100 Subject: [PATCH 052/115] fix: Update comments post-hotfix (#556) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. 
- [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --------- Signed-off-by: Danil Co-authored-by: Danil --- core/lib/zksync_core/src/sync_layer/batch_status_updater.rs | 3 --- deny.toml | 1 + 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs index bbf1e45c310..8e7ebe7a985 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs @@ -267,9 +267,6 @@ impl BatchStatusUpdater { } /// Inserts the provided status changes into the database. - /// This method is not transactional, so it can save only a part of the changes, which is fine: - /// after the restart the updater will continue from the last saved state. - /// /// The status changes are applied to the database by inserting bogus confirmed transactions (with /// some fields missing/substituted) only to satisfy API needs; this component doesn't expect the updated /// tables to be ever accessed by the `eth_sender` module. diff --git a/deny.toml b/deny.toml index b50b165b72f..7fa3c835088 100644 --- a/deny.toml +++ b/deny.toml @@ -8,6 +8,7 @@ yanked = "warn" notice = "warn" ignore = [ "RUSTSEC-2023-0018", + "RUSTSEC-2023-0071" ] [licenses] From f339d0af4539bea71469cef3c884fca8f3fc04bf Mon Sep 17 00:00:00 2001 From: koloz193 Date: Tue, 28 Nov 2023 14:16:40 -0500 Subject: [PATCH 053/115] chore: Testnet Boojum Upgrade (#514) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Preparing the upgrade for testnet to support boojum ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
--------- Co-authored-by: Stanislav Breadless --- contracts | 2 +- .../1699353977-boojum/testnet2/crypto.json | 11 + .../1699353977-boojum/testnet2/facetCuts.json | 177 ++++++++++ .../1699353977-boojum/testnet2/facets.json | 18 + .../1699353977-boojum/testnet2/l2Upgrade.json | 323 ++++++++++++++++++ .../testnet2/transactions.json | 235 +++++++++++++ 6 files changed, 765 insertions(+), 1 deletion(-) create mode 100644 etc/upgrades/1699353977-boojum/testnet2/crypto.json create mode 100644 etc/upgrades/1699353977-boojum/testnet2/facetCuts.json create mode 100644 etc/upgrades/1699353977-boojum/testnet2/facets.json create mode 100644 etc/upgrades/1699353977-boojum/testnet2/l2Upgrade.json create mode 100644 etc/upgrades/1699353977-boojum/testnet2/transactions.json diff --git a/contracts b/contracts index ff745288455..3e2bee96e41 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit ff74528845586bd175d74edc45dca1f1ae2ea454 +Subproject commit 3e2bee96e412bac7c0a58c4b919837b59e9af36e diff --git a/etc/upgrades/1699353977-boojum/testnet2/crypto.json b/etc/upgrades/1699353977-boojum/testnet2/crypto.json new file mode 100644 index 00000000000..a78fec325b0 --- /dev/null +++ b/etc/upgrades/1699353977-boojum/testnet2/crypto.json @@ -0,0 +1,11 @@ +{ + "verifier": { + "address": "0xB465882F67d236DcC0D090F78ebb0d838e9719D8", + "txHash": "0x72973954279049aa8f9e04f4fb61e628248cd9ccebe51ae93851aaecb0689979" + }, + "keys": { + "recursionNodeLevelVkHash": "0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080", + "recursionLeafLevelVkHash": "0x14628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/testnet2/facetCuts.json b/etc/upgrades/1699353977-boojum/testnet2/facetCuts.json new file mode 100644 index 00000000000..f2f6d4affa0 --- /dev/null +++ 
b/etc/upgrades/1699353977-boojum/testnet2/facetCuts.json @@ -0,0 +1,177 @@ +[ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x73fb9297", + "0x36d4eb84", + "0x27ae4c16", + "0x0551448c", + "0x8043760a", + "0xbeda4b12", + "0x17338945", + "0x587809c7" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0xfe10226d", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x1b60e626", + "0xe39d3bff", + "0x33ce93fe", + "0x0ef240a0", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0xa39980a0", + "0x46657fe9", + "0x18e3a941", + "0x3db920ce", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0c4dd810", + "0xce9dcf16", + "0x7739cbe7", + "0xa9a2d18a" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xe58bb639", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x409560DE546e057ce5bD5dB487EdF2bB5E785baB", + "selectors": [ + "0x0e18b681", + "0xe58bb639", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x17338945" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0xF3ACF6a03ea4a914B78Ec788624B25ceC37c14A4", + "selectors": [ + 
"0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x63b5EC36B09384fFA7106A80Ec7cfdFCa521fD08", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0xD059478a564dF1353A54AC0D0e7Fc55A90b92246", + "selectors": [ + "0x701f58c5", + "0xc3d93e7c", + "0x7f61885c", + "0x97c09d34" + ], + "action": 0, + "isFreezable": true + } +] \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/testnet2/facets.json b/etc/upgrades/1699353977-boojum/testnet2/facets.json new file mode 100644 index 00000000000..c21934fa6ae --- /dev/null +++ b/etc/upgrades/1699353977-boojum/testnet2/facets.json @@ -0,0 +1,18 @@ +{ + "ExecutorFacet": { + "address": "0xD059478a564dF1353A54AC0D0e7Fc55A90b92246", + "txHash": "0xe57894bba732935fcbdd52f373532f60e91c4f79157afc69a082d561ff6f2cbb" + }, + "AdminFacet": { + "address": "0x409560DE546e057ce5bD5dB487EdF2bB5E785baB", + "txHash": "0xce0c1b354d1eb7d3abecadc5d70b091ab1775e66f9b87ca7553b6d718cec4704" + }, + "GettersFacet": { + "address": "0xF3ACF6a03ea4a914B78Ec788624B25ceC37c14A4", + "txHash": "0xedea3c9fa4bb30115401e8f16477f712af8b0065e2682a3fcaf6249058b1442e" + }, + "MailboxFacet": { + "address": "0x63b5EC36B09384fFA7106A80Ec7cfdFCa521fD08", + "txHash": "0xd44c9cc34e5bbb112f8df1d3d60f82257bb2e44aa652dda0c9c82f9cb1a2df00" + } +} \ No newline at end of file diff --git 
a/etc/upgrades/1699353977-boojum/testnet2/l2Upgrade.json b/etc/upgrades/1699353977-boojum/testnet2/l2Upgrade.json new file mode 100644 index 00000000000..19977b5cc2a --- /dev/null +++ b/etc/upgrades/1699353977-boojum/testnet2/l2Upgrade.json @@ -0,0 +1,323 @@ +{ + "systemContracts": [ + { + "name": "EmptyContract", + "bytecodeHashes": [ + "0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7" + ], + "address": "0x0000000000000000000000000000000000000000" + }, + { + "name": "Ecrecover", + "bytecodeHashes": [ + "0x010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c" + ], + "address": "0x0000000000000000000000000000000000000001" + }, + { + "name": "SHA256", + "bytecodeHashes": [ + "0x010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d" + ], + "address": "0x0000000000000000000000000000000000000002" + }, + { + "name": "EcAdd", + "bytecodeHashes": [ + "0x010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d433" + ], + "address": "0x0000000000000000000000000000000000000006" + }, + { + "name": "EcMul", + "bytecodeHashes": [ + "0x0100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba571350675" + ], + "address": "0x0000000000000000000000000000000000000007" + }, + { + "name": "EmptyContract", + "bytecodeHashes": [ + "0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7" + ], + "address": "0x0000000000000000000000000000000000008001" + }, + { + "name": "AccountCodeStorage", + "bytecodeHashes": [ + "0x0100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c" + ], + "address": "0x0000000000000000000000000000000000008002" + }, + { + "name": "NonceHolder", + "bytecodeHashes": [ + "0x0100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd470" + ], + "address": "0x0000000000000000000000000000000000008003" + }, + { + "name": "KnownCodesStorage", + "bytecodeHashes": [ + "0x0100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e" + ], + "address": "0x0000000000000000000000000000000000008004" + }, + { + 
"name": "ImmutableSimulator", + "bytecodeHashes": [ + "0x01000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c5" + ], + "address": "0x0000000000000000000000000000000000008005" + }, + { + "name": "ContractDeployer", + "bytecodeHashes": [ + "0x010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb4212" + ], + "address": "0x0000000000000000000000000000000000008006" + }, + { + "name": "L1Messenger", + "bytecodeHashes": [ + "0x01000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa" + ], + "address": "0x0000000000000000000000000000000000008008" + }, + { + "name": "MsgValueSimulator", + "bytecodeHashes": [ + "0x0100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb" + ], + "address": "0x0000000000000000000000000000000000008009" + }, + { + "name": "L2EthToken", + "bytecodeHashes": [ + "0x01000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3" + ], + "address": "0x000000000000000000000000000000000000800a" + }, + { + "name": "SystemContext", + "bytecodeHashes": [ + "0x0100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436" + ], + "address": "0x000000000000000000000000000000000000800b" + }, + { + "name": "BootloaderUtilities", + "bytecodeHashes": [ + "0x010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0" + ], + "address": "0x000000000000000000000000000000000000800c" + }, + { + "name": "EventWriter", + "bytecodeHashes": [ + "0x01000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339" + ], + "address": "0x000000000000000000000000000000000000800d" + }, + { + "name": "Compressor", + "bytecodeHashes": [ + "0x010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496" + ], + "address": "0x000000000000000000000000000000000000800e" + }, + { + "name": "ComplexUpgrader", + "bytecodeHashes": [ + "0x0100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc" + ], + "address": "0x000000000000000000000000000000000000800f" + }, + { + "name": "Keccak256", + "bytecodeHashes": [ + 
"0x0100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a89" + ], + "address": "0x0000000000000000000000000000000000008010" + } + ], + "defaultAA": { + "name": "DefaultAccount", + "bytecodeHashes": [ + "0x01000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d" + ] + }, + "bootloader": { + "name": "Bootloader", + "bytecodeHashes": [ + "0x01000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b88" + ] + }, + "forcedDeployments": [ + { + "bytecodeHash": "0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7", + "newAddress": "0x0000000000000000000000000000000000000000", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c", + "newAddress": "0x0000000000000000000000000000000000000001", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d", + "newAddress": "0x0000000000000000000000000000000000000002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d433", + "newAddress": "0x0000000000000000000000000000000000000006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba571350675", + "newAddress": "0x0000000000000000000000000000000000000007", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7", + "newAddress": "0x0000000000000000000000000000000000008001", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c", + "newAddress": "0x0000000000000000000000000000000000008002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": 
"0x0100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd470", + "newAddress": "0x0000000000000000000000000000000000008003", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e", + "newAddress": "0x0000000000000000000000000000000000008004", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c5", + "newAddress": "0x0000000000000000000000000000000000008005", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb4212", + "newAddress": "0x0000000000000000000000000000000000008006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa", + "newAddress": "0x0000000000000000000000000000000000008008", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb", + "newAddress": "0x0000000000000000000000000000000000008009", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3", + "newAddress": "0x000000000000000000000000000000000000800a", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436", + "newAddress": "0x000000000000000000000000000000000000800b", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0", + "newAddress": "0x000000000000000000000000000000000000800c", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": 
"0x01000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339", + "newAddress": "0x000000000000000000000000000000000000800d", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496", + "newAddress": "0x000000000000000000000000000000000000800e", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc", + "newAddress": "0x000000000000000000000000000000000000800f", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a89", + "newAddress": "0x0000000000000000000000000000000000008010", + "value": 0, + "input": "0x", + "callConstructor": false + } + ], + "forcedDeploymentCalldata": "0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d00000000000000000000000
0000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c50000000000000000000000000000000000000000000000000000000000008005000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000
00000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "calldata": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c000000000000000000000000000000000000000000000000000000000000800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
0000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f
0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "tx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": "18", + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c000000000000000000000000000000000000000000000000000000000000800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
0000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f
0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + } +} \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/testnet2/transactions.json b/etc/upgrades/1699353977-boojum/testnet2/transactions.json new file mode 100644 index 00000000000..8abc06de518 --- /dev/null +++ b/etc/upgrades/1699353977-boojum/testnet2/transactions.json @@ -0,0 +1,235 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": "18", + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c000000000000000000000000000000000000000000000000000000000000800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
0000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f
0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x01000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b88", + "defaultAccountHash": "0x01000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d", + "verifier": "0xB465882F67d236DcC0D090F78ebb0d838e9719D8", + "verifierParams": { + "recursionNodeLevelVkHash": "0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080", + "recursionLeafLevelVkHash": "0x14628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x656491a9" + }, + "factoryDeps": [], + "newProtocolVersion": "18", + "newAllowList": 
"0x1ad02481F1F9E779Ec0C229799B05365E453Ce30" + }, + "l1upgradeCalldata": "0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c000000000000000000000000000000000000000000000000000000000656491a900000000000000000000000000000000000000000000000000000000000000120000000000000000000000001ad02481f1f9e779ec0c229799b05365e453ce3000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa20000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000
1460000000000000000000000000000000000000000000000000000000000000148000000000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000
000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec
703d0c56f87615964017739def4ab1ee606b8ec6458c00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa000000000000000000000000000000000000
00000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0x02B24CAabB9f1337a48A482BF5296449dDAAdA52", + "protocolVersion": "18", + "diamondUpgradeProposalId": { + "type": 
"BigNumber", + "hex": "0x0e" + }, + "upgradeTimestamp": "1701089705", + "transparentUpgrade": { + "facetCuts": [ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x73fb9297", + "0x36d4eb84", + "0x27ae4c16", + "0x0551448c", + "0x8043760a", + "0xbeda4b12", + "0x17338945", + "0x587809c7" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0xfe10226d", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x1b60e626", + "0xe39d3bff", + "0x33ce93fe", + "0x0ef240a0", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0xa39980a0", + "0x46657fe9", + "0x18e3a941", + "0x3db920ce", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0c4dd810", + "0xce9dcf16", + "0x7739cbe7", + "0xa9a2d18a" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xe58bb639", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x409560DE546e057ce5bD5dB487EdF2bB5E785baB", + "selectors": [ + "0x0e18b681", + "0xe58bb639", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x17338945" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": 
"0xF3ACF6a03ea4a914B78Ec788624B25ceC37c14A4", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x63b5EC36B09384fFA7106A80Ec7cfdFCa521fD08", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0xD059478a564dF1353A54AC0D0e7Fc55A90b92246", + "selectors": [ + "0x701f58c5", + "0xc3d93e7c", + "0x7f61885c", + "0x97c09d34" + ], + "action": 0, + "isFreezable": true + } + ], + "initAddress": "0x02B24CAabB9f1337a48A482BF5296449dDAAdA52", + "initCalldata": 
"0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c000000000000000000000000000000000000000000000000000000000656491a900000000000000000000000000000000000000000000000000000000000000120000000000000000000000001ad02481f1f9e779ec0c229799b05365e453ce3000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000001460000000000000000000000000000000000000000000000000000000000000148000000
000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca
2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c00000000000000000000000000000
000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa0000000000000000000000000000000000000000000000000000000000008008000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "proposeTransparentUpgradeCalldata": 
"0x8043760a0000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000006000000000000000000000000002b24caabb9f1337a48a482bf5296449ddaada5200000000000000000000000000000000000000000000000000000000000015400000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000007c000000000000000000000000000000000000000000000000000000000000009200000000000000000000000000000000000000000000000000000000000000a400000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000d60000000000000000000000000000000000000000000000000000000000000124000000000000000000000000000000000000000000000000000000000000013a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000873fb92970000000000000000000000000000000000000000000000000000000036d4eb840000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000000551448c000000000000000000000000000000000000000000000000000000008043760a00000000000000000000000000000000000000000000000000000000beda4b12000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000587809c70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000023cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b700000000000000000000000000000000000000000000000000000000fe10226d0000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b7000000000000000000000000000000000000000000000000000000001b60e62600000000000000000000000000000000000000000000000000000000e39d3bff0000000000000000000000000000000000000000000000000000000033ce93fe000000000000000000000000000000000000000000000000000000000ef240a000000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc500000000000000000000000000000000000000000000000000000000a39980a00000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a941000000000000000000000000000000000000000000000000000000003db920ce0000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d70000000000000
0000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000040c4dd81000000000000000000000000000000000000000000000000000000000ce9dcf16000000000000000000000000000000000000000000000000000000007739cbe700000000000000000000000000000000000000000000000000000000a9a2d18a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000005e58bb63900000000000000000000000000000
000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d00000000000000000000000000000000000000000000000000000000000000000000000000000000409560de546e057ce5bd5db487edf2bb5e785bab000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000a0e18b68100000000000000000000000000000000000000000000000000000000e58bb63900000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000000000000000000000000000f3acf6a03ea4a914b78ec788624b25cec37c14a40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000022cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b70000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d75000000000000000000000000000000000000000000000
00000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d0000000000000000000000000000000000000000000000000000000000000000000000000000000063b5ec36b09384ffa7106a80ec7cfdfca521fd0800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000
000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000000000000000000000000000d059478a564df1353a54ac0d0e7fc55a90b922460000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000017041ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c000000000000000000000000000000000000000000000000000000000656491a900000000000000000000000000000000000000000000000000000000000000120000000000000000000000001ad02481f1f9e779ec0c229799b05365e453ce300000000000000
0000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000001460000000000000000000000000000000000000000000000000000000000000148000000000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c00000000000000000000000000000000000000000000000000000000000000880000000000000000000000
00000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e0000000000000000000000000000000000000000000000000000000000008004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000
000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01
c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "executeUpgradeCalldata": "0x36d4eb8400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000002b24caabb9f1337a48a482bf5296449ddaada5200000000000000000000000000000000000000000000000000000000000015400000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000007c000000000000000000000000000000000000000000000000000000000000009200000000000000000000000000000000000000000000000000000000000000a400000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000d60000000000000000000000000000000000000000000000000000000000000124000000000000000000000000000000000000000000000000000000000000013a0000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000873fb92970000000000000000000000000000000000000000000000000000000036d4eb840000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000000551448c000000000000000000000000000000000000000000000000000000008043760a00000000000000000000000000000000000000000000000000000000beda4b12000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000587809c70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000023cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b700000000000000000000000000000000000000000000000000000000fe10226d0000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000
00000000000000000000000000000000000001b60e62600000000000000000000000000000000000000000000000000000000e39d3bff0000000000000000000000000000000000000000000000000000000033ce93fe000000000000000000000000000000000000000000000000000000000ef240a000000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc500000000000000000000000000000000000000000000000000000000a39980a00000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a941000000000000000000000000000000000000000000000000000000003db920ce0000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000040c4dd81000000000000000000000000000000000000000000000000000000000ce9dcf16000000000000000000000000000000000000000000000000000000007739cbe700000000000000000000000000000000000000000000000000000000a9a2d18a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000005e58bb63900000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d00000000000000000000000000000000000000000000000000000000000000000000000000000000409560de546e057ce5bd5db487edf2bb5e785bab000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000a0e18b68100000000000000000000000000000000000000000000000000000000e58bb63900000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d103000000000000000000000000000000000000000000000000000
00000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000000000000000000000000000f3acf6a03ea4a914b78ec788624b25cec37c14a40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000022cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b70000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe9000
0000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d0000000000000000000000000000000000000000000000000000000000000000000000000000000063b5ec36b09384ffa7106a80ec7cfdfca521fd0800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000000000000000000000000000d059478a564df1353a54ac0d0e7fc55a90b922460000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d3400000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000017041ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c000000000000000000000000000000000000000000000000000000000656491a900000000000000000000000000000000000000000000000000000000000000120000000000000000000000001ad02481f1f9e779ec0c229799b05365e453ce3000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa20000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000146000000000000000000000000000000000000
0000000000000000000000000148000000000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000
000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec
6458c00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa000000000000000000000000000000000000000000000000000000000000800800000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file From bd60f1cb8ae5a4a7545a5478fd2accd71e144b62 Mon Sep 17 00:00:00 2001 From: 
zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Tue, 28 Nov 2023 20:34:43 +0100 Subject: [PATCH 054/115] chore(main): release core 18.3.1 (#558) :robot: I have created a release *beep* *boop* --- ## [18.3.1](https://github.com/matter-labs/zksync-era/compare/core-v18.3.0...core-v18.3.1) (2023-11-28) ### Bug Fixes * **external-node:** Check txs at insert time instead of read time ([#555](https://github.com/matter-labs/zksync-era/issues/555)) ([9ea02a1](https://github.com/matter-labs/zksync-era/commit/9ea02a1b2e7c861882f10c8cbe1997f6bb96d9cf)) * Update comments post-hotfix ([#556](https://github.com/matter-labs/zksync-era/issues/556)) ([339e450](https://github.com/matter-labs/zksync-era/commit/339e45035e85eba7d60b533221be92ce78643705)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 7deaec6a597..f5774af5944 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "18.3.0", + "core": "18.3.1", "prover": "9.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 1b182d12dfc..6999f1448d6 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## [18.3.1](https://github.com/matter-labs/zksync-era/compare/core-v18.3.0...core-v18.3.1) (2023-11-28) + + +### Bug Fixes + +* **external-node:** Check txs at insert time instead of read time ([#555](https://github.com/matter-labs/zksync-era/issues/555)) ([9ea02a1](https://github.com/matter-labs/zksync-era/commit/9ea02a1b2e7c861882f10c8cbe1997f6bb96d9cf)) +* Update comments post-hotfix 
([#556](https://github.com/matter-labs/zksync-era/issues/556)) ([339e450](https://github.com/matter-labs/zksync-era/commit/339e45035e85eba7d60b533221be92ce78643705)) + ## [18.3.0](https://github.com/matter-labs/zksync-era/compare/core-v18.2.0...core-v18.3.0) (2023-11-28) From e8fd805c8be7980de7676bca87cfc2d445aab9e1 Mon Sep 17 00:00:00 2001 From: igamigo Date: Wed, 29 Nov 2023 02:36:00 -0300 Subject: [PATCH 055/115] fix: Change no pending batches 404 error into a success response (#279) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What ❔ When no pending batches were found on the server API, the response was a 404 which was not handled on the client (gateway) side and was treated as an HTTP client error, causing a misleading log line: ```2023-10-19T18:30:46.726413Z ERROR zksync_prover_fri_gateway::api_data_fetcher: HTTP request failed due to error: HTTP status client error (404 Not Found) for url (http://127.0.0.1:3320/proof_generation_data)``` When testing out the gpu proving pipeline for the ZK stack effort, the error was hit soon after starting all services. This PR changes this so in info trace is printed stating that there are currently no pending batches to be proven. ## Why ❔ There is some semantic dissonance on receiving an HTTP client error and a log line when calling the API, since it's possible and reasonable that there are no pending batches at some points. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- core/lib/types/src/prover_server_api/mod.rs | 2 +- .../proof_data_handler/request_processor.rs | 19 ++++++++++--------- .../src/api_data_fetcher.rs | 1 + .../src/proof_gen_data_fetcher.rs | 6 +++++- 4 files changed, 17 insertions(+), 11 deletions(-) diff --git a/core/lib/types/src/prover_server_api/mod.rs b/core/lib/types/src/prover_server_api/mod.rs index 84262b182c6..dc226f11d26 100644 --- a/core/lib/types/src/prover_server_api/mod.rs +++ b/core/lib/types/src/prover_server_api/mod.rs @@ -19,7 +19,7 @@ pub struct ProofGenerationDataRequest {} #[derive(Debug, Serialize, Deserialize)] pub enum ProofGenerationDataResponse { - Success(ProofGenerationData), + Success(Option), Error(String), } diff --git a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs index f091993812b..866990b31c9 100644 --- a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs +++ b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs @@ -31,7 +31,6 @@ pub(crate) struct RequestProcessor { } pub(crate) enum RequestProcessorError { - NoPendingBatches, ObjectStore(ObjectStoreError), Sqlx(SqlxError), } @@ -39,10 +38,6 @@ pub(crate) enum RequestProcessorError { impl IntoResponse for RequestProcessorError { fn into_response(self) -> Response { let (status_code, message) = match self { - Self::NoPendingBatches => ( - StatusCode::NOT_FOUND, - "No pending batches to process".to_owned(), - ), RequestProcessorError::ObjectStore(err) => { tracing::error!("GCS error: {:?}", err); ( @@ -88,15 +83,19 @@ impl RequestProcessor { ) -> Result, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); - let l1_batch_number = self + let l1_batch_number_result = self .pool .access_storage() .await .unwrap() .proof_generation_dal() .get_next_block_to_be_proven(self.config.proof_generation_timeout()) - .await - .ok_or(RequestProcessorError::NoPendingBatches)?; + 
.await; + + let l1_batch_number = match l1_batch_number_result { + Some(number) => number, + None => return Ok(Json(ProofGenerationDataResponse::Success(None))), // no batches pending to be proven + }; let blob = self .blob_store @@ -125,7 +124,9 @@ impl RequestProcessor { l1_verifier_config, }; - Ok(Json(ProofGenerationDataResponse::Success(proof_gen_data))) + Ok(Json(ProofGenerationDataResponse::Success(Some( + proof_gen_data, + )))) } pub(crate) async fn submit_proof( diff --git a/prover/prover_fri_gateway/src/api_data_fetcher.rs b/prover/prover_fri_gateway/src/api_data_fetcher.rs index 7b3a814837c..a009f1783f2 100644 --- a/prover/prover_fri_gateway/src/api_data_fetcher.rs +++ b/prover/prover_fri_gateway/src/api_data_fetcher.rs @@ -33,6 +33,7 @@ impl PeriodicApiStruct { Resp: DeserializeOwned, { tracing::info!("Sending request to {}", endpoint); + self.client .post(endpoint) .json(&request) diff --git a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs index e2ac2e42dd9..1f00c7f7429 100644 --- a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs +++ b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs @@ -33,6 +33,7 @@ impl PeriodicApiStruct { impl PeriodicApi for PeriodicApiStruct { type JobId = (); type Response = ProofGenerationDataResponse; + const SERVICE_NAME: &'static str = "ProofGenDataFetcher"; async fn get_next_request(&self) -> Option<(Self::JobId, ProofGenerationDataRequest)> { @@ -49,7 +50,10 @@ impl PeriodicApi for PeriodicApiStruct { async fn handle_response(&self, _: (), response: Self::Response) { match response { - ProofGenerationDataResponse::Success(data) => { + ProofGenerationDataResponse::Success(None) => { + tracing::info!("There are currently no pending batches to be proven"); + } + ProofGenerationDataResponse::Success(Some(data)) => { tracing::info!("Received proof gen data for: {:?}", data.l1_batch_number); self.save_proof_gen_data(data).await; } From 
beac0a85bb1535b05c395057171f197cd976bf82 Mon Sep 17 00:00:00 2001 From: Dustin Brickwood Date: Wed, 29 Nov 2023 10:00:27 -0600 Subject: [PATCH 056/115] feat: adds spellchecker workflow, and corrects misspelled words (#559) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Finishes the work started by @Deniallugo in https://github.com/matter-labs/zksync-era/pull/437 - Adds spellchecker workflow to prevent further misspellings - Corrects existing misspelled words ## Why ❔ - Ensures comments and inline documentation does not contain misspelled words for improved readability ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Signed-off-by: Danil Co-authored-by: Danil --- .github/pull_request_template.md | 1 + .github/workflows/check-spelling.yml | 24 + core/CHANGELOG.md | 2 +- core/bin/block_reverter/src/main.rs | 2 +- core/bin/external_node/src/config/mod.rs | 2 +- core/lib/basic_types/src/lib.rs | 2 +- core/lib/config/src/configs/api.rs | 2 +- core/lib/config/src/configs/chain.rs | 2 +- core/lib/constants/src/crypto.rs | 2 +- core/lib/constants/src/ethereum.rs | 2 +- core/lib/contracts/src/lib.rs | 6 +- core/lib/dal/src/connection/mod.rs | 8 +- core/lib/dal/src/contract_verification_dal.rs | 2 +- core/lib/dal/src/lib.rs | 2 +- core/lib/dal/src/storage_dal.rs | 2 +- core/lib/dal/src/transactions_dal.rs | 2 +- core/lib/dal/src/witness_generator_dal.rs | 2 +- core/lib/eth_client/src/lib.rs | 10 +- core/lib/eth_signer/src/json_rpc_signer.rs | 6 +- core/lib/eth_signer/src/pk_signer.rs | 2 +- core/lib/mempool/src/mempool_store.rs | 2 +- core/lib/merkle_tree/src/pruning.rs | 2 +- core/lib/merkle_tree/src/recovery.rs | 2 +- core/lib/merkle_tree/src/storage/patch.rs | 4 +- 
core/lib/merkle_tree/src/types/mod.rs | 6 +- core/lib/multivm/src/glue/mod.rs | 2 +- core/lib/multivm/src/glue/tracers/mod.rs | 20 +- .../types/errors/vm_revert_reason.rs | 2 +- core/lib/multivm/src/lib.rs | 2 +- .../src/versions/vm_1_3_2/bootloader_state.rs | 2 +- .../vm_1_3_2/errors/vm_revert_reason.rs | 2 +- .../src/versions/vm_1_3_2/oracle_tools.rs | 2 +- .../versions/vm_1_3_2/oracles/decommitter.rs | 4 +- .../vm_1_3_2/oracles/tracer/bootloader.rs | 2 +- .../versions/vm_1_3_2/oracles/tracer/call.rs | 2 +- .../versions/vm_1_3_2/oracles/tracer/utils.rs | 2 +- .../src/versions/vm_1_3_2/test_utils.rs | 4 +- .../src/versions/vm_1_3_2/transaction_data.rs | 56 +- .../src/versions/vm_1_3_2/vm_instance.rs | 2 +- .../vm_latest/bootloader_state/snapshot.rs | 6 +- .../versions/vm_latest/bootloader_state/tx.rs | 2 +- .../src/versions/vm_latest/constants.rs | 6 +- .../vm_latest/implementation/statistics.rs | 2 +- .../vm_latest/old_vm/oracles/decommitter.rs | 4 +- .../vm_latest/tests/tester/inner_state.rs | 4 +- .../src/versions/vm_latest/tracers/utils.rs | 2 +- .../types/internals/transaction_data.rs | 6 +- .../src/versions/vm_latest/utils/fee.rs | 2 +- .../src/versions/vm_latest/utils/overhead.rs | 54 +- core/lib/multivm/src/versions/vm_latest/vm.rs | 2 +- .../src/versions/vm_m5/bootloader_state.rs | 2 +- .../versions/vm_m5/errors/vm_revert_reason.rs | 2 +- .../src/versions/vm_m5/oracles/tracer.rs | 4 +- .../multivm/src/versions/vm_m5/test_utils.rs | 4 +- .../multivm/src/versions/vm_m5/vm_instance.rs | 2 +- .../src/versions/vm_m6/bootloader_state.rs | 2 +- .../versions/vm_m6/errors/vm_revert_reason.rs | 2 +- .../src/versions/vm_m6/oracle_tools.rs | 2 +- .../src/versions/vm_m6/oracles/decommitter.rs | 4 +- .../vm_m6/oracles/tracer/bootloader.rs | 2 +- .../src/versions/vm_m6/oracles/tracer/call.rs | 2 +- .../versions/vm_m6/oracles/tracer/utils.rs | 2 +- .../multivm/src/versions/vm_m6/test_utils.rs | 4 +- .../src/versions/vm_m6/transaction_data.rs | 56 +- 
.../multivm/src/versions/vm_m6/vm_instance.rs | 2 +- .../bootloader_state/snapshot.rs | 6 +- .../bootloader_state/tx.rs | 2 +- .../vm_refunds_enhancement/constants.rs | 4 +- .../implementation/statistics.rs | 2 +- .../old_vm/oracles/decommitter.rs | 4 +- .../tests/tester/inner_state.rs | 4 +- .../vm_refunds_enhancement/tracers/utils.rs | 2 +- .../types/internals/transaction_data.rs | 8 +- .../vm_refunds_enhancement/utils/fee.rs | 2 +- .../vm_refunds_enhancement/utils/overhead.rs | 54 +- .../src/versions/vm_refunds_enhancement/vm.rs | 2 +- .../bootloader_state/snapshot.rs | 6 +- .../vm_virtual_blocks/bootloader_state/tx.rs | 2 +- .../versions/vm_virtual_blocks/constants.rs | 4 +- .../implementation/statistics.rs | 2 +- .../old_vm/oracles/decommitter.rs | 4 +- .../tests/tester/inner_state.rs | 4 +- .../vm_virtual_blocks/tracers/utils.rs | 2 +- .../types/internals/transaction_data.rs | 6 +- .../versions/vm_virtual_blocks/utils/fee.rs | 2 +- .../vm_virtual_blocks/utils/overhead.rs | 54 +- .../src/versions/vm_virtual_blocks/vm.rs | 2 +- core/lib/state/src/cache/metrics.rs | 2 +- core/lib/state/src/in_memory.rs | 2 +- core/lib/state/src/lib.rs | 2 +- core/lib/storage/src/db.rs | 4 +- core/lib/storage/src/metrics.rs | 2 +- core/lib/types/src/api/mod.rs | 6 +- core/lib/types/src/block.rs | 2 +- core/lib/types/src/storage/writes/mod.rs | 4 +- core/lib/types/src/transaction_request.rs | 8 +- .../eip712_signature/typed_structure.rs | 2 +- .../src/tx/primitives/packed_eth_signature.rs | 8 +- core/lib/utils/src/bytecode.rs | 2 +- core/lib/utils/src/convert.rs | 2 +- core/lib/vlog/src/lib.rs | 2 +- .../contract_verification/api_decl.rs | 2 +- .../api_server/execution_sandbox/execute.rs | 2 +- .../api_server/execution_sandbox/tracers.rs | 4 +- .../api_server/execution_sandbox/validate.rs | 2 +- .../src/api_server/tx_sender/mod.rs | 8 +- .../src/eth_sender/eth_tx_manager.rs | 2 +- .../lib/zksync_core/src/eth_sender/metrics.rs | 2 +- .../house_keeper/fri_prover_queue_monitor.rs | 2 
+- core/lib/zksync_core/src/lib.rs | 2 +- .../src/metadata_calculator/helpers.rs | 2 +- core/lib/zksync_core/src/metrics.rs | 4 +- .../lib/zksync_core/src/reorg_detector/mod.rs | 10 +- .../src/state_keeper/batch_executor/mod.rs | 2 +- .../zksync_core/src/state_keeper/io/mod.rs | 2 +- .../zksync_core/src/state_keeper/metrics.rs | 6 +- .../zksync_core/src/witness_generator/mod.rs | 8 +- core/tests/loadnext/src/account/mod.rs | 2 +- core/tests/loadnext/src/account_pool.rs | 2 +- core/tests/loadnext/src/command/api.rs | 2 +- docs/advanced/pubdata.md | 2 +- prover/witness_generator/README.md | 4 +- spellcheck/era.cfg | 69 ++ spellcheck/era.dic | 605 ++++++++++++++++++ 124 files changed, 1029 insertions(+), 328 deletions(-) create mode 100644 .github/workflows/check-spelling.yml create mode 100644 spellcheck/era.cfg create mode 100644 spellcheck/era.dic diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index dba6efd2fdf..7b828d1ca89 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -18,3 +18,4 @@ - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. +- [ ] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. 
diff --git a/.github/workflows/check-spelling.yml b/.github/workflows/check-spelling.yml new file mode 100644 index 00000000000..76fd6352c8e --- /dev/null +++ b/.github/workflows/check-spelling.yml @@ -0,0 +1,24 @@ +name: Check Spelling + +on: + push: + branches: + - main + pull_request: + +env: + CARGO_TERM_COLOR: always + +jobs: + spellcheck: + runs-on: ubuntu-latest + steps: + - name: Install cargo-spellcheck + uses: taiki-e/install-action@v2 + with: + tool: cargo-spellcheck + + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4 + + - name: Run cargo-spellcheck + run: cargo spellcheck --cfg=./spellcheck/era.cfg --code 1 diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 6999f1448d6..d34f9d4faf5 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -244,7 +244,7 @@ * **prover-fri:** added picked-by column in prover fri related tables ([#2600](https://github.com/matter-labs/zksync-2-dev/issues/2600)) ([9e604ab](https://github.com/matter-labs/zksync-2-dev/commit/9e604abf3bae11b6f583f2abd39c07a85dc20f0a)) * update verification keys, protocol version 15 ([#2602](https://github.com/matter-labs/zksync-2-dev/issues/2602)) ([2fff59b](https://github.com/matter-labs/zksync-2-dev/commit/2fff59bab00849996864b68e932739135337ebd7)) * **vlog:** Rework the observability configuration subsystem ([#2608](https://github.com/matter-labs/zksync-2-dev/issues/2608)) ([377f0c5](https://github.com/matter-labs/zksync-2-dev/commit/377f0c5f734c979bc990b429dff0971466872e71)) -* **vm:** Multivm tracer support ([#2601](https://github.com/matter-labs/zksync-2-dev/issues/2601)) ([4a7467b](https://github.com/matter-labs/zksync-2-dev/commit/4a7467b1b1556bfd795792dbe280bcf28c93a58f)) +* **vm:** MultiVM tracer support ([#2601](https://github.com/matter-labs/zksync-2-dev/issues/2601)) ([4a7467b](https://github.com/matter-labs/zksync-2-dev/commit/4a7467b1b1556bfd795792dbe280bcf28c93a58f)) ## 
[8.7.0](https://github.com/matter-labs/zksync-2-dev/compare/core-v8.6.0...core-v8.7.0) (2023-09-19) diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs index 3958f4dec11..bc49b731d14 100644 --- a/core/bin/block_reverter/src/main.rs +++ b/core/bin/block_reverter/src/main.rs @@ -33,7 +33,7 @@ enum Command { /// L1 batch number used to rollback to. #[arg(long)] l1_batch_number: u32, - /// Priority fee used for rollback ethereum transaction. + /// Priority fee used for rollback Ethereum transaction. // We operate only by priority fee because we want to use base fee from ethereum // and send transaction as soon as possible without any resend logic #[arg(long)] diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 00ae9d1da1b..3f26a334ea3 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -105,7 +105,7 @@ pub struct OptionalENConfig { /// Max possible size of an ABI encoded tx (in bytes). #[serde(default = "OptionalENConfig::default_max_tx_size")] pub max_tx_size: usize, - /// Max number of cache misses during one VM execution. If the number of cache misses exceeds this value, the api server panics. + /// Max number of cache misses during one VM execution. If the number of cache misses exceeds this value, the API server panics. /// This is a temporary solution to mitigate API request resulting in thousands of DB queries. pub vm_execution_cache_misses_limit: Option, /// Inbound transaction limit used for throttling. diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 86cc8c59221..6c6223fbb17 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -77,7 +77,7 @@ impl TryFrom for AccountTreeId { } } -/// ChainId in the ZkSync network. +/// ChainId in the zkSync network. 
#[derive(Copy, Clone, Debug, Serialize, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct L2ChainId(u64); diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index 3b23abea43c..14b3d81520c 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -57,7 +57,7 @@ pub struct Web3JsonRpcConfig { pub estimate_gas_acceptable_overestimation: u32, /// Max possible size of an ABI encoded tx (in bytes). pub max_tx_size: usize, - /// Max number of cache misses during one VM execution. If the number of cache misses exceeds this value, the api server panics. + /// Max number of cache misses during one VM execution. If the number of cache misses exceeds this value, the API server panics. /// This is a temporary solution to mitigate API request resulting in thousands of DB queries. pub vm_execution_cache_misses_limit: Option, /// Max number of VM instances to be concurrently spawned by the API server. diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 95392c8df83..f09b5bb292c 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -76,7 +76,7 @@ pub struct StateKeeperConfig { pub close_block_at_geometry_percentage: f64, /// Denotes the percentage of L1 params used in L2 block that triggers L2 block seal. pub close_block_at_eth_params_percentage: f64, - /// Denotes the percentage of L1 gas used in l2 block that triggers L2 block seal. + /// Denotes the percentage of L1 gas used in L2 block that triggers L2 block seal. 
pub close_block_at_gas_percentage: f64, pub fee_account_addr: Address, diff --git a/core/lib/constants/src/crypto.rs b/core/lib/constants/src/crypto.rs index e9ed44ff308..53a5bb98b79 100644 --- a/core/lib/constants/src/crypto.rs +++ b/core/lib/constants/src/crypto.rs @@ -26,7 +26,7 @@ pub const MAX_NEW_FACTORY_DEPS: usize = 32; pub const PAD_MSG_BEFORE_HASH_BITS_LEN: usize = 736; /// The size of the bootloader memory in bytes which is used by the protocol. -/// While the maximal possible size is a lot higher, we restric ourselves to a certain limit to reduce +/// While the maximal possible size is a lot higher, we restrict ourselves to a certain limit to reduce /// the requirements on RAM. pub const USED_BOOTLOADER_MEMORY_BYTES: usize = 1 << 24; pub const USED_BOOTLOADER_MEMORY_WORDS: usize = USED_BOOTLOADER_MEMORY_BYTES / 32; diff --git a/core/lib/constants/src/ethereum.rs b/core/lib/constants/src/ethereum.rs index 13cdd32d5c1..299b08e3d0d 100644 --- a/core/lib/constants/src/ethereum.rs +++ b/core/lib/constants/src/ethereum.rs @@ -12,7 +12,7 @@ pub const GUARANTEED_PUBDATA_PER_L1_BATCH: u64 = 4000; /// The maximum number of pubdata per L1 batch. This limit is due to the fact that the Ethereum /// nodes do not accept transactions that have more than 128kb of pubdata. -/// The 18kb margin is left in case of any inpreciseness of the pubdata calculation. +/// The 18kb margin is left in case of any impreciseness of the pubdata calculation. pub const MAX_PUBDATA_PER_L1_BATCH: u64 = 110000; // TODO: import from zkevm_opcode_defs once VM1.3 is supported diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index 01dce6a98f9..766d2464d34 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -194,7 +194,7 @@ pub struct SystemContractsRepo { } impl SystemContractsRepo { - /// Returns the default system contracts repo with directory based on the ZKSYNC_HOME environment variable. 
+ /// Returns the default system contracts repository with directory based on the ZKSYNC_HOME environment variable. pub fn from_env() -> Self { let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); let zksync_home = PathBuf::from(zksync_home); @@ -336,7 +336,7 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } - /// BaseSystemContracts with playground bootloader - used for handling 'eth_calls'. + /// BaseSystemContracts with playground bootloader - used for handling eth_calls. pub fn playground() -> Self { let bootloader_bytecode = read_playground_batch_bootloader_bytecode(); BaseSystemContracts::load_with_bootloader(bootloader_bytecode) @@ -364,7 +364,7 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } - /// BaseSystemContracts with playground bootloader - used for handling 'eth_calls'. + /// BaseSystemContracts with playground bootloader - used for handling eth_calls. pub fn estimate_gas() -> Self { let bootloader_bytecode = read_bootloader_code("fee_estimate"); BaseSystemContracts::load_with_bootloader(bootloader_bytecode) diff --git a/core/lib/dal/src/connection/mod.rs b/core/lib/dal/src/connection/mod.rs index ad761a7edf0..845dbc64dc4 100644 --- a/core/lib/dal/src/connection/mod.rs +++ b/core/lib/dal/src/connection/mod.rs @@ -72,13 +72,13 @@ impl<'a> ConnectionPoolBuilder<'a> { } } -/// Constructucts a new temporary database (with a randomized name) +/// Constructs a new temporary database (with a randomized name) /// by cloning the database template pointed by TEST_DATABASE_URL env var. /// The template is expected to have all migrations from dal/migrations applied. 
-/// For efficiency, the postgres container of TEST_DATABASE_URL should be +/// For efficiency, the Postgres container of TEST_DATABASE_URL should be /// configured with option "fsync=off" - it disables waiting for disk synchronization -/// whenever you write to the DBs, therefore making it as fast as an inmem postgres instance. -/// The database is not cleaned up automatically, but rather the whole postgres +/// whenever you write to the DBs, therefore making it as fast as an in-memory Postgres instance. +/// The database is not cleaned up automatically, but rather the whole Postgres /// container is recreated whenever you call "zk test rust". pub(super) async fn create_test_db() -> anyhow::Result { use rand::Rng as _; diff --git a/core/lib/dal/src/contract_verification_dal.rs b/core/lib/dal/src/contract_verification_dal.rs index 59e0c6996f9..a6c549f482b 100644 --- a/core/lib/dal/src/contract_verification_dal.rs +++ b/core/lib/dal/src/contract_verification_dal.rs @@ -91,7 +91,7 @@ impl ContractVerificationDal<'_, '_> { /// Returns the next verification request for processing. /// Considering the situation where processing of some request /// can be interrupted (panic, pod restart, etc..), - /// `processing_timeout` parameter is added to avoid stucking of requests. + /// `processing_timeout` parameter is added to avoid stuck requests. pub async fn get_next_queued_verification_request( &mut self, processing_timeout: Duration, diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 788ce2d98bd..9dfc9458202 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -85,7 +85,7 @@ mod tests; /// Storage processor is the main storage interaction point. /// It holds down the connection (either direct or pooled) to the database -/// and provide methods to obtain different storage schemas. +/// and provides methods to obtain different storage schemas.
#[derive(Debug)] pub struct StorageProcessor<'a> { conn: ConnectionHolder<'a>, diff --git a/core/lib/dal/src/storage_dal.rs b/core/lib/dal/src/storage_dal.rs index fdaaea38617..8ec6d916493 100644 --- a/core/lib/dal/src/storage_dal.rs +++ b/core/lib/dal/src/storage_dal.rs @@ -43,7 +43,7 @@ impl StorageDal<'_, '_> { .unwrap(); } - /// Returns bytecode for a factory dep with the specified bytecode `hash`. + /// Returns bytecode for a factory dependency with the specified bytecode `hash`. pub async fn get_factory_dep(&mut self, hash: H256) -> Option> { sqlx::query!( "SELECT bytecode FROM factory_deps WHERE bytecode_hash = $1", diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 5b235689961..cbca986b16c 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -969,7 +969,7 @@ impl TransactionsDal<'_, '_> { } } - /// Returns miniblocks with their transactions that state_keeper needs to reexecute on restart. + /// Returns miniblocks with their transactions that state_keeper needs to re-execute on restart. /// These are the transactions that are included to some miniblock, /// but not included to L1 batch. The order of the transactions is the same as it was /// during the previous execution. diff --git a/core/lib/dal/src/witness_generator_dal.rs b/core/lib/dal/src/witness_generator_dal.rs index ab73c525c76..983112ab35c 100644 --- a/core/lib/dal/src/witness_generator_dal.rs +++ b/core/lib/dal/src/witness_generator_dal.rs @@ -527,7 +527,7 @@ impl WitnessGeneratorDal<'_, '_> { /// Saves artifacts in node_aggregation_job /// and advances it to `waiting_for_proofs` status /// it will be advanced to `queued` by the prover when all the dependency proofs are computed. 
- /// If the node aggregation job was already `queued` in case of connrecunt run of same leaf aggregation job + /// If the node aggregation job was already `queued` in case of concurrent run of same leaf aggregation job /// we keep the status as is to prevent data race. pub async fn save_leaf_aggregation_artifacts( &mut self, diff --git a/core/lib/eth_client/src/lib.rs b/core/lib/eth_client/src/lib.rs index 2291f721470..a0350368325 100644 --- a/core/lib/eth_client/src/lib.rs +++ b/core/lib/eth_client/src/lib.rs @@ -21,7 +21,7 @@ use zksync_types::{ }; /// Common Web3 interface, as seen by the core applications. -/// Encapsulates the raw Web3 interction, providing a high-level interface. +/// Encapsulates the raw Web3 interaction, providing a high-level interface. /// /// ## Trait contents /// @@ -34,7 +34,7 @@ use zksync_types::{ /// /// Most of the trait methods support the `component` parameter. This parameter is used to /// describe the caller of the method. It may be useful to find the component that makes an -/// unnecessary high amount of Web3 calls. Implementations are advices to count invocations +/// unnecessary high amount of Web3 calls. Implementations are advised to count invocations /// per component and expose them to Prometheus. #[async_trait] pub trait EthInterface: Sync + Send { @@ -139,7 +139,7 @@ pub trait EthInterface: Sync + Send { /// An extension of `EthInterface` trait, which is used to perform queries that are bound to /// a certain contract and account. /// -/// THe example use cases for this trait would be: +/// The example use cases for this trait would be: /// - An operator that sends transactions and interacts with zkSync contract. /// - A wallet implementation in the SDK that is tied to a user's account. /// @@ -149,10 +149,10 @@ pub trait EthInterface: Sync + Send { /// implementation that invokes `contract` / `contract_addr` / `sender_account` methods.
#[async_trait] pub trait BoundEthInterface: EthInterface { - /// ABI of the contract that is used by the implementor. + /// ABI of the contract that is used by the implementer. fn contract(&self) -> ðabi::Contract; - /// Address of the contract that is used by the implementor. + /// Address of the contract that is used by the implementer. fn contract_addr(&self) -> H160; /// Chain ID of the L1 network the client is *configured* to connected to. diff --git a/core/lib/eth_signer/src/json_rpc_signer.rs b/core/lib/eth_signer/src/json_rpc_signer.rs index da81ff51dba..b6619f5e831 100644 --- a/core/lib/eth_signer/src/json_rpc_signer.rs +++ b/core/lib/eth_signer/src/json_rpc_signer.rs @@ -85,7 +85,7 @@ impl EthereumSigner for JsonRpcSigner { } } - /// Signs typed struct using ethereum private key by EIP-712 signature standard. + /// Signs typed struct using Ethereum private key by EIP-712 signature standard. /// Result of this function is the equivalent of RPC calling `eth_signTypedData`. async fn sign_typed_data( &self, @@ -192,7 +192,7 @@ impl JsonRpcSigner { self.address.ok_or(SignerError::DefineAddress) } - /// Specifies the Ethreum address which sets the address for which all other requests will be processed. + /// Specifies the Ethereum address which sets the address for which all other requests will be processed. /// If the address has already been set, then it will all the same change to a new one. pub async fn detect_address( &mut self, @@ -376,7 +376,7 @@ mod messages { Self::create("eth_sign", params) } - /// Signs typed struct using ethereum private key by EIP-712 signature standard. + /// Signs typed struct using Ethereum private key by EIP-712 signature standard. /// The address to sign with must be unlocked. 
pub fn sign_typed_data( address: Address, diff --git a/core/lib/eth_signer/src/pk_signer.rs b/core/lib/eth_signer/src/pk_signer.rs index 4a5bfb838de..680d87d62d0 100644 --- a/core/lib/eth_signer/src/pk_signer.rs +++ b/core/lib/eth_signer/src/pk_signer.rs @@ -41,7 +41,7 @@ impl EthereumSigner for PrivateKeySigner { Ok(signature) } - /// Signs typed struct using ethereum private key by EIP-712 signature standard. + /// Signs typed struct using Ethereum private key by EIP-712 signature standard. /// Result of this function is the equivalent of RPC calling `eth_signTypedData`. async fn sign_typed_data( &self, diff --git a/core/lib/mempool/src/mempool_store.rs b/core/lib/mempool/src/mempool_store.rs index f900523517c..a8b02bee0cb 100644 --- a/core/lib/mempool/src/mempool_store.rs +++ b/core/lib/mempool/src/mempool_store.rs @@ -29,7 +29,7 @@ pub struct MempoolStore { /// Next priority operation next_priority_id: PriorityOpId, stashed_accounts: Vec
, - /// Number of l2 transactions in the mempool. + /// Number of L2 transactions in the mempool. size: u64, capacity: u64, } diff --git a/core/lib/merkle_tree/src/pruning.rs b/core/lib/merkle_tree/src/pruning.rs index bf60b8cf956..21a3e8712fd 100644 --- a/core/lib/merkle_tree/src/pruning.rs +++ b/core/lib/merkle_tree/src/pruning.rs @@ -89,7 +89,7 @@ impl MerkleTreePruner { /// Sets the sleep duration when the pruner cannot progress. This time should be enough /// for the tree to produce enough stale keys. /// - /// The default value is 60s. + /// The default value is 60 seconds. pub fn set_poll_interval(&mut self, poll_interval: Duration) { self.poll_interval = poll_interval; } diff --git a/core/lib/merkle_tree/src/recovery.rs b/core/lib/merkle_tree/src/recovery.rs index 6f57b64ee81..85ac578cc0a 100644 --- a/core/lib/merkle_tree/src/recovery.rs +++ b/core/lib/merkle_tree/src/recovery.rs @@ -8,7 +8,7 @@ //! afterwards will have the same outcome as if they were applied to the original tree. //! //! Importantly, a recovered tree is only *observably* identical to the original tree; it differs -//! in (currently unobservable) node versions. In a recovered tree, all nodes will initially have +//! in (currently un-observable) node versions. In a recovered tree, all nodes will initially have //! the same version (the snapshot version), while in the original tree, node versions are distributed //! from 0 to the snapshot version (both inclusive). //! diff --git a/core/lib/merkle_tree/src/storage/patch.rs b/core/lib/merkle_tree/src/storage/patch.rs index 9e251bf0178..6d0c38d6c9f 100644 --- a/core/lib/merkle_tree/src/storage/patch.rs +++ b/core/lib/merkle_tree/src/storage/patch.rs @@ -344,7 +344,7 @@ impl WorkingPatchSet { } } - /// Computes hashes and serializes this changeset. + /// Computes hashes and serializes this change set. 
pub(super) fn finalize( self, manifest: Manifest, @@ -597,7 +597,7 @@ impl WorkingPatchSet { Some(Node::Internal(node)) => { let (next_nibble, child_ref) = node.last_child_ref(); nibbles = nibbles.push(next_nibble).unwrap(); - // ^ `unwrap()` is safe; there can be no internal nodes on the bottommost tree level + // ^ `unwrap()` is safe; there can be no internal nodes on the bottom-most tree level let child_key = nibbles.with_version(child_ref.version); let child_node = db.tree_node(&child_key, child_ref.is_leaf).unwrap(); // ^ `unwrap()` is safe by construction diff --git a/core/lib/merkle_tree/src/types/mod.rs b/core/lib/merkle_tree/src/types/mod.rs index 6988735ec02..de35d9024b7 100644 --- a/core/lib/merkle_tree/src/types/mod.rs +++ b/core/lib/merkle_tree/src/types/mod.rs @@ -42,7 +42,7 @@ impl TreeEntry { } } - /// Returns `true` iff this entry encodes lack of a value. + /// Returns `true` if and only if this entry encodes lack of a value. pub fn is_empty(&self) -> bool { self.leaf_index == 0 && self.value_hash.is_zero() } @@ -63,7 +63,7 @@ pub struct TreeEntryWithProof { /// Proof of the value authenticity. /// /// If specified, a proof is the Merkle path consisting of up to 256 hashes - /// ordered starting the bottommost level of the tree (one with leaves) and ending before + /// ordered starting the bottom-most level of the tree (one with leaves) and ending before /// the root level. /// /// If the path is not full (contains <256 hashes), it means that the hashes at the beginning @@ -152,7 +152,7 @@ pub struct TreeLogEntryWithProof

> { /// Log entry about an atomic operation on the tree. pub base: TreeLogEntry, /// Merkle path to prove log authenticity. The path consists of up to 256 hashes - /// ordered starting the bottommost level of the tree (one with leaves) and ending before + /// ordered starting the bottom-most level of the tree (one with leaves) and ending before /// the root level. /// /// If the path is not full (contains <256 hashes), it means that the hashes at the beginning diff --git a/core/lib/multivm/src/glue/mod.rs b/core/lib/multivm/src/glue/mod.rs index 0904661a73c..299093532bd 100644 --- a/core/lib/multivm/src/glue/mod.rs +++ b/core/lib/multivm/src/glue/mod.rs @@ -11,7 +11,7 @@ pub(crate) mod history_mode; pub mod tracers; mod types; -/// This trait is a workaround on the Rust'c [orphan rule](orphan_rule). +/// This trait is a workaround on the Rust's [orphan rule](orphan_rule). /// We need to convert a lot of types that come from two different versions of some crate, /// and `From`/`Into` traits are natural way of doing so. Unfortunately, we can't implement an /// external trait on a pair of external types, so we're unable to use these traits. diff --git a/core/lib/multivm/src/glue/tracers/mod.rs b/core/lib/multivm/src/glue/tracers/mod.rs index b9c0e083b84..a504d5d2c8f 100644 --- a/core/lib/multivm/src/glue/tracers/mod.rs +++ b/core/lib/multivm/src/glue/tracers/mod.rs @@ -1,4 +1,4 @@ -//! # Multivm Tracing +//! # MultiVM Tracing //! //! The MultiVM tracing module enables support for Tracers in different versions of virtual machines. //! @@ -7,7 +7,7 @@ //! Different VM versions may have distinct requirements and types for Tracers. To accommodate these differences, //! this module defines one primary trait: //! -//! - `MultivmTracer`: This trait represents a tracer that can be converted into a tracer for +//! - `MultiVMTracer`: This trait represents a tracer that can be converted into a tracer for //! a specific VM version. //! //! 
Specific traits for each VM version, which support Custom Tracers: @@ -19,23 +19,23 @@ //! into a form compatible with the vm_virtual_blocks version. //! It defines a method `vm_virtual_blocks` for obtaining a boxed tracer. //! -//! For `MultivmTracer` to be implemented, the Tracer must implement all N currently +//! For `MultiVMTracer` to be implemented, the Tracer must implement all N currently //! existing sub-traits. //! //! ## Adding a new VM version //! -//! To add support for one more VM version to MultivmTracer, one needs to: +//! To add support for one more VM version to MultiVMTracer, one needs to: //! - Create a new trait performing conversion to the specified VM tracer, e.g., `IntoTracer`. -//! - Add this trait as a trait bound to the `MultivmTracer`. -//! - Add this trait as a trait bound for `T` in `MultivmTracer` implementation. -//! — Implement the trait for `T` with a bound to `VmTracer` for a specific version. +//! - Add this trait as a trait bound to the `MultiVMTracer`. +//! - Add this trait as a trait bound for `T` in `MultiVMTracer` implementation. +//! - Implement the trait for `T` with a bound to `VmTracer` for a specific version. //! 
use crate::HistoryMode; use zksync_state::WriteStorage; -pub type MultiVmTracerPointer = Box>; +pub type MultiVmTracerPointer = Box>; -pub trait MultivmTracer: +pub trait MultiVMTracer: IntoLatestTracer + IntoVmVirtualBlocksTracer + IntoVmRefundsEnhancementTracer { fn into_tracer_pointer(self) -> MultiVmTracerPointer @@ -102,7 +102,7 @@ where } } -impl MultivmTracer for T +impl MultiVMTracer for T where S: WriteStorage, H: HistoryMode, diff --git a/core/lib/multivm/src/interface/types/errors/vm_revert_reason.rs b/core/lib/multivm/src/interface/types/errors/vm_revert_reason.rs index 531d8b5507f..4a645749126 100644 --- a/core/lib/multivm/src/interface/types/errors/vm_revert_reason.rs +++ b/core/lib/multivm/src/interface/types/errors/vm_revert_reason.rs @@ -12,7 +12,7 @@ pub enum VmRevertReasonParsingError { IncorrectStringLength(Vec), } -/// Rich Revert Reasons https://github.com/0xProject/ZEIPs/issues/32 +/// Rich Revert Reasons `https://github.com/0xProject/ZEIPs/issues/32` #[derive(Debug, Clone, PartialEq)] pub enum VmRevertReason { General { diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index 1e45443c0f2..adb9358980f 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -6,7 +6,7 @@ pub use crate::{ glue::{ history_mode::HistoryMode, - tracers::{MultiVmTracerPointer, MultivmTracer}, + tracers::{MultiVMTracer, MultiVmTracerPointer}, }, vm_instance::VmInstance, }; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/bootloader_state.rs b/core/lib/multivm/src/versions/vm_1_3_2/bootloader_state.rs index a5584662323..f0324137bdc 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/bootloader_state.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/bootloader_state.rs @@ -5,7 +5,7 @@ use crate::vm_1_3_2::vm_with_bootloader::TX_DESCRIPTION_OFFSET; /// Required to process transactions one by one (since we intercept the VM execution to execute /// transactions and add new ones to the memory on the fly). 
/// Think about it like a two-pointer scheme: one pointer (`free_tx_index`) tracks the end of the -/// initialized memory; while another (`tx_to_execute`) tracks our progess in this initialized memory. +/// initialized memory; while another (`tx_to_execute`) tracks our progress in this initialized memory. /// This is required since it's possible to push several transactions to the bootloader memory and then /// execute it one by one. /// diff --git a/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs b/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs index e1d50f72448..c127a9e6f2d 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs @@ -15,7 +15,7 @@ pub enum VmRevertReasonParsingError { IncorrectStringLength(Vec), } -/// Rich Revert Reasons https://github.com/0xProject/ZEIPs/issues/32 +/// Rich Revert Reasons `https://github.com/0xProject/ZEIPs/issues/32` #[derive(Debug, Clone, PartialEq)] pub enum VmRevertReason { General { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracle_tools.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracle_tools.rs index 0f1feef4f94..9f0f2600c5b 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracle_tools.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracle_tools.rs @@ -12,7 +12,7 @@ use zksync_state::{StoragePtr, WriteStorage}; /// zkEVM requires a bunch of objects implementing given traits to work. /// For example: Storage, Memory, PrecompilerProcessor etc -/// (you can find all these traites in zk_evm crate -> src/abstractions/mod.rs) +/// (you can find all these traits in zk_evm crate -> src/abstractions/mod.rs) /// For each of these traits, we have a local implementation (for example StorageOracle) /// that also support additional features (like rollbacks & history). /// The OracleTools struct, holds all these things together in one place. 
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs index e5c30c305bc..17583b70dc9 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs @@ -68,7 +68,7 @@ impl DecommitterOracle } } - /// Adds additional bytecodes. They will take precendent over the bytecodes from storage. + /// Adds additional bytecodes. They will take precedent over the bytecodes from storage. pub fn populate(&mut self, bytecodes: Vec<(U256, Vec)>, timestamp: Timestamp) { for (hash, bytecode) in bytecodes { self.known_bytecodes.insert(hash, bytecode, timestamp); @@ -178,7 +178,7 @@ impl DecommittmentProcessor > { self.decommitment_requests.push((), partial_query.timestamp); // First - check if we didn't fetch this bytecode in the past. - // If we did - we can just return the page that we used before (as the memory is read only). + // If we did - we can just return the page that we used before (as the memory is readonly). if let Some(memory_page) = self .decommitted_code_hashes .inner() diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/bootloader.rs index 20d8621e829..16b1efdff54 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/bootloader.rs @@ -16,7 +16,7 @@ use zk_evm_1_3_3::{ zkevm_opcode_defs::{Opcode, RetOpcode}, }; -/// Tells the VM to end the execution before `ret` from the booloader if there is no panic or revert. +/// Tells the VM to end the execution before `ret` from the bootloader if there is no panic or revert. /// Also, saves the information if this `ret` was caused by "out of gas" panic. 
#[derive(Debug, Clone, Default)] pub struct BootloaderTracer { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs index b50ee5f925c..72701f6e0f2 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs @@ -94,7 +94,7 @@ impl Tracer for CallTracer { } } impl CallTracer { - /// We use parent gas for propery calculation of gas used in the trace. + /// We use parent gas for proper calculation of gas used in the trace. /// This method updates parent gas for the current call. fn update_parent_gas(&mut self, state: &VmLocalStateData<'_>, current_call: &mut Call) { let current = state.vm_local_state.callstack.current; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs index 2914faf5120..9c9e87c065d 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs @@ -99,7 +99,7 @@ pub(crate) fn get_debug_log( } /// Reads the memory slice represented by the fat pointer. -/// Note, that the fat pointer must point to the accesible memory (i.e. not cleared up yet). +/// Note, that the fat pointer must point to the accessible memory (i.e. not cleared up yet).
pub(crate) fn read_pointer( memory: &SimpleMemory, pointer: FatPointer, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs index 6738b070482..e697e3b310d 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs @@ -59,7 +59,7 @@ impl PartialEq for ModifiedKeysMap { #[derive(Clone, PartialEq, Debug)] pub struct DecommitterTestInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. pub modified_storage_keys: ModifiedKeysMap, pub known_bytecodes: HistoryRecorder>, H>, @@ -68,7 +68,7 @@ pub struct DecommitterTestInnerState { #[derive(Clone, PartialEq, Debug)] pub struct StorageOracleInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. 
pub modified_storage_keys: ModifiedKeysMap, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs index fc931f2ad9a..2d9dd1cb7aa 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs @@ -212,12 +212,12 @@ impl TransactionData { self.reserved_dynamic.len() as u64, ); - let coeficients = OverheadCoeficients::from_tx_type(self.tx_type); + let coefficients = OverheadCoefficients::from_tx_type(self.tx_type); get_amortized_overhead( total_gas_limit, gas_price_per_pubdata, encoded_len, - coeficients, + coefficients, ) } @@ -231,7 +231,7 @@ pub fn derive_overhead( gas_limit: u32, gas_price_per_pubdata: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { // Even if the gas limit is greater than the MAX_TX_ERGS_LIMIT, we assume that everything beyond MAX_TX_ERGS_LIMIT // will be spent entirely on publishing bytecodes and so we derive the overhead solely based on the capped value @@ -268,31 +268,31 @@ pub fn derive_overhead( // ); vec![ - (coeficients.ergs_limit_overhead_coeficient + (coefficients.ergs_limit_overhead_coeficient * overhead_for_single_instance_circuits.as_u32() as f64) .floor() as u32, - (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) + (coefficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) .floor() as u32, - (coeficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, + (coefficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, ] .into_iter() .max() .unwrap() } -/// Contains the coeficients with which the overhead for transactions will be calculated. -/// All of the coeficients should be <= 1. 
There are here to provide a certain "discount" for normal transactions +/// Contains the coefficients with which the overhead for transactions will be calculated. +/// All of the coefficients should be <= 1. There are here to provide a certain "discount" for normal transactions /// at the risk of malicious transactions that may close the block prematurely. -/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coeficients.ergs_limit_overhead_coeficient` MUST +/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coefficients.ergs_limit_overhead_coeficient` MUST /// result in an integer number #[derive(Debug, Clone, Copy)] -pub struct OverheadCoeficients { +pub struct OverheadCoefficients { slot_overhead_coeficient: f64, bootloader_memory_overhead_coeficient: f64, ergs_limit_overhead_coeficient: f64, } -impl OverheadCoeficients { +impl OverheadCoefficients { // This method ensures that the parameters keep the required invariants fn new_checked( slot_overhead_coeficient: f64, @@ -314,11 +314,11 @@ impl OverheadCoeficients { // L1->L2 do not receive any discounts fn new_l1() -> Self { - OverheadCoeficients::new_checked(1.0, 1.0, 1.0) + OverheadCoefficients::new_checked(1.0, 1.0, 1.0) } fn new_l2() -> Self { - OverheadCoeficients::new_checked( + OverheadCoefficients::new_checked( 1.0, 1.0, // For L2 transactions we allow a certain default discount with regard to the number of ergs. // Multiinstance circuits can in theory be spawned infinite times, while projected future limitations @@ -342,7 +342,7 @@ pub fn get_amortized_overhead( total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { // Using large U256 type to prevent overflows. 
let overhead_for_block_gas = U256::from(block_overhead_gas(gas_per_pubdata_byte_limit)); @@ -379,7 +379,7 @@ pub fn get_amortized_overhead( let tx_slot_overhead = { let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()).as_u32(); - (coeficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 + (coefficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 }; // 2. The overhead for occupying the bootloader memory can be derived from encoded_len @@ -390,7 +390,7 @@ pub fn get_amortized_overhead( ) .as_u32(); - (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() + (coefficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() as u32 }; @@ -435,7 +435,7 @@ pub fn get_amortized_overhead( let overhead_for_gas = { let numerator = overhead_for_block_gas * total_gas_limit + U256::from(MAX_TX_ERGS_LIMIT); let denominator: U256 = U256::from( - (MAX_TX_ERGS_LIMIT as f64 / coeficients.ergs_limit_overhead_coeficient) as u64, + (MAX_TX_ERGS_LIMIT as f64 / coefficients.ergs_limit_overhead_coeficient) as u64, ) + overhead_for_block_gas; let overhead_for_gas = (numerator - 1) / denominator; @@ -460,7 +460,7 @@ pub fn get_amortized_overhead( MAX_L2_TX_GAS_LIMIT as u32, gas_per_pubdata_byte_limit, encoded_len.as_usize(), - coeficients, + coefficients, ) } else { overhead @@ -483,7 +483,7 @@ mod tests { total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { let mut left_bound = if MAX_TX_ERGS_LIMIT < total_gas_limit { total_gas_limit - MAX_TX_ERGS_LIMIT @@ -501,7 +501,7 @@ mod tests { total_gas_limit - suggested_overhead, gas_per_pubdata_byte_limit, encoded_len, - coeficients, + coefficients, ); derived_overhead >= suggested_overhead @@ -530,41 +530,41 @@ mod tests { let test_params = |total_gas_limit: u32, gas_per_pubdata: u32, encoded_len: usize, - 
coeficients: OverheadCoeficients| { + coefficients: OverheadCoefficients| { let result_by_efficient_search = - get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coeficients); + get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coefficients); let result_by_binary_search = get_maximal_allowed_overhead_bin_search( total_gas_limit, gas_per_pubdata, encoded_len, - coeficients, + coefficients, ); assert_eq!(result_by_efficient_search, result_by_binary_search); }; // Some arbitrary test - test_params(60_000_000, 800, 2900, OverheadCoeficients::new_l2()); + test_params(60_000_000, 800, 2900, OverheadCoefficients::new_l2()); // Very small parameters - test_params(0, 1, 12, OverheadCoeficients::new_l2()); + test_params(0, 1, 12, OverheadCoefficients::new_l2()); // Relatively big parameters let max_tx_overhead = derive_overhead( MAX_TX_ERGS_LIMIT, 5000, 10000, - OverheadCoeficients::new_l2(), + OverheadCoefficients::new_l2(), ); test_params( MAX_TX_ERGS_LIMIT + max_tx_overhead, 5000, 10000, - OverheadCoeficients::new_l2(), + OverheadCoefficients::new_l2(), ); - test_params(115432560, 800, 2900, OverheadCoeficients::new_l1()); + test_params(115432560, 800, 2900, OverheadCoefficients::new_l1()); } #[test] diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs index d84cd1cc7b6..3e157e74c02 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs @@ -364,7 +364,7 @@ impl VmInstance { } } - /// Removes the latest snapshot without rollbacking to it. + /// Removes the latest snapshot without rolling it back. /// This function expects that there is at least one snapshot present. 
pub fn pop_snapshot_no_rollback(&mut self) { self.snapshots.pop().unwrap(); diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/snapshot.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/snapshot.rs index 683fc28a69e..8f1cec3cb7f 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/snapshot.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/snapshot.rs @@ -4,9 +4,9 @@ use zksync_types::H256; pub(crate) struct BootloaderStateSnapshot { /// ID of the next transaction to be executed. pub(crate) tx_to_execute: usize, - /// Stored l2 blocks in bootloader memory + /// Stored L2 blocks in bootloader memory pub(crate) l2_blocks_len: usize, - /// Snapshot of the last l2 block. Only this block could be changed during the rollback + /// Snapshot of the last L2 block. Only this block could be changed during the rollback pub(crate) last_l2_block: L2BlockSnapshot, /// The number of 32-byte words spent on the already included compressed bytecodes. pub(crate) compressed_bytecodes_encoding: usize, @@ -20,6 +20,6 @@ pub(crate) struct BootloaderStateSnapshot { pub(crate) struct L2BlockSnapshot { /// The rolling hash of all the transactions in the miniblock pub(crate) txs_rolling_hash: H256, - /// The number of transactions in the last l2 block + /// The number of transactions in the last L2 block pub(crate) txs_len: usize, } diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs index 6d322e5877d..dce0ecce3fb 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs @@ -14,7 +14,7 @@ pub(super) struct BootloaderTx { pub(super) refund: u32, /// Gas overhead pub(super) gas_overhead: u32, - /// Gas Limit for this transaction. It can be different from the gaslimit inside the transaction + /// Gas Limit for this transaction. 
It can be different from the gas limit inside the transaction pub(super) trusted_gas_limit: U256, /// Offset of the tx in bootloader memory pub(super) offset: usize, diff --git a/core/lib/multivm/src/versions/vm_latest/constants.rs b/core/lib/multivm/src/versions/vm_latest/constants.rs index c67156681a0..4d1c7705423 100644 --- a/core/lib/multivm/src/versions/vm_latest/constants.rs +++ b/core/lib/multivm/src/versions/vm_latest/constants.rs @@ -60,7 +60,7 @@ pub const OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET: usize = /// One of "worst case" scenarios for the number of state diffs in a batch is when 120kb of pubdata is spent /// on repeated writes, that are all zeroed out. In this case, the number of diffs is 120k / 5 = 24k. This means that they will have /// accommodate 6528000 bytes of calldata for the uncompressed state diffs. Adding 120k on top leaves us with -/// roughly 6650000 bytes needed for calldata. 207813 slots are needed to accomodate this amount of data. +/// roughly 6650000 bytes needed for calldata. 207813 slots are needed to accommodate this amount of data. /// We round up to 208000 slots just in case. /// /// In theory though much more calldata could be used (if for instance 1 byte is used for enum index). It is the responsibility of the @@ -92,10 +92,10 @@ pub const BLOCK_OVERHEAD_L1_GAS: u32 = 1000000; pub const BLOCK_OVERHEAD_PUBDATA: u32 = BLOCK_OVERHEAD_L1_GAS / L1_GAS_PER_PUBDATA_BYTE; /// VM Hooks are used for communication between bootloader and tracers. -/// The 'type'/'opcode' is put into VM_HOOK_POSITION slot, +/// The 'type' / 'opcode' is put into VM_HOOK_POSITION slot, /// and VM_HOOKS_PARAMS_COUNT parameters (each 32 bytes) are put in the slots before. 
/// So the layout looks like this: -/// [param 0][param 1][vmhook opcode] +/// `[param 0][param 1][vmhook opcode]` pub const VM_HOOK_POSITION: u32 = RESULT_SUCCESS_FIRST_SLOT - 1; pub const VM_HOOK_PARAMS_COUNT: u32 = 2; pub const VM_HOOK_PARAMS_START_POSITION: u32 = VM_HOOK_POSITION - VM_HOOK_PARAMS_COUNT; diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs index ddbc7aec2f7..92604479a88 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs @@ -43,7 +43,7 @@ impl Vm { } } - /// Returns the hashes the bytecodes that have been decommitted by the decomittment processor. + /// Returns the hashes the bytecodes that have been decommitted by the decommitment processor. pub(crate) fn get_used_contracts(&self) -> Vec { self.state .decommittment_processor diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs index fe5416cd120..c679532fa76 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs @@ -70,7 +70,7 @@ impl DecommitterOracle { } } - /// Adds additional bytecodes. They will take precendent over the bytecodes from storage. + /// Adds additional bytecodes. They will take precedent over the bytecodes from storage. pub fn populate(&mut self, bytecodes: Vec<(U256, Vec)>, timestamp: Timestamp) { for (hash, bytecode) in bytecodes { self.known_bytecodes.insert(hash, bytecode, timestamp); @@ -180,7 +180,7 @@ impl DecommittmentProcess > { self.decommitment_requests.push((), partial_query.timestamp); // First - check if we didn't fetch this bytecode in the past. - // If we did - we can just return the page that we used before (as the memory is read only). 
+ // If we did - we can just return the page that we used before (as the memory is readonly). if let Some(memory_page) = self .decommitted_code_hashes .inner() diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs index ec9ffe785f9..4767f934479 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs @@ -34,7 +34,7 @@ impl PartialEq for ModifiedKeysMap { #[derive(Clone, PartialEq, Debug)] pub(crate) struct DecommitterTestInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. pub(crate) modified_storage_keys: ModifiedKeysMap, pub(crate) known_bytecodes: HistoryRecorder>, H>, @@ -43,7 +43,7 @@ pub(crate) struct DecommitterTestInnerState { #[derive(Clone, PartialEq, Debug)] pub(crate) struct StorageOracleInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. pub(crate) modified_storage_keys: ModifiedKeysMap, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs index 5b34eee4742..c91d2f3ce0c 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs @@ -109,7 +109,7 @@ pub(crate) fn get_debug_log( } /// Reads the memory slice represented by the fat pointer. -/// Note, that the fat pointer must point to the accesible memory (i.e. not cleared up yet). +/// Note, that the fat pointer must point to the accessible memory (i.e. not cleared up yet). 
pub(crate) fn read_pointer( memory: &SimpleMemory, pointer: FatPointer, diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index f81741d2a43..3c7b9bcac03 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -11,7 +11,7 @@ use zksync_types::{ use zksync_utils::address_to_h256; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; -use crate::vm_latest::utils::overhead::{get_amortized_overhead, OverheadCoeficients}; +use crate::vm_latest::utils::overhead::{get_amortized_overhead, OverheadCoefficients}; /// This structure represents the data that is used by /// the Bootloader to describe the transaction. @@ -212,12 +212,12 @@ impl TransactionData { self.reserved_dynamic.len() as u64, ); - let coeficients = OverheadCoeficients::from_tx_type(self.tx_type); + let coefficients = OverheadCoefficients::from_tx_type(self.tx_type); get_amortized_overhead( total_gas_limit, gas_price_per_pubdata, encoded_len, - coeficients, + coefficients, ) } diff --git a/core/lib/multivm/src/versions/vm_latest/utils/fee.rs b/core/lib/multivm/src/versions/vm_latest/utils/fee.rs index bbf09a75f3f..23b744a348f 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/fee.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/fee.rs @@ -4,7 +4,7 @@ use zksync_utils::ceil_div; use crate::vm_latest::old_vm::utils::eth_price_per_pubdata_byte; -/// Calcluates the amount of gas required to publish one byte of pubdata +/// Calculates the amount of gas required to publish one byte of pubdata pub fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); diff --git a/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs 
b/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs index 2541c7d7037..a4012e540ed 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs @@ -12,7 +12,7 @@ pub fn derive_overhead( gas_limit: u32, gas_price_per_pubdata: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { // Even if the gas limit is greater than the MAX_TX_ERGS_LIMIT, we assume that everything beyond MAX_TX_ERGS_LIMIT // will be spent entirely on publishing bytecodes and so we derive the overhead solely based on the capped value @@ -49,31 +49,31 @@ pub fn derive_overhead( // ); vec![ - (coeficients.ergs_limit_overhead_coeficient + (coefficients.ergs_limit_overhead_coeficient * overhead_for_single_instance_circuits.as_u32() as f64) .floor() as u32, - (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) + (coefficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) .floor() as u32, - (coeficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, + (coefficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, ] .into_iter() .max() .unwrap() } -/// Contains the coeficients with which the overhead for transactions will be calculated. -/// All of the coeficients should be <= 1. There are here to provide a certain "discount" for normal transactions +/// Contains the coefficients with which the overhead for transactions will be calculated. +/// All of the coefficients should be <= 1. There are here to provide a certain "discount" for normal transactions /// at the risk of malicious transactions that may close the block prematurely. 
-/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coeficients.ergs_limit_overhead_coeficient` MUST +/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coefficients.ergs_limit_overhead_coeficient` MUST /// result in an integer number #[derive(Debug, Clone, Copy)] -pub struct OverheadCoeficients { +pub struct OverheadCoefficients { slot_overhead_coeficient: f64, bootloader_memory_overhead_coeficient: f64, ergs_limit_overhead_coeficient: f64, } -impl OverheadCoeficients { +impl OverheadCoefficients { // This method ensures that the parameters keep the required invariants fn new_checked( slot_overhead_coeficient: f64, @@ -95,11 +95,11 @@ impl OverheadCoeficients { // L1->L2 do not receive any discounts fn new_l1() -> Self { - OverheadCoeficients::new_checked(1.0, 1.0, 1.0) + OverheadCoefficients::new_checked(1.0, 1.0, 1.0) } fn new_l2() -> Self { - OverheadCoeficients::new_checked( + OverheadCoefficients::new_checked( 1.0, 1.0, // For L2 transactions we allow a certain default discount with regard to the number of ergs. // Multiinstance circuits can in theory be spawned infinite times, while projected future limitations @@ -109,7 +109,7 @@ impl OverheadCoeficients { ) } - /// Return the coeficients for the given transaction type + /// Return the coefficients for the given transaction type pub fn from_tx_type(tx_type: u8) -> Self { if is_l1_tx_type(tx_type) { Self::new_l1() @@ -124,7 +124,7 @@ pub(crate) fn get_amortized_overhead( total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { // Using large U256 type to prevent overflows. 
let overhead_for_block_gas = U256::from(block_overhead_gas(gas_per_pubdata_byte_limit)); @@ -161,7 +161,7 @@ pub(crate) fn get_amortized_overhead( let tx_slot_overhead = { let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()).as_u32(); - (coeficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 + (coefficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 }; // 2. The overhead for occupying the bootloader memory can be derived from encoded_len @@ -172,7 +172,7 @@ pub(crate) fn get_amortized_overhead( ) .as_u32(); - (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() + (coefficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() as u32 }; @@ -217,7 +217,7 @@ pub(crate) fn get_amortized_overhead( let overhead_for_gas = { let numerator = overhead_for_block_gas * total_gas_limit + U256::from(MAX_TX_ERGS_LIMIT); let denominator: U256 = U256::from( - (MAX_TX_ERGS_LIMIT as f64 / coeficients.ergs_limit_overhead_coeficient) as u64, + (MAX_TX_ERGS_LIMIT as f64 / coefficients.ergs_limit_overhead_coeficient) as u64, ) + overhead_for_block_gas; let overhead_for_gas = (numerator - 1) / denominator; @@ -242,7 +242,7 @@ pub(crate) fn get_amortized_overhead( MAX_L2_TX_GAS_LIMIT as u32, gas_per_pubdata_byte_limit, encoded_len.as_usize(), - coeficients, + coefficients, ) } else { overhead @@ -263,7 +263,7 @@ mod tests { total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { let mut left_bound = if MAX_TX_ERGS_LIMIT < total_gas_limit { total_gas_limit - MAX_TX_ERGS_LIMIT @@ -281,7 +281,7 @@ mod tests { total_gas_limit - suggested_overhead, gas_per_pubdata_byte_limit, encoded_len, - coeficients, + coefficients, ); derived_overhead >= suggested_overhead @@ -310,40 +310,40 @@ mod tests { let test_params = |total_gas_limit: u32, gas_per_pubdata: 
u32, encoded_len: usize, - coeficients: OverheadCoeficients| { + coefficients: OverheadCoefficients| { let result_by_efficient_search = - get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coeficients); + get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coefficients); let result_by_binary_search = get_maximal_allowed_overhead_bin_search( total_gas_limit, gas_per_pubdata, encoded_len, - coeficients, + coefficients, ); assert_eq!(result_by_efficient_search, result_by_binary_search); }; // Some arbitrary test - test_params(60_000_000, 800, 2900, OverheadCoeficients::new_l2()); + test_params(60_000_000, 800, 2900, OverheadCoefficients::new_l2()); // Very small parameters - test_params(0, 1, 12, OverheadCoeficients::new_l2()); + test_params(0, 1, 12, OverheadCoefficients::new_l2()); // Relatively big parameters let max_tx_overhead = derive_overhead( MAX_TX_ERGS_LIMIT, 5000, 10000, - OverheadCoeficients::new_l2(), + OverheadCoefficients::new_l2(), ); test_params( MAX_TX_ERGS_LIMIT + max_tx_overhead, 5000, 10000, - OverheadCoeficients::new_l2(), + OverheadCoefficients::new_l2(), ); - test_params(115432560, 800, 2900, OverheadCoeficients::new_l1()); + test_params(115432560, 800, 2900, OverheadCoefficients::new_l1()); } } diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index e63b6438dc9..20d74e39093 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -138,7 +138,7 @@ impl VmInterface for Vm { } } -/// Methods of vm, which required some history manipullations +/// Methods of vm, which required some history manipulations impl VmInterfaceHistoryEnabled for Vm { /// Create snapshot of current vm state and push it into the memory fn make_snapshot(&mut self) { diff --git a/core/lib/multivm/src/versions/vm_m5/bootloader_state.rs b/core/lib/multivm/src/versions/vm_m5/bootloader_state.rs index 518d999b6ea..4bb51c7a839 100644 
--- a/core/lib/multivm/src/versions/vm_m5/bootloader_state.rs +++ b/core/lib/multivm/src/versions/vm_m5/bootloader_state.rs @@ -5,7 +5,7 @@ use crate::vm_m5::vm_with_bootloader::TX_DESCRIPTION_OFFSET; /// Required to process transactions one by one (since we intercept the VM execution to execute /// transactions and add new ones to the memory on the fly). /// Think about it like a two-pointer scheme: one pointer (`free_tx_index`) tracks the end of the -/// initialized memory; while another (`tx_to_execute`) tracks our progess in this initialized memory. +/// initialized memory; while another (`tx_to_execute`) tracks our progress in this initialized memory. /// This is required since it's possible to push several transactions to the bootloader memory and then /// execute it one by one. /// diff --git a/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs b/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs index 1997336c3a4..5d1a075f6a5 100644 --- a/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs +++ b/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs @@ -15,7 +15,7 @@ pub enum VmRevertReasonParsingError { IncorrectStringLength(Vec), } -/// Rich Revert Reasons https://github.com/0xProject/ZEIPs/issues/32 +/// Rich Revert Reasons `https://github.com/0xProject/ZEIPs/issues/32` #[derive(Debug, Clone, PartialEq)] pub enum VmRevertReason { General { diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs b/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs index d8a70bdaf64..96ba04e85aa 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs @@ -651,7 +651,7 @@ impl OneTxTracer { } } -/// Tells the VM to end the execution before `ret` from the booloader if there is no panic or revert. +/// Tells the VM to end the execution before `ret` from the bootloader if there is no panic or revert. 
/// Also, saves the information if this `ret` was caused by "out of gas" panic. #[derive(Debug, Clone, Default)] pub struct BootloaderTracer { @@ -816,7 +816,7 @@ fn get_debug_log(state: &VmLocalStateData<'_>, memory: &SimpleMemory) -> String } /// Reads the memory slice represented by the fat pointer. -/// Note, that the fat pointer must point to the accesible memory (i.e. not cleared up yet). +/// Note, that the fat pointer must point to the accessible memory (i.e. not cleared up yet). pub(crate) fn read_pointer(memory: &SimpleMemory, pointer: FatPointer) -> Vec { let FatPointer { offset, diff --git a/core/lib/multivm/src/versions/vm_m5/test_utils.rs b/core/lib/multivm/src/versions/vm_m5/test_utils.rs index 590579be6d8..36c1d60dfda 100644 --- a/core/lib/multivm/src/versions/vm_m5/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/test_utils.rs @@ -58,7 +58,7 @@ impl PartialEq for ModifiedKeysMap { #[derive(Clone, PartialEq, Debug)] pub struct DecommitterTestInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. pub modified_storage_keys: ModifiedKeysMap, pub known_bytecodes: HistoryRecorder>>, @@ -67,7 +67,7 @@ pub struct DecommitterTestInnerState { #[derive(Clone, PartialEq, Debug)] pub struct StorageOracleInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. 
pub modified_storage_keys: ModifiedKeysMap, diff --git a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs index 1df40c8a0b8..e92305003c7 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs @@ -108,7 +108,7 @@ pub struct VmExecutionResult { /// available to VM before and after execution. /// /// It means, that depending on the context, `gas_used` may represent different things. - /// If VM is continously invoked and interrupted after each tx, this field may represent the + /// If VM is continuously invoked and interrupted after each tx, this field may represent the /// amount of gas spent by a single transaction. /// /// To understand, which value does `gas_used` represent, see the documentation for the method diff --git a/core/lib/multivm/src/versions/vm_m6/bootloader_state.rs b/core/lib/multivm/src/versions/vm_m6/bootloader_state.rs index 5dce7e1c6a9..1328d0dd701 100644 --- a/core/lib/multivm/src/versions/vm_m6/bootloader_state.rs +++ b/core/lib/multivm/src/versions/vm_m6/bootloader_state.rs @@ -5,7 +5,7 @@ use crate::vm_m6::vm_with_bootloader::TX_DESCRIPTION_OFFSET; /// Required to process transactions one by one (since we intercept the VM execution to execute /// transactions and add new ones to the memory on the fly). /// Think about it like a two-pointer scheme: one pointer (`free_tx_index`) tracks the end of the -/// initialized memory; while another (`tx_to_execute`) tracks our progess in this initialized memory. +/// initialized memory; while another (`tx_to_execute`) tracks our progress in this initialized memory. /// This is required since it's possible to push several transactions to the bootloader memory and then /// execute it one by one. 
/// diff --git a/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs b/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs index d954f077953..9025ee9f378 100644 --- a/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs +++ b/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs @@ -15,7 +15,7 @@ pub enum VmRevertReasonParsingError { IncorrectStringLength(Vec), } -/// Rich Revert Reasons https://github.com/0xProject/ZEIPs/issues/32 +/// Rich Revert Reasons `https://github.com/0xProject/ZEIPs/issues/32` #[derive(Debug, Clone, PartialEq)] pub enum VmRevertReason { General { diff --git a/core/lib/multivm/src/versions/vm_m6/oracle_tools.rs b/core/lib/multivm/src/versions/vm_m6/oracle_tools.rs index 6650752da27..4acc2fe68e5 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracle_tools.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracle_tools.rs @@ -13,7 +13,7 @@ use zk_evm_1_3_1::witness_trace::DummyTracer; /// zkEVM requires a bunch of objects implementing given traits to work. /// For example: Storage, Memory, PrecompilerProcessor etc -/// (you can find all these traites in zk_evm crate -> src/abstractions/mod.rs) +/// (you can find all these traits in zk_evm crate -> src/abstractions/mod.rs) /// For each of these traits, we have a local implementation (for example StorageOracle) /// that also support additional features (like rollbacks & history). /// The OracleTools struct, holds all these things together in one place. diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs index 3917063422a..48948827c3d 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs @@ -66,7 +66,7 @@ impl DecommitterOracle { } } - /// Adds additional bytecodes. They will take precendent over the bytecodes from storage. + /// Adds additional bytecodes. 
They will take precedence over the bytecodes from storage. pub fn populate(&mut self, bytecodes: Vec<(U256, Vec)>, timestamp: Timestamp) { for (hash, bytecode) in bytecodes { self.known_bytecodes.insert(hash, bytecode, timestamp); @@ -170,7 +170,7 @@ impl DecommittmentProcessor ) -> (DecommittmentQuery, Option>) { self.decommitment_requests.push((), partial_query.timestamp); // First - check if we didn't fetch this bytecode in the past. - // If we did - we can just return the page that we used before (as the memory is read only). + // If we did - we can just return the page that we used before (as the memory is readonly). if let Some(memory_page) = self .decommitted_code_hashes .inner() diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/bootloader.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/bootloader.rs index fc2a62374db..81902f330a5 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/bootloader.rs @@ -16,7 +16,7 @@ use zk_evm_1_3_1::{ zkevm_opcode_defs::{Opcode, RetOpcode}, }; -/// Tells the VM to end the execution before `ret` from the booloader if there is no panic or revert. +/// Tells the VM to end the execution before `ret` from the bootloader if there is no panic or revert. /// Also, saves the information if this `ret` was caused by "out of gas" panic. #[derive(Debug, Clone, Default)] pub struct BootloaderTracer { diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs index 4b61c9fcc15..f2ddd2762ad 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs @@ -95,7 +95,7 @@ impl Tracer for CallTracer { } impl CallTracer { - /// We use parent gas for propery calculation of gas used in the trace. + /// We use parent gas for proper calculation of gas used in the trace.
/// This method updates parent gas for the current call. fn update_parent_gas(&mut self, state: &VmLocalStateData<'_>, current_call: &mut Call) { let current = state.vm_local_state.callstack.current; diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs index e2f1652e9b7..87aa81d69db 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs @@ -99,7 +99,7 @@ pub(crate) fn get_debug_log( } /// Reads the memory slice represented by the fat pointer. -/// Note, that the fat pointer must point to the accesible memory (i.e. not cleared up yet). +/// Note, that the fat pointer must point to the accessible memory (i.e. not cleared up yet). pub(crate) fn read_pointer( memory: &SimpleMemory, pointer: FatPointer, diff --git a/core/lib/multivm/src/versions/vm_m6/test_utils.rs b/core/lib/multivm/src/versions/vm_m6/test_utils.rs index 8b022c008a7..6cce779362d 100644 --- a/core/lib/multivm/src/versions/vm_m6/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/test_utils.rs @@ -58,7 +58,7 @@ impl PartialEq for ModifiedKeysMap { #[derive(Clone, PartialEq, Debug)] pub struct DecommitterTestInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. pub modified_storage_keys: ModifiedKeysMap, pub known_bytecodes: HistoryRecorder>, H>, @@ -67,7 +67,7 @@ pub struct DecommitterTestInnerState { #[derive(Clone, PartialEq, Debug)] pub struct StorageOracleInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. 
pub modified_storage_keys: ModifiedKeysMap, diff --git a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs index f41afee3a40..bdecb9bf454 100644 --- a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs @@ -213,12 +213,12 @@ impl TransactionData { self.reserved_dynamic.len() as u64, ); - let coeficients = OverheadCoeficients::from_tx_type(self.tx_type); + let coefficients = OverheadCoefficients::from_tx_type(self.tx_type); get_amortized_overhead( total_gas_limit, gas_price_per_pubdata, encoded_len, - coeficients, + coefficients, ) } @@ -232,7 +232,7 @@ pub fn derive_overhead( gas_limit: u32, gas_price_per_pubdata: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { // Even if the gas limit is greater than the MAX_TX_ERGS_LIMIT, we assume that everything beyond MAX_TX_ERGS_LIMIT // will be spent entirely on publishing bytecodes and so we derive the overhead solely based on the capped value @@ -269,31 +269,31 @@ pub fn derive_overhead( // ); vec![ - (coeficients.ergs_limit_overhead_coeficient + (coefficients.ergs_limit_overhead_coeficient * overhead_for_single_instance_circuits.as_u32() as f64) .floor() as u32, - (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) + (coefficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) .floor() as u32, - (coeficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, + (coefficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, ] .into_iter() .max() .unwrap() } -/// Contains the coeficients with which the overhead for transactions will be calculated. -/// All of the coeficients should be <= 1. 
There are here to provide a certain "discount" for normal transactions +/// Contains the coefficients with which the overhead for transactions will be calculated. +/// All of the coefficients should be <= 1. There are here to provide a certain "discount" for normal transactions /// at the risk of malicious transactions that may close the block prematurely. -/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coeficients.ergs_limit_overhead_coeficient` MUST +/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coefficients.ergs_limit_overhead_coeficient` MUST /// result in an integer number #[derive(Debug, Clone, Copy)] -pub struct OverheadCoeficients { +pub struct OverheadCoefficients { slot_overhead_coeficient: f64, bootloader_memory_overhead_coeficient: f64, ergs_limit_overhead_coeficient: f64, } -impl OverheadCoeficients { +impl OverheadCoefficients { // This method ensures that the parameters keep the required invariants fn new_checked( slot_overhead_coeficient: f64, @@ -315,11 +315,11 @@ impl OverheadCoeficients { // L1->L2 do not receive any discounts fn new_l1() -> Self { - OverheadCoeficients::new_checked(1.0, 1.0, 1.0) + OverheadCoefficients::new_checked(1.0, 1.0, 1.0) } fn new_l2() -> Self { - OverheadCoeficients::new_checked( + OverheadCoefficients::new_checked( 1.0, 1.0, // For L2 transactions we allow a certain default discount with regard to the number of ergs. // Multiinstance circuits can in theory be spawned infinite times, while projected future limitations @@ -343,7 +343,7 @@ pub fn get_amortized_overhead( total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { // Using large U256 type to prevent overflows. 
let overhead_for_block_gas = U256::from(block_overhead_gas(gas_per_pubdata_byte_limit)); @@ -380,7 +380,7 @@ pub fn get_amortized_overhead( let tx_slot_overhead = { let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()).as_u32(); - (coeficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 + (coefficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 }; // 2. The overhead for occupying the bootloader memory can be derived from encoded_len @@ -391,7 +391,7 @@ pub fn get_amortized_overhead( ) .as_u32(); - (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() + (coefficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() as u32 }; @@ -436,7 +436,7 @@ pub fn get_amortized_overhead( let overhead_for_gas = { let numerator = overhead_for_block_gas * total_gas_limit + U256::from(MAX_TX_ERGS_LIMIT); let denominator: U256 = U256::from( - (MAX_TX_ERGS_LIMIT as f64 / coeficients.ergs_limit_overhead_coeficient) as u64, + (MAX_TX_ERGS_LIMIT as f64 / coefficients.ergs_limit_overhead_coeficient) as u64, ) + overhead_for_block_gas; let overhead_for_gas = (numerator - 1) / denominator; @@ -461,7 +461,7 @@ pub fn get_amortized_overhead( MAX_L2_TX_GAS_LIMIT as u32, gas_per_pubdata_byte_limit, encoded_len.as_usize(), - coeficients, + coefficients, ) } else { overhead @@ -484,7 +484,7 @@ mod tests { total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { let mut left_bound = if MAX_TX_ERGS_LIMIT < total_gas_limit { total_gas_limit - MAX_TX_ERGS_LIMIT @@ -502,7 +502,7 @@ mod tests { total_gas_limit - suggested_overhead, gas_per_pubdata_byte_limit, encoded_len, - coeficients, + coefficients, ); derived_overhead >= suggested_overhead @@ -531,41 +531,41 @@ mod tests { let test_params = |total_gas_limit: u32, gas_per_pubdata: u32, encoded_len: usize, - 
coeficients: OverheadCoeficients| { + coefficients: OverheadCoefficients| { let result_by_efficient_search = - get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coeficients); + get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coefficients); let result_by_binary_search = get_maximal_allowed_overhead_bin_search( total_gas_limit, gas_per_pubdata, encoded_len, - coeficients, + coefficients, ); assert_eq!(result_by_efficient_search, result_by_binary_search); }; // Some arbitrary test - test_params(60_000_000, 800, 2900, OverheadCoeficients::new_l2()); + test_params(60_000_000, 800, 2900, OverheadCoefficients::new_l2()); // Very small parameters - test_params(0, 1, 12, OverheadCoeficients::new_l2()); + test_params(0, 1, 12, OverheadCoefficients::new_l2()); // Relatively big parameters let max_tx_overhead = derive_overhead( MAX_TX_ERGS_LIMIT, 5000, 10000, - OverheadCoeficients::new_l2(), + OverheadCoefficients::new_l2(), ); test_params( MAX_TX_ERGS_LIMIT + max_tx_overhead, 5000, 10000, - OverheadCoeficients::new_l2(), + OverheadCoefficients::new_l2(), ); - test_params(115432560, 800, 2900, OverheadCoeficients::new_l1()); + test_params(115432560, 800, 2900, OverheadCoefficients::new_l1()); } #[test] diff --git a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs index cfb5bac806d..468dd3fc72d 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs @@ -383,7 +383,7 @@ impl VmInstance { } } - /// Removes the latest snapshot without rollbacking to it. + /// Removes the latest snapshot without rolling back to it. /// This function expects that there is at least one snapshot present. 
pub fn pop_snapshot_no_rollback(&mut self) { self.snapshots.pop().unwrap(); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/snapshot.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/snapshot.rs index e417a3b9ee6..2c599092869 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/snapshot.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/snapshot.rs @@ -4,9 +4,9 @@ use zksync_types::H256; pub(crate) struct BootloaderStateSnapshot { /// ID of the next transaction to be executed. pub(crate) tx_to_execute: usize, - /// Stored l2 blocks in bootloader memory + /// Stored L2 blocks in bootloader memory pub(crate) l2_blocks_len: usize, - /// Snapshot of the last l2 block. Only this block could be changed during the rollback + /// Snapshot of the last L2 block. Only this block could be changed during the rollback pub(crate) last_l2_block: L2BlockSnapshot, /// The number of 32-byte words spent on the already included compressed bytecodes. pub(crate) compressed_bytecodes_encoding: usize, @@ -18,6 +18,6 @@ pub(crate) struct BootloaderStateSnapshot { pub(crate) struct L2BlockSnapshot { /// The rolling hash of all the transactions in the miniblock pub(crate) txs_rolling_hash: H256, - /// The number of transactions in the last l2 block + /// The number of transactions in the last L2 block pub(crate) txs_len: usize, } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs index c1551dcf6cd..3bd10e9374b 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs @@ -14,7 +14,7 @@ pub(super) struct BootloaderTx { pub(super) refund: u32, /// Gas overhead pub(super) gas_overhead: u32, - /// Gas Limit for this transaction. 
It can be different from the gaslimit inside the transaction + /// Gas Limit for this transaction. It can be different from the gas limit inside the transaction pub(super) trusted_gas_limit: U256, /// Offset of the tx in bootloader memory pub(super) offset: usize, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/constants.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/constants.rs index ef3b09299fd..0dca7a6ce26 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/constants.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/constants.rs @@ -75,10 +75,10 @@ pub const BLOCK_OVERHEAD_L1_GAS: u32 = 1000000; pub const BLOCK_OVERHEAD_PUBDATA: u32 = BLOCK_OVERHEAD_L1_GAS / L1_GAS_PER_PUBDATA_BYTE; /// VM Hooks are used for communication between bootloader and tracers. -/// The 'type'/'opcode' is put into VM_HOOK_POSITION slot, +/// The 'type' / 'opcode' is put into VM_HOOK_POSITION slot, /// and VM_HOOKS_PARAMS_COUNT parameters (each 32 bytes) are put in the slots before. /// So the layout looks like this: -/// [param 0][param 1][vmhook opcode] +/// `[param 0][param 1][vmhook opcode]` pub const VM_HOOK_POSITION: u32 = RESULT_SUCCESS_FIRST_SLOT - 1; pub const VM_HOOK_PARAMS_COUNT: u32 = 2; pub const VM_HOOK_PARAMS_START_POSITION: u32 = VM_HOOK_POSITION - VM_HOOK_PARAMS_COUNT; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs index 48bbd64ecf2..a49ce2a6746 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs @@ -43,7 +43,7 @@ impl Vm { } } - /// Returns the hashes the bytecodes that have been decommitted by the decomittment processor. + /// Returns the hashes the bytecodes that have been decommitted by the decommitment processor. 
pub(crate) fn get_used_contracts(&self) -> Vec { self.state .decommittment_processor diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs index 0f335cabf39..a39be0ba93b 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs @@ -70,7 +70,7 @@ impl DecommitterOracle { } } - /// Adds additional bytecodes. They will take precendent over the bytecodes from storage. + /// Adds additional bytecodes. They will take precedent over the bytecodes from storage. pub fn populate(&mut self, bytecodes: Vec<(U256, Vec)>, timestamp: Timestamp) { for (hash, bytecode) in bytecodes { self.known_bytecodes.insert(hash, bytecode, timestamp); @@ -180,7 +180,7 @@ impl DecommittmentProcess > { self.decommitment_requests.push((), partial_query.timestamp); // First - check if we didn't fetch this bytecode in the past. - // If we did - we can just return the page that we used before (as the memory is read only). + // If we did - we can just return the page that we used before (as the memory is readonly). 
if let Some(memory_page) = self .decommitted_code_hashes .inner() diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs index c4c6ec05bd7..5af50ee0d91 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs @@ -35,7 +35,7 @@ impl PartialEq for ModifiedKeysMap { #[derive(Clone, PartialEq, Debug)] pub(crate) struct DecommitterTestInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. pub(crate) modified_storage_keys: ModifiedKeysMap, pub(crate) known_bytecodes: HistoryRecorder>, H>, @@ -44,7 +44,7 @@ pub(crate) struct DecommitterTestInnerState { #[derive(Clone, PartialEq, Debug)] pub(crate) struct StorageOracleInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. pub(crate) modified_storage_keys: ModifiedKeysMap, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs index 654c7300e4a..a9170c5a442 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs @@ -109,7 +109,7 @@ pub(crate) fn get_debug_log( } /// Reads the memory slice represented by the fat pointer. -/// Note, that the fat pointer must point to the accesible memory (i.e. not cleared up yet). +/// Note, that the fat pointer must point to the accessible memory (i.e. not cleared up yet). 
pub(crate) fn read_pointer( memory: &SimpleMemory, pointer: FatPointer, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs index 1b589146a29..1ad2ce0f977 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs @@ -11,7 +11,9 @@ use zksync_types::{ use zksync_utils::address_to_h256; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; -use crate::vm_refunds_enhancement::utils::overhead::{get_amortized_overhead, OverheadCoeficients}; +use crate::vm_refunds_enhancement::utils::overhead::{ + get_amortized_overhead, OverheadCoefficients, +}; /// This structure represents the data that is used by /// the Bootloader to describe the transaction. @@ -212,12 +214,12 @@ impl TransactionData { self.reserved_dynamic.len() as u64, ); - let coeficients = OverheadCoeficients::from_tx_type(self.tx_type); + let coefficients = OverheadCoefficients::from_tx_type(self.tx_type); get_amortized_overhead( total_gas_limit, gas_price_per_pubdata, encoded_len, - coeficients, + coefficients, ) } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/fee.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/fee.rs index 02ea1c4a561..cc6081d7a22 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/fee.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/fee.rs @@ -4,7 +4,7 @@ use zksync_utils::ceil_div; use crate::vm_refunds_enhancement::old_vm::utils::eth_price_per_pubdata_byte; -/// Calcluates the amount of gas required to publish one byte of pubdata +/// Calculates the amount of gas required to publish one byte of pubdata pub fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { let eth_price_per_pubdata_byte = 
eth_price_per_pubdata_byte(l1_gas_price); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs index 671ff0e0572..cce2f2914e3 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs @@ -12,7 +12,7 @@ pub fn derive_overhead( gas_limit: u32, gas_price_per_pubdata: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { // Even if the gas limit is greater than the MAX_TX_ERGS_LIMIT, we assume that everything beyond MAX_TX_ERGS_LIMIT // will be spent entirely on publishing bytecodes and so we derive the overhead solely based on the capped value @@ -49,31 +49,31 @@ pub fn derive_overhead( // ); vec![ - (coeficients.ergs_limit_overhead_coeficient + (coefficients.ergs_limit_overhead_coeficient * overhead_for_single_instance_circuits.as_u32() as f64) .floor() as u32, - (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) + (coefficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) .floor() as u32, - (coeficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, + (coefficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, ] .into_iter() .max() .unwrap() } -/// Contains the coeficients with which the overhead for transactions will be calculated. -/// All of the coeficients should be <= 1. There are here to provide a certain "discount" for normal transactions +/// Contains the coefficients with which the overhead for transactions will be calculated. +/// All of the coefficients should be <= 1. There are here to provide a certain "discount" for normal transactions /// at the risk of malicious transactions that may close the block prematurely. 
-/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coeficients.ergs_limit_overhead_coeficient` MUST +/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coefficients.ergs_limit_overhead_coeficient` MUST /// result in an integer number #[derive(Debug, Clone, Copy)] -pub struct OverheadCoeficients { +pub struct OverheadCoefficients { slot_overhead_coeficient: f64, bootloader_memory_overhead_coeficient: f64, ergs_limit_overhead_coeficient: f64, } -impl OverheadCoeficients { +impl OverheadCoefficients { // This method ensures that the parameters keep the required invariants fn new_checked( slot_overhead_coeficient: f64, @@ -95,11 +95,11 @@ impl OverheadCoeficients { // L1->L2 do not receive any discounts fn new_l1() -> Self { - OverheadCoeficients::new_checked(1.0, 1.0, 1.0) + OverheadCoefficients::new_checked(1.0, 1.0, 1.0) } fn new_l2() -> Self { - OverheadCoeficients::new_checked( + OverheadCoefficients::new_checked( 1.0, 1.0, // For L2 transactions we allow a certain default discount with regard to the number of ergs. // Multiinstance circuits can in theory be spawned infinite times, while projected future limitations @@ -109,7 +109,7 @@ impl OverheadCoeficients { ) } - /// Return the coeficients for the given transaction type + /// Return the coefficients for the given transaction type pub fn from_tx_type(tx_type: u8) -> Self { if is_l1_tx_type(tx_type) { Self::new_l1() @@ -124,7 +124,7 @@ pub(crate) fn get_amortized_overhead( total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { // Using large U256 type to prevent overflows. 
let overhead_for_block_gas = U256::from(block_overhead_gas(gas_per_pubdata_byte_limit)); @@ -161,7 +161,7 @@ pub(crate) fn get_amortized_overhead( let tx_slot_overhead = { let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()).as_u32(); - (coeficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 + (coefficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 }; // 2. The overhead for occupying the bootloader memory can be derived from encoded_len @@ -172,7 +172,7 @@ pub(crate) fn get_amortized_overhead( ) .as_u32(); - (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() + (coefficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() as u32 }; @@ -217,7 +217,7 @@ pub(crate) fn get_amortized_overhead( let overhead_for_gas = { let numerator = overhead_for_block_gas * total_gas_limit + U256::from(MAX_TX_ERGS_LIMIT); let denominator: U256 = U256::from( - (MAX_TX_ERGS_LIMIT as f64 / coeficients.ergs_limit_overhead_coeficient) as u64, + (MAX_TX_ERGS_LIMIT as f64 / coefficients.ergs_limit_overhead_coeficient) as u64, ) + overhead_for_block_gas; let overhead_for_gas = (numerator - 1) / denominator; @@ -242,7 +242,7 @@ pub(crate) fn get_amortized_overhead( MAX_L2_TX_GAS_LIMIT as u32, gas_per_pubdata_byte_limit, encoded_len.as_usize(), - coeficients, + coefficients, ) } else { overhead @@ -263,7 +263,7 @@ mod tests { total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { let mut left_bound = if MAX_TX_ERGS_LIMIT < total_gas_limit { total_gas_limit - MAX_TX_ERGS_LIMIT @@ -281,7 +281,7 @@ mod tests { total_gas_limit - suggested_overhead, gas_per_pubdata_byte_limit, encoded_len, - coeficients, + coefficients, ); derived_overhead >= suggested_overhead @@ -310,40 +310,40 @@ mod tests { let test_params = |total_gas_limit: u32, gas_per_pubdata: 
u32, encoded_len: usize, - coeficients: OverheadCoeficients| { + coefficients: OverheadCoefficients| { let result_by_efficient_search = - get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coeficients); + get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coefficients); let result_by_binary_search = get_maximal_allowed_overhead_bin_search( total_gas_limit, gas_per_pubdata, encoded_len, - coeficients, + coefficients, ); assert_eq!(result_by_efficient_search, result_by_binary_search); }; // Some arbitrary test - test_params(60_000_000, 800, 2900, OverheadCoeficients::new_l2()); + test_params(60_000_000, 800, 2900, OverheadCoefficients::new_l2()); // Very small parameters - test_params(0, 1, 12, OverheadCoeficients::new_l2()); + test_params(0, 1, 12, OverheadCoefficients::new_l2()); // Relatively big parameters let max_tx_overhead = derive_overhead( MAX_TX_ERGS_LIMIT, 5000, 10000, - OverheadCoeficients::new_l2(), + OverheadCoefficients::new_l2(), ); test_params( MAX_TX_ERGS_LIMIT + max_tx_overhead, 5000, 10000, - OverheadCoeficients::new_l2(), + OverheadCoefficients::new_l2(), ); - test_params(115432560, 800, 2900, OverheadCoeficients::new_l1()); + test_params(115432560, 800, 2900, OverheadCoefficients::new_l1()); } } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 4056d709a9b..11eea1206a8 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -131,7 +131,7 @@ impl VmInterface for Vm { } } -/// Methods of vm, which required some history manipullations +/// Methods of vm, which required some history manipulations impl VmInterfaceHistoryEnabled for Vm { /// Create snapshot of current vm state and push it into the memory fn make_snapshot(&mut self) { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/snapshot.rs 
b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/snapshot.rs index e417a3b9ee6..2c599092869 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/snapshot.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/snapshot.rs @@ -4,9 +4,9 @@ use zksync_types::H256; pub(crate) struct BootloaderStateSnapshot { /// ID of the next transaction to be executed. pub(crate) tx_to_execute: usize, - /// Stored l2 blocks in bootloader memory + /// Stored L2 blocks in bootloader memory pub(crate) l2_blocks_len: usize, - /// Snapshot of the last l2 block. Only this block could be changed during the rollback + /// Snapshot of the last L2 block. Only this block could be changed during the rollback pub(crate) last_l2_block: L2BlockSnapshot, /// The number of 32-byte words spent on the already included compressed bytecodes. pub(crate) compressed_bytecodes_encoding: usize, @@ -18,6 +18,6 @@ pub(crate) struct BootloaderStateSnapshot { pub(crate) struct L2BlockSnapshot { /// The rolling hash of all the transactions in the miniblock pub(crate) txs_rolling_hash: H256, - /// The number of transactions in the last l2 block + /// The number of transactions in the last L2 block pub(crate) txs_len: usize, } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs index 73825312b5e..3b53c918fda 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs @@ -14,7 +14,7 @@ pub(super) struct BootloaderTx { pub(super) refund: u32, /// Gas overhead pub(super) gas_overhead: u32, - /// Gas Limit for this transaction. It can be different from the gaslimit inside the transaction + /// Gas Limit for this transaction. 
It can be different from the gas limit inside the transaction pub(super) trusted_gas_limit: U256, /// Offset of the tx in bootloader memory pub(super) offset: usize, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/constants.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/constants.rs index ed462581cb7..5535be90381 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/constants.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/constants.rs @@ -75,10 +75,10 @@ pub(crate) const BLOCK_OVERHEAD_L1_GAS: u32 = 1000000; pub const BLOCK_OVERHEAD_PUBDATA: u32 = BLOCK_OVERHEAD_L1_GAS / L1_GAS_PER_PUBDATA_BYTE; /// VM Hooks are used for communication between bootloader and tracers. -/// The 'type'/'opcode' is put into VM_HOOK_POSITION slot, +/// The 'type' / 'opcode' is put into VM_HOOK_POSITION slot, /// and VM_HOOKS_PARAMS_COUNT parameters (each 32 bytes) are put in the slots before. /// So the layout looks like this: -/// [param 0][param 1][vmhook opcode] +/// `[param 0][param 1][vmhook opcode]` pub const VM_HOOK_POSITION: u32 = RESULT_SUCCESS_FIRST_SLOT - 1; pub const VM_HOOK_PARAMS_COUNT: u32 = 2; pub const VM_HOOK_PARAMS_START_POSITION: u32 = VM_HOOK_POSITION - VM_HOOK_PARAMS_COUNT; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs index 14570f15453..dd4a5ad55b2 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs @@ -43,7 +43,7 @@ impl Vm { } } - /// Returns the hashes the bytecodes that have been decommitted by the decomittment processor. + /// Returns the hashes the bytecodes that have been decommitted by the decommitment processor. 
pub(crate) fn get_used_contracts(&self) -> Vec { self.state .decommittment_processor diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs index 050b244736f..12c3ffd403d 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs @@ -70,7 +70,7 @@ impl DecommitterOracle { } } - /// Adds additional bytecodes. They will take precendent over the bytecodes from storage. + /// Adds additional bytecodes. They will take precedent over the bytecodes from storage. pub fn populate(&mut self, bytecodes: Vec<(U256, Vec)>, timestamp: Timestamp) { for (hash, bytecode) in bytecodes { self.known_bytecodes.insert(hash, bytecode, timestamp); @@ -180,7 +180,7 @@ impl DecommittmentProcess > { self.decommitment_requests.push((), partial_query.timestamp); // First - check if we didn't fetch this bytecode in the past. - // If we did - we can just return the page that we used before (as the memory is read only). + // If we did - we can just return the page that we used before (as the memory is readonly). if let Some(memory_page) = self .decommitted_code_hashes .inner() diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs index 8105ca244d3..83ad0b9044b 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs @@ -36,7 +36,7 @@ impl PartialEq for ModifiedKeysMap { #[derive(Clone, PartialEq, Debug)] pub(crate) struct DecommitterTestInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. 
pub(crate) modified_storage_keys: ModifiedKeysMap, pub(crate) known_bytecodes: HistoryRecorder>, H>, @@ -45,7 +45,7 @@ pub(crate) struct DecommitterTestInnerState { #[derive(Clone, PartialEq, Debug)] pub(crate) struct StorageOracleInnerState { - /// There is no way to "trully" compare the storage pointer, + /// There is no way to "truly" compare the storage pointer, /// so we just compare the modified keys. This is reasonable enough. pub(crate) modified_storage_keys: ModifiedKeysMap, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs index abf8714bbe9..0b6c7ebcfa8 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs @@ -106,7 +106,7 @@ pub(crate) fn get_debug_log( } /// Reads the memory slice represented by the fat pointer. -/// Note, that the fat pointer must point to the accesible memory (i.e. not cleared up yet). +/// Note, that the fat pointer must point to the accessible memory (i.e. not cleared up yet). 
pub(crate) fn read_pointer( memory: &SimpleMemory, pointer: FatPointer, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs index 55f942d9928..add3d829d80 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs @@ -11,7 +11,7 @@ use zksync_types::{ use zksync_utils::address_to_h256; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; -use crate::vm_virtual_blocks::utils::overhead::{get_amortized_overhead, OverheadCoeficients}; +use crate::vm_virtual_blocks::utils::overhead::{get_amortized_overhead, OverheadCoefficients}; /// This structure represents the data that is used by /// the Bootloader to describe the transaction. @@ -212,12 +212,12 @@ impl TransactionData { self.reserved_dynamic.len() as u64, ); - let coeficients = OverheadCoeficients::from_tx_type(self.tx_type); + let coefficients = OverheadCoefficients::from_tx_type(self.tx_type); get_amortized_overhead( total_gas_limit, gas_price_per_pubdata, encoded_len, - coeficients, + coefficients, ) } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/fee.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/fee.rs index d4808e91bf4..6753e819781 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/fee.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/fee.rs @@ -4,7 +4,7 @@ use zksync_utils::ceil_div; use crate::vm_virtual_blocks::old_vm::utils::eth_price_per_pubdata_byte; -/// Calcluates the amount of gas required to publish one byte of pubdata +/// Calculates the amount of gas required to publish one byte of pubdata pub fn base_fee_to_gas_per_pubdata(l1_gas_price: u64, base_fee: u64) -> u64 { let eth_price_per_pubdata_byte = eth_price_per_pubdata_byte(l1_gas_price); diff --git 
a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs index 85446534a2e..59b54888ee1 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs @@ -12,7 +12,7 @@ pub fn derive_overhead( gas_limit: u32, gas_price_per_pubdata: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { // Even if the gas limit is greater than the MAX_TX_ERGS_LIMIT, we assume that everything beyond MAX_TX_ERGS_LIMIT // will be spent entirely on publishing bytecodes and so we derive the overhead solely based on the capped value @@ -49,31 +49,31 @@ pub fn derive_overhead( // ); vec![ - (coeficients.ergs_limit_overhead_coeficient + (coefficients.ergs_limit_overhead_coeficient * overhead_for_single_instance_circuits.as_u32() as f64) .floor() as u32, - (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) + (coefficients.bootloader_memory_overhead_coeficient * overhead_for_length.as_u32() as f64) .floor() as u32, - (coeficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, + (coefficients.slot_overhead_coeficient * tx_slot_overhead.as_u32() as f64) as u32, ] .into_iter() .max() .unwrap() } -/// Contains the coeficients with which the overhead for transactions will be calculated. -/// All of the coeficients should be <= 1. There are here to provide a certain "discount" for normal transactions +/// Contains the coefficients with which the overhead for transactions will be calculated. +/// All of the coefficients should be <= 1. There are here to provide a certain "discount" for normal transactions /// at the risk of malicious transactions that may close the block prematurely. 
-/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coeficients.ergs_limit_overhead_coeficient` MUST +/// IMPORTANT: to perform correct computations, `MAX_TX_ERGS_LIMIT / coefficients.ergs_limit_overhead_coeficient` MUST /// result in an integer number #[derive(Debug, Clone, Copy)] -pub struct OverheadCoeficients { +pub struct OverheadCoefficients { slot_overhead_coeficient: f64, bootloader_memory_overhead_coeficient: f64, ergs_limit_overhead_coeficient: f64, } -impl OverheadCoeficients { +impl OverheadCoefficients { // This method ensures that the parameters keep the required invariants fn new_checked( slot_overhead_coeficient: f64, @@ -95,11 +95,11 @@ impl OverheadCoeficients { // L1->L2 do not receive any discounts fn new_l1() -> Self { - OverheadCoeficients::new_checked(1.0, 1.0, 1.0) + OverheadCoefficients::new_checked(1.0, 1.0, 1.0) } fn new_l2() -> Self { - OverheadCoeficients::new_checked( + OverheadCoefficients::new_checked( 1.0, 1.0, // For L2 transactions we allow a certain default discount with regard to the number of ergs. // Multiinstance circuits can in theory be spawned infinite times, while projected future limitations @@ -109,7 +109,7 @@ impl OverheadCoeficients { ) } - /// Return the coeficients for the given transaction type + /// Return the coefficients for the given transaction type pub fn from_tx_type(tx_type: u8) -> Self { if is_l1_tx_type(tx_type) { Self::new_l1() @@ -124,7 +124,7 @@ pub(crate) fn get_amortized_overhead( total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { // Using large U256 type to prevent overflows. 
let overhead_for_block_gas = U256::from(block_overhead_gas(gas_per_pubdata_byte_limit)); @@ -161,7 +161,7 @@ pub(crate) fn get_amortized_overhead( let tx_slot_overhead = { let tx_slot_overhead = ceil_div_u256(overhead_for_block_gas, MAX_TXS_IN_BLOCK.into()).as_u32(); - (coeficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 + (coefficients.slot_overhead_coeficient * tx_slot_overhead as f64).floor() as u32 }; // 2. The overhead for occupying the bootloader memory can be derived from encoded_len @@ -172,7 +172,7 @@ pub(crate) fn get_amortized_overhead( ) .as_u32(); - (coeficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() + (coefficients.bootloader_memory_overhead_coeficient * overhead_for_length as f64).floor() as u32 }; @@ -217,7 +217,7 @@ pub(crate) fn get_amortized_overhead( let overhead_for_gas = { let numerator = overhead_for_block_gas * total_gas_limit + U256::from(MAX_TX_ERGS_LIMIT); let denominator: U256 = U256::from( - (MAX_TX_ERGS_LIMIT as f64 / coeficients.ergs_limit_overhead_coeficient) as u64, + (MAX_TX_ERGS_LIMIT as f64 / coefficients.ergs_limit_overhead_coeficient) as u64, ) + overhead_for_block_gas; let overhead_for_gas = (numerator - 1) / denominator; @@ -242,7 +242,7 @@ pub(crate) fn get_amortized_overhead( MAX_L2_TX_GAS_LIMIT as u32, gas_per_pubdata_byte_limit, encoded_len.as_usize(), - coeficients, + coefficients, ) } else { overhead @@ -263,7 +263,7 @@ mod tests { total_gas_limit: u32, gas_per_pubdata_byte_limit: u32, encoded_len: usize, - coeficients: OverheadCoeficients, + coefficients: OverheadCoefficients, ) -> u32 { let mut left_bound = if MAX_TX_ERGS_LIMIT < total_gas_limit { total_gas_limit - MAX_TX_ERGS_LIMIT @@ -281,7 +281,7 @@ mod tests { total_gas_limit - suggested_overhead, gas_per_pubdata_byte_limit, encoded_len, - coeficients, + coefficients, ); derived_overhead >= suggested_overhead @@ -310,40 +310,40 @@ mod tests { let test_params = |total_gas_limit: u32, gas_per_pubdata: 
u32, encoded_len: usize, - coeficients: OverheadCoeficients| { + coefficients: OverheadCoefficients| { let result_by_efficient_search = - get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coeficients); + get_amortized_overhead(total_gas_limit, gas_per_pubdata, encoded_len, coefficients); let result_by_binary_search = get_maximal_allowed_overhead_bin_search( total_gas_limit, gas_per_pubdata, encoded_len, - coeficients, + coefficients, ); assert_eq!(result_by_efficient_search, result_by_binary_search); }; // Some arbitrary test - test_params(60_000_000, 800, 2900, OverheadCoeficients::new_l2()); + test_params(60_000_000, 800, 2900, OverheadCoefficients::new_l2()); // Very small parameters - test_params(0, 1, 12, OverheadCoeficients::new_l2()); + test_params(0, 1, 12, OverheadCoefficients::new_l2()); // Relatively big parameters let max_tx_overhead = derive_overhead( MAX_TX_ERGS_LIMIT, 5000, 10000, - OverheadCoeficients::new_l2(), + OverheadCoefficients::new_l2(), ); test_params( MAX_TX_ERGS_LIMIT + max_tx_overhead, 5000, 10000, - OverheadCoeficients::new_l2(), + OverheadCoefficients::new_l2(), ); - test_params(115432560, 800, 2900, OverheadCoeficients::new_l1()); + test_params(115432560, 800, 2900, OverheadCoefficients::new_l1()); } } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index e96c326b219..4110825a260 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -132,7 +132,7 @@ impl VmInterface for Vm { } } -/// Methods of vm, which required some history manipullations +/// Methods of vm, which required some history manipulations impl VmInterfaceHistoryEnabled for Vm { /// Create snapshot of current vm state and push it into the memory fn make_snapshot(&mut self) { diff --git a/core/lib/state/src/cache/metrics.rs b/core/lib/state/src/cache/metrics.rs index 7198d433947..13bc8c94aa9 100644 --- 
a/core/lib/state/src/cache/metrics.rs +++ b/core/lib/state/src/cache/metrics.rs @@ -18,7 +18,7 @@ pub(super) enum RequestOutcome { Miss, } -/// Buckets for small latencies: from 10ns to 1ms. +/// Buckets for small latencies: from 10 ns to 1 ms. const SMALL_LATENCIES: Buckets = Buckets::values(&[ 1e-8, 2.5e-8, 5e-8, 1e-7, 2.5e-7, 5e-7, 1e-6, 2.5e-6, 5e-6, 1e-5, 2.5e-5, 5e-5, 1e-4, 1e-3, ]); diff --git a/core/lib/state/src/in_memory.rs b/core/lib/state/src/in_memory.rs index 87a26b238f2..fcb69affea8 100644 --- a/core/lib/state/src/in_memory.rs +++ b/core/lib/state/src/in_memory.rs @@ -8,7 +8,7 @@ use zksync_types::{ }; use zksync_utils::u256_to_h256; -/// Network ID we use by defailt for in memory storage. +/// Network ID we use by default for in memory storage. pub const IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID: u32 = 270; /// In-memory storage. diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs index c943e48dbc1..3d54967c9ad 100644 --- a/core/lib/state/src/lib.rs +++ b/core/lib/state/src/lib.rs @@ -43,7 +43,7 @@ pub trait ReadStorage: fmt::Debug { /// Checks whether a write to this storage at the specified `key` would be an initial write. /// Roughly speaking, this is the case when the storage doesn't contain `key`, although - /// in case of mutable storages, the caveats apply (a write to a key that is present + /// in case of mutable storage, the caveats apply (a write to a key that is present /// in the storage but was not committed is still an initial write). fn is_write_initial(&mut self, key: &StorageKey) -> bool; diff --git a/core/lib/storage/src/db.rs b/core/lib/storage/src/db.rs index 3280183abf9..617d14d272d 100644 --- a/core/lib/storage/src/db.rs +++ b/core/lib/storage/src/db.rs @@ -553,7 +553,7 @@ impl RocksDB { } impl RocksDB<()> { - /// Awaits termination of all running rocksdb instances. + /// Awaits termination of all running RocksDB instances. 
/// /// This method is blocking and should be wrapped in `spawn_blocking(_)` if run in the async context. pub fn await_rocksdb_termination() { @@ -570,7 +570,7 @@ impl RocksDB<()> { } } -/// Empty struct used to register rocksdb instance +/// Empty struct used to register RocksDB instance #[derive(Debug)] struct RegistryEntry; diff --git a/core/lib/storage/src/metrics.rs b/core/lib/storage/src/metrics.rs index 0c26bd749d5..928e735a30c 100644 --- a/core/lib/storage/src/metrics.rs +++ b/core/lib/storage/src/metrics.rs @@ -96,7 +96,7 @@ pub(crate) struct RocksdbSizeMetrics { pub live_data_size: Family>, /// Total size of all SST files in the column family of a RocksDB instance. pub total_sst_size: Family>, - /// Total size of all mem tables in the column family of a RocksDB instance. + /// Total size of all memory tables in the column family of a RocksDB instance. pub total_mem_table_size: Family>, /// Total size of block cache in the column family of a RocksDB instance. pub block_cache_size: Family>, diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index f0cc7132831..24ac74ab335 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -89,7 +89,7 @@ impl<'de> Deserialize<'de> for BlockNumber { } } -/// Block unified identifier in terms of ZKSync +/// Block unified identifier in terms of zkSync /// /// This is an utility structure that cannot be (de)serialized, it has to be created manually. /// The reason is because Web3 API provides multiple methods for referring block either by hash or number, @@ -271,7 +271,7 @@ pub struct Block { /// Hash of the uncles #[serde(rename = "sha3Uncles")] pub uncles_hash: H256, - /// Miner/author's address + /// Miner / author's address #[serde(rename = "miner", default, deserialize_with = "null_to_default")] pub author: H160, /// State root hash @@ -463,7 +463,7 @@ pub struct Transaction { pub from: Option

, /// Recipient (None when contract creation) pub to: Option
, - /// Transfered value + /// Transferred value pub value: U256, /// Gas Price #[serde(rename = "gasPrice")] diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 762733f8e21..80a4d131e21 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -64,7 +64,7 @@ pub struct L1BatchHeader { /// The L2 gas price that the operator agrees on. pub l2_fair_gas_price: u64, pub base_system_contracts_hashes: BaseSystemContractsHashes, - /// System logs are those emitted as part of the Vm excecution. + /// System logs are those emitted as part of the Vm execution. pub system_logs: Vec, /// Version of protocol used for the L1 batch. pub protocol_version: Option, diff --git a/core/lib/types/src/storage/writes/mod.rs b/core/lib/types/src/storage/writes/mod.rs index 6a17afb7d15..54393f41785 100644 --- a/core/lib/types/src/storage/writes/mod.rs +++ b/core/lib/types/src/storage/writes/mod.rs @@ -41,7 +41,7 @@ pub struct RepeatedStorageWrite { #[derive(Clone, Debug, Deserialize, Serialize, Default, Eq, PartialEq)] pub struct StateDiffRecord { - /// address state diff occured at + /// address state diff occurred at pub address: Address, /// storage slot key updated pub key: U256, @@ -115,7 +115,7 @@ impl StateDiffRecord { } } - /// compression follows the following algo: + /// compression follows the following algorithm: /// 1. if repeated write: /// entry <- enumeration_index || compressed value /// 2. 
if initial write: diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 3c450e77c89..85194902e32 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -60,7 +60,7 @@ pub struct CallRequest { /// Access list #[serde(default, skip_serializing_if = "Option::is_none")] pub access_list: Option, - /// Eip712 meta + /// EIP712 meta #[serde(default, skip_serializing_if = "Option::is_none")] pub eip712_meta: Option, } @@ -97,7 +97,7 @@ impl CallRequestBuilder { self } - /// Set transfered value (None for no transfer) + /// Set the gas price pub fn gas_price(mut self, gas_price: U256) -> Self { self.call_request.gas_price = Some(gas_price); self @@ -113,7 +113,7 @@ self } - /// Set transfered value (None for no transfer) + /// Set transferred value (None for no transfer) pub fn value(mut self, value: U256) -> Self { self.call_request.value = Some(value); self @@ -177,7 +177,7 @@ pub enum SerializationTransactionError { AccessListsNotSupported, #[error("nonce has max value")] TooBigNonce, - /// TooHighGas is a sanity error to avoid extremely big numbers specified + /// Sanity check error to avoid extremely big numbers specified /// to gas and pubdata price. #[error("{0}")] TooHighGas(String), diff --git a/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs b/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs index 999afbbe604..28e9d27f0a6 100644 --- a/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs +++ b/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs @@ -29,7 +29,7 @@ impl EncodedStructureMember { } } - /// Encodes the structure as `name ‖ "(" ‖ member₁ ‖ "," ‖ member₂ ‖ "," ‖ … ‖ memberₙ ")". + /// Encodes the structure as `name ‖ "(" ‖ member₁ ‖ "," ‖ member₂ ‖ "," ‖ … ‖ memberₙ ")"`. 
pub fn get_encoded_type(&self) -> String { let mut encoded_type = String::new(); encoded_type.push_str(&self.member_type); diff --git a/core/lib/types/src/tx/primitives/packed_eth_signature.rs b/core/lib/types/src/tx/primitives/packed_eth_signature.rs index b249d151ef5..32564829ad8 100644 --- a/core/lib/types/src/tx/primitives/packed_eth_signature.rs +++ b/core/lib/types/src/tx/primitives/packed_eth_signature.rs @@ -14,7 +14,7 @@ use thiserror::Error; use zksync_basic_types::{Address, H256}; use zksync_utils::ZeroPrefixHexSerde; -/// Struct used for working with ethereum signatures created using eth_sign (using geth, ethers.js, etc) +/// Struct used for working with Ethereum signatures created using eth_sign (using geth, ethers.js, etc) /// message is serialized as 65 bytes long `0x` prefixed string. /// /// Some notes on implementation of methods of this structure: @@ -66,7 +66,7 @@ impl PackedEthSignature { Ok(PackedEthSignature(ETHSignature::from(signature))) } - /// Signs message using ethereum private key, results are identical to signature created + /// Signs message using Ethereum private key, results are identical to signature created /// using `geth`, `ethers.js`, etc. No hashing and prefixes required. pub fn sign(private_key: &H256, msg: &[u8]) -> Result { let signed_bytes = Self::message_to_signed_bytes(msg); @@ -85,7 +85,7 @@ impl PackedEthSignature { Ok(PackedEthSignature(signature)) } - /// Signs typed struct using ethereum private key by EIP-712 signature standard. + /// Signs typed struct using Ethereum private key by EIP-712 signature standard. /// Result of this function is the equivalent of RPC calling `eth_signTypedData`. pub fn sign_typed_data( private_key: &H256, @@ -115,7 +115,7 @@ impl PackedEthSignature { msg.keccak256().into() } - /// Checks signature and returns ethereum address of the signer. + /// Checks signature and returns Ethereum address of the signer. 
/// message should be the same message that was passed to `eth.sign`(or similar) method /// as argument. No hashing and prefixes required. pub fn signature_recover_signer( diff --git a/core/lib/utils/src/bytecode.rs b/core/lib/utils/src/bytecode.rs index 66101da4f5b..4a9d1cd2475 100644 --- a/core/lib/utils/src/bytecode.rs +++ b/core/lib/utils/src/bytecode.rs @@ -27,7 +27,7 @@ pub enum FailedToCompressBytecodeError { InvalidBytecode(#[from] InvalidBytecodeError), } -/// Implelements a simple compression algorithm for the bytecode. +/// Implements a simple compression algorithm for the bytecode. pub fn compress_bytecode(code: &[u8]) -> Result, FailedToCompressBytecodeError> { validate_bytecode(code)?; diff --git a/core/lib/utils/src/convert.rs b/core/lib/utils/src/convert.rs index bcaa6c68f1f..973b28cc613 100644 --- a/core/lib/utils/src/convert.rs +++ b/core/lib/utils/src/convert.rs @@ -154,7 +154,7 @@ pub fn h256_to_u32(value: H256) -> u32 { u32::from_be_bytes(be_u32_bytes) } -/// Converts u32 into the h256 as BE bytes +/// Converts u32 into the H256 as BE bytes pub fn u32_to_h256(value: u32) -> H256 { let mut result = [0u8; 32]; result[28..].copy_from_slice(&value.to_be_bytes()); diff --git a/core/lib/vlog/src/lib.rs b/core/lib/vlog/src/lib.rs index 173770beece..2c6702ede96 100644 --- a/core/lib/vlog/src/lib.rs +++ b/core/lib/vlog/src/lib.rs @@ -153,7 +153,7 @@ pub fn log_format_from_env() -> LogFormat { } /// Loads the Sentry URL from the environment variable according to the existing zkSync configuration scheme. -/// If the environemnt value is present but the value is `unset`, `None` will be returned for compatibility with the +/// If the environment value is present but the value is `unset`, `None` will be returned for compatibility with the /// existing configuration setup. /// /// This is a deprecated function existing for compatibility with the old configuration scheme. 
diff --git a/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs b/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs index 1b7d07b4276..5f9730d458e 100644 --- a/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs +++ b/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs @@ -19,7 +19,7 @@ impl RestApi { } } - /// Creates an actix-web `Scope`, which can be mounted to the Http server. + /// Creates an actix-web `Scope`, which can be mounted to the HTTP server. pub fn into_scope(self) -> actix_web::Scope { web::scope("") .app_data(web::Data::new(self)) diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs index d1ac41553df..9621adae2d8 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs @@ -5,7 +5,7 @@ use tracing::{span, Level}; use multivm::interface::{TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface}; use multivm::tracers::StorageInvocations; use multivm::vm_latest::constants::ETH_CALL_GAS_LIMIT; -use multivm::MultivmTracer; +use multivm::MultiVMTracer; use zksync_dal::ConnectionPool; use zksync_types::{ diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs index ac675eee707..9f987a150da 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs @@ -1,13 +1,13 @@ use multivm::tracers::CallTracer; use multivm::vm_latest::HistoryMode; -use multivm::{MultiVmTracerPointer, MultivmTracer}; +use multivm::{MultiVMTracer, MultiVmTracerPointer}; use once_cell::sync::OnceCell; use std::sync::Arc; use zksync_state::WriteStorage; use zksync_types::vm_trace::Call; -/// Custom tracers supported by our api +/// Custom 
tracers supported by our API #[derive(Debug)] pub(crate) enum ApiTracer { CallTracer(Arc>>), diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs index 119d6423ba2..4b9e13084ef 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs @@ -1,5 +1,5 @@ use multivm::interface::{ExecutionResult, VmExecutionMode, VmInterface}; -use multivm::MultivmTracer; +use multivm::MultiVMTracer; use std::collections::HashSet; use multivm::tracers::{ diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index b994fbff882..12c73800415 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -18,7 +18,7 @@ use multivm::vm_latest::{ constants::{BLOCK_GAS_LIMIT, MAX_PUBDATA_PER_BLOCK}, utils::{ fee::derive_base_fee_and_gas_per_pubdata, - overhead::{derive_overhead, OverheadCoeficients}, + overhead::{derive_overhead, OverheadCoefficients}, }, }; @@ -111,7 +111,7 @@ pub struct ApiContracts { pub(crate) estimate_gas: MultiVMBaseSystemContracts, /// Contracts to be used when performing `eth_call` requests. /// These contracts (mainly, bootloader) normally should be tuned to provide better UX - /// exeprience (e.g. revert messages). + /// experience (e.g. revert messages). 
pub(crate) eth_call: MultiVMBaseSystemContracts, } @@ -600,7 +600,7 @@ impl TxSender { tx_gas_limit, gas_per_pubdata_byte as u32, tx.encoding_len(), - OverheadCoeficients::from_tx_type(tx.tx_format() as u8), + OverheadCoefficients::from_tx_type(tx.tx_format() as u8), ); match &mut tx.common_data { @@ -832,7 +832,7 @@ impl TxSender { suggested_gas_limit, gas_per_pubdata_byte as u32, tx.encoding_len(), - OverheadCoeficients::from_tx_type(tx.tx_format() as u8), + OverheadCoefficients::from_tx_type(tx.tx_format() as u8), ); let full_gas_limit = diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs index da11e905647..98e75702a4c 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs @@ -43,7 +43,7 @@ pub(super) struct L1BlockNumbers { /// The component is responsible for managing sending eth_txs attempts: /// Based on eth_tx queue the component generates new attempt with the minimum possible fee, -/// save it to the database, and send it to ethereum. +/// save it to the database, and send it to Ethereum. /// Based on eth_tx_history queue the component can mark txs as stuck and create the new attempt /// with higher gas price #[derive(Debug)] diff --git a/core/lib/zksync_core/src/eth_sender/metrics.rs b/core/lib/zksync_core/src/eth_sender/metrics.rs index 2f4a225e570..950ff8bf6f7 100644 --- a/core/lib/zksync_core/src/eth_sender/metrics.rs +++ b/core/lib/zksync_core/src/eth_sender/metrics.rs @@ -78,7 +78,7 @@ pub(super) struct EthSenderMetrics { pub used_priority_fee_per_gas: Histogram, /// Last L1 block observed by the Ethereum sender. pub last_known_l1_block: Gauge, - /// Number of inflight txs produced by the Ethereum sender. + /// Number of in-flight txs produced by the Ethereum sender. 
pub number_of_inflight_txs: Gauge, #[metrics(buckets = GAS_BUCKETS)] pub l1_gas_used: Family>, diff --git a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs index ba731ede944..9d3264b679e 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs @@ -24,7 +24,7 @@ impl FriProverStatsReporter { } } -/// Invoked periodically to push prover queued/inprogress job statistics +/// Invoked periodically to push prover queued/in-progress job statistics #[async_trait] impl PeriodicJob for FriProverStatsReporter { const SERVICE_NAME: &'static str = "FriProverStatsReporter"; diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index d52fd76661f..3b00d7cffda 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -251,7 +251,7 @@ pub enum Component { WitnessGenerator(Option, AggregationRound), /// Component for housekeeping task such as cleaning blobs from GCS, reporting metrics etc. Housekeeper, - /// Component for exposing API's to prover for providing proof generation data and accepting proofs. + /// Component for exposing APIs to prover for providing proof generation data and accepting proofs. ProofDataHandler, } diff --git a/core/lib/zksync_core/src/metadata_calculator/helpers.rs b/core/lib/zksync_core/src/metadata_calculator/helpers.rs index ffd87b92d16..32f39276a1e 100644 --- a/core/lib/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/lib/zksync_core/src/metadata_calculator/helpers.rs @@ -42,7 +42,7 @@ impl From for Health { /// /// Async methods provided by this wrapper are not cancel-safe! This is probably not an issue; /// `ZkSyncTree` is only indirectly available via `MetadataCalculator::run()` entrypoint -/// which consumes `self`. 
That is, if `MetadataCalculator::run()` is cancelled (which we don't currently do, +/// which consumes `self`. That is, if `MetadataCalculator::run()` is canceled (which we don't currently do, /// at least not explicitly), all `MetadataCalculator` data including `ZkSyncTree` is discarded. /// In the unlikely case you get a "`ZkSyncTree` is in inconsistent state" panic, /// cancellation is most probably the reason. diff --git a/core/lib/zksync_core/src/metrics.rs b/core/lib/zksync_core/src/metrics.rs index fdb043e211f..539cbbbb2fb 100644 --- a/core/lib/zksync_core/src/metrics.rs +++ b/core/lib/zksync_core/src/metrics.rs @@ -180,9 +180,9 @@ pub(crate) struct ExternalNodeMetrics { pub synced: Gauge, /// Current sync lag of the external node. pub sync_lag: Gauge, - /// Number of the last L1 batch checked by the reorg detector or consistency checker. + /// Number of the last L1 batch checked by the re-org detector or consistency checker. pub last_correct_batch: Family>, - /// Number of the last miniblock checked by the reorg detector or consistency checker. + /// Number of the last miniblock checked by the re-org detector or consistency checker. pub last_correct_miniblock: Family>, } diff --git a/core/lib/zksync_core/src/reorg_detector/mod.rs b/core/lib/zksync_core/src/reorg_detector/mod.rs index 4f7b8eb5503..16137d40af6 100644 --- a/core/lib/zksync_core/src/reorg_detector/mod.rs +++ b/core/lib/zksync_core/src/reorg_detector/mod.rs @@ -14,19 +14,19 @@ use crate::metrics::{CheckerComponent, EN_METRICS}; const SLEEP_INTERVAL: Duration = Duration::from_secs(5); -/// This is a component that is responsible for detecting the batch reorgs. -/// Batch reorg is a rare event of manual intervention, when the node operator +/// This is a component that is responsible for detecting the batch re-orgs. +/// Batch re-org is a rare event of manual intervention, when the node operator /// decides to revert some of the not yet finalized batches for some reason /// (e.g. 
inability to generate a proof), and then potentially /// re-organize transactions in them to fix the problem. /// /// To detect them, we constantly check the latest sealed batch root hash, -/// and in the event of mismatch, we know that there has been a reorg. +/// and in the event of mismatch, we know that there has been a re-org. /// We then perform a binary search to find the latest correct block /// and revert all batches after it, to keep being consistent with the main node. /// /// This is the only component that is expected to finish its execution -/// in the even of reorg, since we have to restart the node after a rollback is performed, +/// in the event of a re-org, since we have to restart the node after a rollback is performed, /// and is special-cased in the `zksync_external_node` crate. #[derive(Debug)] pub struct ReorgDetector { @@ -113,7 +113,7 @@ impl ReorgDetector { Ok(hash == local_hash) } - /// Localizes a reorg: performs binary search to determine the last non-diverged block. + /// Localizes a re-org: performs binary search to determine the last non-diverged block. async fn detect_reorg(&self, diverged_l1_batch: L1BatchNumber) -> RpcResult { // TODO (BFT-176, BFT-181): We have to look through the whole history, since batch status updater may mark // a block as executed even if the state diverges for it. 
diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs index fffabe0b7e2..389677b0439 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs @@ -5,7 +5,7 @@ use tokio::{ task::JoinHandle, }; -use multivm::MultivmTracer; +use multivm::MultiVMTracer; use std::{fmt, sync::Arc}; use multivm::{ diff --git a/core/lib/zksync_core/src/state_keeper/io/mod.rs b/core/lib/zksync_core/src/state_keeper/io/mod.rs index 06397c3cd17..858c46b2e70 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mod.rs @@ -143,7 +143,7 @@ impl MiniblockSealerHandle { /// Submits a new sealing `command` to the sealer that this handle is attached to. /// /// If there are currently too many unprocessed commands, this method will wait until - /// enough of them are processed (i.e., there is backpressure). + /// enough of them are processed (i.e., there is back pressure). pub async fn submit(&mut self, command: MiniblockSealCommand) { let miniblock_number = command.miniblock_number; tracing::debug!( diff --git a/core/lib/zksync_core/src/state_keeper/metrics.rs b/core/lib/zksync_core/src/state_keeper/metrics.rs index f16b311e805..f3f43324320 100644 --- a/core/lib/zksync_core/src/state_keeper/metrics.rs +++ b/core/lib/zksync_core/src/state_keeper/metrics.rs @@ -175,7 +175,7 @@ pub(super) enum L1BatchSealStage { const COUNT_BUCKETS: Buckets = Buckets::values(&[ 10.0, 20.0, 50.0, 100.0, 200.0, 500.0, 1_000.0, 2_000.0, 5_000.0, 10_000.0, 20_000.0, 50_000.0, ]); -/// Buckets for sealing deltas for L1 batches (in seconds). The expected delta is ~1 minute. +/// Buckets for sealing deltas for L1 batches (in seconds). The expected delta is approximately 1 minute. 
const L1_BATCH_SEAL_DELTA_BUCKETS: Buckets = Buckets::values(&[ 0.1, 0.5, 1.0, 5.0, 10.0, 20.0, 30.0, 40.0, 60.0, 90.0, 120.0, 180.0, 240.0, 300.0, ]); @@ -205,7 +205,7 @@ pub(crate) struct L1BatchMetrics { /// Number of entities stored in Postgres during a specific stage of sealing an L1 batch. #[metrics(buckets = COUNT_BUCKETS)] sealed_entity_count: Family>, - /// Latency of sealing an L1 batch split by the stage and divided by the number of entiries + /// Latency of sealing an L1 batch split by the stage and divided by the number of entries /// stored in the stage. #[metrics(buckets = Buckets::LATENCIES)] sealed_entity_per_unit: Family>, @@ -285,7 +285,7 @@ pub(super) struct MiniblockMetrics { /// Number of entities stored in Postgres during a specific stage of sealing a miniblock. #[metrics(buckets = COUNT_BUCKETS)] sealed_entity_count: Family>, - /// Latency of sealing a miniblock split by the stage and divided by the number of entiries + /// Latency of sealing a miniblock split by the stage and divided by the number of entries /// stored in the stage. #[metrics(buckets = Buckets::LATENCIES)] sealed_entity_per_unit: Family>, diff --git a/core/lib/zksync_core/src/witness_generator/mod.rs b/core/lib/zksync_core/src/witness_generator/mod.rs index 10a7ff861bd..5dcebadf6a8 100644 --- a/core/lib/zksync_core/src/witness_generator/mod.rs +++ b/core/lib/zksync_core/src/witness_generator/mod.rs @@ -9,7 +9,7 @@ //! each of them starts with an invocation of `WitnessGenerator` with a corresponding `WitnessGeneratorJobType`: //! * `WitnessGeneratorJobType::BasicCircuits`: //! generates basic circuits (circuits like `Main VM` - up to 50 * 48 = 2400 circuits): -//! input table: `basic_circuit_witness_jobs` (todo SMA-1362: will be renamed from `witness_inputs`) +//! input table: `basic_circuit_witness_jobs` (TODO SMA-1362: will be renamed from `witness_inputs`) //! 
artifact/output table: `leaf_aggregation_jobs` (also creates job stubs in `node_aggregation_jobs` and `scheduler_aggregation_jobs`) //! value in `aggregation_round` field of `prover_jobs` table: 0 //! * `WitnessGeneratorJobType::LeafAggregation`: @@ -28,14 +28,14 @@ //! //! One round of prover generation consists of: //! * `WitnessGenerator` picks up the next `queued` job in its input table and processes it -//! (invoking the corresponding helper function in `zkevm_test_harness` repo) -//! * it saves the generated circuis to `prover_jobs` table and the other artifacts to its output table +//! (invoking the corresponding helper function in `zkevm_test_harness` repository) +//! * it saves the generated circuits to `prover_jobs` table and the other artifacts to its output table //! * the individual proofs are picked up by the provers, processed, and marked as complete. //! * when the last proof for this round is computed, the prover updates the row in the output table //! setting its status to `queued` //! * `WitnessGenerator` picks up such job and proceeds to the next round //! -//! Note that the very first input table (`basic_circuit_witness_jobs` (todo SMA-1362: will be renamed from `witness_inputs`)) +//! Note that the very first input table (`basic_circuit_witness_jobs` (TODO SMA-1362: will be renamed from `witness_inputs`)) //! is populated by the tree (as the input artifact for the `WitnessGeneratorJobType::BasicCircuits` is the merkle proofs) use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; diff --git a/core/tests/loadnext/src/account/mod.rs b/core/tests/loadnext/src/account/mod.rs index bead354cdea..14aa23b5031 100644 --- a/core/tests/loadnext/src/account/mod.rs +++ b/core/tests/loadnext/src/account/mod.rs @@ -59,7 +59,7 @@ pub struct AccountLifespan { contract_execution_params: LoadnextContractExecutionParams, /// Pool of account addresses, used to generate commands. 
addresses: AddressPool, - /// Successful transactions, required for requesting api + /// Successful transactions, required for requesting API successfully_sent_txs: Arc>>, /// L1 ERC-20 token used in the test. main_l1_token: Address, diff --git a/core/tests/loadnext/src/account_pool.rs b/core/tests/loadnext/src/account_pool.rs index 556bee7f402..e4ded62dcf1 100644 --- a/core/tests/loadnext/src/account_pool.rs +++ b/core/tests/loadnext/src/account_pool.rs @@ -76,7 +76,7 @@ pub struct TestWallet { } /// Pool of accounts to be used in the test. -/// Each account is represented as `zksync::Wallet` in order to provide convenient interface of interation with zkSync. +/// Each account is represented as `zksync::Wallet` in order to provide convenient interface of interaction with zkSync. #[derive(Debug)] pub struct AccountPool { /// Main wallet that will be used to initialize all the test wallets. diff --git a/core/tests/loadnext/src/command/api.rs b/core/tests/loadnext/src/command/api.rs index e865ab00031..1e520d7c195 100644 --- a/core/tests/loadnext/src/command/api.rs +++ b/core/tests/loadnext/src/command/api.rs @@ -54,7 +54,7 @@ impl AllWeighted for ApiRequestType { pub struct ApiRequest { /// Type of the request to be performed. pub request_type: ApiRequestType, - /// ZkSync block number, generated randomly. + /// zkSync block number, generated randomly. 
pub block_number: api::BlockNumber, } diff --git a/docs/advanced/pubdata.md b/docs/advanced/pubdata.md index 6bab6a85a46..d34033d0a5e 100644 --- a/docs/advanced/pubdata.md +++ b/docs/advanced/pubdata.md @@ -53,7 +53,7 @@ In pre-boojum era the superset of pubdata fields and input to the `commitBlocks` /// @param repeatedStorageChanges Storage write access as a concatenation index-value /// @param l2Logs concatenation of all L2 -> L1 logs in the block /// @param l2ArbitraryLengthMessages array of hash preimages that were sent as value of L2 logs by special system L2 contract -/// @param factoryDeps (contract bytecodes) array of l2 bytecodes that were deployed +/// @param factoryDeps (contract bytecodes) array of L2 bytecodes that were deployed struct CommitBlockInfo { uint64 blockNumber; uint64 timestamp; diff --git a/prover/witness_generator/README.md b/prover/witness_generator/README.md index 9d35fe7e054..dc476ca44fc 100644 --- a/prover/witness_generator/README.md +++ b/prover/witness_generator/README.md @@ -15,7 +15,7 @@ aggregation. 
That is, every aggregation round needs two sets of input: ## BasicCircuitsWitnessGenerator - generates basic circuits (circuits like `Main VM` - up to 50 \* 48 = 2400 circuits): -- input table: `basic_circuit_witness_jobs` (todo SMA-1362: will be renamed from `witness_inputs`) +- input table: `basic_circuit_witness_jobs` (TODO SMA-1362: will be renamed from `witness_inputs`) - artifact/output table: `leaf_aggregation_jobs` (also creates job stubs in `node_aggregation_jobs` and `scheduler_aggregation_jobs`) value in `aggregation_round` field of `prover_jobs` table: 0 @@ -42,7 +42,7 @@ One round of prover generation consists of: - `WitnessGenerator` picks up the next `queued` job in its input table and processes it (invoking the corresponding helper function in `zkevm_test_harness` repo) -- it saves the generated circuis to `prover_jobs` table and the other artifacts to its output table +- it saves the generated circuits to `prover_jobs` table and the other artifacts to its output table - the individual proofs are picked up by the provers, processed, and marked as complete. 
- when the last proof for this round is computed, the prover updates the row in the output table setting its status to `queued` diff --git a/spellcheck/era.cfg b/spellcheck/era.cfg new file mode 100644 index 00000000000..c00c2d7a64c --- /dev/null +++ b/spellcheck/era.cfg @@ -0,0 +1,69 @@ +# Project settings where a Cargo.toml exists and is passed +# ${CARGO_MANIFEST_DIR}/.config/spellcheck.toml + +# Also take into account developer comments +dev_comments = false + +# Skip the README.md file as defined in the cargo manifest +skip_readme = false + +[Hunspell] +# lang and name of `.dic` file +lang = "en_US" +# OS specific additives +# Linux: [ /usr/share/myspell ] +# Windows: [] +# macOS [ /home/alice/Libraries/hunspell, /Libraries/hunspell ] + +# Additional search paths, which take precedence over the default +# os specific search dirs, searched in order, defaults last +search_dirs = ["."] + +# Adds additional dictionaries, can be specified as +# absolute paths or relative in the search dirs (in this order). +# Relative paths are resolved relative to the configuration file +# which is used. +# Refer to `man 5 hunspell` +# or https://www.systutorials.com/docs/linux/man/4-hunspell/#lbAE +# on how to define a custom dictionary file. +extra_dictionaries = ["era.dic"] + +# If set to `true`, the OS specific default search paths +# are skipped and only explicitly specified ones are used. +skip_os_lookups = false + +# Use the builtin dictionaries if none were found +# in the configured lookup paths. +# Usually combined with `skip_os_lookups=true` +# to enforce the `builtin` usage for consistent +# results across distributions and CI runs. +# Setting this will still use the dictionaries +# specified in `extra_dictionaries = [..]` +# for topic specific lingo. +use_builtin = true + + +[Hunspell.quirks] +# Transforms words that are provided by the tokenizer +# into word fragments based on the capture groups which are to +# be checked. 
+# If no capture groups are present, the matched word is whitelisted. +transform_regex = ["^'([^\\s])'$", "^[0-9]+x$"] +# Accepts `alphabeta` variants if the checker provides a replacement suggestion +# of `alpha-beta`. +allow_concatenation = true +# And the counterpart, which accepts words with dashes, when the suggestion has +# recommendations without the dashes. This is less common. +allow_dashed = false + +[NlpRules] +# Allows the user to override the default included +# exports of LanguageTool, with other custom +# languages + +# override_rules = "/path/to/rules_binencoded.bin" +# override_tokenizer = "/path/to/tokenizer_binencoded.bin" + +[Reflow] +# Reflows doc comments to adhere to a given maximum line width limit. +max_line_length = 80 diff --git a/spellcheck/era.dic b/spellcheck/era.dic new file mode 100644 index 00000000000..214efbcd595 --- /dev/null +++ b/spellcheck/era.dic @@ -0,0 +1,605 @@ +42 +<= +=> +== +-> +<- ++ +- +* +\ += +/ +|| +< +> +% +0x00 +0x01 +0x02 +~10x +u32 +u64 +H256 +10e18 +10^9 +U256 +k +M +kb +50M +– +18kb +128kb +10k +100k +120k +24k +500k +120kb +500B +100M +~100us +10ms +1us +~100 + +ABI +vlog +L2 +L1 +json +l1 +SystemConfig +TODO +se +ZKSYNC_HOME +MultiVMTracer +vm_virtual_blocks +eth_node +BaseSystemContracts +eth_calls +refactor +WS +env +url +GasAdjuster +base_fee +base_fee_per_gas +ERC20 +Finalizer +Backoff +middleware +parallelization +precompute +precomputed +Postgres +parallelized +parallelize +job_id +API +APIs +async +pointwise +observability +atomics +integrations +stdout +GCS +websocket +struct +localhost +TOML +config +finalizer +boolean +prover +timestamp +H160 +zkSync +AccessList +miniblock +member₁ +member₂ +memberₙ +merkle +eth +Ethereum +deployer +RPC +tx +txs +subtrees +subtree +unfinalizable +meterer +Timedout +bootloader +testkit +Sepolia +Goerli +miniblock +miniblocks +MempoolIO +mempool +latencies +OracleTools +StorageOracle +zk_evm +zkEVM +src +utils +ptr +RefCell +Rc +StorageView +VM_HOOK_POSITION 
+VM_HOOKS_PARAMS_COUNT +PAYMASTER_CONTEXT_SLOTS +PrecompilerProcessor +MAX_POSTOP_SLOTS +postOp +type +opcode +KnownCodesStorage +param +HistoryDisabled +HistoryEnabled +sorted_timestamps +DecommiterOracle +DecommittmentProcessor +encodings +DecommittmentProcessor +decommitment +known_bytecodes +returndata +namespaces +StateDiffRecord +BYTES_PER_ENUMERATION_INDEX +derived_key + +// zkSync-related words +matterlabs +zkweb +zksync +blockchain +zkscan +zkscrypto +PubSub +loadtest +BigUint +radix +state_keeper +MIN_PAYMASTER_BALANCE +PrometheusCollector +RetryCollector +ScriptCollector +MetricsCollector +OperationResultsCollector +ReportCollector +filesystem +hasher +Hasher +grafana +prometheus +serializer +serializable +deserializer +Deserializes +serializing +deserializing +deserialization +configs +operation_number +hashed_key +deduplication +mutexes +mutex +Blake2s +Blake2 +web3 +Testnets +miniblock_number +hashed_key +tuples +\x19Ethereum +libzkscrypto +EOA +MultiVM +nonces +fri +rollup +pubkey +JSON +keccak256 +pubdata +timestamps +keccak +musig +len +calldata +DApp +metadata +boojum +deps +Precalculated +decommitted +WASM +DefaultPrecompilesProcessor +LSB +DDoS +refactored +tuple +HistoryMode +vm +VM +VMs +VM's +MSB +Enum +PublishProof +jsrpc +backends +ethsig +ethop +decentralization +rollups +zkrollup +unencrypted +permissionless +trustlessness +IERC +Schnorr +MuSig +Merkle +decentralised +mainchain +offchain +processed +zcli +blockchains +sidechain +sidechains +tokenomics +validator's +CHAINID +PREVRANDAO +ECDSA +EIP712 +EIP1559 +EIPs +eth_estimateGas +eth_call +versa +blake2 +AR16MT +Preimages + +// Names +Vyper +stimate +samount +Stichting +Kingsfordweg +RSIN +ABDK +Alef +Zcon +Paypal +Numio +MLTT +USDCs +dapi +validiums +validium +Validium +sharded +pepe +Arweave +Streamr +dutterbutter + +// Used libraries +numberish +arrayify +hexlify +markdownlint +ethersproject +nomicfoundation +nomiclabs +Consensys +zkforge +zkcast +Eigen +IPFS + +// Used programming 
language words +printf +charsets +println +fatalf +allowfullscreen +inttypes +zbin +Panicf +Deri +DERI +Furucombo +kwargs +scaleb +isinstance +RocksDB +mload +secp +porco +rosso +insize +MLOAD + +// ETC +gitter +signup +signups +precompiled +checkmark +Vitalik +Buterin +roadmap +majeure +conveniens +reimplementing +subsecond +supermajority +gemeente +unauthorised +Ethereum's +SDKs +EVM's +EVM +Göerli +ETHUSDC +USDCUSD +ETHUS +USDCUS +ETHUSD +Arbitrum +Adamantium +Immunefi +Winternitz +ewasm +Evmla +UUPS +Uups +TLDR +BLAKE2s +bytes32 +enumeration_index +backend +enum +num_initial +to_check_storage +source_storage +prepend +deduplicated +user_l2_to_l1_logs +L1Messeger +params +provers +zk +substring +reverter +wei +deduplicate +testnet +mainnet +performant +opcodes +USDC +USD +DBs +unexecutable +RLP +DAL +zkSync's +l2_to_l1 + +// crypto events +Edcon + +// Famous crypto people +Gluchowski +Vitalik's +Buterin's +multisignature +onchain +convertion +Keyhash +Armeabi +scijava +gluk +@Deniallugo's + +// Programming related words +bytecode +bytecodes +timeframe +mkdir +zksolc +zksyncrobot +precompiles +vyper +zkvyper +undol +applyl +Upgradability +Initializable +Hola +mundo +ISTN +Zerion +Maverik +zk_evm_1_3_3 +vk +verifier +crypto +callee +Subcalls +Vec +vecs +L1Messenger +SystemL2ToL1Log +witness_inputs +StateKeeper +enum_index +virtual_block_start_batch +virtual_block_finish_l2_block +maxFeePerGas +maxPriorityFeePerGas +structs +all_circuit +OversizedData +M5 +eth_sign +geth +ethers +js +recovery_id +&self +ETHSignature +recover_signer +BlockNumber +(de) +{result +DebugCall} +CREATE2 +memtables +memtable +PostgreSQL +OneTx +DefaultTracer +Tx1 +Tx2 +TxN +VmStopped +Unversioned +versioned +l2_block +submodule +enums +deserialized +hashmap +vm_m5 +SDK +1M +dir +SSD +getter +Getters +WebSocket +gasLimit +MiBs +MiB +GiB +GiBs +pubsub +\x19Ethereum +nibbles–node +ZkSyncTree +invariants +LEB128 +workflow +L1Batch +runtime +Tokio +Blobstore +S3 +AWS +ExternalIO 
+ClosedFormInputWrapper +AggregationWrapper +(de)serializer +typesafe +LRU +ns +Q3 +loadnext +args +with_arg +node_aggregation_job +scheduler_job +leaf_aggregation_job +MAX_ATTEMPTs +fsync +TEST_DATABASE_URL +newest_block +block_count +contracts_verification_info +RNG +jsonrpsee +l1_batch +Namespace +ExecutionStatus +VmStarted +reproducibility +CFs +key–value +enum_index_migration_cursor +block_number +initial_writes +errored +FactoryDeps +de +StorageView's +Yul +eth_txs +eth_tx +ExecuteBlock +PublishProofBlocksOnchain +CommitBlocks +entrypoint +gas_limit +TxSender +UX +BasicWitnessInputProducer +eth_tx_history +PENDING_BLOCK +from_block +namespace +PriorityQueue +Görli +Ropsten +Rinkeby +tokio +threadpool +IntrinsicGas +InsufficientFundsForTransfer +ChainId +eth_getLogs +façade +virtual_blocks_per_miniblock +virtual_block_interval +cloneable +timestamped +healthcheck +readonly +upgrader +startup + +AUTOGENERATED +x19Ethereum +block_timestamp +SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER +OneTxTracer +multicall +Multicall3 +updatable +instantiation +unexecuted +transactional +benchmarking +virtual_blocks_interval +dal +codebase +compactions +M6 +compiler_common \ No newline at end of file From bd268ac02bc3530c1d3247cb9496c3e13c2e52d9 Mon Sep 17 00:00:00 2001 From: Danil Date: Wed, 29 Nov 2023 17:51:47 +0100 Subject: [PATCH 057/115] fix(vm): Expose additional types and traits (#563) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add more pubs for previous vm ## Why ❔ For updating era testnode we need to expose some additional data ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
--------- Signed-off-by: Danil --- core/lib/multivm/src/lib.rs | 3 +++ core/lib/multivm/src/versions/vm_refunds_enhancement/mod.rs | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index adb9358980f..06d7a429130 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -26,3 +26,6 @@ pub use versions::vm_m5; pub use versions::vm_m6; pub use versions::vm_refunds_enhancement; pub use versions::vm_virtual_blocks; +pub use zk_evm_1_3_1; +pub use zk_evm_1_3_3; +pub use zk_evm_1_4_0; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/mod.rs index 89e7b21b984..28a681e5e60 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/mod.rs @@ -6,7 +6,7 @@ pub use old_vm::{ pub use oracles::storage::StorageOracle; pub use tracers::dispatcher::TracerDispatcher; -pub use tracers::traits::{TracerPointer, VmTracer}; +pub use tracers::traits::{ToTracerPointer, TracerPointer, VmTracer}; pub use utils::transaction_encoding::TransactionVmExt; From 8b34ab9d35b678da6df7c7f9eee0652bc8522d56 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 29 Nov 2023 19:53:22 +0200 Subject: [PATCH 058/115] test(api): Test coverage for WebSocket server (#525) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Cover basic WS-specific JSON-RPC functionality (subscriptions, `getFilterChanges`) with tests. ## Why ❔ Will allow to set the expectations for the current server behavior, so that it could be evolved in the future w/o unexpected / breaking changes (e.g., when implementing `jsonrpsee`-based subscriptions). ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. 
- [x] Code has been formatted via `zk fmt` and `zk lint`. --- core/lib/types/src/tx/mod.rs | 2 +- core/lib/web3_decl/src/types.rs | 61 ++- .../web3/backend_jsonrpc/pub_sub.rs | 3 +- .../zksync_core/src/api_server/web3/mod.rs | 48 +- .../web3/namespaces/eth_subscribe.rs | 147 ----- .../src/api_server/web3/namespaces/mod.rs | 10 +- .../zksync_core/src/api_server/web3/pubsub.rs | 391 ++++++++++++++ .../src/api_server/web3/pubsub_notifier.rs | 191 ------- .../zksync_core/src/api_server/web3/tests.rs | 145 ----- .../src/api_server/web3/tests/mod.rs | 503 ++++++++++++++++++ .../src/api_server/web3/tests/ws.rs | 466 ++++++++++++++++ 11 files changed, 1433 insertions(+), 534 deletions(-) delete mode 100644 core/lib/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs create mode 100644 core/lib/zksync_core/src/api_server/web3/pubsub.rs delete mode 100644 core/lib/zksync_core/src/api_server/web3/pubsub_notifier.rs delete mode 100644 core/lib/zksync_core/src/api_server/web3/tests.rs create mode 100644 core/lib/zksync_core/src/api_server/web3/tests/mod.rs create mode 100644 core/lib/zksync_core/src/api_server/web3/tests/ws.rs diff --git a/core/lib/types/src/tx/mod.rs b/core/lib/types/src/tx/mod.rs index bd2e6e46694..71f188f3217 100644 --- a/core/lib/types/src/tx/mod.rs +++ b/core/lib/types/src/tx/mod.rs @@ -49,7 +49,7 @@ impl TransactionExecutionResult { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub struct IncludedTxLocation { pub tx_hash: H256, pub tx_index_in_miniblock: u32, diff --git a/core/lib/web3_decl/src/types.rs b/core/lib/web3_decl/src/types.rs index 46033bc4118..7abe34637d6 100644 --- a/core/lib/web3_decl/src/types.rs +++ b/core/lib/web3_decl/src/types.rs @@ -5,13 +5,15 @@ //! //! These "extensions" are required to provide more zkSync-specific information while remaining Web3-compilant. 
-use core::convert::{TryFrom, TryInto}; -use core::fmt; -use core::marker::PhantomData; - use itertools::unfold; use rlp::Rlp; -use serde::{de, Deserialize, Serialize, Serializer}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; + +use core::{ + convert::{TryFrom, TryInto}, + fmt, + marker::PhantomData, +}; pub use zksync_types::{ api::{Block, BlockNumber, Log, TransactionReceipt, TransactionRequest}, @@ -105,13 +107,18 @@ pub enum FilterChanges { } /// Either value or array of values. +/// +/// A value must serialize into a string. #[derive(Default, Debug, PartialEq, Clone)] pub struct ValueOrArray(pub Vec); -impl Serialize for ValueOrArray -where - T: Serialize, -{ +impl From for ValueOrArray { + fn from(value: T) -> Self { + Self(vec![value]) + } +} + +impl Serialize for ValueOrArray { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -124,18 +131,18 @@ where } } -impl<'de, T: std::fmt::Debug + Deserialize<'de>> ::serde::Deserialize<'de> for ValueOrArray { +impl<'de, T: Deserialize<'de>> Deserialize<'de> for ValueOrArray { fn deserialize(deserializer: D) -> Result where - D: ::serde::Deserializer<'de>, + D: Deserializer<'de>, { struct Visitor(PhantomData); - impl<'de, T: std::fmt::Debug + Deserialize<'de>> de::Visitor<'de> for Visitor { + impl<'de, T: Deserialize<'de>> de::Visitor<'de> for Visitor { type Value = ValueOrArray; - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("Expected value or sequence") + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("string value or sequence of values") } fn visit_str(self, value: &str) -> Result @@ -408,4 +415,30 @@ mod tests { assert_eq!(&actual_block_id, expected_block_id); } } + + #[test] + fn serializing_value_or_array() { + let value = ValueOrArray::from(Address::repeat_byte(0x1f)); + let json = serde_json::to_value(value.clone()).unwrap(); + assert_eq!( + json, + 
serde_json::json!("0x1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f") + ); + + let restored_value: ValueOrArray
= serde_json::from_value(json).unwrap(); + assert_eq!(restored_value, value); + + let value = ValueOrArray(vec![Address::repeat_byte(0x1f), Address::repeat_byte(0x23)]); + let json = serde_json::to_value(value.clone()).unwrap(); + assert_eq!( + json, + serde_json::json!([ + "0x1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f1f", + "0x2323232323232323232323232323232323232323", + ]) + ); + + let restored_value: ValueOrArray
= serde_json::from_value(json).unwrap(); + assert_eq!(restored_value, value); + } } diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs index 4a28a17b4e3..1b11919abde 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs @@ -7,8 +7,7 @@ use jsonrpc_pubsub::{Session, SubscriptionId}; use zksync_web3_decl::types::PubSubResult; -use super::super::namespaces::EthSubscribe; -use super::batch_limiter_middleware::RateLimitMetadata; +use super::{super::EthSubscribe, batch_limiter_middleware::RateLimitMetadata}; #[rpc] pub trait Web3PubSub { diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs index 918bf5f67dc..2904d5af79d 100644 --- a/core/lib/zksync_core/src/api_server/web3/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/mod.rs @@ -4,7 +4,7 @@ use jsonrpc_core::MetaIoHandler; use jsonrpc_http_server::hyper; use jsonrpc_pubsub::PubSubHandler; use serde::Deserialize; -use tokio::sync::{oneshot, watch, Mutex}; +use tokio::sync::{mpsc, oneshot, watch, Mutex}; use tower_http::{cors::CorsLayer, metrics::InFlightRequestsLayer}; use chrono::NaiveDateTime; @@ -40,7 +40,7 @@ pub mod backend_jsonrpc; pub mod backend_jsonrpsee; mod metrics; pub mod namespaces; -mod pubsub_notifier; +mod pubsub; pub mod state; #[cfg(test)] pub(crate) mod tests; @@ -56,10 +56,9 @@ use self::backend_jsonrpc::{ }; use self::metrics::API_METRICS; use self::namespaces::{ - DebugNamespace, EnNamespace, EthNamespace, EthSubscribe, NetNamespace, Web3Namespace, - ZksNamespace, + DebugNamespace, EnNamespace, EthNamespace, NetNamespace, Web3Namespace, ZksNamespace, }; -use self::pubsub_notifier::{notify_blocks, notify_logs, notify_txs}; +use self::pubsub::{EthSubscribe, PubSubEvent}; use self::state::{Filters, InternalApiConfig, RpcState, 
SealedMiniblockNumber}; /// Timeout for graceful shutdown logic within API servers. @@ -150,6 +149,7 @@ pub struct ApiBuilder { namespaces: Option>, logs_translator_enabled: bool, tree_api_url: Option, + pub_sub_events_sender: Option>, } impl ApiBuilder { @@ -174,6 +174,7 @@ impl ApiBuilder { config, logs_translator_enabled: false, tree_api_url: None, + pub_sub_events_sender: None, } } @@ -275,6 +276,12 @@ impl ApiBuilder { self.tree_api_url = tree_api_url; self } + + #[cfg(test)] + fn with_pub_sub_events(mut self, sender: mpsc::UnboundedSender) -> Self { + self.pub_sub_events_sender = Some(sender); + self + } } impl ApiBuilder { @@ -541,30 +548,19 @@ impl ApiBuilder { .unwrap() .contains(&Namespace::Pubsub) { - let pub_sub = EthSubscribe::new(runtime.handle().clone()); + let mut pub_sub = EthSubscribe::new(runtime.handle().clone()); + if let Some(sender) = self.pub_sub_events_sender.take() { + pub_sub.set_events_sender(sender); + } let polling_interval = self .polling_interval .context("Polling interval is not set")?; - tasks.extend([ - tokio::spawn(notify_blocks( - pub_sub.active_block_subs.clone(), - self.pool.clone(), - polling_interval, - stop_receiver.clone(), - )), - tokio::spawn(notify_txs( - pub_sub.active_tx_subs.clone(), - self.pool.clone(), - polling_interval, - stop_receiver.clone(), - )), - tokio::spawn(notify_logs( - pub_sub.active_log_subs.clone(), - self.pool.clone(), - polling_interval, - stop_receiver.clone(), - )), - ]); + + tasks.extend(pub_sub.spawn_notifiers( + self.pool.clone(), + polling_interval, + stop_receiver.clone(), + )); io_handler.extend_with(pub_sub.to_delegate()); } self.extend_jsonrpc_methods(&mut io_handler).await; diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs deleted file mode 100644 index 208318ec3e1..00000000000 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth_subscribe.rs +++ /dev/null @@ -1,147 +0,0 @@ 
-use jsonrpc_core::error::{Error, ErrorCode}; -use jsonrpc_pubsub::{typed, SubscriptionId}; -use tokio::sync::RwLock; - -use std::{collections::HashMap, sync::Arc}; - -use zksync_types::web3::types::H128; -use zksync_web3_decl::types::{PubSubFilter, PubSubResult}; - -use super::eth::EVENT_TOPIC_NUMBER_LIMIT; -use crate::api_server::web3::metrics::{SubscriptionType, PUB_SUB_METRICS}; - -pub type SubscriptionMap = Arc>>; - -#[derive(Debug, Clone)] -pub struct EthSubscribe { - // `jsonrpc` backend executes task subscription on a separate thread that has no tokio context. - pub runtime_handle: tokio::runtime::Handle, - pub active_block_subs: SubscriptionMap>, - pub active_tx_subs: SubscriptionMap>, - pub active_log_subs: SubscriptionMap<(typed::Sink, PubSubFilter)>, -} - -impl EthSubscribe { - pub fn new(runtime_handle: tokio::runtime::Handle) -> Self { - Self { - runtime_handle, - active_block_subs: SubscriptionMap::default(), - active_tx_subs: SubscriptionMap::default(), - active_log_subs: SubscriptionMap::default(), - } - } - - /// Assigns ID for the subscriber if the connection is open, returns error otherwise. 
- fn assign_id( - subscriber: typed::Subscriber, - ) -> Result<(typed::Sink, SubscriptionId), ()> { - let id = H128::random(); - let sub_id = SubscriptionId::String(format!("0x{}", hex::encode(id.0))); - let sink = subscriber.assign_id(sub_id.clone())?; - Ok((sink, sub_id)) - } - - fn reject(subscriber: typed::Subscriber) { - subscriber - .reject(Error { - code: ErrorCode::InvalidParams, - message: "Rejecting subscription - invalid parameters provided.".into(), - data: None, - }) - .unwrap(); - } - - #[tracing::instrument(skip(self, subscriber, params))] - pub async fn sub( - &self, - subscriber: typed::Subscriber, - sub_type: String, - params: Option, - ) { - let sub_type = match sub_type.as_str() { - "newHeads" => { - let mut block_subs = self.active_block_subs.write().await; - let Ok((sink, id)) = Self::assign_id(subscriber) else { - return; - }; - block_subs.insert(id, sink); - Some(SubscriptionType::Blocks) - } - "newPendingTransactions" => { - let mut tx_subs = self.active_tx_subs.write().await; - let Ok((sink, id)) = Self::assign_id(subscriber) else { - return; - }; - tx_subs.insert(id, sink); - Some(SubscriptionType::Txs) - } - "logs" => { - let filter = params.map(serde_json::from_value).transpose(); - match filter { - Ok(filter) => { - let filter: PubSubFilter = filter.unwrap_or_default(); - if filter - .topics - .as_ref() - .map(|topics| topics.len()) - .unwrap_or(0) - > EVENT_TOPIC_NUMBER_LIMIT - { - Self::reject(subscriber); - None - } else { - let mut log_subs = self.active_log_subs.write().await; - let Ok((sink, id)) = Self::assign_id(subscriber) else { - return; - }; - log_subs.insert(id, (sink, filter)); - Some(SubscriptionType::Logs) - } - } - Err(_) => { - Self::reject(subscriber); - None - } - } - } - "syncing" => { - let Ok((sink, _id)) = Self::assign_id(subscriber) else { - return; - }; - let _ = sink.notify(Ok(PubSubResult::Syncing(false))); - None - } - _ => { - Self::reject(subscriber); - None - } - }; - - if let Some(sub_type) = sub_type { 
- PUB_SUB_METRICS.active_subscribers[&sub_type].inc_by(1); - } - } - - #[tracing::instrument(skip(self))] - pub async fn unsub(&self, id: SubscriptionId) -> Result { - let removed = if self.active_block_subs.write().await.remove(&id).is_some() { - Some(SubscriptionType::Blocks) - } else if self.active_tx_subs.write().await.remove(&id).is_some() { - Some(SubscriptionType::Txs) - } else if self.active_log_subs.write().await.remove(&id).is_some() { - Some(SubscriptionType::Logs) - } else { - None - }; - if let Some(sub_type) = removed { - PUB_SUB_METRICS.active_subscribers[&sub_type].dec_by(1); - Ok(true) - } else { - Err(Error { - code: ErrorCode::InvalidParams, - message: "Invalid subscription.".into(), - data: None, - }) - } - } -} diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs index 9792fed5edc..8504717f3b9 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs @@ -4,17 +4,11 @@ mod debug; mod en; pub(crate) mod eth; -mod eth_subscribe; mod net; mod web3; mod zks; pub use self::{ - debug::DebugNamespace, - en::EnNamespace, - eth::EthNamespace, - eth_subscribe::{EthSubscribe, SubscriptionMap}, - net::NetNamespace, - web3::Web3Namespace, - zks::ZksNamespace, + debug::DebugNamespace, en::EnNamespace, eth::EthNamespace, net::NetNamespace, + web3::Web3Namespace, zks::ZksNamespace, }; diff --git a/core/lib/zksync_core/src/api_server/web3/pubsub.rs b/core/lib/zksync_core/src/api_server/web3/pubsub.rs new file mode 100644 index 00000000000..946c0744ba4 --- /dev/null +++ b/core/lib/zksync_core/src/api_server/web3/pubsub.rs @@ -0,0 +1,391 @@ +//! (Largely) backend-agnostic logic for dealing with Web3 subscriptions. 
+ +use anyhow::Context as _; +use jsonrpc_core::error::{Error, ErrorCode}; +use jsonrpc_pubsub::{typed, SubscriptionId}; +use tokio::{ + sync::{mpsc, watch, RwLock}, + task::JoinHandle, + time::{interval, Duration}, +}; + +use std::{collections::HashMap, sync::Arc}; + +use zksync_dal::ConnectionPool; +use zksync_types::{MiniblockNumber, H128, H256}; +use zksync_web3_decl::types::{BlockHeader, Log, PubSubFilter, PubSubResult}; + +use super::{ + metrics::{SubscriptionType, PUB_SUB_METRICS}, + namespaces::eth::EVENT_TOPIC_NUMBER_LIMIT, +}; + +pub(super) type SubscriptionMap = Arc>>; + +/// Events emitted by the subscription logic. Only used in WebSocket server tests so far. +#[derive(Debug)] +pub(super) enum PubSubEvent { + Subscribed(SubscriptionType), + NotifyIterationFinished(SubscriptionType), +} + +/// Manager of notifications for a certain type of subscriptions. +#[derive(Debug)] +struct PubSubNotifier { + subscribers: SubscriptionMap, + connection_pool: ConnectionPool, + polling_interval: Duration, + events_sender: Option>, +} + +impl PubSubNotifier { + async fn sealed_miniblock_number(&self) -> anyhow::Result { + self.connection_pool + .access_storage_tagged("api") + .await + .context("access_storage_tagged")? 
+ .blocks_web3_dal() + .get_sealed_miniblock_number() + .await + .context("get_sealed_miniblock_number()") + } + + async fn current_subscribers(&self) -> Vec { + self.subscribers.read().await.values().cloned().collect() + } + + fn emit_event(&self, event: PubSubEvent) { + if let Some(sender) = &self.events_sender { + sender.send(event).ok(); + } + } +} + +impl PubSubNotifier> { + async fn notify_blocks(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let mut last_block_number = self.sealed_miniblock_number().await?; + let mut timer = interval(self.polling_interval); + loop { + if *stop_receiver.borrow() { + tracing::info!("Stop signal received, pubsub_block_notifier is shutting down"); + break; + } + timer.tick().await; + + let db_latency = PUB_SUB_METRICS.db_poll_latency[&SubscriptionType::Blocks].start(); + let new_blocks = self.new_blocks(last_block_number).await?; + db_latency.observe(); + + if let Some(last_block) = new_blocks.last() { + last_block_number = MiniblockNumber(last_block.number.unwrap().as_u32()); + + let notify_latency = + PUB_SUB_METRICS.notify_subscribers_latency[&SubscriptionType::Blocks].start(); + for sink in self.current_subscribers().await { + for block in new_blocks.iter().cloned() { + if sink.notify(Ok(PubSubResult::Header(block))).is_err() { + // Subscriber disconnected. + break; + } + PUB_SUB_METRICS.notify[&SubscriptionType::Blocks].inc(); + } + } + notify_latency.observe(); + } + self.emit_event(PubSubEvent::NotifyIterationFinished( + SubscriptionType::Blocks, + )); + } + Ok(()) + } + + async fn new_blocks( + &self, + last_block_number: MiniblockNumber, + ) -> anyhow::Result> { + self.connection_pool + .access_storage_tagged("api") + .await + .context("access_storage_tagged")? 
+ .blocks_web3_dal() + .get_block_headers_after(last_block_number) + .await + .with_context(|| format!("get_block_headers_after({last_block_number})")) + } + + async fn notify_txs(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let mut last_time = chrono::Utc::now().naive_utc(); + let mut timer = interval(self.polling_interval); + loop { + if *stop_receiver.borrow() { + tracing::info!("Stop signal received, pubsub_tx_notifier is shutting down"); + break; + } + timer.tick().await; + + let db_latency = PUB_SUB_METRICS.db_poll_latency[&SubscriptionType::Txs].start(); + let (new_txs, new_last_time) = self.new_txs(last_time).await?; + db_latency.observe(); + + if let Some(new_last_time) = new_last_time { + last_time = new_last_time; + let notify_latency = + PUB_SUB_METRICS.notify_subscribers_latency[&SubscriptionType::Txs].start(); + + for sink in self.current_subscribers().await { + for tx_hash in new_txs.iter().cloned() { + if sink.notify(Ok(PubSubResult::TxHash(tx_hash))).is_err() { + // Subscriber disconnected. + break; + } + PUB_SUB_METRICS.notify[&SubscriptionType::Txs].inc(); + } + } + notify_latency.observe(); + } + self.emit_event(PubSubEvent::NotifyIterationFinished(SubscriptionType::Txs)); + } + Ok(()) + } + + async fn new_txs( + &self, + last_time: chrono::NaiveDateTime, + ) -> anyhow::Result<(Vec, Option)> { + self.connection_pool + .access_storage_tagged("api") + .await + .context("access_storage_tagged")? 
+ .transactions_web3_dal() + .get_pending_txs_hashes_after(last_time, None) + .await + .context("get_pending_txs_hashes_after()") + } +} + +impl PubSubNotifier<(typed::Sink, PubSubFilter)> { + async fn notify_logs(self, stop_receiver: watch::Receiver) -> anyhow::Result<()> { + let mut last_block_number = self.sealed_miniblock_number().await?; + let mut timer = interval(self.polling_interval); + loop { + if *stop_receiver.borrow() { + tracing::info!("Stop signal received, pubsub_logs_notifier is shutting down"); + break; + } + timer.tick().await; + + let db_latency = PUB_SUB_METRICS.db_poll_latency[&SubscriptionType::Logs].start(); + let new_logs = self.new_logs(last_block_number).await?; + db_latency.observe(); + + if let Some(last_log) = new_logs.last() { + last_block_number = MiniblockNumber(last_log.block_number.unwrap().as_u32()); + let notify_latency = + PUB_SUB_METRICS.notify_subscribers_latency[&SubscriptionType::Logs].start(); + + for (sink, filter) in self.current_subscribers().await { + for log in &new_logs { + if filter.matches(log) { + if sink.notify(Ok(PubSubResult::Log(log.clone()))).is_err() { + // Subscriber disconnected. + break; + } + PUB_SUB_METRICS.notify[&SubscriptionType::Logs].inc(); + } + } + } + notify_latency.observe(); + } + self.emit_event(PubSubEvent::NotifyIterationFinished(SubscriptionType::Logs)); + } + Ok(()) + } + + async fn new_logs(&self, last_block_number: MiniblockNumber) -> anyhow::Result> { + self.connection_pool + .access_storage_tagged("api") + .await + .context("access_storage_tagged")? + .events_web3_dal() + .get_all_logs(last_block_number) + .await + .context("events_web3_dal().get_all_logs()") + } +} + +/// Subscription support for Web3 APIs. +#[derive(Debug, Clone)] +pub(super) struct EthSubscribe { + // `jsonrpc` backend executes task subscription on a separate thread that has no tokio context. 
+ pub runtime_handle: tokio::runtime::Handle, + active_block_subs: SubscriptionMap>, + active_tx_subs: SubscriptionMap>, + active_log_subs: SubscriptionMap<(typed::Sink, PubSubFilter)>, + events_sender: Option>, +} + +impl EthSubscribe { + pub fn new(runtime_handle: tokio::runtime::Handle) -> Self { + Self { + runtime_handle, + active_block_subs: SubscriptionMap::default(), + active_tx_subs: SubscriptionMap::default(), + active_log_subs: SubscriptionMap::default(), + events_sender: None, + } + } + + pub fn set_events_sender(&mut self, sender: mpsc::UnboundedSender) { + self.events_sender = Some(sender); + } + + /// Assigns ID for the subscriber if the connection is open, returns error otherwise. + fn assign_id( + subscriber: typed::Subscriber, + ) -> Result<(typed::Sink, SubscriptionId), ()> { + let id = H128::random(); + let sub_id = SubscriptionId::String(format!("0x{}", hex::encode(id.0))); + let sink = subscriber.assign_id(sub_id.clone())?; + Ok((sink, sub_id)) + } + + fn reject(subscriber: typed::Subscriber) { + subscriber + .reject(Error { + code: ErrorCode::InvalidParams, + message: "Rejecting subscription - invalid parameters provided.".into(), + data: None, + }) + .unwrap(); + } + + #[tracing::instrument(skip(self, subscriber, params))] + pub async fn sub( + &self, + subscriber: typed::Subscriber, + sub_type: String, + params: Option, + ) { + let sub_type = match sub_type.as_str() { + "newHeads" => { + let mut block_subs = self.active_block_subs.write().await; + let Ok((sink, id)) = Self::assign_id(subscriber) else { + return; + }; + block_subs.insert(id, sink); + Some(SubscriptionType::Blocks) + } + "newPendingTransactions" => { + let mut tx_subs = self.active_tx_subs.write().await; + let Ok((sink, id)) = Self::assign_id(subscriber) else { + return; + }; + tx_subs.insert(id, sink); + Some(SubscriptionType::Txs) + } + "logs" => { + let filter = params.map(serde_json::from_value).transpose(); + match filter { + Ok(filter) => { + let filter: PubSubFilter = 
filter.unwrap_or_default(); + let topic_count = filter.topics.as_ref().map_or(0, Vec::len); + if topic_count > EVENT_TOPIC_NUMBER_LIMIT { + Self::reject(subscriber); + None + } else { + let mut log_subs = self.active_log_subs.write().await; + let Ok((sink, id)) = Self::assign_id(subscriber) else { + return; + }; + log_subs.insert(id, (sink, filter)); + Some(SubscriptionType::Logs) + } + } + Err(_) => { + Self::reject(subscriber); + None + } + } + } + "syncing" => { + let Ok((sink, _id)) = Self::assign_id(subscriber) else { + return; + }; + let _ = sink.notify(Ok(PubSubResult::Syncing(false))); + None + } + _ => { + Self::reject(subscriber); + None + } + }; + + if let Some(sub_type) = sub_type { + PUB_SUB_METRICS.active_subscribers[&sub_type].inc_by(1); + if let Some(sender) = &self.events_sender { + sender.send(PubSubEvent::Subscribed(sub_type)).ok(); + } + } + } + + #[tracing::instrument(skip(self))] + pub async fn unsub(&self, id: SubscriptionId) -> Result { + let removed = if self.active_block_subs.write().await.remove(&id).is_some() { + Some(SubscriptionType::Blocks) + } else if self.active_tx_subs.write().await.remove(&id).is_some() { + Some(SubscriptionType::Txs) + } else if self.active_log_subs.write().await.remove(&id).is_some() { + Some(SubscriptionType::Logs) + } else { + None + }; + if let Some(sub_type) = removed { + PUB_SUB_METRICS.active_subscribers[&sub_type].dec_by(1); + Ok(true) + } else { + Err(Error { + code: ErrorCode::InvalidParams, + message: "Invalid subscription.".into(), + data: None, + }) + } + } + + /// Spawns notifier tasks. This should be called once per instance. 
+ pub fn spawn_notifiers( + &self, + connection_pool: ConnectionPool, + polling_interval: Duration, + stop_receiver: watch::Receiver, + ) -> Vec>> { + let mut notifier_tasks = Vec::with_capacity(3); + let notifier = PubSubNotifier { + subscribers: self.active_block_subs.clone(), + connection_pool: connection_pool.clone(), + polling_interval, + events_sender: self.events_sender.clone(), + }; + let notifier_task = tokio::spawn(notifier.notify_blocks(stop_receiver.clone())); + notifier_tasks.push(notifier_task); + + let notifier = PubSubNotifier { + subscribers: self.active_tx_subs.clone(), + connection_pool: connection_pool.clone(), + polling_interval, + events_sender: self.events_sender.clone(), + }; + let notifier_task = tokio::spawn(notifier.notify_txs(stop_receiver.clone())); + notifier_tasks.push(notifier_task); + + let notifier = PubSubNotifier { + subscribers: self.active_log_subs.clone(), + connection_pool, + polling_interval, + events_sender: self.events_sender.clone(), + }; + let notifier_task = tokio::spawn(notifier.notify_logs(stop_receiver)); + + notifier_tasks.push(notifier_task); + notifier_tasks + } +} diff --git a/core/lib/zksync_core/src/api_server/web3/pubsub_notifier.rs b/core/lib/zksync_core/src/api_server/web3/pubsub_notifier.rs deleted file mode 100644 index 0d1008e77e0..00000000000 --- a/core/lib/zksync_core/src/api_server/web3/pubsub_notifier.rs +++ /dev/null @@ -1,191 +0,0 @@ -use anyhow::Context as _; -use jsonrpc_pubsub::typed; -use tokio::sync::watch; -use tokio::time::{interval, Duration}; - -use zksync_dal::ConnectionPool; -use zksync_types::MiniblockNumber; -use zksync_web3_decl::types::{PubSubFilter, PubSubResult}; - -use super::{ - metrics::{SubscriptionType, PUB_SUB_METRICS}, - namespaces::SubscriptionMap, -}; - -pub async fn notify_blocks( - subscribers: SubscriptionMap>, - connection_pool: ConnectionPool, - polling_interval: Duration, - stop_receiver: watch::Receiver, -) -> anyhow::Result<()> { - let mut last_block_number = 
connection_pool - .access_storage_tagged("api") - .await - .unwrap() - .blocks_web3_dal() - .get_sealed_miniblock_number() - .await - .context("get_sealed_miniblock_number()")?; - let mut timer = interval(polling_interval); - loop { - if *stop_receiver.borrow() { - tracing::info!("Stop signal received, pubsub_block_notifier is shutting down"); - break; - } - - timer.tick().await; - - let db_latency = PUB_SUB_METRICS.db_poll_latency[&SubscriptionType::Blocks].start(); - let new_blocks = connection_pool - .access_storage_tagged("api") - .await - .unwrap() - .blocks_web3_dal() - .get_block_headers_after(last_block_number) - .await - .with_context(|| format!("get_block_headers_after({last_block_number})"))?; - db_latency.observe(); - - if !new_blocks.is_empty() { - last_block_number = - MiniblockNumber(new_blocks.last().unwrap().number.unwrap().as_u32()); - - let notify_latency = - PUB_SUB_METRICS.notify_subscribers_latency[&SubscriptionType::Blocks].start(); - let subscribers = subscribers - .read() - .await - .values() - .cloned() - .collect::>(); - for sink in subscribers { - for block in new_blocks.iter().cloned() { - if sink.notify(Ok(PubSubResult::Header(block))).is_err() { - // Subscriber disconnected. 
- break; - } - PUB_SUB_METRICS.notify[&SubscriptionType::Blocks].inc(); - } - } - notify_latency.observe(); - } - } - Ok(()) -} - -pub async fn notify_txs( - subscribers: SubscriptionMap>, - connection_pool: ConnectionPool, - polling_interval: Duration, - stop_receiver: watch::Receiver, -) -> anyhow::Result<()> { - let mut last_time = chrono::Utc::now().naive_utc(); - let mut timer = interval(polling_interval); - loop { - if *stop_receiver.borrow() { - tracing::info!("Stop signal received, pubsub_tx_notifier is shutting down"); - break; - } - - timer.tick().await; - - let db_latency = PUB_SUB_METRICS.db_poll_latency[&SubscriptionType::Txs].start(); - let (new_txs, new_last_time) = connection_pool - .access_storage_tagged("api") - .await - .unwrap() - .transactions_web3_dal() - .get_pending_txs_hashes_after(last_time, None) - .await - .context("get_pending_txs_hashes_after()")?; - db_latency.observe(); - - if let Some(new_last_time) = new_last_time { - last_time = new_last_time; - let notify_latency = - PUB_SUB_METRICS.notify_subscribers_latency[&SubscriptionType::Txs].start(); - - let subscribers = subscribers - .read() - .await - .values() - .cloned() - .collect::>(); - for sink in subscribers { - for tx_hash in new_txs.iter().cloned() { - if sink.notify(Ok(PubSubResult::TxHash(tx_hash))).is_err() { - // Subscriber disconnected. 
- break; - } - PUB_SUB_METRICS.notify[&SubscriptionType::Txs].inc(); - } - } - notify_latency.observe(); - } - } - Ok(()) -} - -pub async fn notify_logs( - subscribers: SubscriptionMap<(typed::Sink, PubSubFilter)>, - connection_pool: ConnectionPool, - polling_interval: Duration, - stop_receiver: watch::Receiver, -) -> anyhow::Result<()> { - let mut last_block_number = connection_pool - .access_storage_tagged("api") - .await - .unwrap() - .blocks_web3_dal() - .get_sealed_miniblock_number() - .await - .context("get_sealed_miniblock_number()")?; - let mut timer = interval(polling_interval); - loop { - if *stop_receiver.borrow() { - tracing::info!("Stop signal received, pubsub_logs_notifier is shutting down"); - break; - } - - timer.tick().await; - - let db_latency = PUB_SUB_METRICS.db_poll_latency[&SubscriptionType::Logs].start(); - let new_logs = connection_pool - .access_storage_tagged("api") - .await - .unwrap() - .events_web3_dal() - .get_all_logs(last_block_number) - .await - .context("events_web3_dal().get_all_logs()")?; - db_latency.observe(); - - if !new_logs.is_empty() { - last_block_number = - MiniblockNumber(new_logs.last().unwrap().block_number.unwrap().as_u32()); - let notify_latency = - PUB_SUB_METRICS.notify_subscribers_latency[&SubscriptionType::Logs].start(); - - let subscribers = subscribers - .read() - .await - .values() - .cloned() - .collect::>(); - - for (sink, filter) in subscribers { - for log in new_logs.iter().cloned() { - if filter.matches(&log) { - if sink.notify(Ok(PubSubResult::Log(log))).is_err() { - // Subscriber disconnected. 
- break; - } - PUB_SUB_METRICS.notify[&SubscriptionType::Logs].inc(); - } - } - } - notify_latency.observe(); - } - } - Ok(()) -} diff --git a/core/lib/zksync_core/src/api_server/web3/tests.rs b/core/lib/zksync_core/src/api_server/web3/tests.rs deleted file mode 100644 index 0d595830d34..00000000000 --- a/core/lib/zksync_core/src/api_server/web3/tests.rs +++ /dev/null @@ -1,145 +0,0 @@ -use tokio::sync::watch; - -use std::{sync::Arc, time::Instant}; - -use zksync_config::configs::{ - api::Web3JsonRpcConfig, - chain::{NetworkConfig, StateKeeperConfig}, - ContractsConfig, -}; -use zksync_dal::ConnectionPool; -use zksync_health_check::CheckHealth; -use zksync_state::PostgresStorageCaches; -use zksync_types::{L1BatchNumber, U64}; -use zksync_web3_decl::{ - jsonrpsee::http_client::HttpClient, - namespaces::{EthNamespaceClient, ZksNamespaceClient}, -}; - -use super::*; -use crate::{ - api_server::tx_sender::TxSenderConfig, - genesis::{ensure_genesis_state, GenesisParams}, -}; - -const TEST_TIMEOUT: Duration = Duration::from_secs(5); -const POLL_INTERVAL: Duration = Duration::from_millis(50); - -/// Mock [`L1GasPriceProvider`] that returns a constant value. -struct MockL1GasPriceProvider(u64); - -impl L1GasPriceProvider for MockL1GasPriceProvider { - fn estimate_effective_gas_price(&self) -> u64 { - self.0 - } -} - -impl ApiServerHandles { - /// Waits until the server health check reports the ready state. 
- pub(crate) async fn wait_until_ready(&self) { - let started_at = Instant::now(); - loop { - assert!( - started_at.elapsed() <= TEST_TIMEOUT, - "Timed out waiting for API server" - ); - let health = self.health_check.check_health().await; - if health.status().is_ready() { - break; - } - tokio::time::sleep(POLL_INTERVAL).await; - } - } - - pub(crate) async fn shutdown(self) { - let stop_server = async { - for task in self.tasks { - task.await - .expect("Server panicked") - .expect("Server terminated with error"); - } - }; - tokio::time::timeout(TEST_TIMEOUT, stop_server) - .await - .unwrap(); - } -} - -pub(crate) async fn spawn_http_server( - network_config: &NetworkConfig, - pool: ConnectionPool, - stop_receiver: watch::Receiver, -) -> ApiServerHandles { - let contracts_config = ContractsConfig::for_tests(); - let web3_config = Web3JsonRpcConfig::for_tests(); - let state_keeper_config = StateKeeperConfig::for_tests(); - let api_config = InternalApiConfig::new(network_config, &web3_config, &contracts_config); - let tx_sender_config = - TxSenderConfig::new(&state_keeper_config, &web3_config, api_config.l2_chain_id); - - let storage_caches = PostgresStorageCaches::new(1, 1); - let gas_adjuster = Arc::new(MockL1GasPriceProvider(1)); - let (tx_sender, vm_barrier) = crate::build_tx_sender( - &tx_sender_config, - &web3_config, - &state_keeper_config, - pool.clone(), - pool.clone(), - gas_adjuster, - storage_caches, - ) - .await; - - ApiBuilder::jsonrpsee_backend(api_config, pool) - .http(0) // Assign random port - .with_threads(1) - .with_tx_sender(tx_sender, vm_barrier) - .enable_api_namespaces(Namespace::NON_DEBUG.to_vec()) - .build(stop_receiver) - .await - .expect("Failed spawning JSON-RPC server") -} - -#[tokio::test] -async fn http_server_can_start() { - let pool = ConnectionPool::test_pool().await; - let network_config = NetworkConfig::for_tests(); - let mut storage = pool.access_storage().await.unwrap(); - if 
storage.blocks_dal().is_genesis_needed().await.unwrap() { - ensure_genesis_state( - &mut storage, - network_config.zksync_network_id, - &GenesisParams::mock(), - ) - .await - .unwrap(); - } - drop(storage); - - let (stop_sender, stop_receiver) = watch::channel(false); - let server_handles = spawn_http_server(&network_config, pool, stop_receiver).await; - server_handles.wait_until_ready().await; - - test_http_server_methods(server_handles.local_addr).await; - - stop_sender.send_replace(true); - server_handles.shutdown().await; -} - -async fn test_http_server_methods(local_addr: SocketAddr) { - let client = ::builder() - .build(format!("http://{local_addr}/")) - .unwrap(); - let block_number = client.get_block_number().await.unwrap(); - assert_eq!(block_number, U64::from(0)); - - let l1_batch_number = client.get_l1_batch_number().await.unwrap(); - assert_eq!(l1_batch_number, U64::from(0)); - - let genesis_l1_batch = client - .get_l1_batch_details(L1BatchNumber(0)) - .await - .unwrap() - .unwrap(); - assert!(genesis_l1_batch.base.root_hash.is_some()); -} diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs new file mode 100644 index 00000000000..12bb6481213 --- /dev/null +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -0,0 +1,503 @@ +use assert_matches::assert_matches; +use async_trait::async_trait; +use tokio::sync::watch; + +use std::{sync::Arc, time::Instant}; + +use zksync_config::configs::{ + api::Web3JsonRpcConfig, + chain::{NetworkConfig, StateKeeperConfig}, + ContractsConfig, +}; +use zksync_contracts::BaseSystemContractsHashes; +use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool}; +use zksync_health_check::CheckHealth; +use zksync_state::PostgresStorageCaches; +use zksync_types::{ + block::MiniblockHeader, fee::TransactionExecutionMetrics, tx::IncludedTxLocation, Address, + L1BatchNumber, ProtocolVersionId, VmEvent, H256, U64, +}; +use zksync_web3_decl::{ 
+ jsonrpsee::{core::Error as RpcError, http_client::HttpClient, types::error::ErrorCode}, + namespaces::{EthNamespaceClient, ZksNamespaceClient}, + types::FilterChanges, +}; + +mod ws; + +use super::{metrics::ApiTransportLabel, *}; +use crate::{ + api_server::tx_sender::TxSenderConfig, + genesis::{ensure_genesis_state, GenesisParams}, + state_keeper::tests::create_l2_transaction, +}; + +const TEST_TIMEOUT: Duration = Duration::from_secs(10); +const POLL_INTERVAL: Duration = Duration::from_millis(50); + +/// Mock [`L1GasPriceProvider`] that returns a constant value. +struct MockL1GasPriceProvider(u64); + +impl L1GasPriceProvider for MockL1GasPriceProvider { + fn estimate_effective_gas_price(&self) -> u64 { + self.0 + } +} + +impl ApiServerHandles { + /// Waits until the server health check reports the ready state. + pub(crate) async fn wait_until_ready(&self) { + let started_at = Instant::now(); + loop { + assert!( + started_at.elapsed() <= TEST_TIMEOUT, + "Timed out waiting for API server" + ); + let health = self.health_check.check_health().await; + if health.status().is_ready() { + break; + } + tokio::time::sleep(POLL_INTERVAL).await; + } + } + + pub(crate) async fn shutdown(self) { + let stop_server = async { + for task in self.tasks { + // FIXME(PLA-481): avoid these errors (by spawning notifier tasks on server runtime?) 
+ if let Err(err) = task.await.expect("Server panicked") { + let err = err.root_cause().to_string(); + assert!(err.contains("Tokio 1.x context was found")); + } + } + }; + tokio::time::timeout(TEST_TIMEOUT, stop_server) + .await + .unwrap(); + } +} + +pub(crate) async fn spawn_http_server( + network_config: &NetworkConfig, + pool: ConnectionPool, + stop_receiver: watch::Receiver, +) -> ApiServerHandles { + spawn_server(ApiTransportLabel::Http, network_config, pool, stop_receiver) + .await + .0 +} + +async fn spawn_ws_server( + network_config: &NetworkConfig, + pool: ConnectionPool, + stop_receiver: watch::Receiver, +) -> (ApiServerHandles, mpsc::UnboundedReceiver) { + spawn_server(ApiTransportLabel::Ws, network_config, pool, stop_receiver).await +} + +async fn spawn_server( + transport: ApiTransportLabel, + network_config: &NetworkConfig, + pool: ConnectionPool, + stop_receiver: watch::Receiver, +) -> (ApiServerHandles, mpsc::UnboundedReceiver) { + let contracts_config = ContractsConfig::for_tests(); + let web3_config = Web3JsonRpcConfig::for_tests(); + let state_keeper_config = StateKeeperConfig::for_tests(); + let api_config = InternalApiConfig::new(network_config, &web3_config, &contracts_config); + let tx_sender_config = + TxSenderConfig::new(&state_keeper_config, &web3_config, api_config.l2_chain_id); + + let storage_caches = PostgresStorageCaches::new(1, 1); + let gas_adjuster = Arc::new(MockL1GasPriceProvider(1)); + let (tx_sender, vm_barrier) = crate::build_tx_sender( + &tx_sender_config, + &web3_config, + &state_keeper_config, + pool.clone(), + pool.clone(), + gas_adjuster, + storage_caches, + ) + .await; + let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel(); + + let server_builder = match transport { + ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), + ApiTransportLabel::Ws => ApiBuilder::jsonrpc_backend(api_config, pool) + .ws(0) + .with_polling_interval(POLL_INTERVAL) + 
.with_subscriptions_limit(100), + }; + let server_handles = server_builder + .with_threads(1) + .with_tx_sender(tx_sender, vm_barrier) + .with_pub_sub_events(pub_sub_events_sender) + .enable_api_namespaces(Namespace::NON_DEBUG.to_vec()) + .build(stop_receiver) + .await + .expect("Failed spawning JSON-RPC server"); + (server_handles, pub_sub_events_receiver) +} + +#[async_trait] +trait HttpTest { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()>; +} + +async fn test_http_server(test: impl HttpTest) { + let pool = ConnectionPool::test_pool().await; + let network_config = NetworkConfig::for_tests(); + let mut storage = pool.access_storage().await.unwrap(); + if storage.blocks_dal().is_genesis_needed().await.unwrap() { + ensure_genesis_state( + &mut storage, + network_config.zksync_network_id, + &GenesisParams::mock(), + ) + .await + .unwrap(); + } + drop(storage); + + let (stop_sender, stop_receiver) = watch::channel(false); + let server_handles = spawn_http_server(&network_config, pool.clone(), stop_receiver).await; + server_handles.wait_until_ready().await; + + let client = ::builder() + .build(format!("http://{}/", server_handles.local_addr)) + .unwrap(); + test.test(&client, &pool).await.unwrap(); + + stop_sender.send_replace(true); + server_handles.shutdown().await; +} + +fn assert_logs_match(actual_logs: &[api::Log], expected_logs: &[&VmEvent]) { + assert_eq!(actual_logs.len(), expected_logs.len()); + for (actual_log, &expected_log) in actual_logs.iter().zip(expected_logs) { + assert_eq!(actual_log.address, expected_log.address); + assert_eq!(actual_log.topics, expected_log.indexed_topics); + assert_eq!(actual_log.data.0, expected_log.value); + } +} + +fn create_miniblock(number: u32) -> MiniblockHeader { + MiniblockHeader { + number: MiniblockNumber(number), + timestamp: number.into(), + hash: H256::from_low_u64_be(number.into()), + l1_tx_count: 0, + l2_tx_count: 0, + base_fee_per_gas: 100, + l1_gas_price: 100, + 
l2_fair_gas_price: 100, + base_system_contracts_hashes: BaseSystemContractsHashes::default(), + protocol_version: Some(ProtocolVersionId::latest()), + virtual_blocks: 1, + } +} + +async fn store_block(pool: &ConnectionPool) -> anyhow::Result<(MiniblockHeader, H256)> { + let mut storage = pool.access_storage().await?; + let new_tx = create_l2_transaction(1, 2); + let new_tx_hash = new_tx.hash(); + let tx_submission_result = storage + .transactions_dal() + .insert_transaction_l2(new_tx, TransactionExecutionMetrics::default()) + .await; + assert_matches!(tx_submission_result, L2TxSubmissionResult::Added); + + let new_miniblock = create_miniblock(1); + storage + .blocks_dal() + .insert_miniblock(&new_miniblock) + .await?; + Ok((new_miniblock, new_tx_hash)) +} + +async fn store_events( + storage: &mut StorageProcessor<'_>, + miniblock_number: u32, + start_idx: u32, +) -> anyhow::Result<(IncludedTxLocation, Vec)> { + let new_miniblock = create_miniblock(miniblock_number); + storage + .blocks_dal() + .insert_miniblock(&new_miniblock) + .await?; + let tx_location = IncludedTxLocation { + tx_hash: H256::repeat_byte(1), + tx_index_in_miniblock: 0, + tx_initiator_address: Address::repeat_byte(2), + }; + let events = vec![ + // Matches address, doesn't match topics + VmEvent { + location: (L1BatchNumber(1), start_idx), + address: Address::repeat_byte(23), + indexed_topics: vec![], + value: start_idx.to_le_bytes().to_vec(), + }, + // Doesn't match address, matches topics + VmEvent { + location: (L1BatchNumber(1), start_idx + 1), + address: Address::zero(), + indexed_topics: vec![H256::repeat_byte(42)], + value: (start_idx + 1).to_le_bytes().to_vec(), + }, + // Doesn't match address or topics + VmEvent { + location: (L1BatchNumber(1), start_idx + 2), + address: Address::zero(), + indexed_topics: vec![H256::repeat_byte(1), H256::repeat_byte(42)], + value: (start_idx + 2).to_le_bytes().to_vec(), + }, + // Matches both address and topics + VmEvent { + location: (L1BatchNumber(1), 
start_idx + 3), + address: Address::repeat_byte(23), + indexed_topics: vec![H256::repeat_byte(42), H256::repeat_byte(111)], + value: (start_idx + 3).to_le_bytes().to_vec(), + }, + ]; + storage + .events_dal() + .save_events( + MiniblockNumber(miniblock_number), + &[(tx_location, events.iter().collect())], + ) + .await; + Ok((tx_location, events)) +} + +#[derive(Debug)] +struct HttpServerBasics; + +#[async_trait] +impl HttpTest for HttpServerBasics { + async fn test(&self, client: &HttpClient, _pool: &ConnectionPool) -> anyhow::Result<()> { + let block_number = client.get_block_number().await?; + assert_eq!(block_number, U64::from(0)); + + let l1_batch_number = client.get_l1_batch_number().await?; + assert_eq!(l1_batch_number, U64::from(0)); + + let genesis_l1_batch = client + .get_l1_batch_details(L1BatchNumber(0)) + .await? + .context("No genesis L1 batch")?; + assert!(genesis_l1_batch.base.root_hash.is_some()); + Ok(()) + } +} + +#[tokio::test] +async fn http_server_basics() { + test_http_server(HttpServerBasics).await; +} + +#[derive(Debug)] +struct BasicFilterChanges; + +#[async_trait] +impl HttpTest for BasicFilterChanges { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + let block_filter_id = client.new_block_filter().await?; + let tx_filter_id = client.new_pending_transaction_filter().await?; + + let (new_miniblock, new_tx_hash) = store_block(pool).await?; + + let block_filter_changes = client.get_filter_changes(block_filter_id).await?; + assert_matches!( + block_filter_changes, + FilterChanges::Hashes(hashes) if hashes == [new_miniblock.hash] + ); + let block_filter_changes = client.get_filter_changes(block_filter_id).await?; + assert_matches!(block_filter_changes, FilterChanges::Hashes(hashes) if hashes.is_empty()); + + let tx_filter_changes = client.get_filter_changes(tx_filter_id).await?; + assert_matches!( + tx_filter_changes, + FilterChanges::Hashes(hashes) if hashes == [new_tx_hash] + ); + let 
tx_filter_changes = client.get_filter_changes(tx_filter_id).await?; + assert_matches!(tx_filter_changes, FilterChanges::Hashes(hashes) if hashes.is_empty()); + + // Check uninstalling the filter. + let removed = client.uninstall_filter(block_filter_id).await?; + assert!(removed); + let removed = client.uninstall_filter(block_filter_id).await?; + assert!(!removed); + + let err = client + .get_filter_changes(block_filter_id) + .await + .unwrap_err(); + assert_matches!(err, RpcError::Call(err) if err.code() == ErrorCode::InvalidParams.code()); + Ok(()) + } +} + +#[tokio::test] +async fn basic_filter_changes() { + test_http_server(BasicFilterChanges).await; +} + +#[derive(Debug)] +struct LogFilterChanges; + +#[async_trait] +impl HttpTest for LogFilterChanges { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + let all_logs_filter_id = client.new_filter(Filter::default()).await?; + let address_filter = Filter { + address: Some(Address::repeat_byte(23).into()), + ..Filter::default() + }; + let address_filter_id = client.new_filter(address_filter).await?; + let topics_filter = Filter { + topics: Some(vec![Some(H256::repeat_byte(42).into())]), + ..Filter::default() + }; + let topics_filter_id = client.new_filter(topics_filter).await?; + + let mut storage = pool.access_storage().await?; + let (_, events) = store_events(&mut storage, 1, 0).await?; + drop(storage); + let events: Vec<_> = events.iter().collect(); + + let all_logs = client.get_filter_changes(all_logs_filter_id).await?; + let FilterChanges::Logs(all_logs) = all_logs else { + panic!("Unexpected getFilterChanges output: {:?}", all_logs); + }; + assert_logs_match(&all_logs, &events); + + let address_logs = client.get_filter_changes(address_filter_id).await?; + let FilterChanges::Logs(address_logs) = address_logs else { + panic!("Unexpected getFilterChanges output: {:?}", address_logs); + }; + assert_logs_match(&address_logs, &[events[0], events[3]]); + + let topics_logs = 
client.get_filter_changes(topics_filter_id).await?; + let FilterChanges::Logs(topics_logs) = topics_logs else { + panic!("Unexpected getFilterChanges output: {:?}", topics_logs); + }; + assert_logs_match(&topics_logs, &[events[1], events[3]]); + + let new_all_logs = client.get_filter_changes(all_logs_filter_id).await?; + let FilterChanges::Logs(new_all_logs) = new_all_logs else { + panic!("Unexpected getFilterChanges output: {:?}", new_all_logs); + }; + assert_eq!(new_all_logs, all_logs); // FIXME(#546): update test after behavior is fixed + Ok(()) + } +} + +#[tokio::test] +async fn log_filter_changes() { + test_http_server(LogFilterChanges).await; +} + +#[derive(Debug)] +struct LogFilterChangesWithBlockBoundaries; + +#[async_trait] +impl HttpTest for LogFilterChangesWithBlockBoundaries { + async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { + let lower_bound_filter = Filter { + from_block: Some(api::BlockNumber::Number(2.into())), + ..Filter::default() + }; + let lower_bound_filter_id = client.new_filter(lower_bound_filter).await?; + let upper_bound_filter = Filter { + to_block: Some(api::BlockNumber::Number(1.into())), + ..Filter::default() + }; + let upper_bound_filter_id = client.new_filter(upper_bound_filter).await?; + let bounded_filter = Filter { + from_block: Some(api::BlockNumber::Number(1.into())), + to_block: Some(api::BlockNumber::Number(1.into())), + ..Filter::default() + }; + let bounded_filter_id = client.new_filter(bounded_filter).await?; + + let mut storage = pool.access_storage().await?; + let (_, events) = store_events(&mut storage, 1, 0).await?; + drop(storage); + let events: Vec<_> = events.iter().collect(); + + let lower_bound_logs = client.get_filter_changes(lower_bound_filter_id).await?; + assert_matches!( + lower_bound_logs, + FilterChanges::Hashes(hashes) if hashes.is_empty() + ); + // ^ Since `FilterChanges` is serialized w/o a tag, an empty array will be deserialized + // as `Hashes(_)` (the first 
declared variant). + + let upper_bound_logs = client.get_filter_changes(upper_bound_filter_id).await?; + let FilterChanges::Logs(upper_bound_logs) = upper_bound_logs else { + panic!("Unexpected getFilterChanges output: {:?}", upper_bound_logs); + }; + assert_logs_match(&upper_bound_logs, &events); + let bounded_logs = client.get_filter_changes(bounded_filter_id).await?; + let FilterChanges::Logs(bounded_logs) = bounded_logs else { + panic!("Unexpected getFilterChanges output: {:?}", bounded_logs); + }; + assert_eq!(bounded_logs, upper_bound_logs); + + // Add another miniblock with events to the storage. + let mut storage = pool.access_storage().await?; + let (_, new_events) = store_events(&mut storage, 2, 4).await?; + drop(storage); + let new_events: Vec<_> = new_events.iter().collect(); + + let lower_bound_logs = client.get_filter_changes(lower_bound_filter_id).await?; + let FilterChanges::Logs(lower_bound_logs) = lower_bound_logs else { + panic!("Unexpected getFilterChanges output: {:?}", lower_bound_logs); + }; + assert_logs_match(&lower_bound_logs, &new_events); + + // FIXME(#546): update test after behavior is fixed + let new_upper_bound_logs = client.get_filter_changes(upper_bound_filter_id).await?; + assert_eq!(new_upper_bound_logs, FilterChanges::Logs(upper_bound_logs)); + let new_bounded_logs = client.get_filter_changes(bounded_filter_id).await?; + assert_eq!(new_bounded_logs, FilterChanges::Logs(bounded_logs)); + + // Add miniblock #3. It should not be picked up by the bounded and upper bound filters, + // and should be picked up by the lower bound filter. 
+ let mut storage = pool.access_storage().await?; + let (_, new_events) = store_events(&mut storage, 3, 8).await?; + drop(storage); + let new_events: Vec<_> = new_events.iter().collect(); + + let bounded_logs = client.get_filter_changes(bounded_filter_id).await?; + let FilterChanges::Logs(bounded_logs) = bounded_logs else { + panic!("Unexpected getFilterChanges output: {:?}", bounded_logs); + }; + assert!(bounded_logs + .iter() + .all(|log| log.block_number.unwrap() < 3.into())); + + let upper_bound_logs = client.get_filter_changes(upper_bound_filter_id).await?; + let FilterChanges::Logs(upper_bound_logs) = upper_bound_logs else { + panic!("Unexpected getFilterChanges output: {:?}", upper_bound_logs); + }; + assert!(upper_bound_logs + .iter() + .all(|log| log.block_number.unwrap() < 3.into())); + + let lower_bound_logs = client.get_filter_changes(lower_bound_filter_id).await?; + let FilterChanges::Logs(lower_bound_logs) = lower_bound_logs else { + panic!("Unexpected getFilterChanges output: {:?}", lower_bound_logs); + }; + let start_idx = lower_bound_logs.len() - 4; + assert_logs_match(&lower_bound_logs[start_idx..], &new_events); + Ok(()) + } +} + +#[tokio::test] +async fn log_filter_changes_with_block_boundaries() { + test_http_server(LogFilterChangesWithBlockBoundaries).await; +} diff --git a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs new file mode 100644 index 00000000000..704dfef6700 --- /dev/null +++ b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs @@ -0,0 +1,466 @@ +//! WS-related tests. 
+ +use async_trait::async_trait; +use tokio::sync::watch; + +use zksync_config::configs::chain::NetworkConfig; +use zksync_dal::ConnectionPool; +use zksync_types::{api, Address, L1BatchNumber, H256, U64}; +use zksync_web3_decl::{ + jsonrpsee::{ + core::client::{Subscription, SubscriptionClientT}, + rpc_params, + ws_client::{WsClient, WsClientBuilder}, + }, + namespaces::{EthNamespaceClient, ZksNamespaceClient}, + types::{BlockHeader, PubSubFilter}, +}; + +use super::*; +use crate::api_server::web3::metrics::SubscriptionType; + +#[allow(clippy::needless_pass_by_ref_mut)] // false positive +async fn wait_for_subscription( + events: &mut mpsc::UnboundedReceiver, + sub_type: SubscriptionType, +) { + let wait_future = tokio::time::timeout(TEST_TIMEOUT, async { + loop { + let event = events + .recv() + .await + .expect("Events emitter unexpectedly dropped"); + if matches!(event, PubSubEvent::Subscribed(ty) if ty == sub_type) { + break; + } else { + tracing::trace!(?event, "Skipping event"); + } + } + }); + wait_future + .await + .expect("Timed out waiting for subscription") +} + +#[allow(clippy::needless_pass_by_ref_mut)] // false positive +async fn wait_for_notifier( + events: &mut mpsc::UnboundedReceiver, + sub_type: SubscriptionType, +) { + let wait_future = tokio::time::timeout(TEST_TIMEOUT, async { + loop { + let event = events + .recv() + .await + .expect("Events emitter unexpectedly dropped"); + if matches!(event, PubSubEvent::NotifyIterationFinished(ty) if ty == sub_type) { + break; + } else { + tracing::trace!(?event, "Skipping event"); + } + } + }); + wait_future.await.expect("Timed out waiting for notifier") +} + +#[async_trait] +trait WsTest { + async fn test( + &self, + client: &WsClient, + pool: &ConnectionPool, + pub_sub_events: mpsc::UnboundedReceiver, + ) -> anyhow::Result<()>; +} + +async fn test_ws_server(test: impl WsTest) { + let pool = ConnectionPool::test_pool().await; + let network_config = NetworkConfig::for_tests(); + let mut storage = 
pool.access_storage().await.unwrap(); + if storage.blocks_dal().is_genesis_needed().await.unwrap() { + ensure_genesis_state( + &mut storage, + network_config.zksync_network_id, + &GenesisParams::mock(), + ) + .await + .unwrap(); + } + drop(storage); + + let (stop_sender, stop_receiver) = watch::channel(false); + let (server_handles, pub_sub_events) = + spawn_ws_server(&network_config, pool.clone(), stop_receiver).await; + server_handles.wait_until_ready().await; + + let client = WsClientBuilder::default() + .build(format!("ws://{}", server_handles.local_addr)) + .await + .unwrap(); + test.test(&client, &pool, pub_sub_events).await.unwrap(); + + stop_sender.send_replace(true); + server_handles.shutdown().await; +} + +#[derive(Debug)] +struct WsServerCanStart; + +#[async_trait] +impl WsTest for WsServerCanStart { + async fn test( + &self, + client: &WsClient, + _pool: &ConnectionPool, + _pub_sub_events: mpsc::UnboundedReceiver, + ) -> anyhow::Result<()> { + let block_number = client.get_block_number().await?; + assert_eq!(block_number, U64::from(0)); + + let l1_batch_number = client.get_l1_batch_number().await?; + assert_eq!(l1_batch_number, U64::from(0)); + + let genesis_l1_batch = client + .get_l1_batch_details(L1BatchNumber(0)) + .await? + .context("missing genesis L1 batch")?; + assert!(genesis_l1_batch.base.root_hash.is_some()); + Ok(()) + } +} + +#[tokio::test] +async fn ws_server_can_start() { + test_ws_server(WsServerCanStart).await; +} + +#[derive(Debug)] +struct BasicSubscriptions; + +#[async_trait] +impl WsTest for BasicSubscriptions { + async fn test( + &self, + client: &WsClient, + pool: &ConnectionPool, + mut pub_sub_events: mpsc::UnboundedReceiver, + ) -> anyhow::Result<()> { + // Wait for the notifiers to get initialized so that they don't skip notifications + // for the created subscriptions. 
+ wait_for_notifier(&mut pub_sub_events, SubscriptionType::Blocks).await; + wait_for_notifier(&mut pub_sub_events, SubscriptionType::Txs).await; + + let params = rpc_params!["newHeads"]; + let mut blocks_subscription = client + .subscribe::("eth_subscribe", params, "eth_unsubscribe") + .await?; + wait_for_subscription(&mut pub_sub_events, SubscriptionType::Blocks).await; + + let params = rpc_params!["newPendingTransactions"]; + let mut txs_subscription = client + .subscribe::("eth_subscribe", params, "eth_unsubscribe") + .await?; + wait_for_subscription(&mut pub_sub_events, SubscriptionType::Txs).await; + + let (new_miniblock, new_tx_hash) = store_block(pool).await?; + + let received_tx_hash = tokio::time::timeout(TEST_TIMEOUT, txs_subscription.next()) + .await + .context("Timed out waiting for new tx hash")? + .context("Pending txs subscription terminated")??; + assert_eq!(received_tx_hash, new_tx_hash); + let received_block_header = tokio::time::timeout(TEST_TIMEOUT, blocks_subscription.next()) + .await + .context("Timed out waiting for new block hash")? + .context("New blocks subscription terminated")??; + assert_eq!(received_block_header.number, Some(1.into())); + assert_eq!(received_block_header.hash, Some(new_miniblock.hash)); + assert_eq!(received_block_header.timestamp, 1.into()); + blocks_subscription.unsubscribe().await?; + Ok(()) + } +} + +#[tokio::test] +async fn basic_subscriptions() { + test_ws_server(BasicSubscriptions).await; +} + +#[derive(Debug)] +struct LogSubscriptions; + +#[derive(Debug)] +struct Subscriptions { + all_logs_subscription: Subscription, + address_subscription: Subscription, + topic_subscription: Subscription, +} + +impl Subscriptions { + async fn new( + client: &WsClient, + pub_sub_events: &mut mpsc::UnboundedReceiver, + ) -> anyhow::Result { + // Wait for the notifier to get initialized so that it doesn't skip notifications + // for the created subscriptions. 
+ wait_for_notifier(pub_sub_events, SubscriptionType::Logs).await; + + let params = rpc_params!["logs"]; + let all_logs_subscription = client + .subscribe::("eth_subscribe", params, "eth_unsubscribe") + .await?; + let address_filter = PubSubFilter { + address: Some(Address::repeat_byte(23).into()), + topics: None, + }; + let params = rpc_params!["logs", address_filter]; + let address_subscription = client + .subscribe::("eth_subscribe", params, "eth_unsubscribe") + .await?; + let topic_filter = PubSubFilter { + address: None, + topics: Some(vec![Some(H256::repeat_byte(42).into())]), + }; + let params = rpc_params!["logs", topic_filter]; + let topic_subscription = client + .subscribe::("eth_subscribe", params, "eth_unsubscribe") + .await?; + for _ in 0..3 { + wait_for_subscription(pub_sub_events, SubscriptionType::Logs).await; + } + + Ok(Self { + all_logs_subscription, + address_subscription, + topic_subscription, + }) + } +} + +#[async_trait] +impl WsTest for LogSubscriptions { + async fn test( + &self, + client: &WsClient, + pool: &ConnectionPool, + mut pub_sub_events: mpsc::UnboundedReceiver, + ) -> anyhow::Result<()> { + let Subscriptions { + mut all_logs_subscription, + mut address_subscription, + mut topic_subscription, + } = Subscriptions::new(client, &mut pub_sub_events).await?; + + let mut storage = pool.access_storage().await?; + let (tx_location, events) = store_events(&mut storage, 1, 0).await?; + drop(storage); + let events: Vec<_> = events.iter().collect(); + + let all_logs = collect_logs(&mut all_logs_subscription, 4).await?; + for (i, log) in all_logs.iter().enumerate() { + assert_eq!(log.transaction_index, Some(0.into())); + assert_eq!(log.log_index, Some(i.into())); + assert_eq!(log.transaction_hash, Some(tx_location.tx_hash)); + assert_eq!(log.block_number, Some(1.into())); + } + assert_logs_match(&all_logs, &events); + + let address_logs = collect_logs(&mut address_subscription, 2).await?; + assert_logs_match(&address_logs, &[events[0], 
events[3]]); + + let topic_logs = collect_logs(&mut topic_subscription, 2).await?; + assert_logs_match(&topic_logs, &[events[1], events[3]]); + + wait_for_notifier(&mut pub_sub_events, SubscriptionType::Logs).await; + + // Check that no new notifications were sent to subscribers. + tokio::time::timeout(POLL_INTERVAL, all_logs_subscription.next()) + .await + .unwrap_err(); + tokio::time::timeout(POLL_INTERVAL, address_subscription.next()) + .await + .unwrap_err(); + tokio::time::timeout(POLL_INTERVAL, topic_subscription.next()) + .await + .unwrap_err(); + Ok(()) + } +} + +async fn collect_logs( + sub: &mut Subscription, + expected_count: usize, +) -> anyhow::Result> { + let mut logs = Vec::with_capacity(expected_count); + for _ in 0..expected_count { + let log = tokio::time::timeout(TEST_TIMEOUT, sub.next()) + .await + .context("Timed out waiting for new log")? + .context("Logs subscription terminated")??; + logs.push(log); + } + Ok(logs) +} + +#[tokio::test] +async fn log_subscriptions() { + test_ws_server(LogSubscriptions).await; +} + +#[derive(Debug)] +struct LogSubscriptionsWithNewBlock; + +#[async_trait] +impl WsTest for LogSubscriptionsWithNewBlock { + async fn test( + &self, + client: &WsClient, + pool: &ConnectionPool, + mut pub_sub_events: mpsc::UnboundedReceiver, + ) -> anyhow::Result<()> { + let Subscriptions { + mut all_logs_subscription, + mut address_subscription, + .. + } = Subscriptions::new(client, &mut pub_sub_events).await?; + + let mut storage = pool.access_storage().await?; + let (_, events) = store_events(&mut storage, 1, 0).await?; + drop(storage); + let events: Vec<_> = events.iter().collect(); + + let all_logs = collect_logs(&mut all_logs_subscription, 4).await?; + assert_logs_match(&all_logs, &events); + + // Create a new block and wait for the pub-sub notifier to run. 
+ let mut storage = pool.access_storage().await?; + let (_, new_events) = store_events(&mut storage, 2, 4).await?; + drop(storage); + let new_events: Vec<_> = new_events.iter().collect(); + + let all_new_logs = collect_logs(&mut all_logs_subscription, 4).await?; + assert_logs_match(&all_new_logs, &new_events); + + let address_logs = collect_logs(&mut address_subscription, 4).await?; + assert_logs_match( + &address_logs, + &[events[0], events[3], new_events[0], new_events[3]], + ); + Ok(()) + } +} + +#[tokio::test] +async fn log_subscriptions_with_new_block() { + test_ws_server(LogSubscriptionsWithNewBlock).await; +} + +#[derive(Debug)] +struct LogSubscriptionsWithManyBlocks; + +#[async_trait] +impl WsTest for LogSubscriptionsWithManyBlocks { + async fn test( + &self, + client: &WsClient, + pool: &ConnectionPool, + mut pub_sub_events: mpsc::UnboundedReceiver, + ) -> anyhow::Result<()> { + let Subscriptions { + mut all_logs_subscription, + mut address_subscription, + .. + } = Subscriptions::new(client, &mut pub_sub_events).await?; + + // Add two blocks in the storage atomically. 
+ let mut storage = pool.access_storage().await?; + let mut transaction = storage.start_transaction().await?; + let (_, events) = store_events(&mut transaction, 1, 0).await?; + let events: Vec<_> = events.iter().collect(); + let (_, new_events) = store_events(&mut transaction, 2, 4).await?; + let new_events: Vec<_> = new_events.iter().collect(); + transaction.commit().await?; + drop(storage); + + let all_logs = collect_logs(&mut all_logs_subscription, 4).await?; + assert_logs_match(&all_logs, &events); + let all_new_logs = collect_logs(&mut all_logs_subscription, 4).await?; + assert_logs_match(&all_new_logs, &new_events); + + let address_logs = collect_logs(&mut address_subscription, 4).await?; + assert_logs_match( + &address_logs, + &[events[0], events[3], new_events[0], new_events[3]], + ); + Ok(()) + } +} + +#[tokio::test] +async fn log_subscriptions_with_many_new_blocks_at_once() { + test_ws_server(LogSubscriptionsWithManyBlocks).await; +} + +#[derive(Debug)] +struct LogSubscriptionsWithDelay; + +#[async_trait] +impl WsTest for LogSubscriptionsWithDelay { + async fn test( + &self, + client: &WsClient, + pool: &ConnectionPool, + mut pub_sub_events: mpsc::UnboundedReceiver, + ) -> anyhow::Result<()> { + // Store a miniblock w/o subscriptions being present. + let mut storage = pool.access_storage().await?; + store_events(&mut storage, 1, 0).await?; + drop(storage); + + while pub_sub_events.try_recv().is_ok() { + // Drain all existing pub-sub events. 
+ } + wait_for_notifier(&mut pub_sub_events, SubscriptionType::Logs).await; + + let params = rpc_params!["logs"]; + let mut all_logs_subscription = client + .subscribe::("eth_subscribe", params, "eth_unsubscribe") + .await?; + let address_and_topic_filter = PubSubFilter { + address: Some(Address::repeat_byte(23).into()), + topics: Some(vec![Some(H256::repeat_byte(42).into())]), + }; + let params = rpc_params!["logs", address_and_topic_filter]; + let mut address_and_topic_subscription = client + .subscribe::("eth_subscribe", params, "eth_unsubscribe") + .await?; + for _ in 0..2 { + wait_for_subscription(&mut pub_sub_events, SubscriptionType::Logs).await; + } + + let mut storage = pool.access_storage().await?; + let (_, new_events) = store_events(&mut storage, 2, 4).await?; + drop(storage); + let new_events: Vec<_> = new_events.iter().collect(); + + let all_logs = collect_logs(&mut all_logs_subscription, 4).await?; + assert_logs_match(&all_logs, &new_events); + let address_and_topic_logs = collect_logs(&mut address_and_topic_subscription, 1).await?; + assert_logs_match(&address_and_topic_logs, &[new_events[3]]); + + // Check the behavior of remaining subscriptions if a subscription is dropped. 
+ all_logs_subscription.unsubscribe().await?; + let mut storage = pool.access_storage().await?; + let (_, new_events) = store_events(&mut storage, 3, 8).await?; + drop(storage); + + let address_and_topic_logs = collect_logs(&mut address_and_topic_subscription, 1).await?; + assert_logs_match(&address_and_topic_logs, &[&new_events[3]]); + Ok(()) + } +} + +#[tokio::test] +async fn log_subscriptions_with_delay() { + test_ws_server(LogSubscriptionsWithDelay).await; +} From 06f510d00f855ddafaebb504f7ea799700221072 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 29 Nov 2023 21:03:37 +0200 Subject: [PATCH 059/115] feat: Restore commitment test in Boojum integration (#539) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Restored commitmenet test ## Why ❔ During Boojum integration the commitment_test was commented out, since the commitment schema has changed. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- core/lib/types/src/commitment.rs | 2 -- .../zksync_testharness_test.json | 20 +++++++++---------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/core/lib/types/src/commitment.rs b/core/lib/types/src/commitment.rs index 29750a5c77b..7925a37d92f 100644 --- a/core/lib/types/src/commitment.rs +++ b/core/lib/types/src/commitment.rs @@ -719,8 +719,6 @@ mod tests { expected_outputs: ExpectedOutput, } - // TODO(PLA-568): restore this test - #[ignore] #[test] fn commitment_test() { let zksync_home = std::env::var("ZKSYNC_HOME").unwrap_or_else(|_| ".".into()); diff --git a/etc/commitment_tests/zksync_testharness_test.json b/etc/commitment_tests/zksync_testharness_test.json index 3240c3b4d9e..6f5730144a0 100644 --- a/etc/commitment_tests/zksync_testharness_test.json +++ b/etc/commitment_tests/zksync_testharness_test.json @@ -56,19 +56,19 @@ ] }, "expected_outputs": { - "l2_l1_bytes": "0000000200000000000000000000000000000000000000000000800b000000000000000000000000000000000000000000000000000000006349f8b557a1099a71e51ea9cce8d89b5a9f1741d3a704e5258077c811223a8b604cdc8a000100000000000000000000000000000000000000008001cb99d29a1b4ffeaefdbf74b8b8b07c78e5e02b3100946f8d0463b79789086aff0000000000000000000000000000000000000000000000000000000000000001", - "l2_l1_linear_hash": "0x680f578a7b39e9f74385a3aabfb4cf054917f23aea9ae165d2afaac02fc9f3b8", - "l2_l1_root_hash": "0xcb5f7b72ab30095b81e2cd35c308a7a752fe59213475339b8a833e91bf731837", - "initial_writes_bytes": "00000002db1231bec2de6342908165662c0d968bb89db1e63211d92fa9547a7efb81499457a1099a71e51ea9cce8d89b5a9f1741d3a704e5258077c811223a8b604cdc8a098c669256db6fe36d87834ceae9c8161af6b72f8b1543b8a3ffacca5b9206af0000000000000000000000000000000000000000000000000000000000000064", - "repeated_writes_bytes": 
"0000000600000000000002920000000000000000000000000000000c0000000000000000000000000000001f000000000000003d000000000000000000000000000000000000000000000003aec912ce8057d164000000000000003e0000000000000000000000000000000000000000000000000003acf87e3e2200000000000000029400000000000000000000000000000000000000000000000001962afc49a1eb2e0000000000000028000000000000000000000000000000690000000000000000000000006349f8b5000000000000029800000000000000000000000000000000000000000000000000d3abb1bfb7be70", - "repeated_writes_hash": "0xdc5a883793479c779f5c99b0fca910deb20195d8ccf430afad05a9c2bd9f81bd", - "initial_writes_hash": "0xdcc4877ab0c07a79a16ae34de6fb7971a54128db0d11791fd5064bd6d03076c1", + "l2_l1_bytes": "00000000000000000000000000000000000000000000800b000000000000000000000000000000000000000000000000000000006349f8b557a1099a71e51ea9cce8d89b5a9f1741d3a704e5258077c811223a8b604cdc8a000100000000000000000000000000000000000000008001cb99d29a1b4ffeaefdbf74b8b8b07c78e5e02b3100946f8d0463b79789086aff0000000000000000000000000000000000000000000000000000000000000001", + "l2_l1_linear_hash": "0x3735b5ef78a8c9b9a88397148dc68860f183e75ff4e09a2b922554843dff6bde", + "l2_l1_root_hash": "0xf769ef2c8211398f31675bce8797ddc52dac8f4d22606e941a7d7561e1227dd1", + "initial_writes_bytes": "db1231bec2de6342908165662c0d968bb89db1e63211d92fa9547a7efb81499457a1099a71e51ea9cce8d89b5a9f1741d3a704e5258077c811223a8b604cdc8a098c669256db6fe36d87834ceae9c8161af6b72f8b1543b8a3ffacca5b9206af0000000000000000000000000000000000000000000000000000000000000064", + "repeated_writes_bytes": 
"00000000000002920000000000000000000000000000000c0000000000000000000000000000001f000000000000003d000000000000000000000000000000000000000000000003aec912ce8057d164000000000000003e0000000000000000000000000000000000000000000000000003acf87e3e2200000000000000029400000000000000000000000000000000000000000000000001962afc49a1eb2e0000000000000028000000000000000000000000000000690000000000000000000000006349f8b5000000000000029800000000000000000000000000000000000000000000000000d3abb1bfb7be70", + "repeated_writes_hash": "0xe67f7b247bf84fdbd8a65ab9181343bdc741abac85d783ac9278cc812a76334e", + "initial_writes_hash": "0xa4f52787f13aec574d566ef0d9d8c440b895fb70b74df3758c63da0ff5b47741", "pass_through_bytes": "000000000000012bbf08d89aaedde3696967d5ac74d2733f10ace64c3a492f503f23b7566b37ab1700000000000000000000000000000000000000000000000000000000000000000000000000000000", "pass_through_hash": "0x1c695ec7d7944f720a2c0fc6b5651cbd3178967407bc4df579a15985652350e9", "meta_params_bytes": "000100037723960c07cda7251089daffbdd567476a7e31971ff801568a3856e8e8010006699c833b654b365f0e3ce866c394626d5e40461a6868809d452738606f", "meta_params_hash": "0x57404e50342edcd09180fb27fa49634676f71a3ce1a76e9b3edf6185bf164082", - "auxiliary_bytes": "cb5f7b72ab30095b81e2cd35c308a7a752fe59213475339b8a833e91bf731837680f578a7b39e9f74385a3aabfb4cf054917f23aea9ae165d2afaac02fc9f3b8dcc4877ab0c07a79a16ae34de6fb7971a54128db0d11791fd5064bd6d03076c1dc5a883793479c779f5c99b0fca910deb20195d8ccf430afad05a9c2bd9f81bd", - "auxiliary_hash": "0x31fff4dc27ee5cbba99aac88a1fd05be00133398b9b7679774663f56b3775dd1", - "commitment_hash": "0x3af4672cd1362badfc0cbc47a7e8b3fbcd3c947055af041b4481bb15009c41a8" + "auxiliary_bytes": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "auxiliary_hash": 
"0x7c613c82ec911cf56dd6241854dd87bd538e0201f4ff0735f56a1a013db6466a", + "commitment_hash": "0xea55acb8903f82e4cfedd2041ce2db2f3b77741b6e35bc90a4f0a11e9526bfc2" } } From b74a0f09c8634ebc9f55d319c90c6da42cf2a94c Mon Sep 17 00:00:00 2001 From: Todd <148772493+toddfil@users.noreply.github.com> Date: Thu, 30 Nov 2023 14:13:09 +0800 Subject: [PATCH 060/115] chore: update docs (#465) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - update docs ## Why ❔ - incorrect link reference in zk_intuition.md ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Igor Aleksanov --- docs/advanced/zk_intuition.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/advanced/zk_intuition.md b/docs/advanced/zk_intuition.md index 7ea7dad0a44..4a8996ff8db 100644 --- a/docs/advanced/zk_intuition.md +++ b/docs/advanced/zk_intuition.md @@ -139,8 +139,7 @@ version 1.4.0. 
[witness_example]: https://github.com/matter-labs/era-zkevm_test_harness/tree/main/src/witness/individual_circuits/decommit_code.rs#L24 -[verifier]: - https://github.com/matter-labs/zksync-2-contracts/blob/d9785355518edc7f686fb2c91ff7d1caced9f9b8/ethereum/contracts/zksync/Plonk4VerifierWithAccessToDNext.sol#L284 +[verifier]: https://github.com/matter-labs/era-contracts/blob/main/ethereum/contracts/zksync/Verifier.sol [bellman repo]: https://github.com/matter-labs/bellman [bellman cuda repo]: https://github.com/matter-labs/era-bellman-cuda [example ecrecover circuit]: From 0d55d6df980b70a060712ca4beb80d72d704b9d4 Mon Sep 17 00:00:00 2001 From: Jean <148654781+oxJean@users.noreply.github.com> Date: Thu, 30 Nov 2023 14:14:45 +0800 Subject: [PATCH 061/115] chore: fixed typo in code notes (#497) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ fixed typo in code notes ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Igor Aleksanov --- infrastructure/protocol-upgrade/src/transaction.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index 9162da1c46f..f4fd30dcde1 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -60,7 +60,7 @@ export interface L2CanonicalTransaction { // is to be passed to account and any changes to its structure // would mean a breaking change to these accounts. In order to prevent this, // we should keep some fields as "reserved". 
- // It is also recommneded that their length is fixed, since + // It is also recommended that their length is fixed, since // it would allow easier proof integration (in case we will need // some special circuit for preprocessing transactions). reserved: [BigNumberish, BigNumberish, BigNumberish, BigNumberish]; From c067007a3e9bc41db35d25f1beb79eb5d2dc5bb2 Mon Sep 17 00:00:00 2001 From: Karma <148863819+0xKarm@users.noreply.github.com> Date: Thu, 30 Nov 2023 14:15:15 +0800 Subject: [PATCH 062/115] chore(docs): fixed typos in documentation (#479) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - fix typo in README.md ## Why ❔ - fix typo ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Igor Aleksanov --- core/tests/loadnext/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/tests/loadnext/README.md b/core/tests/loadnext/README.md index 99c0a47d5b7..397226f6689 100644 --- a/core/tests/loadnext/README.md +++ b/core/tests/loadnext/README.md @@ -8,7 +8,7 @@ The general flow is as follows: - The master account performs an initial deposit to L2 - Paymaster on L2 is funded if necessary - The L2 master account distributes funds to the participating accounts (`accounts_amount` configuration option) -- Each account continiously sends L2 transactions as configured in `contract_execution_params` configuration option. At +- Each account continuously sends L2 transactions as configured in `contract_execution_params` configuration option. At any given time there are no more than `max_inflight_txs` transactions in flight for each account. - Once each account is done with the initial deposit, the test is run for `duration_sec` seconds. 
- After the test is finished, the master account withdraws all the remaining funds from L2. From f4f322ae03f08abe8c2ae391798297f345ee8da7 Mon Sep 17 00:00:00 2001 From: Doll <148654386+Dollyerls@users.noreply.github.com> Date: Thu, 30 Nov 2023 14:16:35 +0800 Subject: [PATCH 063/115] chore: fixed typo (#486) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixed typo ## Why ❔ Affect reading ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Igor Aleksanov --- core/tests/ts-integration/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/tests/ts-integration/README.md b/core/tests/ts-integration/README.md index b3707cac664..5b34bd7ac02 100644 --- a/core/tests/ts-integration/README.md +++ b/core/tests/ts-integration/README.md @@ -134,7 +134,7 @@ finalization: it make take several hours to generate a proof and send it onchain Because of that, framework supports "fast" and "long" modes. `TestMaster` objects have `isFastMode` method to determine which mode is currently being used. -If you're going to write a test that can make test run duration longer, it is adviced to guard the "long" part with the +If you're going to write a test that can make test run duration longer, it is advised to guard the "long" part with the corresponding check. 
By default, "long" mode is assumed, and to enable the "fast" mode one must set the `ZK_INTEGRATION_TESTS_FAST_MODE` From dbe89e2dd802dcf4a4bbe23b05770a24d27b390a Mon Sep 17 00:00:00 2001 From: Salad <148864073+Saladerl@users.noreply.github.com> Date: Thu, 30 Nov 2023 14:17:35 +0800 Subject: [PATCH 064/115] chore(docs): fix docs (#487) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ fixed docs ## Why ❔ fixed typo ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Igor Aleksanov --- docs/advanced/02_deposits.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/advanced/02_deposits.md b/docs/advanced/02_deposits.md index 9d9ac4d526b..a7b72949593 100644 --- a/docs/advanced/02_deposits.md +++ b/docs/advanced/02_deposits.md @@ -162,7 +162,7 @@ The zk server (that you started with `zk server` command) is listening on events ) and adds them to the postgres database (into `transactions` table). You can actually check it - by running the psql and looking at the contents of the table - then you'll notice that -transaction was succesfully inserted, and it was also marked as 'priority' (as it came from L1) - as regular +transaction was successfully inserted, and it was also marked as 'priority' (as it came from L1) - as regular transactions that are received by the server directly are not marked as priority. 
You can verify that this is your transaction, by looking at the `l1_block_number` column (it should match the From 4b00ee01b5d07f829ff78d7ab05231f8854fee08 Mon Sep 17 00:00:00 2001 From: momodaka <463435681@qq.com> Date: Thu, 30 Nov 2023 14:17:37 +0800 Subject: [PATCH 065/115] docs: fix typo (#488) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ fix typo issue `succesfully` -> `successfully` `occurence` -> `occurrence` ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: jiachen Co-authored-by: Igor Aleksanov --- docs/advanced/bytecode_compression.md | 2 +- docs/advanced/zk_intuition.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/advanced/bytecode_compression.md b/docs/advanced/bytecode_compression.md index 6f94277f801..2c9e42acd5d 100644 --- a/docs/advanced/bytecode_compression.md +++ b/docs/advanced/bytecode_compression.md @@ -31,7 +31,7 @@ Dictionary would be: 3 -> 0xC (count: 1) ``` -Note that '1' maps to '0xD', as it occurs twice, and first occurrence is earlier than first occurence of 0xB, that also +Note that '1' maps to '0xD', as it occurs twice, and first occurrence is earlier than first occurrence of 0xB, that also occurs twice. Compressed bytecode: diff --git a/docs/advanced/zk_intuition.md b/docs/advanced/zk_intuition.md index 4a8996ff8db..58777b264fc 100644 --- a/docs/advanced/zk_intuition.md +++ b/docs/advanced/zk_intuition.md @@ -7,7 +7,7 @@ understanding. We're leaving out a lot of details to keep things brief. In our case, the prover takes public input and witness (which is huge - you'll see below), and produces a proof, but the verifier takes (public input, proof) only, without witness.
This means that the huge witness doesn't have to be -submitted to L1. This property can be used for many things, like privacy, but here we use it to ipmlement an efficient +submitted to L1. This property can be used for many things, like privacy, but here we use it to implement an efficient rollup that publishes the least required amount of data to L1. ## Basic overview @@ -85,7 +85,7 @@ located in a module [zksync core witness]. However, for the new proof system, th new location called [separate witness binary]. Inside this new location, after the necessary data is fetched from storage, the witness generator calls another piece of -code from [zkevm_test_harness witness] named `run_with_fixed_params`. This code is responsible for createing the +code from [zkevm_test_harness witness] named `run_with_fixed_params`. This code is responsible for creating the witnesses themselves (which can get really HUGE). ## Generating the Proof From dddb797818661ba966e1a6202d340bc28ccaa971 Mon Sep 17 00:00:00 2001 From: buldazer <93915704+buldazer23@users.noreply.github.com> Date: Thu, 30 Nov 2023 09:19:14 +0300 Subject: [PATCH 066/115] chore: fix minor typo (#518) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ fix minor typo ## Why ❔ minor typo succesfully---->successfully were----->where occurence---->occurrence separte---->separate constrain system---->constraint system constraing system---->constraint system Co-authored-by: Igor Aleksanov --- docs/advanced/03_withdrawals.md | 2 +- docs/advanced/contracts.md | 2 +- docs/advanced/prover.md | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/advanced/03_withdrawals.md b/docs/advanced/03_withdrawals.md index 003121d8646..46833809e0a 100644 --- a/docs/advanced/03_withdrawals.md +++ b/docs/advanced/03_withdrawals.md @@ -81,7 +81,7 @@ This is a good opportunity to talk about system contracts that are automatically list here [in 
github](https://github.com/matter-labs/era-system-contracts/blob/436d57da2fb35c40e38bcb6637c3a090ddf60701/scripts/constants.ts#L29) -This is the place were we specify that `bootloader` is at address 0x8001, `NonceHolder` at 0x8003 etc. +This is the place where we specify that `bootloader` is at address 0x8001, `NonceHolder` at 0x8003 etc. This brings us to [L2EthToken.sol](https://github.com/matter-labs/era-system-contracts/blob/main/contracts/L2EthToken.sol) that has the diff --git a/docs/advanced/contracts.md b/docs/advanced/contracts.md index 9b44268827c..e32992fb79b 100644 --- a/docs/advanced/contracts.md +++ b/docs/advanced/contracts.md @@ -32,7 +32,7 @@ a bunch of registers. More details on this will be written in the future article Having a different VM means that we must have a separate compiler [zk-solc](https://github.com/matter-labs/zksolc-bin) - as the bytecode that is produced by this compiler has to use the zkEVM specific opcodes. -While having a separte compiler introduces a bunch of challenges (for example, we need a custom +While having a separate compiler introduces a bunch of challenges (for example, we need a custom [hardhat plugins](https://github.com/matter-labs/hardhat-zksync) ), it brings a bunch of benefits too: for example it allows us to move some of the VM logic (like new contract deployment) into System contracts - which allows faster & cheaper modifications and increased flexibility. diff --git a/docs/advanced/prover.md b/docs/advanced/prover.md index 02e69c4d38e..6211f00dea7 100644 --- a/docs/advanced/prover.md +++ b/docs/advanced/prover.md @@ -86,7 +86,7 @@ pub fn select>( ``` And then there is a block of code for witness evaluation (let's skip it for now), and the final block that adds the gate -to the constrain system `cs`: +to the constraint system `cs`: ```rust if ::SetupConfig::KEEP_SETUP { @@ -204,7 +204,7 @@ filled with concrete values. 
### CsAllocatable -Implements CsAllocatable - which allows you to directly 'allocate' this struct within constraing system (similarly to +Implements CsAllocatable - which allows you to directly 'allocate' this struct within constraint system (similarly to how we were operating on regular 'Variables' above). ### CSSelectable From 6095d690154cd16d6e6688ec4830f5e8f226e2ac Mon Sep 17 00:00:00 2001 From: Mc Kenna <150222622+McKenna8989@users.noreply.github.com> Date: Thu, 30 Nov 2023 14:26:21 +0800 Subject: [PATCH 067/115] chore(docs): the typos have been corrected (#441) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - the typos have been corrected ## Why ❔ - the typos have been corrected ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Igor Aleksanov --- .../contracts/custom-account/interfaces/IPaymaster.sol | 2 +- core/tests/ts-integration/src/test-master.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/tests/ts-integration/contracts/custom-account/interfaces/IPaymaster.sol b/core/tests/ts-integration/contracts/custom-account/interfaces/IPaymaster.sol index cf5ced94878..1bd5b81f32b 100644 --- a/core/tests/ts-integration/contracts/custom-account/interfaces/IPaymaster.sol +++ b/core/tests/ts-integration/contracts/custom-account/interfaces/IPaymaster.sol @@ -37,7 +37,7 @@ interface IPaymaster { /// @param _context, the context of the execution, returned by the "validateAndPayForPaymasterTransaction" method. /// @param _transaction, the users' transaction. /// @param _txResult, the result of the transaction execution (success or failure). - /// @param _maxRefundedGas, the upper bound on the amout of gas that could be refunded to the paymaster. 
+ /// @param _maxRefundedGas, the upper bound on the amount of gas that could be refunded to the paymaster. /// @dev The exact amount refunded depends on the gas spent by the "postOp" itself and so the developers should /// take that into account. function postTransaction( diff --git a/core/tests/ts-integration/src/test-master.ts b/core/tests/ts-integration/src/test-master.ts index 8f59288ba5c..8919bbffd1e 100644 --- a/core/tests/ts-integration/src/test-master.ts +++ b/core/tests/ts-integration/src/test-master.ts @@ -30,7 +30,7 @@ export class TestMaster { const contextStr = process.env.ZKSYNC_JEST_TEST_CONTEXT; if (!contextStr) { - throw new Error('Test context was not initalized; unable to load context environment variable'); + throw new Error('Test context was not initialized; unable to load context environment variable'); } const context = JSON.parse(contextStr) as TestContext; From f15885e4850a70f321da0c8d4b4d2b48df686df4 Mon Sep 17 00:00:00 2001 From: 0xmbcode <152050562+0xmbcode@users.noreply.github.com> Date: Thu, 30 Nov 2023 09:35:28 +0300 Subject: [PATCH 068/115] chore: Fix broken link in repositories.md (#565) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Hello,
 ## Description This change is made to ensure that developers, contributors can access the correct documentation.
 ## Changes Made This pull request fixes a broken link. 

 ## Why The broken link was causing confusion and potentially preventing developers, contributors from accessing the zksync-cli. Correct link:
https://github.com/matter-labs/zksync-cli ## Testing Done - Manually reviewed the documentation and verified the correct link.
 - No additional dependencies or changes are required. - This fix is straightforward and does not impact any other functionalities. Thank you. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --- docs/repositories.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/repositories.md b/docs/repositories.md index 7250c5aef22..0902f38dcd8 100644 --- a/docs/repositories.md +++ b/docs/repositories.md @@ -51,7 +51,7 @@ | [local-setup](https://github.com/matter-labs/local-setup) | Docker-based zk server (together with L1), that can be used for local testing | | [zksolc-bin](https://github.com/matter-labs/zksolc-bin) | repository with solc compiler binaries | | [zkvyper-bin](https://github.com/matter-labs/zkvyper-bin) | repository with vyper compiler binaries | -| [zksync-cli](<(https://github.com/matter-labs/zksync-cli)>) | Command line tool to interact with zksync | +| [zksync-cli](https://github.com/matter-labs/zksync-cli) | Command line tool to interact with zksync | | [hardhat-zksync](https://github.com/matter-labs/hardhat-zksync) | Plugins for hardhat | ### Examples & documentation From cae51479570060fe9e78816714f738cd6dff22d0 Mon Sep 17 00:00:00 2001 From: RakeshXBT <85406290+Rakesh-lab-stack@users.noreply.github.com> Date: Thu, 30 Nov 2023 12:06:27 +0530 Subject: [PATCH 069/115] chore: fix minor typo in rust code in docs (#566) Co-authored-by: Igor Aleksanov --- docs/advanced/gas_and_fees.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/advanced/gas_and_fees.md b/docs/advanced/gas_and_fees.md index 800d27299c2..b8f0e531e98 100644 --- a/docs/advanced/gas_and_fees.md +++ b/docs/advanced/gas_and_fees.md @@ -86,7 +86,7 
@@ transaction. ```rust let gas_remaining_before = vm.gas_remaining(); execute_tx(); -let gas_used = gas_remainig_before = vm.gas_remaining(); +let gas_used = gas_remaining_before - vm.gas_remaining(); ``` ## Gas estimation From 26767b6952c3588e7bc1c9dfe6c3261931cf78d2 Mon Sep 17 00:00:00 2001 From: Tudor <32748771+RedaOps@users.noreply.github.com> Date: Thu, 30 Nov 2023 09:40:24 +0200 Subject: [PATCH 070/115] refactor(eth_client): Use `BlockId` for `block_id` instead of String in `eth_client` (#499) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ The `block` function within `eth_client` now uses the type `BlockId` instead of `String` for its `block_id` parameter. ## Why ❔ TODO comment: https://github.com/matter-labs/zksync-era/blob/cb873bd0da6b421160ce96b8d578f1351861f376/core/lib/eth_client/src/clients/http/query.rs#L289-L308 This PR fixes the TODO. The `web3` crate being used inside the project was updated to version `0.19.0` inside `core/lib/basic_types/Cargo.toml` in commit 829ef5085f938ddda1f2a695930c6b4308e1643a. The `web3` crate now supports `BlockNumber::Finalized`. Source: https://docs.rs/web3/latest/web3/types/enum.BlockNumber.html Source: https://github.com/tomusdrw/rust-web3/releases/tag/v0.19.0 ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Code has been formatted via `zk fmt` and `zk lint`. 
Co-authored-by: Igor Aleksanov --- core/lib/eth_client/src/clients/http/query.rs | 16 +++------------- core/lib/eth_client/src/clients/http/signing.rs | 2 +- core/lib/eth_client/src/clients/mock.rs | 4 ++-- core/lib/eth_client/src/lib.rs | 2 +- .../zksync_core/src/eth_sender/eth_tx_manager.rs | 8 ++++++-- core/lib/zksync_core/src/eth_watch/client.rs | 4 ++-- 6 files changed, 15 insertions(+), 21 deletions(-) diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 198f5fc45af..0094c76f88a 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -14,13 +14,12 @@ use zksync_types::web3::{ Contract, Options, }, ethabi, - helpers::CallFuture, transports::Http, types::{ Address, Block, BlockId, BlockNumber, Bytes, Filter, Log, Transaction, TransactionId, TransactionReceipt, H256, U256, U64, }, - Transport, Web3, + Web3, }; /// An "anonymous" Ethereum client that can invoke read-only methods that aren't @@ -286,23 +285,14 @@ impl EthInterface for QueryClient { Ok(logs) } - // TODO (PLA-333): at the moment the latest version of `web3` crate doesn't have `Finalized` variant in `BlockNumber`. - // However, it's already added in github repo and probably will be included in the next released version. - // Scope of PLA-333 includes forking/using crate directly from github, after that we will be able to change - // type of `block_id` from `String` to `BlockId` and use `self.web3.eth().block(block_id)`. 
async fn block( &self, - block_id: String, + block_id: BlockId, component: &'static str, ) -> Result>, Error> { COUNTERS.call[&(Method::Block, component)].inc(); let latency = LATENCIES.direct[&Method::Block].start(); - let block = CallFuture::new( - self.web3 - .transport() - .execute("eth_getBlockByNumber", vec![block_id.into(), false.into()]), - ) - .await?; + let block = self.web3.eth().block(block_id).await?; latency.observe(); Ok(block) } diff --git a/core/lib/eth_client/src/clients/http/signing.rs b/core/lib/eth_client/src/clients/http/signing.rs index fcc38efb4cc..a0a6647db5f 100644 --- a/core/lib/eth_client/src/clients/http/signing.rs +++ b/core/lib/eth_client/src/clients/http/signing.rs @@ -213,7 +213,7 @@ impl EthInterface for SigningClient { async fn block( &self, - block_id: String, + block_id: BlockId, component: &'static str, ) -> Result>, Error> { self.query_client.block(block_id, component).await diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index 07297a3645f..576fbac21a7 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -342,7 +342,7 @@ impl EthInterface for MockEthereum { async fn block( &self, - _block_id: String, + _block_id: BlockId, _component: &'static str, ) -> Result>, Error> { unimplemented!("Not needed right now") @@ -524,7 +524,7 @@ impl + Send + Sync> EthInterface for T { async fn block( &self, - block_id: String, + block_id: BlockId, component: &'static str, ) -> Result>, Error> { self.as_ref().block(block_id, component).await diff --git a/core/lib/eth_client/src/lib.rs b/core/lib/eth_client/src/lib.rs index a0350368325..f61814893bb 100644 --- a/core/lib/eth_client/src/lib.rs +++ b/core/lib/eth_client/src/lib.rs @@ -131,7 +131,7 @@ pub trait EthInterface: Sync + Send { /// Returns the block header for the specified block number or hash. 
async fn block( &self, - block_id: String, + block_id: BlockId, component: &'static str, ) -> Result>, Error>; } diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs index 98e75702a4c..5aab4a2903c 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs @@ -12,7 +12,11 @@ use zksync_eth_client::{ }; use zksync_types::{ eth_sender::EthTx, - web3::{contract::Options, error::Error as Web3Error}, + web3::{ + contract::Options, + error::Error as Web3Error, + types::{BlockId, BlockNumber}, + }, L1BlockNumber, Nonce, H256, U256, }; use zksync_utils::time::seconds_since_epoch; @@ -285,7 +289,7 @@ where (latest_block_number.saturating_sub(confirmations) as u32).into() } else { self.ethereum_gateway - .block("finalized".to_string(), "eth_tx_manager") + .block(BlockId::Number(BlockNumber::Finalized), "eth_tx_manager") .await? .expect("Finalized block must be present on L1") .number diff --git a/core/lib/zksync_core/src/eth_watch/client.rs b/core/lib/zksync_core/src/eth_watch/client.rs index af38ac79ae7..cbd3785640e 100644 --- a/core/lib/zksync_core/src/eth_watch/client.rs +++ b/core/lib/zksync_core/src/eth_watch/client.rs @@ -5,7 +5,7 @@ use zksync_types::{ vk_transform::l1_vk_commitment, web3::{ self, - types::{BlockNumber, FilterBuilder, Log}, + types::{BlockId, BlockNumber, FilterBuilder, Log}, }, Address, H256, }; @@ -225,7 +225,7 @@ impl EthClient for EthHttpQueryClient Date: Thu, 30 Nov 2023 11:11:21 +0100 Subject: [PATCH 071/115] ci: Remove leftovers of explicit publishing to public Docker registries (#496) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Subj ## Why ❔ All DockerHub registries of FOSS repos are now public ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. 
- [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/build-core-template.yml | 2 +- infrastructure/zk/src/docker.ts | 24 ++++++++++------------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index c4ae27faf9c..95bb2894795 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -83,7 +83,7 @@ jobs: COMPONENT: ${{ matrix.component }} run: | ci_run rustup default nightly-2023-08-21 - ci_run zk docker $DOCKER_ACTION $COMPONENT -- --public + ci_run zk docker $DOCKER_ACTION $COMPONENT - name: Show sccache stats if: always() run: | diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 3db9a69313d..9a4bf6e291a 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -25,19 +25,19 @@ async function dockerCommand( command: 'push' | 'build', image: string, customTag?: string, - publishPublic: boolean = false, dockerOrg: string = 'matterlabs' ) { // Generating all tags for containers. We need 2 tags here: SHA and SHA+TS const { stdout: COMMIT_SHORT_SHA }: { stdout: string } = await utils.exec('git rev-parse --short HEAD'); + // COMMIT_SHORT_SHA returns with newline, so we need to trim it const imageTagShaTS: string = process.env.IMAGE_TAG_SUFFIX ? 
process.env.IMAGE_TAG_SUFFIX : `${COMMIT_SHORT_SHA.trim()}-${UNIX_TIMESTAMP}`; - // we want alternative flow for rust image + // We want an alternative flow for Rust image if (image == 'rust') { - await dockerCommand(command, 'server-v2', customTag, publishPublic); - await dockerCommand(command, 'prover', customTag, publishPublic); + await dockerCommand(command, 'server-v2', customTag, dockerOrg); + await dockerCommand(command, 'prover', customTag, dockerOrg); return; } if (!IMAGES.includes(image)) { @@ -49,14 +49,14 @@ async function dockerCommand( } const tagList = customTag ? [customTag] : defaultTagList(image, COMMIT_SHORT_SHA.trim(), imageTagShaTS); + // Main build\push flow - // COMMIT_SHORT_SHA returns with newline, so we need to trim it switch (command) { case 'build': await _build(image, tagList, dockerOrg); break; case 'push': - await _push(image, tagList, publishPublic); + await _push(image, tagList); break; default: console.log(`Unknown command for docker ${command}.`); @@ -114,7 +114,7 @@ async function _build(image: string, tagList: string[], dockerOrg: string) { await utils.spawn(buildCommand); } -async function _push(image: string, tagList: string[], publishPublic: boolean = false) { +async function _push(image: string, tagList: string[]) { // For development purposes, we want to use `2.0` tags for 2.0 images, just to not interfere with 1.x for (const tag of tagList) { @@ -134,9 +134,6 @@ async function _push(image: string, tagList: string[], publishPublic: boolean = await utils.spawn(`docker push asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${image}:${tag}`); await utils.spawn(`docker push europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${image}:${tag}`); } - if (image == 'external-node' && publishPublic) { - await utils.spawn(`docker push matterlabs/${image}-public:${tag}`); - } } } @@ -145,12 +142,12 @@ export async function build(image: string, cmd: Command) { } export async function customBuildForHyperchain(image: string, 
dockerOrg: string) { - await dockerCommand('build', image, '', false, dockerOrg); + await dockerCommand('build', image, '', dockerOrg); } export async function push(image: string, cmd: Command) { - await dockerCommand('build', image, cmd.customTag, cmd.public); - await dockerCommand('push', image, cmd.customTag, cmd.public); + await dockerCommand('build', image, cmd.customTag); + await dockerCommand('push', image, cmd.customTag); } export async function restart(container: string) { @@ -171,7 +168,6 @@ command command .command('push ') .option('--custom-tag ', 'Custom tag for image') - .option('--public', 'Publish image to the public repo') .description('build and push docker image') .action(push); command.command('pull').description('pull all containers').action(pull); From 83791aa6704221755674dd5b1eb428e286f791da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Thu, 30 Nov 2023 11:49:23 +0100 Subject: [PATCH 072/115] feat: zk db check-sqlx-check on pre-push (#548) --- .githooks/pre-push | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.githooks/pre-push b/.githooks/pre-push index eb1acbb693c..94cc937c9b1 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # # Pre-push hook verifying that inappropriate code will not be pushed. @@ -8,7 +8,13 @@ NC='\033[0m' # No Color # Check that prettier formatting rules are not violated. if ! zk fmt --check; then - echo -e "${RED}Commit error!${NC}" + echo -e "${RED}Push error!${NC}" echo "Please format the code via 'zk fmt', cannot push unformatted code" exit 1 fi + +if ! 
zk db check-sqlx-data; then + echo -e "${RED}Push error!${NC}" + echo "Please update sqlx-data.json via 'zk db setup', cannot push invalid sqlx-data.json file" + exit 1 +fi From e2c1b20e361e6ee2f5ac69cefe75d9c5575eb2f7 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 30 Nov 2023 14:34:55 +0200 Subject: [PATCH 073/115] feat(merkle tree): Remove enumeration index assignment from Merkle tree (#551) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Since enumeration indices are now fully stored in Postgres, it makes sense to not duplicate their assignment in the Merkle tree. Instead, the tree could take enum indices as inputs. ## Why ❔ This allows simplifying tree logic and unify "normal" L1 batch processing and tree recovery. (This unification is not a part of this PR; it'll be implemented separately.) ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- .../lib/merkle_tree/examples/loadtest/main.rs | 24 +- core/lib/merkle_tree/examples/recovery.rs | 10 +- core/lib/merkle_tree/src/consistency.rs | 35 ++- core/lib/merkle_tree/src/domain.rs | 145 ++++++------ core/lib/merkle_tree/src/getters.rs | 17 +- core/lib/merkle_tree/src/hasher/mod.rs | 24 +- core/lib/merkle_tree/src/hasher/proofs.rs | 62 +++-- core/lib/merkle_tree/src/lib.rs | 13 +- core/lib/merkle_tree/src/pruning.rs | 28 +-- core/lib/merkle_tree/src/recovery.rs | 35 +-- core/lib/merkle_tree/src/storage/mod.rs | 62 ++--- core/lib/merkle_tree/src/storage/patch.rs | 21 +- core/lib/merkle_tree/src/storage/proofs.rs | 224 ++---------------- .../merkle_tree/src/storage/serialization.rs | 11 +- core/lib/merkle_tree/src/storage/tests.rs | 128 +++++----- core/lib/merkle_tree/src/types/internal.rs | 22 +- core/lib/merkle_tree/src/types/mod.rs | 93 ++++++-- core/lib/merkle_tree/src/utils.rs | 5 - .../merkle_tree/tests/integration/common.rs | 50 ++-- .../tests/integration/consistency.rs | 6 +- .../merkle_tree/tests/integration/domain.rs | 53 +++-- .../tests/integration/merkle_tree.rs | 116 +++++---- .../merkle_tree/tests/integration/recovery.rs | 43 +--- .../zksync_core/src/api_server/tree/mod.rs | 2 +- .../src/metadata_calculator/helpers.rs | 91 +++---- .../src/metadata_calculator/metrics.rs | 2 +- 26 files changed, 591 insertions(+), 731 deletions(-) diff --git a/core/lib/merkle_tree/examples/loadtest/main.rs b/core/lib/merkle_tree/examples/loadtest/main.rs index b598a579f6b..527daa87b37 100644 --- a/core/lib/merkle_tree/examples/loadtest/main.rs +++ b/core/lib/merkle_tree/examples/loadtest/main.rs @@ -15,7 +15,8 @@ use std::{ use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ - Database, HashTree, MerkleTree, MerkleTreePruner, PatchSet, RocksDBWrapper, TreeInstruction, + Database, HashTree, MerkleTree, MerkleTreePruner, PatchSet, RocksDBWrapper, TreeEntry, + TreeInstruction, }; use zksync_storage::{RocksDB, RocksDBOptions}; use 
zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; @@ -135,19 +136,22 @@ impl Cli { next_key_idx += new_keys.len() as u64; next_value_idx += (new_keys.len() + updated_indices.len()) as u64; - let values = (next_value_idx..).map(H256::from_low_u64_be); let updated_keys = Self::generate_keys(updated_indices.into_iter()); - let kvs = new_keys.into_iter().chain(updated_keys).zip(values); + let kvs = new_keys + .into_iter() + .chain(updated_keys) + .zip(next_value_idx..); + let kvs = kvs.map(|(key, idx)| { + // The assigned leaf indices here are not always correct, but it's OK for load test purposes. + TreeEntry::new(key, idx, H256::from_low_u64_be(idx)) + }); tracing::info!("Processing block #{version}"); let start = Instant::now(); let root_hash = if self.proofs { - let reads = Self::generate_keys(read_indices.into_iter()) - .map(|key| (key, TreeInstruction::Read)); - let instructions = kvs - .map(|(key, hash)| (key, TreeInstruction::Write(hash))) - .chain(reads) - .collect(); + let reads = + Self::generate_keys(read_indices.into_iter()).map(TreeInstruction::Read); + let instructions = kvs.map(TreeInstruction::Write).chain(reads).collect(); let output = tree.extend_with_proofs(instructions); output.root_hash().unwrap() } else { @@ -160,7 +164,7 @@ impl Cli { tracing::info!("Verifying tree consistency..."); let start = Instant::now(); - tree.verify_consistency(self.commit_count - 1) + tree.verify_consistency(self.commit_count - 1, false) .expect("tree consistency check failed"); let elapsed = start.elapsed(); tracing::info!("Verified tree consistency in {elapsed:?}"); diff --git a/core/lib/merkle_tree/examples/recovery.rs b/core/lib/merkle_tree/examples/recovery.rs index af16ed05baf..1a2aae236ea 100644 --- a/core/lib/merkle_tree/examples/recovery.rs +++ b/core/lib/merkle_tree/examples/recovery.rs @@ -9,8 +9,8 @@ use std::time::Instant; use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ - recovery::{MerkleTreeRecovery, 
RecoveryEntry}, - HashTree, Key, PatchSet, PruneDatabase, RocksDBWrapper, ValueHash, + recovery::MerkleTreeRecovery, HashTree, Key, PatchSet, PruneDatabase, RocksDBWrapper, + TreeEntry, ValueHash, }; use zksync_storage::{RocksDB, RocksDBOptions}; @@ -94,7 +94,7 @@ impl Cli { .map(|_| { last_leaf_index += 1; if self.random { - RecoveryEntry { + TreeEntry { key: Key::from(rng.gen::<[u8; 32]>()), value: ValueHash::zero(), leaf_index: last_leaf_index, @@ -102,7 +102,7 @@ impl Cli { } else { last_key += key_step - Key::from(rng.gen::()); // ^ Increases the key by a random increment close to `key` step with some randomness. - RecoveryEntry { + TreeEntry { key: last_key, value: ValueHash::zero(), leaf_index: last_leaf_index, @@ -127,7 +127,7 @@ impl Cli { recovery_started_at.elapsed() ); let started_at = Instant::now(); - tree.verify_consistency(recovered_version).unwrap(); + tree.verify_consistency(recovered_version, true).unwrap(); tracing::info!("Verified consistency in {:?}", started_at.elapsed()); } } diff --git a/core/lib/merkle_tree/src/consistency.rs b/core/lib/merkle_tree/src/consistency.rs index 85896bad1ae..2cc8996e64e 100644 --- a/core/lib/merkle_tree/src/consistency.rs +++ b/core/lib/merkle_tree/src/consistency.rs @@ -69,10 +69,17 @@ pub enum ConsistencyError { impl MerkleTree { /// Verifies the internal tree consistency as stored in the database. /// + /// If `validate_indices` flag is set, it will be checked that indices for all tree leaves are unique + /// and are sequentially assigned starting from 1. + /// /// # Errors /// /// Returns an error (the first encountered one if there are multiple). 
- pub fn verify_consistency(&self, version: u64) -> Result<(), ConsistencyError> { + pub fn verify_consistency( + &self, + version: u64, + validate_indices: bool, + ) -> Result<(), ConsistencyError> { let manifest = self.db.try_manifest()?; let manifest = manifest.ok_or(ConsistencyError::MissingVersion(version))?; if version >= manifest.version_count { @@ -91,16 +98,19 @@ impl MerkleTree { // We want to perform a depth-first walk of the tree in order to not keep // much in memory. let root_key = Nibbles::EMPTY.with_version(version); - let leaf_data = LeafConsistencyData::new(leaf_count); - self.validate_node(&root_node, root_key, &leaf_data)?; - leaf_data.validate_count() + let leaf_data = validate_indices.then(|| LeafConsistencyData::new(leaf_count)); + self.validate_node(&root_node, root_key, leaf_data.as_ref())?; + if let Some(leaf_data) = leaf_data { + leaf_data.validate_count()?; + } + Ok(()) } fn validate_node( &self, node: &Node, key: NodeKey, - leaf_data: &LeafConsistencyData, + leaf_data: Option<&LeafConsistencyData>, ) -> Result { match node { Node::Leaf(leaf) => { @@ -111,7 +121,9 @@ impl MerkleTree { full_key: leaf.full_key, }); } - leaf_data.insert_leaf(leaf)?; + if let Some(leaf_data) = leaf_data { + leaf_data.insert_leaf(leaf)?; + } } Node::Internal(node) => { @@ -261,7 +273,10 @@ mod tests { use std::num::NonZeroU64; use super::*; - use crate::{types::InternalNode, PatchSet}; + use crate::{ + types::{InternalNode, TreeEntry}, + PatchSet, + }; use zksync_types::{H256, U256}; const FIRST_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0000_0000]); @@ -270,8 +285,8 @@ mod tests { fn prepare_database() -> PatchSet { let mut tree = MerkleTree::new(PatchSet::default()); tree.extend(vec![ - (FIRST_KEY, H256([1; 32])), - (SECOND_KEY, H256([2; 32])), + TreeEntry::new(FIRST_KEY, 1, H256([1; 32])), + TreeEntry::new(SECOND_KEY, 2, H256([2; 32])), ]); tree.db } @@ -300,7 +315,7 @@ mod tests { .num_threads(1) .build() .expect("failed initializing `rayon` thread pool"); - 
thread_pool.install(|| MerkleTree::new(db).verify_consistency(0)) + thread_pool.install(|| MerkleTree::new(db).verify_consistency(0, true)) } #[test] diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index bb82233aec2..0cd9a56a486 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -5,7 +5,10 @@ use zksync_utils::h256_to_u256; use crate::{ storage::{MerkleTreeColumnFamily, PatchSet, Patched, RocksDBWrapper}, - types::{Key, Root, TreeEntryWithProof, TreeInstruction, TreeLogEntry, ValueHash, TREE_DEPTH}, + types::{ + Key, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry, ValueHash, + TREE_DEPTH, + }, BlockOutput, HashTree, MerkleTree, NoVersionError, }; use zksync_crypto::hasher::blake2::Blake2Hasher; @@ -13,7 +16,7 @@ use zksync_storage::RocksDB; use zksync_types::{ proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}, writes::{InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord}, - L1BatchNumber, StorageKey, StorageLog, StorageLogKind, U256, + L1BatchNumber, StorageKey, U256, }; /// Metadata for the current tree state. @@ -65,17 +68,17 @@ impl ZkSyncTree { /// Returns metadata based on `storage_logs` generated by the genesis L1 batch. This does not /// create a persistent tree. 
- pub fn process_genesis_batch(storage_logs: &[StorageLog]) -> BlockOutput { - let kvs = Self::filter_write_logs(storage_logs); + pub fn process_genesis_batch(storage_logs: &[TreeInstruction]) -> BlockOutput { + let kvs = Self::filter_write_instructions(storage_logs); tracing::info!( "Creating Merkle tree for genesis batch with {instr_count} writes", instr_count = kvs.len() ); - let kvs = kvs + let kvs: Vec<_> = kvs .iter() - .map(|(k, v)| (k.hashed_key_u256(), *v)) - .collect::>(); + .map(|instr| instr.map_key(StorageKey::hashed_key_u256)) + .collect(); let mut in_memory_tree = MerkleTree::new(PatchSet::default()); let output = in_memory_tree.extend(kvs); @@ -170,29 +173,36 @@ impl ZkSyncTree { /// Panics if an inconsistency is detected. pub fn verify_consistency(&self, l1_batch_number: L1BatchNumber) { let version = u64::from(l1_batch_number.0); - self.tree.verify_consistency(version).unwrap_or_else(|err| { - panic!("Tree at version {version} is inconsistent: {err}"); - }); + self.tree + .verify_consistency(version, true) + .unwrap_or_else(|err| { + panic!("Tree at version {version} is inconsistent: {err}"); + }); } /// Processes an iterator of storage logs comprising a single L1 batch. 
- pub fn process_l1_batch(&mut self, storage_logs: &[StorageLog]) -> TreeMetadata { + pub fn process_l1_batch( + &mut self, + storage_logs: &[TreeInstruction], + ) -> TreeMetadata { match self.mode { TreeMode::Full => self.process_l1_batch_full(storage_logs), TreeMode::Lightweight => self.process_l1_batch_lightweight(storage_logs), } } - fn process_l1_batch_full(&mut self, storage_logs: &[StorageLog]) -> TreeMetadata { + fn process_l1_batch_full( + &mut self, + instructions: &[TreeInstruction], + ) -> TreeMetadata { let l1_batch_number = self.next_l1_batch_number(); - let instructions = Self::transform_logs(storage_logs); let starting_leaf_count = self.tree.latest_root().leaf_count(); let starting_root_hash = self.tree.latest_root_hash(); - let instructions_with_hashed_keys = instructions + let instructions_with_hashed_keys: Vec<_> = instructions .iter() - .map(|(k, instr)| (k.hashed_key_u256(), *instr)) - .collect::>(); + .map(|instr| instr.map_key(StorageKey::hashed_key_u256)) + .collect(); tracing::info!( "Extending Merkle tree with batch #{l1_batch_number} with {instr_count} ops in full mode", @@ -207,7 +217,7 @@ impl ZkSyncTree { let mut witness = PrepareBasicCircuitsJob::new(starting_leaf_count + 1); witness.reserve(output.logs.len()); - for (log, (key, instruction)) in output.logs.iter().zip(&instructions) { + for (log, instruction) in output.logs.iter().zip(instructions) { let empty_levels_end = TREE_DEPTH - log.merkle_path.len(); let empty_subtree_hashes = (0..empty_levels_end).map(|i| Blake2Hasher.empty_subtree_hash(i)); @@ -218,20 +228,22 @@ impl ZkSyncTree { .collect(); let value_written = match instruction { - TreeInstruction::Write(value) => value.0, - TreeInstruction::Read => [0_u8; 32], + TreeInstruction::Write(entry) => entry.value.0, + TreeInstruction::Read(_) => [0_u8; 32], }; let log = StorageLogMetadata { root_hash: log.root_hash.0, is_write: !log.base.is_read(), - first_write: matches!(log.base, TreeLogEntry::Inserted { .. 
}), + first_write: matches!(log.base, TreeLogEntry::Inserted), merkle_paths, - leaf_hashed_key: key.hashed_key_u256(), - leaf_enumeration_index: match log.base { - TreeLogEntry::Updated { leaf_index, .. } - | TreeLogEntry::Inserted { leaf_index } - | TreeLogEntry::Read { leaf_index, .. } => leaf_index, - TreeLogEntry::ReadMissingKey => 0, + leaf_hashed_key: instruction.key().hashed_key_u256(), + leaf_enumeration_index: match instruction { + TreeInstruction::Write(entry) => entry.leaf_index, + TreeInstruction::Read(_) => match log.base { + TreeLogEntry::Read { leaf_index, .. } => leaf_index, + TreeLogEntry::ReadMissingKey => 0, + _ => unreachable!("Read instructions always transform to Read / ReadMissingKey log entries"), + } }, value_written, value_read: match log.base { @@ -243,7 +255,7 @@ impl ZkSyncTree { previous_value.0 } TreeLogEntry::Read { value, .. } => value.0, - TreeLogEntry::Inserted { .. } | TreeLogEntry::ReadMissingKey => [0_u8; 32], + TreeLogEntry::Inserted | TreeLogEntry::ReadMissingKey => [0_u8; 32], }, }; witness.push_merkle_path(log); @@ -254,12 +266,12 @@ impl ZkSyncTree { .logs .into_iter() .filter_map(|log| (!log.base.is_read()).then_some(log.base)); - let kvs = instructions.into_iter().filter_map(|(key, instruction)| { - let TreeInstruction::Write(value) = instruction else { - return None; - }; - Some((key, value)) - }); + let kvs = instructions + .iter() + .filter_map(|instruction| match instruction { + TreeInstruction::Write(entry) => Some(*entry), + TreeInstruction::Read(_) => None, + }); let (initial_writes, repeated_writes, state_diffs) = Self::extract_writes(logs, kvs); tracing::info!( @@ -281,21 +293,9 @@ impl ZkSyncTree { } } - fn transform_logs(storage_logs: &[StorageLog]) -> Vec<(StorageKey, TreeInstruction)> { - let instructions = storage_logs.iter().map(|log| { - let key = log.key; - let instruction = match log.kind { - StorageLogKind::Write => TreeInstruction::Write(log.value), - StorageLogKind::Read => TreeInstruction::Read, - 
}; - (key, instruction) - }); - instructions.collect() - } - fn extract_writes( logs: impl Iterator, - kvs: impl Iterator, + entries: impl Iterator>, ) -> ( Vec, Vec, @@ -304,13 +304,14 @@ impl ZkSyncTree { let mut initial_writes = vec![]; let mut repeated_writes = vec![]; let mut state_diffs = vec![]; - for (log_entry, (key, value)) in logs.zip(kvs) { + for (log_entry, input_entry) in logs.zip(entries) { + let key = &input_entry.key; match log_entry { - TreeLogEntry::Inserted { leaf_index } => { + TreeLogEntry::Inserted => { initial_writes.push(InitialStorageWrite { - index: leaf_index, + index: input_entry.leaf_index, key: key.hashed_key_u256(), - value, + value: input_entry.value, }); state_diffs.push(StateDiffRecord { address: *key.address(), @@ -318,25 +319,25 @@ impl ZkSyncTree { derived_key: StorageKey::raw_hashed_key(key.address(), key.key()), enumeration_index: 0u64, initial_value: U256::default(), - final_value: h256_to_u256(value), + final_value: h256_to_u256(input_entry.value), }); } TreeLogEntry::Updated { + previous_value: prev_value_hash, leaf_index, - previous_value, } => { - if previous_value != value { + if prev_value_hash != input_entry.value { repeated_writes.push(RepeatedStorageWrite { - index: leaf_index, - value, + index: input_entry.leaf_index, + value: input_entry.value, }); state_diffs.push(StateDiffRecord { address: *key.address(), key: h256_to_u256(*key.key()), derived_key: StorageKey::raw_hashed_key(key.address(), key.key()), enumeration_index: leaf_index, - initial_value: h256_to_u256(previous_value), - final_value: h256_to_u256(value), + initial_value: h256_to_u256(prev_value_hash), + final_value: h256_to_u256(input_entry.value), }); } // Else we have a no-op update that must be omitted from `repeated_writes`. 
@@ -348,8 +349,11 @@ impl ZkSyncTree { (initial_writes, repeated_writes, state_diffs) } - fn process_l1_batch_lightweight(&mut self, storage_logs: &[StorageLog]) -> TreeMetadata { - let kvs = Self::filter_write_logs(storage_logs); + fn process_l1_batch_lightweight( + &mut self, + instructions: &[TreeInstruction], + ) -> TreeMetadata { + let kvs = Self::filter_write_instructions(instructions); let l1_batch_number = self.next_l1_batch_number(); tracing::info!( "Extending Merkle tree with batch #{l1_batch_number} with {kv_count} writes \ @@ -357,10 +361,10 @@ impl ZkSyncTree { kv_count = kvs.len() ); - let kvs_with_derived_key = kvs + let kvs_with_derived_key: Vec<_> = kvs .iter() - .map(|(k, v)| (k.hashed_key_u256(), *v)) - .collect::>(); + .map(|entry| entry.map_key(StorageKey::hashed_key_u256)) + .collect(); let output = if let Some(thread_pool) = &self.thread_pool { thread_pool.install(|| self.tree.extend(kvs_with_derived_key.clone())) @@ -390,14 +394,15 @@ impl ZkSyncTree { } } - fn filter_write_logs(storage_logs: &[StorageLog]) -> Vec<(StorageKey, ValueHash)> { - let kvs = storage_logs.iter().filter_map(|log| match log.kind { - StorageLogKind::Write => { - let key = log.key; - Some((key, log.value)) - } - StorageLogKind::Read => None, - }); + fn filter_write_instructions( + instructions: &[TreeInstruction], + ) -> Vec> { + let kvs = instructions + .iter() + .filter_map(|instruction| match instruction { + TreeInstruction::Write(entry) => Some(*entry), + TreeInstruction::Read(_) => None, + }); kvs.collect() } diff --git a/core/lib/merkle_tree/src/getters.rs b/core/lib/merkle_tree/src/getters.rs index 67ce2aa9877..7fd6bfc96ed 100644 --- a/core/lib/merkle_tree/src/getters.rs +++ b/core/lib/merkle_tree/src/getters.rs @@ -26,7 +26,7 @@ impl MerkleTree { let node = patch_set.get(longest_prefix); match node { Some(Node::Leaf(leaf)) if &leaf.full_key == leaf_key => (*leaf).into(), - _ => TreeEntry::empty(), + _ => TreeEntry::empty(*leaf_key), } }, ) @@ -76,11 +76,12 @@ 
impl MerkleTree { |patch_set, &leaf_key, longest_prefix| { let (leaf, merkle_path) = patch_set.create_proof(&mut hasher, leaf_key, longest_prefix, 0); - let value_hash = leaf + let value = leaf .as_ref() .map_or_else(ValueHash::zero, |leaf| leaf.value_hash); TreeEntry { - value_hash, + key: leaf_key, + value, leaf_index: leaf.map_or(0, |leaf| leaf.leaf_index), } .with_merkle_path(merkle_path.into_inner()) @@ -107,26 +108,26 @@ mod tests { let entries = tree.entries_with_proofs(0, &[missing_key]).unwrap(); assert_eq!(entries.len(), 1); assert!(entries[0].base.is_empty()); - entries[0].verify(&tree.hasher, missing_key, tree.hasher.empty_tree_hash()); + entries[0].verify(&tree.hasher, tree.hasher.empty_tree_hash()); } #[test] fn entries_in_single_node_tree() { let mut tree = MerkleTree::new(PatchSet::default()); let key = Key::from(987_654); - let output = tree.extend(vec![(key, ValueHash::repeat_byte(1))]); + let output = tree.extend(vec![TreeEntry::new(key, 1, ValueHash::repeat_byte(1))]); let missing_key = Key::from(123); let entries = tree.entries(0, &[key, missing_key]).unwrap(); assert_eq!(entries.len(), 2); - assert_eq!(entries[0].value_hash, ValueHash::repeat_byte(1)); + assert_eq!(entries[0].value, ValueHash::repeat_byte(1)); assert_eq!(entries[0].leaf_index, 1); let entries = tree.entries_with_proofs(0, &[key, missing_key]).unwrap(); assert_eq!(entries.len(), 2); assert!(!entries[0].base.is_empty()); - entries[0].verify(&tree.hasher, key, output.root_hash); + entries[0].verify(&tree.hasher, output.root_hash); assert!(entries[1].base.is_empty()); - entries[1].verify(&tree.hasher, missing_key, output.root_hash); + entries[1].verify(&tree.hasher, output.root_hash); } } diff --git a/core/lib/merkle_tree/src/hasher/mod.rs b/core/lib/merkle_tree/src/hasher/mod.rs index 8b2478c43d3..9425a5836f0 100644 --- a/core/lib/merkle_tree/src/hasher/mod.rs +++ b/core/lib/merkle_tree/src/hasher/mod.rs @@ -11,7 +11,7 @@ pub(crate) use self::nodes::{InternalNodeCache, 
MerklePath}; pub use self::proofs::TreeRangeDigest; use crate::{ metrics::HashingStats, - types::{Key, ValueHash, TREE_DEPTH}, + types::{TreeEntry, ValueHash, TREE_DEPTH}, }; use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; @@ -65,17 +65,11 @@ impl dyn HashTree + '_ { empty_hashes.chain(path.iter().copied()) } - fn fold_merkle_path( - &self, - path: &[ValueHash], - key: Key, - value_hash: ValueHash, - leaf_index: u64, - ) -> ValueHash { - let mut hash = self.hash_leaf(&value_hash, leaf_index); + fn fold_merkle_path(&self, path: &[ValueHash], entry: TreeEntry) -> ValueHash { + let mut hash = self.hash_leaf(&entry.value, entry.leaf_index); let full_path = self.extend_merkle_path(path); for (depth, adjacent_hash) in full_path.enumerate() { - hash = if key.bit(depth) { + hash = if entry.key.bit(depth) { self.hash_branch(&adjacent_hash, &hash) } else { self.hash_branch(&hash, &adjacent_hash) @@ -254,7 +248,7 @@ mod tests { let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); let key = key.hashed_key_u256(); - let leaf = LeafNode::new(key, H256([1; 32]), 1); + let leaf = LeafNode::new(TreeEntry::new(key, 1, H256([1; 32]))); let stats = HashingStats::default(); let mut hasher = (&Blake2Hasher as &dyn HashTree).with_stats(&stats); @@ -265,7 +259,7 @@ mod tests { assert!(stats.hashed_bytes.into_inner() > 100); let hasher: &dyn HashTree = &Blake2Hasher; - let folded_hash = hasher.fold_merkle_path(&[], key, H256([1; 32]), 1); + let folded_hash = hasher.fold_merkle_path(&[], leaf.into()); assert_eq!(folded_hash, EXPECTED_HASH); } @@ -274,7 +268,7 @@ mod tests { let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); let key = key.hashed_key_u256(); - let leaf = LeafNode::new(key, H256([1; 32]), 1); + let leaf = LeafNode::new(TreeEntry::new(key, 1, H256([1; 32]))); let mut 
hasher = HasherWithStats::new(&Blake2Hasher); let leaf_hash = leaf.hash(&mut hasher, 2); @@ -283,9 +277,7 @@ mod tests { let expected_hash = hasher.hash_branch(&merkle_path[0], &leaf_hash); let expected_hash = hasher.hash_branch(&expected_hash, &merkle_path[1]); - let folded_hash = hasher - .inner - .fold_merkle_path(&merkle_path, key, H256([1; 32]), 1); + let folded_hash = hasher.inner.fold_merkle_path(&merkle_path, leaf.into()); assert_eq!(folded_hash, expected_hash); } } diff --git a/core/lib/merkle_tree/src/hasher/proofs.rs b/core/lib/merkle_tree/src/hasher/proofs.rs index d97df0ad97d..49d4bfe9295 100644 --- a/core/lib/merkle_tree/src/hasher/proofs.rs +++ b/core/lib/merkle_tree/src/hasher/proofs.rs @@ -22,36 +22,37 @@ impl BlockOutputWithProofs { &self, hasher: &dyn HashTree, old_root_hash: ValueHash, - instructions: &[(Key, TreeInstruction)], + instructions: &[TreeInstruction], ) { assert_eq!(instructions.len(), self.logs.len()); let mut root_hash = old_root_hash; - for (op, &(key, instruction)) in self.logs.iter().zip(instructions) { + for (op, &instruction) in self.logs.iter().zip(instructions) { assert!(op.merkle_path.len() <= TREE_DEPTH); - if matches!(instruction, TreeInstruction::Read) { + if matches!(instruction, TreeInstruction::Read(_)) { assert_eq!(op.root_hash, root_hash); assert!(op.base.is_read()); } else { assert!(!op.base.is_read()); } - let (prev_leaf_index, leaf_index, prev_value) = match op.base { - TreeLogEntry::Inserted { leaf_index } => (0, leaf_index, ValueHash::zero()), + let prev_entry = match op.base { + TreeLogEntry::Inserted | TreeLogEntry::ReadMissingKey => { + TreeEntry::empty(instruction.key()) + } TreeLogEntry::Updated { leaf_index, - previous_value, - } => (leaf_index, leaf_index, previous_value), - - TreeLogEntry::Read { leaf_index, value } => (leaf_index, leaf_index, value), - TreeLogEntry::ReadMissingKey => (0, 0, ValueHash::zero()), + previous_value: value, + } + | TreeLogEntry::Read { leaf_index, value } => { + 
TreeEntry::new(instruction.key(), leaf_index, value) + } }; - let prev_hash = - hasher.fold_merkle_path(&op.merkle_path, key, prev_value, prev_leaf_index); + let prev_hash = hasher.fold_merkle_path(&op.merkle_path, prev_entry); assert_eq!(prev_hash, root_hash); - if let TreeInstruction::Write(value) = instruction { - let next_hash = hasher.fold_merkle_path(&op.merkle_path, key, value, leaf_index); + if let TreeInstruction::Write(new_entry) = instruction { + let next_hash = hasher.fold_merkle_path(&op.merkle_path, new_entry); assert_eq!(next_hash, op.root_hash); } root_hash = op.root_hash; @@ -65,19 +66,14 @@ impl TreeEntryWithProof { /// # Panics /// /// Panics if the proof doesn't verify. - pub fn verify(&self, hasher: &dyn HashTree, key: Key, trusted_root_hash: ValueHash) { + pub fn verify(&self, hasher: &dyn HashTree, trusted_root_hash: ValueHash) { if self.base.leaf_index == 0 { assert!( - self.base.value_hash.is_zero(), + self.base.value.is_zero(), "Invalid missing value specification: leaf index is zero, but value is non-default" ); } - let root_hash = hasher.fold_merkle_path( - &self.merkle_path, - key, - self.base.value_hash, - self.base.leaf_index, - ); + let root_hash = hasher.fold_merkle_path(&self.merkle_path, self.base); assert_eq!(root_hash, trusted_root_hash, "Root hash mismatch"); } } @@ -146,11 +142,7 @@ impl<'a> TreeRangeDigest<'a> { let left_contour: Vec<_> = left_contour.collect(); Self { hasher: HasherWithStats::new(hasher), - current_leaf: LeafNode::new( - start_key, - start_entry.base.value_hash, - start_entry.base.leaf_index, - ), + current_leaf: LeafNode::new(start_entry.base), left_contour: left_contour.try_into().unwrap(), // ^ `unwrap()` is safe by construction; `left_contour` will always have necessary length } @@ -161,13 +153,13 @@ impl<'a> TreeRangeDigest<'a> { /// # Panics /// /// Panics if the provided `key` is not greater than the previous key provided to this digest. 
- pub fn update(&mut self, key: Key, entry: TreeEntry) { + pub fn update(&mut self, entry: TreeEntry) { assert!( - key > self.current_leaf.full_key, + entry.key > self.current_leaf.full_key, "Keys provided to a digest must be monotonically increasing" ); - let diverging_level = utils::find_diverging_bit(self.current_leaf.full_key, key) + 1; + let diverging_level = utils::find_diverging_bit(self.current_leaf.full_key, entry.key) + 1; // Hash the current leaf up to the `diverging_level`, taking current `left_contour` into account. let mut hash = self @@ -188,7 +180,7 @@ impl<'a> TreeRangeDigest<'a> { } // Record the computed hash. self.left_contour[TREE_DEPTH - diverging_level] = hash; - self.current_leaf = LeafNode::new(key, entry.value_hash, entry.leaf_index); + self.current_leaf = LeafNode::new(entry); } /// Finalizes this digest and returns the root hash of the tree. @@ -196,8 +188,8 @@ impl<'a> TreeRangeDigest<'a> { /// # Panics /// /// Panics if the provided `final_key` is not greater than the previous key provided to this digest. 
- pub fn finalize(mut self, final_key: Key, final_entry: &TreeEntryWithProof) -> ValueHash { - self.update(final_key, final_entry.base); + pub fn finalize(mut self, final_entry: &TreeEntryWithProof) -> ValueHash { + self.update(final_entry.base); let full_path = self .hasher @@ -206,9 +198,9 @@ impl<'a> TreeRangeDigest<'a> { let zipped_paths = self.left_contour.into_iter().zip(full_path); let mut hash = self .hasher - .hash_leaf(&final_entry.base.value_hash, final_entry.base.leaf_index); + .hash_leaf(&final_entry.base.value, final_entry.base.leaf_index); for (depth, (left, right)) in zipped_paths.enumerate() { - hash = if final_key.bit(depth) { + hash = if final_entry.base.key.bit(depth) { self.hasher.hash_branch(&left, &hash) } else { self.hasher.hash_branch(&hash, &right) diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 166400cbb64..85ace50aada 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -26,10 +26,15 @@ //! - Hash of a vacant leaf is `hash([0_u8; 40])`, where `hash` is the hash function used //! (Blake2s-256). //! - Hash of an occupied leaf is `hash(u64::to_be_bytes(leaf_index) ++ value_hash)`, -//! where `leaf_index` is the 1-based index of the leaf key in the order of insertion, +//! where `leaf_index` is a 1-based index of the leaf key provided when the leaf is inserted / updated, //! `++` is byte concatenation. //! - Hash of an internal node is `hash(left_child_hash ++ right_child_hash)`. //! +//! Currently in zksync, leaf indices enumerate leaves in the order of their insertion into the tree. +//! Indices are computed externally and are provided to the tree as inputs; the tree doesn't verify +//! index assignment and doesn't rely on particular index assignment assumptions (other than when +//! [verifying tree consistency](MerkleTree::verify_consistency())). +//! //! [Jellyfish Merkle tree]: https://developers.diem.com/papers/jellyfish-merkle-tree/2021-01-14.pdf // Linter settings. 
@@ -209,10 +214,10 @@ impl MerkleTree { /// # Return value /// /// Returns information about the update such as the final tree hash. - pub fn extend(&mut self, key_value_pairs: Vec<(Key, ValueHash)>) -> BlockOutput { + pub fn extend(&mut self, entries: Vec) -> BlockOutput { let next_version = self.db.manifest().unwrap_or_default().version_count; let storage = Storage::new(&self.db, &self.hasher, next_version, true); - let (output, patch) = storage.extend(key_value_pairs); + let (output, patch) = storage.extend(entries); self.db.apply_patch(patch); output } @@ -226,7 +231,7 @@ impl MerkleTree { /// instruction. pub fn extend_with_proofs( &mut self, - instructions: Vec<(Key, TreeInstruction)>, + instructions: Vec, ) -> BlockOutputWithProofs { let next_version = self.db.manifest().unwrap_or_default().version_count; let storage = Storage::new(&self.db, &self.hasher, next_version, true); diff --git a/core/lib/merkle_tree/src/pruning.rs b/core/lib/merkle_tree/src/pruning.rs index 21a3e8712fd..5b1911ca600 100644 --- a/core/lib/merkle_tree/src/pruning.rs +++ b/core/lib/merkle_tree/src/pruning.rs @@ -187,7 +187,7 @@ mod tests { use super::*; use crate::{ types::{Node, NodeKey}, - Database, Key, MerkleTree, PatchSet, ValueHash, + Database, Key, MerkleTree, PatchSet, TreeEntry, ValueHash, }; fn create_db() -> PatchSet { @@ -195,7 +195,7 @@ mod tests { for i in 0..5 { let key = Key::from(i); let value = ValueHash::from_low_u64_be(i); - MerkleTree::new(&mut db).extend(vec![(key, value)]); + MerkleTree::new(&mut db).extend(vec![TreeEntry::new(key, i + 1, value)]); } db } @@ -245,9 +245,9 @@ mod tests { assert!(start.elapsed() < Duration::from_secs(10)); } - fn generate_key_value_pairs(indexes: impl Iterator) -> Vec<(Key, ValueHash)> { + fn generate_key_value_pairs(indexes: impl Iterator) -> Vec { indexes - .map(|i| (Key::from(i), ValueHash::from_low_u64_be(i))) + .map(|i| TreeEntry::new(Key::from(i), i + 1, ValueHash::from_low_u64_be(i))) .collect() } @@ -273,7 +273,7 @@ mod 
tests { let mut tree = MerkleTree::new(&mut db); for version in first_retained_version..=latest_version { - tree.verify_consistency(version).unwrap(); + tree.verify_consistency(version, true).unwrap(); } let kvs = generate_key_value_pairs(100..200); @@ -290,7 +290,7 @@ mod tests { let tree = MerkleTree::new(&mut db); for version in first_retained_version..=latest_version { - tree.verify_consistency(version).unwrap(); + tree.verify_consistency(version, true).unwrap(); } assert_no_stale_keys(&db, first_retained_version); } @@ -318,8 +318,8 @@ mod tests { const ITERATIVE_BATCH_COUNT: usize = 10; let mut db = PatchSet::default(); - let kvs: Vec<_> = (0_u32..100) - .map(|i| (Key::from(i), ValueHash::zero())) + let kvs: Vec<_> = (0_u64..100) + .map(|i| TreeEntry::new(Key::from(i), i + 1, ValueHash::zero())) .collect(); let batch_count = if initialize_iteratively { @@ -335,8 +335,8 @@ mod tests { // Completely overwrite all keys. let new_value_hash = ValueHash::from_low_u64_be(1_000); - let new_kvs = (0_u32..100) - .map(|i| (Key::from(i), new_value_hash)) + let new_kvs = (0_u64..100) + .map(|i| TreeEntry::new(Key::from(i), i + 1, new_value_hash)) .collect(); MerkleTree::new(&mut db).extend(new_kvs); @@ -364,16 +364,16 @@ mod tests { prune_iteratively: bool, ) { let mut db = PatchSet::default(); - let kvs: Vec<_> = (0_u32..100) - .map(|i| (Key::from(i), ValueHash::zero())) + let kvs: Vec<_> = (0_u64..100) + .map(|i| TreeEntry::new(Key::from(i), i + 1, ValueHash::zero())) .collect(); MerkleTree::new(&mut db).extend(kvs); let leaf_keys_in_db = leaf_keys(&mut db); // Completely overwrite all keys in several batches. 
let new_value_hash = ValueHash::from_low_u64_be(1_000); - let new_kvs: Vec<_> = (0_u32..100) - .map(|i| (Key::from(i), new_value_hash)) + let new_kvs: Vec<_> = (0_u64..100) + .map(|i| TreeEntry::new(Key::from(i), i + 1, new_value_hash)) .collect(); for chunk in new_kvs.chunks(20) { MerkleTree::new(&mut db).extend(chunk.to_vec()); diff --git a/core/lib/merkle_tree/src/recovery.rs b/core/lib/merkle_tree/src/recovery.rs index 85ac578cc0a..d1f2618a5cd 100644 --- a/core/lib/merkle_tree/src/recovery.rs +++ b/core/lib/merkle_tree/src/recovery.rs @@ -40,23 +40,11 @@ use std::time::Instant; use crate::{ hasher::{HashTree, HasherWithStats}, storage::{PatchSet, PruneDatabase, PrunePatchSet, Storage}, - types::{Key, Manifest, Root, TreeTags, ValueHash}, + types::{Key, Manifest, Root, TreeEntry, TreeTags, ValueHash}, MerkleTree, }; use zksync_crypto::hasher::blake2::Blake2Hasher; -/// Entry in a Merkle tree used during recovery. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct RecoveryEntry { - /// Entry key. - pub key: Key, - /// Entry value. - pub value: ValueHash, - /// Leaf index associated with the entry. It is **not** checked whether leaf indices are well-formed - /// during recovery (e.g., that they are unique). - pub leaf_index: u64, -} - /// Handle to a Merkle tree during its recovery. 
#[derive(Debug)] pub struct MerkleTreeRecovery { @@ -154,7 +142,7 @@ impl MerkleTreeRecovery { %entries.key_range = entries_key_range(&entries), ), )] - pub fn extend_linear(&mut self, entries: Vec) { + pub fn extend_linear(&mut self, entries: Vec) { tracing::debug!("Started extending tree"); let started_at = Instant::now(); @@ -177,7 +165,7 @@ impl MerkleTreeRecovery { entries.len = entries.len(), ), )] - pub fn extend_random(&mut self, entries: Vec) { + pub fn extend_random(&mut self, entries: Vec) { tracing::debug!("Started extending tree"); let started_at = Instant::now(); @@ -242,7 +230,7 @@ impl MerkleTreeRecovery { } } -fn entries_key_range(entries: &[RecoveryEntry]) -> String { +fn entries_key_range(entries: &[TreeEntry]) -> String { let (Some(first), Some(last)) = (entries.first(), entries.last()) else { return "(empty)".to_owned(); }; @@ -280,11 +268,7 @@ mod tests { #[test] fn recovering_tree_with_single_node() { let mut recovery = MerkleTreeRecovery::new(PatchSet::default(), 42); - let recovery_entry = RecoveryEntry { - key: Key::from(123), - value: ValueHash::repeat_byte(1), - leaf_index: 1, - }; + let recovery_entry = TreeEntry::new(Key::from(123), 1, ValueHash::repeat_byte(1)); recovery.extend_linear(vec![recovery_entry]); let tree = recovery.finalize(); @@ -292,13 +276,8 @@ mod tests { let mut hasher = HasherWithStats::new(&Blake2Hasher); assert_eq!( tree.latest_root_hash(), - LeafNode::new( - recovery_entry.key, - recovery_entry.value, - recovery_entry.leaf_index - ) - .hash(&mut hasher, 0) + LeafNode::new(recovery_entry).hash(&mut hasher, 0) ); - tree.verify_consistency(42).unwrap(); + tree.verify_consistency(42, true).unwrap(); } } diff --git a/core/lib/merkle_tree/src/storage/mod.rs b/core/lib/merkle_tree/src/storage/mod.rs index c5a56abfca9..ae273d22f32 100644 --- a/core/lib/merkle_tree/src/storage/mod.rs +++ b/core/lib/merkle_tree/src/storage/mod.rs @@ -18,12 +18,10 @@ pub use self::{ use crate::{ hasher::HashTree, metrics::{TreeUpdaterStats, 
BLOCK_TIMINGS, GENERAL_METRICS}, - recovery::RecoveryEntry, types::{ BlockOutput, ChildRef, InternalNode, Key, LeafNode, Manifest, Nibbles, Node, Root, - TreeLogEntry, TreeTags, ValueHash, + TreeEntry, TreeLogEntry, TreeTags, ValueHash, }, - utils::increment_counter, }; /// Tree operation: either inserting a new version or updating an existing one (the latter is only @@ -132,17 +130,17 @@ impl TreeUpdater { /// hashes for all updated nodes in [`Self::finalize()`]. fn insert( &mut self, - key: Key, - value_hash: ValueHash, + entry: TreeEntry, parent_nibbles: &Nibbles, - leaf_index_fn: impl FnOnce() -> u64, ) -> (TreeLogEntry, NewLeafData) { let version = self.patch_set.root_version(); + let key = entry.key; + let traverse_outcome = self.patch_set.traverse(key, parent_nibbles); let (log, leaf_data) = match traverse_outcome { TraverseOutcome::LeafMatch(nibbles, mut leaf) => { - let log = TreeLogEntry::update(leaf.value_hash, leaf.leaf_index); - leaf.value_hash = value_hash; + let log = TreeLogEntry::update(leaf.leaf_index, leaf.value_hash); + leaf.update_from(entry); self.patch_set.insert(nibbles, leaf.into()); self.metrics.updated_leaves += 1; (log, NewLeafData::new(nibbles, leaf)) @@ -173,23 +171,20 @@ impl TreeUpdater { nibble_idx += 1; } - let leaf_index = leaf_index_fn(); - let new_leaf = LeafNode::new(key, value_hash, leaf_index); + let new_leaf = LeafNode::new(entry); let new_leaf_nibbles = Nibbles::new(&key, nibble_idx + 1); let leaf_data = NewLeafData::new(new_leaf_nibbles, new_leaf); let moved_leaf_nibbles = Nibbles::new(&leaf.full_key, nibble_idx + 1); let leaf_data = leaf_data.with_adjacent_leaf(moved_leaf_nibbles, leaf); - (TreeLogEntry::insert(leaf_index), leaf_data) + (TreeLogEntry::Inserted, leaf_data) } TraverseOutcome::MissingChild(nibbles) if nibbles.nibble_count() == 0 => { // The root is currently empty; we replace it with a leaf. 
- let leaf_index = leaf_index_fn(); - debug_assert_eq!(leaf_index, 1); - let root_leaf = LeafNode::new(key, value_hash, leaf_index); + let root_leaf = LeafNode::new(entry); self.set_root_node(root_leaf.into()); let leaf_data = NewLeafData::new(Nibbles::EMPTY, root_leaf); - (TreeLogEntry::insert(1), leaf_data) + (TreeLogEntry::Inserted, leaf_data) } TraverseOutcome::MissingChild(nibbles) => { @@ -198,10 +193,9 @@ impl TreeUpdater { unreachable!("Node parent must be an internal node"); }; parent.insert_child_ref(last_nibble, ChildRef::leaf(version)); - let leaf_index = leaf_index_fn(); - let new_leaf = LeafNode::new(key, value_hash, leaf_index); + let new_leaf = LeafNode::new(entry); let leaf_data = NewLeafData::new(nibbles, new_leaf); - (TreeLogEntry::insert(leaf_index), leaf_data) + (TreeLogEntry::Inserted, leaf_data) } }; @@ -289,19 +283,20 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { /// Extends the Merkle tree in the lightweight operation mode, without intermediate hash /// computations. 
- pub fn extend(mut self, key_value_pairs: Vec<(Key, ValueHash)>) -> (BlockOutput, PatchSet) { + pub fn extend(mut self, entries: Vec) -> (BlockOutput, PatchSet) { let load_nodes_latency = BLOCK_TIMINGS.load_nodes.start(); - let sorted_keys = SortedKeys::new(key_value_pairs.iter().map(|(key, _)| *key)); + let sorted_keys = SortedKeys::new(entries.iter().map(|entry| entry.key)); let parent_nibbles = self.updater.load_ancestors(&sorted_keys, self.db); let load_nodes_latency = load_nodes_latency.observe(); tracing::debug!("Load stage took {load_nodes_latency:?}"); let extend_patch_latency = BLOCK_TIMINGS.extend_patch.start(); - let mut logs = Vec::with_capacity(key_value_pairs.len()); - for ((key, value_hash), parent_nibbles) in key_value_pairs.into_iter().zip(parent_nibbles) { - let (log, _) = self.updater.insert(key, value_hash, &parent_nibbles, || { - increment_counter(&mut self.leaf_count) - }); + let mut logs = Vec::with_capacity(entries.len()); + for (entry, parent_nibbles) in entries.into_iter().zip(parent_nibbles) { + let (log, _) = self.updater.insert(entry, &parent_nibbles); + if matches!(log, TreeLogEntry::Inserted) { + self.leaf_count += 1; + } logs.push(log); } let extend_patch_latency = extend_patch_latency.observe(); @@ -321,10 +316,7 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { Some(self.updater.load_greatest_key(self.db)?.0.full_key) } - pub fn extend_during_linear_recovery( - mut self, - recovery_entries: Vec, - ) -> PatchSet { + pub fn extend_during_linear_recovery(mut self, recovery_entries: Vec) -> PatchSet { let (mut prev_key, mut prev_nibbles) = match self.updater.load_greatest_key(self.db) { Some((leaf, nibbles)) => (Some(leaf.full_key), nibbles), None => (None, Nibbles::EMPTY), @@ -343,9 +335,7 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { let key_nibbles = Nibbles::new(&entry.key, prev_nibbles.nibble_count()); let parent_nibbles = prev_nibbles.common_prefix(&key_nibbles); - let (_, new_leaf) = - self.updater - 
.insert(entry.key, entry.value, &parent_nibbles, || entry.leaf_index); + let (_, new_leaf) = self.updater.insert(entry, &parent_nibbles); prev_nibbles = new_leaf.nibbles; self.leaf_count += 1; } @@ -356,10 +346,7 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { patch } - pub fn extend_during_random_recovery( - mut self, - recovery_entries: Vec, - ) -> PatchSet { + pub fn extend_during_random_recovery(mut self, recovery_entries: Vec) -> PatchSet { let load_nodes_latency = BLOCK_TIMINGS.load_nodes.start(); let sorted_keys = SortedKeys::new(recovery_entries.iter().map(|entry| entry.key)); let parent_nibbles = self.updater.load_ancestors(&sorted_keys, self.db); @@ -368,8 +355,7 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { let extend_patch_latency = BLOCK_TIMINGS.extend_patch.start(); for (entry, parent_nibbles) in recovery_entries.into_iter().zip(parent_nibbles) { - self.updater - .insert(entry.key, entry.value, &parent_nibbles, || entry.leaf_index); + self.updater.insert(entry, &parent_nibbles); self.leaf_count += 1; } let extend_patch_latency = extend_patch_latency.observe(); diff --git a/core/lib/merkle_tree/src/storage/patch.rs b/core/lib/merkle_tree/src/storage/patch.rs index 6d0c38d6c9f..ff41fb2f6bf 100644 --- a/core/lib/merkle_tree/src/storage/patch.rs +++ b/core/lib/merkle_tree/src/storage/patch.rs @@ -680,7 +680,7 @@ mod tests { use super::*; use crate::{ storage::Storage, - types::{Key, LeafNode}, + types::{Key, LeafNode, TreeEntry}, }; fn patch_len(patch: &WorkingPatchSet) -> usize { @@ -697,7 +697,7 @@ mod tests { let key = Key::from_little_endian(&[i; 32]); let nibbles = Nibbles::new(&key, 2 + usize::from(i) % 4); // ^ We need nibble count at least 2 for all `nibbles` to be distinct. - let leaf = LeafNode::new(key, ValueHash::zero(), i.into()); + let leaf = LeafNode::new(TreeEntry::new(key, i.into(), ValueHash::zero())); patch.insert(nibbles, leaf.into()); nibbles }); @@ -742,7 +742,8 @@ mod tests { // Test DB with a single entry. 
let mut db = PatchSet::default(); let key = Key::from(1234_u64); - let (_, patch) = Storage::new(&db, &(), 0, true).extend(vec![(key, ValueHash::zero())]); + let (_, patch) = + Storage::new(&db, &(), 0, true).extend(vec![TreeEntry::new(key, 1, ValueHash::zero())]); db.apply_patch(patch); let mut patch = WorkingPatchSet::new(1, db.root(0).unwrap()); @@ -754,8 +755,11 @@ mod tests { // Test DB with multiple entries. let other_key = Key::from_little_endian(&[0xa0; 32]); - let (_, patch) = - Storage::new(&db, &(), 1, true).extend(vec![(other_key, ValueHash::zero())]); + let (_, patch) = Storage::new(&db, &(), 1, true).extend(vec![TreeEntry::new( + other_key, + 2, + ValueHash::zero(), + )]); db.apply_patch(patch); let mut patch = WorkingPatchSet::new(2, db.root(1).unwrap()); @@ -766,8 +770,11 @@ mod tests { assert_eq!(load_result.db_reads, 1); let greater_key = Key::from_little_endian(&[0xaf; 32]); - let (_, patch) = - Storage::new(&db, &(), 2, true).extend(vec![(greater_key, ValueHash::zero())]); + let (_, patch) = Storage::new(&db, &(), 2, true).extend(vec![TreeEntry::new( + greater_key, + 3, + ValueHash::zero(), + )]); db.apply_patch(patch); let mut patch = WorkingPatchSet::new(3, db.root(2).unwrap()); diff --git a/core/lib/merkle_tree/src/storage/proofs.rs b/core/lib/merkle_tree/src/storage/proofs.rs index 9e2d172bd6b..81f140088d3 100644 --- a/core/lib/merkle_tree/src/storage/proofs.rs +++ b/core/lib/merkle_tree/src/storage/proofs.rs @@ -15,26 +15,6 @@ //! with root at level 4 (= 1 nibble). Thus, the patch sets and Merkle proofs //! produced by each group are mostly disjoint; they intersect only at the root node level. //! -//! ## Computing leaf indices -//! -//! We need to determine leaf indices for all write instructions. Indices potentially depend -//! on the entire list of `instructions`, so we should determine leaf indices before -//! parallelization. Otherwise, we'd need to sync between parallelized tasks, which defeats -//! the purpose of parallelization. 
-//! -//! We precompute indices as a separate step using the following observations: -//! -//! - If a leaf is present in the tree *before* `instructions` are applied, its index -//! can be obtained from the node ancestors loaded on the first step of the process. -//! - Otherwise, a leaf may have been added by a previous instruction for the same key. -//! Since we already need [`SortedKeys`] to efficiently load ancestors, it's easy -//! to determine such pairs of instructions. -//! - Otherwise, we have a first write, and the leaf index is defined as the current leaf -//! count. -//! -//! In summary, we can determine leaf indices for all write `instructions` in linear time -//! and without synchronization required during the parallel steps of the process. -//! //! ## Merging Merkle proofs //! //! The proofs produced by different groups only intersect at levels 0..4. This can be dealt with @@ -68,7 +48,7 @@ use crate::{ BlockOutputWithProofs, InternalNode, Key, Nibbles, Node, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, }, - utils::{increment_counter, merge_by_index}, + utils::merge_by_index, }; /// Number of subtrees used for parallel computations. 
@@ -93,16 +73,13 @@ impl TreeUpdater { for instruction in instructions { let InstructionWithPrecomputes { index, - key, instruction, parent_nibbles, - leaf_index, } = instruction; let log = match instruction { - TreeInstruction::Write(value_hash) => { - let (log, leaf_data) = - self.insert(key, value_hash, &parent_nibbles, || leaf_index); + TreeInstruction::Write(entry) => { + let (log, leaf_data) = self.insert(entry, &parent_nibbles); let (new_root_hash, merkle_path) = self.update_node_hashes(hasher, &leaf_data); root_hash = new_root_hash; TreeLogEntryWithProof { @@ -111,7 +88,7 @@ impl TreeUpdater { root_hash, } } - TreeInstruction::Read => { + TreeInstruction::Read(key) => { let (log, merkle_path) = self.prove(hasher, key, &parent_nibbles); TreeLogEntryWithProof { base: log, @@ -183,7 +160,7 @@ impl TreeUpdater { self.patch_set .create_proof(hasher, key, parent_nibbles, SUBTREE_ROOT_LEVEL / 4); let operation = leaf.map_or(TreeLogEntry::ReadMissingKey, |leaf| { - TreeLogEntry::read(leaf.value_hash, leaf.leaf_index) + TreeLogEntry::read(leaf.leaf_index, leaf.value_hash) }); if matches!(operation, TreeLogEntry::ReadMissingKey) { @@ -259,16 +236,14 @@ impl TreeUpdater { impl<'a, DB: Database + ?Sized> Storage<'a, DB> { pub fn extend_with_proofs( mut self, - instructions: Vec<(Key, TreeInstruction)>, + instructions: Vec, ) -> (BlockOutputWithProofs, PatchSet) { let load_nodes_latency = BLOCK_TIMINGS.load_nodes.start(); - let sorted_keys = SortedKeys::new(instructions.iter().map(|(key, _)| *key)); + let sorted_keys = SortedKeys::new(instructions.iter().map(TreeInstruction::key)); let parent_nibbles = self.updater.load_ancestors(&sorted_keys, self.db); load_nodes_latency.observe(); - let leaf_indices = self.compute_leaf_indices(&instructions, sorted_keys, &parent_nibbles); - let instruction_parts = - InstructionWithPrecomputes::split(instructions, parent_nibbles, leaf_indices); + let instruction_parts = InstructionWithPrecomputes::split(instructions, parent_nibbles); 
let initial_root = self.updater.patch_set.ensure_internal_root_node(); let initial_metrics = self.updater.metrics; let storage_parts = self.updater.split(); @@ -310,44 +285,13 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { output_with_proofs } - /// Computes leaf indices for all writes in `instructions`. Leaf indices are not used for reads; - /// thus, the corresponding entries are always 0. - fn compute_leaf_indices( - &mut self, - instructions: &[(Key, TreeInstruction)], - mut sorted_keys: SortedKeys, - parent_nibbles: &[Nibbles], - ) -> Vec { - sorted_keys.remove_read_instructions(instructions); - let key_mentions = sorted_keys.key_mentions(instructions.len()); - let patch_set = &self.updater.patch_set; - - let mut leaf_indices = Vec::with_capacity(instructions.len()); - let it = instructions.iter().zip(parent_nibbles).enumerate(); - for (idx, ((key, instruction), nibbles)) in it { - let leaf_index = match (instruction, key_mentions[idx]) { - (TreeInstruction::Read, _) => 0, - // ^ Leaf indices are not used for read instructions. 
- (TreeInstruction::Write(_), KeyMention::First) => { - let leaf_index = match patch_set.get(nibbles) { - Some(Node::Leaf(leaf)) if leaf.full_key == *key => Some(leaf.leaf_index), - _ => None, - }; - leaf_index.unwrap_or_else(|| increment_counter(&mut self.leaf_count)) - } - (TreeInstruction::Write(_), KeyMention::SameAs(prev_idx)) => leaf_indices[prev_idx], - }; - leaf_indices.push(leaf_index); - } - leaf_indices - } - fn finalize_with_proofs( mut self, hasher: &mut HasherWithStats<'_>, root: InternalNode, logs: Vec<(usize, TreeLogEntryWithProof)>, ) -> (BlockOutputWithProofs, PatchSet) { + self.leaf_count += self.updater.metrics.new_leaves; tracing::debug!( "Finished updating tree; total leaf count: {}, stats: {:?}", self.leaf_count, @@ -370,95 +314,35 @@ impl<'a, DB: Database + ?Sized> Storage<'a, DB> { } } -/// Mention of a key in a block: either the first mention, or the same mention as the specified -/// 0-based index in the block. -#[derive(Debug, Clone, Copy)] -enum KeyMention { - First, - SameAs(usize), -} - -impl SortedKeys { - fn remove_read_instructions(&mut self, instructions: &[(Key, TreeInstruction)]) { - debug_assert_eq!(instructions.len(), self.0.len()); - - self.0.retain(|(idx, key)| { - let (key_for_instruction, instruction) = &instructions[*idx]; - debug_assert_eq!(key_for_instruction, key); - matches!(instruction, TreeInstruction::Write(_)) - }); - } - - /// Determines for the original sequence of `Key`s whether a particular key mention - /// is the first one, or it follows after another mention. - fn key_mentions(&self, original_len: usize) -> Vec { - debug_assert!(original_len >= self.0.len()); - - let mut flags = vec![KeyMention::First; original_len]; - let [(mut first_key_mention, mut prev_key), tail @ ..] = self.0.as_slice() else { - return flags; - }; - - // Note that `SameAs(_)` doesn't necessarily reference the first mention of a key, - // just one with a lesser index. This is OK for our purposes. 
- for &(idx, key) in tail { - if prev_key == key { - if idx > first_key_mention { - flags[idx] = KeyMention::SameAs(first_key_mention); - } else { - debug_assert!(idx < first_key_mention); // all indices should be unique - flags[first_key_mention] = KeyMention::SameAs(idx); - first_key_mention = idx; - } - } else { - prev_key = key; - first_key_mention = idx; - } - } - flags - } -} - /// [`TreeInstruction`] together with precomputed data necessary to efficiently parallelize /// Merkle tree traversal. #[derive(Debug)] struct InstructionWithPrecomputes { /// 0-based index of the instruction. index: usize, - /// Key read / written by the instruction. - key: Key, instruction: TreeInstruction, /// Nibbles for the parent node computed by [`Storage::load_ancestors()`]. parent_nibbles: Nibbles, - /// Leaf index for the operation computed by [`Storage::compute_leaf_indices()`]. - /// Always 0 for reads. - leaf_index: u64, } impl InstructionWithPrecomputes { /// Creates groups of instructions to be used during parallelized tree traversal. fn split( - instructions: Vec<(Key, TreeInstruction)>, + instructions: Vec, parent_nibbles: Vec, - leaf_indices: Vec, ) -> [Vec; SUBTREE_COUNT] { const EMPTY_VEC: Vec = Vec::new(); // ^ Need to extract this to a constant to be usable as an array initializer. 
let mut parts = [EMPTY_VEC; SUBTREE_COUNT]; - let it = instructions - .into_iter() - .zip(parent_nibbles) - .zip(leaf_indices); - for (index, (((key, instruction), parent_nibbles), leaf_index)) in it.enumerate() { - let first_nibble = Nibbles::nibble(&key, 0); + let it = instructions.into_iter().zip(parent_nibbles); + for (index, (instruction, parent_nibbles)) in it.enumerate() { + let first_nibble = Nibbles::nibble(&instruction.key(), 0); let part = &mut parts[first_nibble as usize]; part.push(Self { index, - key, instruction, parent_nibbles, - leaf_index, }); } parts @@ -472,8 +356,6 @@ mod tests { use super::*; use crate::types::Root; - const HASH: ValueHash = ValueHash::zero(); - fn byte_key(byte: u8) -> Key { Key::from_little_endian(&[byte; 32]) } @@ -485,88 +367,14 @@ mod tests { assert_eq!(sorted_keys.0, [1, 3, 4, 0, 2].map(|i| (i, keys[i]))); } - #[test] - fn computing_key_mentions() { - let keys = [4, 1, 3, 4, 3, 3].map(byte_key); - let sorted_keys = SortedKeys::new(keys.into_iter()); - let mentions = sorted_keys.key_mentions(6); - - assert_matches!( - mentions.as_slice(), - [ - KeyMention::First, KeyMention::First, KeyMention::First, - KeyMention::SameAs(0), KeyMention::SameAs(2), KeyMention::SameAs(i) - ] if *i == 2 || *i == 4 - ); - } - - #[test] - fn computing_leaf_indices() { - let db = prepare_db(); - let (instructions, expected_indices) = get_instructions_and_leaf_indices(); - let mut storage = Storage::new(&db, &(), 1, true); - let sorted_keys = SortedKeys::new(instructions.iter().map(|(key, _)| *key)); - let parent_nibbles = storage.updater.load_ancestors(&sorted_keys, &db); - - let leaf_indices = - storage.compute_leaf_indices(&instructions, sorted_keys, &parent_nibbles); - assert_eq!(leaf_indices, expected_indices); - } - - fn prepare_db() -> PatchSet { - let mut db = PatchSet::default(); - let (_, patch) = - Storage::new(&db, &(), 0, true).extend(vec![(byte_key(2), HASH), (byte_key(1), HASH)]); - db.apply_patch(patch); - db - } - - fn 
get_instructions_and_leaf_indices() -> (Vec<(Key, TreeInstruction)>, Vec) { - let instructions_and_indices = vec![ - (byte_key(3), TreeInstruction::Read, 0), - (byte_key(1), TreeInstruction::Write(HASH), 2), - (byte_key(2), TreeInstruction::Read, 0), - (byte_key(3), TreeInstruction::Write(HASH), 3), - (byte_key(1), TreeInstruction::Read, 0), - (byte_key(3), TreeInstruction::Write(HASH), 3), - (byte_key(2), TreeInstruction::Write(HASH), 1), - (byte_key(0xc0), TreeInstruction::Write(HASH), 4), - (byte_key(2), TreeInstruction::Write(HASH), 1), - ]; - instructions_and_indices - .into_iter() - .map(|(key, instr, idx)| ((key, instr), idx)) - .unzip() - } - - #[test] - fn extending_storage_with_proofs() { - let db = prepare_db(); - let (instructions, expected_indices) = get_instructions_and_leaf_indices(); - let storage = Storage::new(&db, &(), 1, true); - let (block_output, _) = storage.extend_with_proofs(instructions); - assert_eq!(block_output.leaf_count, 4); - - assert_eq!(block_output.logs.len(), expected_indices.len()); - for (expected_idx, log) in expected_indices.into_iter().zip(&block_output.logs) { - match log.base { - TreeLogEntry::Inserted { leaf_index } - | TreeLogEntry::Updated { leaf_index, .. 
} => { - assert_eq!(leaf_index, expected_idx); - } - _ => {} - } - } - } - #[test] fn proofs_for_empty_storage() { let db = PatchSet::default(); let storage = Storage::new(&db, &(), 0, true); let instructions = vec![ - (byte_key(1), TreeInstruction::Read), - (byte_key(2), TreeInstruction::Read), - (byte_key(0xff), TreeInstruction::Read), + TreeInstruction::Read(byte_key(1)), + TreeInstruction::Read(byte_key(2)), + TreeInstruction::Read(byte_key(0xff)), ]; let (block_output, patch) = storage.extend_with_proofs(instructions); assert_eq!(block_output.leaf_count, 0); diff --git a/core/lib/merkle_tree/src/storage/serialization.rs b/core/lib/merkle_tree/src/storage/serialization.rs index 15d67604cc0..6a9216fa104 100644 --- a/core/lib/merkle_tree/src/storage/serialization.rs +++ b/core/lib/merkle_tree/src/storage/serialization.rs @@ -26,7 +26,11 @@ impl LeafNode { let leaf_index = leb128::read::unsigned(&mut bytes).map_err(|err| { DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafIndex) })?; - Ok(Self::new(full_key, value_hash, leaf_index)) + Ok(Self { + full_key, + value_hash, + leaf_index, + }) } pub(super) fn serialize(&self, buffer: &mut Vec) { @@ -297,6 +301,7 @@ impl Manifest { #[cfg(test)] mod tests { use super::*; + use crate::types::TreeEntry; use zksync_types::H256; #[test] @@ -369,7 +374,7 @@ mod tests { #[test] fn serializing_leaf_node() { - let leaf = LeafNode::new(513.into(), H256([4; 32]), 42); + let leaf = LeafNode::new(TreeEntry::new(513.into(), 42, H256([4; 32]))); let mut buffer = vec![]; leaf.serialize(&mut buffer); assert_eq!(buffer[..30], [0; 30]); // padding for the key @@ -426,7 +431,7 @@ mod tests { #[test] fn serializing_root_with_leaf() { - let leaf = LeafNode::new(513.into(), H256([4; 32]), 42); + let leaf = LeafNode::new(TreeEntry::new(513.into(), 42, H256([4; 32]))); let root = Root::new(1, leaf.into()); let mut buffer = vec![]; root.serialize(&mut buffer); diff --git a/core/lib/merkle_tree/src/storage/tests.rs 
b/core/lib/merkle_tree/src/storage/tests.rs index 958c906289e..e70cb057280 100644 --- a/core/lib/merkle_tree/src/storage/tests.rs +++ b/core/lib/merkle_tree/src/storage/tests.rs @@ -25,7 +25,7 @@ pub(super) fn generate_nodes(version: u64, nibble_counts: &[usize]) -> HashMap) -> V fn reading_keys_does_not_change_child_version() { let mut db = PatchSet::default(); let storage = Storage::new(&db, &(), 0, true); - let kvs = vec![(FIRST_KEY, H256([0; 32])), (SECOND_KEY, H256([1; 32]))]; + let kvs = vec![ + TreeEntry::new(FIRST_KEY, 1, H256([0; 32])), + TreeEntry::new(SECOND_KEY, 2, H256([1; 32])), + ]; let (_, patch) = storage.extend(kvs); db.apply_patch(patch); let storage = Storage::new(&db, &(), 1, true); let instructions = vec![ - (FIRST_KEY, TreeInstruction::Read), - (E_KEY, TreeInstruction::Write(H256([2; 32]))), + TreeInstruction::Read(FIRST_KEY), + TreeInstruction::Write(TreeEntry::new(E_KEY, 3, H256([2; 32]))), ]; let (_, patch) = storage.extend_with_proofs(instructions); @@ -327,12 +339,15 @@ fn reading_keys_does_not_change_child_version() { fn read_ops_are_not_reflected_in_patch() { let mut db = PatchSet::default(); let storage = Storage::new(&db, &(), 0, true); - let kvs = vec![(FIRST_KEY, H256([0; 32])), (SECOND_KEY, H256([1; 32]))]; + let kvs = vec![ + TreeEntry::new(FIRST_KEY, 1, H256([0; 32])), + TreeEntry::new(SECOND_KEY, 2, H256([1; 32])), + ]; let (_, patch) = storage.extend(kvs); db.apply_patch(patch); let storage = Storage::new(&db, &(), 1, true); - let instructions = vec![(FIRST_KEY, TreeInstruction::Read)]; + let instructions = vec![TreeInstruction::Read(FIRST_KEY)]; let (_, patch) = storage.extend_with_proofs(instructions); assert!(patch.patches_by_version[&1].nodes.is_empty()); } @@ -351,7 +366,7 @@ fn read_instructions_do_not_lead_to_copied_nodes(writes_per_block: u64) { let mut database = PatchSet::default(); let storage = Storage::new(&database, &(), 0, true); let kvs = (0..key_count) - .map(|i| (big_endian_key(i), H256::zero())) + .map(|i| 
TreeEntry::new(big_endian_key(i), i + 1, H256::zero())) .collect(); let (_, patch) = storage.extend(kvs); database.apply_patch(patch); @@ -361,10 +376,11 @@ fn read_instructions_do_not_lead_to_copied_nodes(writes_per_block: u64) { // Select some existing keys to read. Keys may be repeated, this is fine for our purpose. let reads = (0..writes_per_block).map(|_| { let key = big_endian_key(rng.gen_range(0..key_count)); - (key, TreeInstruction::Read) + TreeInstruction::Read(key) + }); + let writes = (key_count..key_count + writes_per_block).map(|i| { + TreeInstruction::Write(TreeEntry::new(big_endian_key(i), i + 1, H256::zero())) }); - let writes = (key_count..key_count + writes_per_block) - .map(|i| (big_endian_key(i), TreeInstruction::Write(H256::zero()))); let mut instructions: Vec<_> = reads.chain(writes).collect(); instructions.shuffle(&mut rng); @@ -400,7 +416,7 @@ fn replaced_keys_are_correctly_tracked(writes_per_block: usize, with_proofs: boo let mut database = PatchSet::default(); let storage = Storage::new(&database, &(), 0, true); let kvs = (0..100) - .map(|i| (big_endian_key(i), H256::zero())) + .map(|i| TreeEntry::new(big_endian_key(i), i + 1, H256::zero())) .collect(); let (_, patch) = storage.extend(kvs); @@ -412,11 +428,11 @@ fn replaced_keys_are_correctly_tracked(writes_per_block: usize, with_proofs: boo let updates = (0..100) .choose_multiple(&mut rng, writes_per_block) .into_iter() - .map(|i| (big_endian_key(i), H256::zero())); + .map(|i| TreeEntry::new(big_endian_key(i), i + 1, H256::zero())); let storage = Storage::new(&database, &(), new_version, true); let patch = if with_proofs { - let instructions = updates.map(|(key, value)| (key, TreeInstruction::Write(value))); + let instructions = updates.map(TreeInstruction::Write); storage.extend_with_proofs(instructions.collect()).1 } else { storage.extend(updates.collect()).1 @@ -454,14 +470,18 @@ fn assert_replaced_keys(db: &PatchSet, patch: &PatchSet) { #[test] fn tree_handles_keys_at_terminal_level() 
{ let mut db = PatchSet::default(); - let kvs = (0_u32..100) - .map(|i| (Key::from(i), ValueHash::zero())) + let kvs = (0_u64..100) + .map(|i| TreeEntry::new(Key::from(i), i + 1, ValueHash::zero())) .collect(); let (_, patch) = Storage::new(&db, &(), 0, true).extend(kvs); db.apply_patch(patch); // Overwrite a key and check that we don't panic. - let new_kvs = vec![(Key::from(0), ValueHash::from_low_u64_be(1))]; + let new_kvs = vec![TreeEntry::new( + Key::from(0), + 1, + ValueHash::from_low_u64_be(1), + )]; let (_, patch) = Storage::new(&db, &(), 1, true).extend(new_kvs); assert_eq!( @@ -483,7 +503,7 @@ fn tree_handles_keys_at_terminal_level() { #[test] fn recovery_flattens_node_versions() { let recovery_version = 100; - let recovery_entries = (0_u64..10).map(|i| RecoveryEntry { + let recovery_entries = (0_u64..10).map(|i| TreeEntry { key: Key::from(i) << 252, // the first key nibbles are distinct value: ValueHash::zero(), leaf_index: i + 1, @@ -516,7 +536,7 @@ fn recovery_flattens_node_versions() { #[test_casing(7, [256, 4, 5, 20, 69, 127, 128])] fn recovery_with_node_hierarchy(chunk_size: usize) { let recovery_version = 100; - let recovery_entries = (0_u64..256).map(|i| RecoveryEntry { + let recovery_entries = (0_u64..256).map(|i| TreeEntry { key: Key::from(i) << 248, // the first two key nibbles are distinct value: ValueHash::zero(), leaf_index: i + 1, @@ -567,7 +587,7 @@ fn recovery_with_node_hierarchy(chunk_size: usize) { #[test_casing(7, [256, 5, 7, 20, 59, 127, 128])] fn recovery_with_deep_node_hierarchy(chunk_size: usize) { let recovery_version = 1_000; - let recovery_entries = (0_u64..256).map(|i| RecoveryEntry { + let recovery_entries = (0_u64..256).map(|i| TreeEntry { key: Key::from(i), // the last two key nibbles are distinct value: ValueHash::zero(), leaf_index: i + 1, @@ -630,7 +650,7 @@ fn recovery_with_deep_node_hierarchy(chunk_size: usize) { fn recovery_workflow_with_multiple_stages() { let mut db = PatchSet::default(); let recovery_version = 100; - 
let recovery_entries = (0_u64..100).map(|i| RecoveryEntry { + let recovery_entries = (0_u64..100).map(|i| TreeEntry { key: Key::from(i), value: ValueHash::zero(), leaf_index: i, @@ -640,7 +660,7 @@ fn recovery_workflow_with_multiple_stages() { assert_eq!(patch.root(recovery_version).unwrap().leaf_count(), 100); db.apply_patch(patch); - let more_recovery_entries = (100_u64..200).map(|i| RecoveryEntry { + let more_recovery_entries = (100_u64..200).map(|i| TreeEntry { key: Key::from(i), value: ValueHash::zero(), leaf_index: i, @@ -653,7 +673,7 @@ fn recovery_workflow_with_multiple_stages() { // Check that all entries can be accessed let storage = Storage::new(&db, &(), recovery_version + 1, true); - let instructions = (0_u32..200).map(|i| (Key::from(i), TreeInstruction::Read)); + let instructions = (0_u32..200).map(|i| TreeInstruction::Read(Key::from(i))); let (output, _) = storage.extend_with_proofs(instructions.collect()); assert_eq!(output.leaf_count, 200); assert_eq!(output.logs.len(), 200); @@ -687,17 +707,15 @@ fn test_recovery_pruning_equivalence( ); let mut rng = StdRng::seed_from_u64(RNG_SEED); - let kvs = (0..100).map(|i| { - ( - U256([rng.gen(), rng.gen(), rng.gen(), rng.gen()]), - ValueHash::repeat_byte(i), - ) + let entries = (0..100).map(|i| { + let key = U256([rng.gen(), rng.gen(), rng.gen(), rng.gen()]); + TreeEntry::new(key, u64::from(i) + 1, ValueHash::repeat_byte(i)) }); - let kvs: Vec<_> = kvs.collect(); + let entries: Vec<_> = entries.collect(); // Add `kvs` into the tree in several commits. let mut db = PatchSet::default(); - for (version, chunk) in kvs.chunks(chunk_size).enumerate() { + for (version, chunk) in entries.chunks(chunk_size).enumerate() { let (_, patch) = Storage::new(&db, hasher, version as u64, true).extend(chunk.to_vec()); db.apply_patch(patch); } @@ -716,11 +734,7 @@ fn test_recovery_pruning_equivalence( // Generate recovery entries. 
let recovery_entries = all_nodes.values().filter_map(|node| { if let Node::Leaf(leaf) = node { - return Some(RecoveryEntry { - key: leaf.full_key, - value: leaf.value_hash, - leaf_index: leaf.leaf_index, - }); + return Some(TreeEntry::from(*leaf)); } None }); diff --git a/core/lib/merkle_tree/src/types/internal.rs b/core/lib/merkle_tree/src/types/internal.rs index 5e875f6e28a..cb35b0281c2 100644 --- a/core/lib/merkle_tree/src/types/internal.rs +++ b/core/lib/merkle_tree/src/types/internal.rs @@ -4,10 +4,9 @@ use std::{fmt, num::NonZeroU64}; -use zksync_types::{H256, U256}; - use crate::{ hasher::{HashTree, InternalNodeCache}, + types::{Key, TreeEntry, ValueHash}, utils::SmallMap, }; @@ -323,11 +322,6 @@ impl fmt::Display for NodeKey { } } -/// Key stored in the tree. -pub type Key = U256; -/// Hashed value stored in the tree. -pub type ValueHash = H256; - /// Leaf node of the tree. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] @@ -338,13 +332,18 @@ pub struct LeafNode { } impl LeafNode { - pub(crate) fn new(full_key: Key, value_hash: ValueHash, leaf_index: u64) -> Self { + pub(crate) fn new(entry: TreeEntry) -> Self { Self { - full_key, - value_hash, - leaf_index, + full_key: entry.key, + value_hash: entry.value, + leaf_index: entry.leaf_index, } } + + pub(crate) fn update_from(&mut self, entry: TreeEntry) { + self.value_hash = entry.value; + self.leaf_index = entry.leaf_index; + } } /// Reference to a child in an [`InternalNode`]. @@ -556,6 +555,7 @@ impl StaleNodeKey { #[cfg(test)] mod tests { use super::*; + use zksync_types::U256; // `U256` uses little-endian `u64` ordering; i.e., this is // 0x_dead_beef_0000_0000_.._0000. 
diff --git a/core/lib/merkle_tree/src/types/mod.rs b/core/lib/merkle_tree/src/types/mod.rs index de35d9024b7..15ab72b6911 100644 --- a/core/lib/merkle_tree/src/types/mod.rs +++ b/core/lib/merkle_tree/src/types/mod.rs @@ -5,22 +5,53 @@ mod internal; pub(crate) use self::internal::{ ChildRef, Nibbles, NibblesBytes, StaleNodeKey, TreeTags, HASH_SIZE, KEY_SIZE, TREE_DEPTH, }; -pub use self::internal::{InternalNode, Key, LeafNode, Manifest, Node, NodeKey, Root, ValueHash}; +pub use self::internal::{InternalNode, LeafNode, Manifest, Node, NodeKey, Root}; + +use zksync_types::{H256, U256}; + +/// Key stored in the tree. +pub type Key = U256; +/// Hash type of values and intermediate nodes in the tree. +pub type ValueHash = H256; /// Instruction to read or write a tree value at a certain key. #[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum TreeInstruction { - /// Read the current tree value. - Read, - /// Write the specified value. - Write(ValueHash), +pub enum TreeInstruction { + /// Read the current tree value at the specified key. + Read(K), + /// Write the specified entry. + Write(TreeEntry), +} + +impl TreeInstruction { + /// Creates a write instruction. + pub fn write(key: K, leaf_index: u64, value: ValueHash) -> Self { + Self::Write(TreeEntry::new(key, leaf_index, value)) + } + + /// Returns the tree key this instruction is related to. + pub fn key(&self) -> K { + match self { + Self::Read(key) => *key, + Self::Write(entry) => entry.key, + } + } + + pub(crate) fn map_key(&self, map_fn: impl FnOnce(&K) -> U) -> TreeInstruction { + match self { + Self::Read(key) => TreeInstruction::Read(map_fn(key)), + Self::Write(entry) => TreeInstruction::Write(entry.map_key(map_fn)), + } + } } /// Entry in a Merkle tree associated with a key. -#[derive(Debug, Clone, Copy)] -pub struct TreeEntry { +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct TreeEntry { + /// Tree key. + pub key: K, /// Value associated with the key. 
- pub value_hash: ValueHash, + pub value: ValueHash, /// Enumeration index of the key. pub leaf_index: u64, } @@ -28,23 +59,40 @@ pub struct TreeEntry { impl From for TreeEntry { fn from(leaf: LeafNode) -> Self { Self { - value_hash: leaf.value_hash, + key: leaf.full_key, + value: leaf.value_hash, leaf_index: leaf.leaf_index, } } } +impl TreeEntry { + /// Creates a new entry with the specified fields. + pub fn new(key: K, leaf_index: u64, value: ValueHash) -> Self { + Self { + key, + value, + leaf_index, + } + } + + pub(crate) fn map_key(&self, map_fn: impl FnOnce(&K) -> U) -> TreeEntry { + TreeEntry::new(map_fn(&self.key), self.leaf_index, self.value) + } +} + impl TreeEntry { - pub(crate) fn empty() -> Self { + pub(crate) fn empty(key: Key) -> Self { Self { - value_hash: ValueHash::zero(), + key, + value: ValueHash::zero(), leaf_index: 0, } } /// Returns `true` if and only if this entry encodes lack of a value. pub fn is_empty(&self) -> bool { - self.leaf_index == 0 && self.value_hash.is_zero() + self.leaf_index == 0 && self.value.is_zero() } pub(crate) fn with_merkle_path(self, merkle_path: Vec) -> TreeEntryWithProof { @@ -53,6 +101,12 @@ impl TreeEntry { merkle_path, } } + + /// Replaces the value in this entry and returns the modified entry. + #[must_use] + pub fn with_value(self, value: H256) -> Self { + Self { value, ..self } + } } /// Entry in a Merkle tree together with a proof of authenticity. @@ -86,10 +140,7 @@ pub struct BlockOutput { #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum TreeLogEntry { /// A node was inserted into the tree. - Inserted { - /// Index of the inserted node. - leaf_index: u64, - }, + Inserted, /// A node with the specified index was updated. Updated { /// Index of the updated node. 
@@ -109,18 +160,14 @@ pub enum TreeLogEntry { } impl TreeLogEntry { - pub(crate) fn insert(leaf_index: u64) -> Self { - Self::Inserted { leaf_index } - } - - pub(crate) fn update(previous_value: ValueHash, leaf_index: u64) -> Self { + pub(crate) fn update(leaf_index: u64, previous_value: ValueHash) -> Self { Self::Updated { leaf_index, previous_value, } } - pub(crate) fn read(value: ValueHash, leaf_index: u64) -> Self { + pub(crate) fn read(leaf_index: u64, value: ValueHash) -> Self { Self::Read { leaf_index, value } } diff --git a/core/lib/merkle_tree/src/utils.rs b/core/lib/merkle_tree/src/utils.rs index 9542b24bbd3..4771a940f2c 100644 --- a/core/lib/merkle_tree/src/utils.rs +++ b/core/lib/merkle_tree/src/utils.rs @@ -114,11 +114,6 @@ impl SmallMap { } } -pub(crate) fn increment_counter(counter: &mut u64) -> u64 { - *counter += 1; - *counter -} - pub(crate) fn find_diverging_bit(lhs: Key, rhs: Key) -> usize { let diff = lhs ^ rhs; diff.leading_zeros() as usize diff --git a/core/lib/merkle_tree/tests/integration/common.rs b/core/lib/merkle_tree/tests/integration/common.rs index fd9e00855c2..096a54ce711 100644 --- a/core/lib/merkle_tree/tests/integration/common.rs +++ b/core/lib/merkle_tree/tests/integration/common.rs @@ -5,23 +5,22 @@ use once_cell::sync::Lazy; use std::collections::HashMap; use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; -use zksync_merkle_tree::{HashTree, TreeInstruction}; +use zksync_merkle_tree::{HashTree, TreeEntry, TreeInstruction}; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -pub fn generate_key_value_pairs(indexes: impl Iterator) -> Vec<(U256, H256)> { +pub fn generate_key_value_pairs(indexes: impl Iterator) -> Vec { let address: Address = "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); let kvs = indexes.map(|idx| { let key = H256::from_low_u64_be(idx); let key = StorageKey::new(AccountTreeId::new(address), key); - (key.hashed_key_u256(), H256::from_low_u64_be(idx + 1)) + let value = 
H256::from_low_u64_be(idx + 1); + TreeEntry::new(key.hashed_key_u256(), idx + 1, value) }); kvs.collect() } -pub fn compute_tree_hash(kvs: impl Iterator) -> H256 { - let kvs_with_indices = kvs - .enumerate() - .map(|(i, (key, value))| (key, value, i as u64 + 1)); +pub fn compute_tree_hash(kvs: impl Iterator) -> H256 { + let kvs_with_indices = kvs.map(|entry| (entry.key, entry.value, entry.leaf_index)); compute_tree_hash_with_indices(kvs_with_indices) } @@ -70,17 +69,18 @@ fn compute_tree_hash_with_indices(kvs: impl Iterator) } // Computing the expected hash takes some time in the debug mode, so we memoize it. -pub static KVS_AND_HASH: Lazy<(Vec<(U256, H256)>, H256)> = Lazy::new(|| { - let kvs = generate_key_value_pairs(0..100); - let expected_hash = compute_tree_hash(kvs.iter().copied()); - (kvs, expected_hash) +pub static ENTRIES_AND_HASH: Lazy<(Vec, H256)> = Lazy::new(|| { + let entries = generate_key_value_pairs(0..100); + let expected_hash = compute_tree_hash(entries.iter().copied()); + (entries, expected_hash) }); -pub fn convert_to_writes(kvs: &[(U256, H256)]) -> Vec<(U256, TreeInstruction)> { - let kvs = kvs +pub fn convert_to_writes(entries: &[TreeEntry]) -> Vec { + entries .iter() - .map(|&(key, hash)| (key, TreeInstruction::Write(hash))); - kvs.collect() + .copied() + .map(TreeInstruction::Write) + .collect() } /// Emulates leaf index assignment in a real Merkle tree. 
@@ -88,22 +88,22 @@ pub fn convert_to_writes(kvs: &[(U256, H256)]) -> Vec<(U256, TreeInstruction)> { pub struct TreeMap(HashMap); impl TreeMap { - pub fn new(initial_entries: &[(U256, H256)]) -> Self { + pub fn new(initial_entries: &[TreeEntry]) -> Self { let map = initial_entries .iter() - .enumerate() - .map(|(i, (key, value))| (*key, (*value, i as u64 + 1))) + .map(|entry| (entry.key, (entry.value, entry.leaf_index))) .collect(); Self(map) } - pub fn extend(&mut self, kvs: &[(U256, H256)]) { - for &(key, new_value) in kvs { - if let Some((value, _)) = self.0.get_mut(&key) { - *value = new_value; + pub fn extend(&mut self, kvs: &[TreeEntry]) { + for &new_entry in kvs { + if let Some((value, leaf_index)) = self.0.get_mut(&new_entry.key) { + assert_eq!(*leaf_index, new_entry.leaf_index); // sanity check + *value = new_entry.value; } else { - let leaf_index = self.0.len() as u64 + 1; - self.0.insert(key, (new_value, leaf_index)); + self.0 + .insert(new_entry.key, (new_entry.value, new_entry.leaf_index)); } } } @@ -112,7 +112,7 @@ impl TreeMap { let entries = self .0 .iter() - .map(|(key, (value, idx))| (*key, *value, *idx)); + .map(|(key, (value, leaf_index))| (*key, *value, *leaf_index)); compute_tree_hash_with_indices(entries) } } diff --git a/core/lib/merkle_tree/tests/integration/consistency.rs b/core/lib/merkle_tree/tests/integration/consistency.rs index 7c1d69657bf..da3312d2002 100644 --- a/core/lib/merkle_tree/tests/integration/consistency.rs +++ b/core/lib/merkle_tree/tests/integration/consistency.rs @@ -26,7 +26,7 @@ fn five_thousand_angry_monkeys_vs_merkle_tree() { let kvs = generate_key_value_pairs(0..100); tree.extend(kvs); - tree.verify_consistency(0).unwrap(); + tree.verify_consistency(0, true).unwrap(); let mut raw_db = db.into_inner(); let cf = MerkleTreeColumnFamily::Tree; @@ -53,7 +53,9 @@ fn five_thousand_angry_monkeys_vs_merkle_tree() { raw_db.write(batch).unwrap(); let mut db = RocksDBWrapper::from(raw_db); - let err = MerkleTree::new(&mut 
db).verify_consistency(0).unwrap_err(); + let err = MerkleTree::new(&mut db) + .verify_consistency(0, true) + .unwrap_err(); println!("{err}"); // Restore the value back so that it doesn't influence the following cases. diff --git a/core/lib/merkle_tree/tests/integration/domain.rs b/core/lib/merkle_tree/tests/integration/domain.rs index d3b666c8849..f3febda5f06 100644 --- a/core/lib/merkle_tree/tests/integration/domain.rs +++ b/core/lib/merkle_tree/tests/integration/domain.rs @@ -7,14 +7,14 @@ use tempfile::TempDir; use std::slice; use zksync_crypto::hasher::blake2::Blake2Hasher; -use zksync_merkle_tree::{domain::ZkSyncTree, HashTree}; +use zksync_merkle_tree::{domain::ZkSyncTree, HashTree, TreeEntry, TreeInstruction}; use zksync_storage::RocksDB; use zksync_system_constants::ACCOUNT_CODE_STORAGE_ADDRESS; use zksync_types::{ - proofs::StorageLogMetadata, AccountTreeId, Address, L1BatchNumber, StorageKey, StorageLog, H256, + proofs::StorageLogMetadata, AccountTreeId, Address, L1BatchNumber, StorageKey, H256, }; -fn gen_storage_logs() -> Vec { +fn gen_storage_logs() -> Vec> { let addrs = vec![ "4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2", "ef4bb7b21c5fe7432a7d63876cc59ecc23b46636", @@ -32,7 +32,11 @@ fn gen_storage_logs() -> Vec { proof_keys .zip(proof_values) - .map(|(proof_key, proof_value)| StorageLog::new_write_log(proof_key, proof_value)) + .enumerate() + .map(|(i, (proof_key, proof_value))| { + let entry = TreeEntry::new(proof_key, i as u64 + 1, proof_value); + TreeInstruction::Write(entry) + }) .collect() } @@ -54,7 +58,11 @@ fn basic_workflow() { assert_eq!(metadata.rollup_last_leaf_index, 101); assert_eq!(metadata.initial_writes.len(), logs.len()); for (write, log) in metadata.initial_writes.iter().zip(&logs) { - assert_eq!(write.value, log.value); + let expected_value = match log { + TreeInstruction::Write(entry) => entry.value, + TreeInstruction::Read(_) => unreachable!(), + }; + assert_eq!(write.value, expected_value); } 
assert!(metadata.repeated_writes.is_empty()); @@ -124,7 +132,10 @@ fn filtering_out_no_op_writes() { // Add some actual repeated writes. let mut expected_writes_count = 0; for log in logs.iter_mut().step_by(3) { - log.value = H256::repeat_byte(0xff); + let TreeInstruction::Write(entry) = log else { + unreachable!("Unexpected instruction: {log:?}"); + }; + entry.value = H256::repeat_byte(0xff); expected_writes_count += 1; } let new_metadata = tree.process_l1_batch(&logs); @@ -155,14 +166,16 @@ fn revert_blocks() { // Add couple of blocks of distinct keys/values let mut logs: Vec<_> = proof_keys .zip(proof_values) - .map(|(proof_key, proof_value)| StorageLog::new_write_log(proof_key, proof_value)) + .map(|(proof_key, proof_value)| { + let entry = TreeEntry::new(proof_key, proof_value.to_low_u64_be() + 1, proof_value); + TreeInstruction::Write(entry) + }) .collect(); // Add a block with repeated keys let extra_logs = (0..block_size).map(move |i| { - StorageLog::new_write_log( - StorageKey::new(AccountTreeId::new(address), H256::from_low_u64_be(i as u64)), - H256::from_low_u64_be((i + 1) as u64), - ) + let key = StorageKey::new(AccountTreeId::new(address), H256::from_low_u64_be(i as u64)); + let entry = TreeEntry::new(key, i as u64 + 1, H256::from_low_u64_be(i as u64 + 1)); + TreeInstruction::Write(entry) }); logs.extend(extra_logs); @@ -277,7 +290,7 @@ fn read_logs() { let mut tree = ZkSyncTree::new_lightweight(db); let read_logs: Vec<_> = logs .into_iter() - .map(|log| StorageLog::new_read_log(log.key, log.value)) + .map(|instr| TreeInstruction::Read(instr.key())) .collect(); let read_metadata = tree.process_l1_batch(&read_logs); @@ -285,14 +298,13 @@ fn read_logs() { } fn create_write_log( + leaf_index: u64, address: Address, address_storage_key: [u8; 32], value: [u8; 32], -) -> StorageLog { - StorageLog::new_write_log( - StorageKey::new(AccountTreeId::new(address), H256(address_storage_key)), - H256(value), - ) +) -> TreeInstruction { + let key = 
StorageKey::new(AccountTreeId::new(address), H256(address_storage_key)); + TreeInstruction::Write(TreeEntry::new(key, leaf_index, H256(value))) } fn subtract_from_max_value(diff: u8) -> [u8; 32] { @@ -315,28 +327,33 @@ fn root_hash_compatibility() { ); let storage_logs = vec![ - create_write_log(ACCOUNT_CODE_STORAGE_ADDRESS, [0; 32], [1; 32]), + create_write_log(1, ACCOUNT_CODE_STORAGE_ADDRESS, [0; 32], [1; 32]), create_write_log( + 2, Address::from_low_u64_be(9223372036854775808), [254; 32], subtract_from_max_value(1), ), create_write_log( + 3, Address::from_low_u64_be(9223372036854775809), [253; 32], subtract_from_max_value(2), ), create_write_log( + 4, Address::from_low_u64_be(9223372036854775810), [252; 32], subtract_from_max_value(3), ), create_write_log( + 5, Address::from_low_u64_be(9223372036854775811), [251; 32], subtract_from_max_value(4), ), create_write_log( + 6, Address::from_low_u64_be(9223372036854775812), [250; 32], subtract_from_max_value(5), diff --git a/core/lib/merkle_tree/tests/integration/merkle_tree.rs b/core/lib/merkle_tree/tests/integration/merkle_tree.rs index 9f3eb970cd3..e4f052bb03c 100644 --- a/core/lib/merkle_tree/tests/integration/merkle_tree.rs +++ b/core/lib/merkle_tree/tests/integration/merkle_tree.rs @@ -7,12 +7,14 @@ use std::{cmp, mem}; use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ - Database, HashTree, MerkleTree, PatchSet, Patched, TreeInstruction, TreeLogEntry, + Database, HashTree, MerkleTree, PatchSet, Patched, TreeEntry, TreeInstruction, TreeLogEntry, TreeRangeDigest, }; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -use crate::common::{compute_tree_hash, convert_to_writes, generate_key_value_pairs, KVS_AND_HASH}; +use crate::common::{ + compute_tree_hash, convert_to_writes, generate_key_value_pairs, ENTRIES_AND_HASH, +}; #[test] fn compute_tree_hash_works_correctly() { @@ -25,7 +27,7 @@ fn compute_tree_hash_works_correctly() { let address: Address = 
"4b3af74f66ab1f0da3f2e4ec7a3cb99baf1af7b2".parse().unwrap(); let key = StorageKey::new(AccountTreeId::new(address), H256::zero()); let key = key.hashed_key_u256(); - let hash = compute_tree_hash([(key, H256([1; 32]))].into_iter()); + let hash = compute_tree_hash([TreeEntry::new(key, 1, H256([1; 32]))].into_iter()); assert_eq!(hash, EXPECTED_HASH); } @@ -59,7 +61,7 @@ fn output_proofs_are_computed_correctly_on_empty_tree(kv_count: u64) { let reads = instructions .iter() - .map(|(key, _)| (*key, TreeInstruction::Read)); + .map(|instr| TreeInstruction::Read(instr.key())); let mut reads: Vec<_> = reads.collect(); reads.shuffle(&mut rng); let output = tree.extend_with_proofs(reads.clone()); @@ -77,25 +79,26 @@ fn entry_proofs_are_computed_correctly_on_empty_tree(kv_count: u64) { let expected_hash = compute_tree_hash(kvs.iter().copied()); tree.extend(kvs.clone()); - let existing_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + let existing_keys: Vec<_> = kvs.iter().map(|entry| entry.key).collect(); let entries = tree.entries_with_proofs(0, &existing_keys).unwrap(); assert_eq!(entries.len(), existing_keys.len()); - for ((key, value), entry) in kvs.iter().zip(entries) { - entry.verify(&Blake2Hasher, *key, expected_hash); - assert_eq!(entry.base.value_hash, *value); + for (input_entry, entry) in kvs.iter().zip(entries) { + entry.verify(&Blake2Hasher, expected_hash); + assert_eq!(entry.base, *input_entry); } // Test some keys adjacent to existing ones. 
- let adjacent_keys = kvs.iter().flat_map(|(key, _)| { + let adjacent_keys = kvs.iter().flat_map(|entry| { + let key = entry.key; [ - *key ^ (U256::one() << rng.gen_range(0..256)), - *key ^ (U256::one() << rng.gen_range(0..256)), - *key ^ (U256::one() << rng.gen_range(0..256)), + key ^ (U256::one() << rng.gen_range(0..256)), + key ^ (U256::one() << rng.gen_range(0..256)), + key ^ (U256::one() << rng.gen_range(0..256)), ] }); let random_keys = generate_key_value_pairs(kv_count..(kv_count * 2)) .into_iter() - .map(|(key, _)| key); + .map(|entry| entry.key); let mut missing_keys: Vec<_> = adjacent_keys.chain(random_keys).collect(); missing_keys.shuffle(&mut rng); @@ -103,7 +106,8 @@ fn entry_proofs_are_computed_correctly_on_empty_tree(kv_count: u64) { assert_eq!(entries.len(), missing_keys.len()); for (key, entry) in missing_keys.iter().zip(entries) { assert!(entry.base.is_empty()); - entry.verify(&Blake2Hasher, *key, expected_hash); + assert_eq!(entry.base.key, *key); + entry.verify(&Blake2Hasher, expected_hash); } } @@ -117,10 +121,13 @@ fn proofs_are_computed_correctly_for_mixed_instructions() { let output = tree.extend(kvs.clone()); let old_root_hash = output.root_hash; - let reads = kvs.iter().map(|(key, _)| (*key, TreeInstruction::Read)); + let reads = kvs.iter().map(|entry| TreeInstruction::Read(entry.key)); let mut instructions: Vec<_> = reads.collect(); // Overwrite all keys in the tree. 
- let writes: Vec<_> = kvs.iter().map(|(key, _)| (*key, H256::zero())).collect(); + let writes: Vec<_> = kvs + .iter() + .map(|entry| entry.with_value(H256::zero())) + .collect(); let expected_hash = compute_tree_hash(writes.iter().copied()); instructions.extend(convert_to_writes(&writes)); instructions.shuffle(&mut rng); @@ -145,7 +152,7 @@ fn proofs_are_computed_correctly_for_missing_keys() { let mut instructions = convert_to_writes(&kvs); let missing_reads = generate_key_value_pairs(20..50) .into_iter() - .map(|(key, _)| (key, TreeInstruction::Read)); + .map(|entry| TreeInstruction::Read(entry.key)); instructions.extend(missing_reads); instructions.shuffle(&mut rng); @@ -161,7 +168,7 @@ fn proofs_are_computed_correctly_for_missing_keys() { } fn test_intermediate_commits(db: &mut impl Database, chunk_size: usize) { - let (kvs, expected_hash) = &*KVS_AND_HASH; + let (kvs, expected_hash) = &*ENTRIES_AND_HASH; let mut final_hash = H256::zero(); let mut tree = MerkleTree::new(db); for chunk in kvs.chunks(chunk_size) { @@ -172,7 +179,7 @@ fn test_intermediate_commits(db: &mut impl Database, chunk_size: usize) { let latest_version = tree.latest_version().unwrap(); for version in 0..=latest_version { - tree.verify_consistency(version).unwrap(); + tree.verify_consistency(version, true).unwrap(); } } @@ -183,7 +190,7 @@ fn root_hash_is_computed_correctly_with_intermediate_commits(chunk_size: usize) #[test_casing(6, [3, 5, 10, 17, 28, 42])] fn output_proofs_are_computed_correctly_with_intermediate_commits(chunk_size: usize) { - let (kvs, expected_hash) = &*KVS_AND_HASH; + let (kvs, expected_hash) = &*ENTRIES_AND_HASH; let mut tree = MerkleTree::new(PatchSet::default()); let mut root_hash = Blake2Hasher.empty_subtree_hash(256); @@ -198,8 +205,8 @@ fn output_proofs_are_computed_correctly_with_intermediate_commits(chunk_size: us #[test_casing(4, [10, 17, 28, 42])] fn entry_proofs_are_computed_correctly_with_intermediate_commits(chunk_size: usize) { - let (kvs, _) = 
&*KVS_AND_HASH; - let all_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + let (kvs, _) = &*ENTRIES_AND_HASH; + let all_keys: Vec<_> = kvs.iter().map(|entry| entry.key).collect(); let mut tree = MerkleTree::new(PatchSet::default()); let mut root_hashes = vec![]; for chunk in kvs.chunks(chunk_size) { @@ -210,8 +217,9 @@ fn entry_proofs_are_computed_correctly_with_intermediate_commits(chunk_size: usi let entries = tree.entries_with_proofs(version as u64, &all_keys).unwrap(); assert_eq!(entries.len(), all_keys.len()); for (i, (key, entry)) in all_keys.iter().zip(entries).enumerate() { + assert_eq!(entry.base.key, *key); assert_eq!(entry.base.is_empty(), i >= (version + 1) * chunk_size); - entry.verify(&Blake2Hasher, *key, output.root_hash); + entry.verify(&Blake2Hasher, output.root_hash); } } @@ -220,14 +228,15 @@ fn entry_proofs_are_computed_correctly_with_intermediate_commits(chunk_size: usi let entries = tree.entries_with_proofs(version as u64, &all_keys).unwrap(); assert_eq!(entries.len(), all_keys.len()); for (i, (key, entry)) in all_keys.iter().zip(entries).enumerate() { + assert_eq!(entry.base.key, *key); assert_eq!(entry.base.is_empty(), i >= (version + 1) * chunk_size); - entry.verify(&Blake2Hasher, *key, root_hash); + entry.verify(&Blake2Hasher, root_hash); } } } fn test_accumulated_commits(db: DB, chunk_size: usize) -> DB { - let (kvs, expected_hash) = &*KVS_AND_HASH; + let (kvs, expected_hash) = &*ENTRIES_AND_HASH; let mut db = Patched::new(db); let mut final_hash = H256::zero(); for chunk in kvs.chunks(chunk_size) { @@ -242,7 +251,7 @@ fn test_accumulated_commits(db: DB, chunk_size: usize) -> DB { let tree = MerkleTree::new(&mut db); let latest_version = tree.latest_version().unwrap(); for version in 0..=latest_version { - tree.verify_consistency(version).unwrap(); + tree.verify_consistency(version, true).unwrap(); } db } @@ -253,9 +262,12 @@ fn accumulating_commits(chunk_size: usize) { } fn test_root_hash_computing_with_reverts(db: &mut impl 
Database) { - let (kvs, expected_hash) = &*KVS_AND_HASH; + let (kvs, expected_hash) = &*ENTRIES_AND_HASH; let (initial_update, final_update) = kvs.split_at(75); - let key_updates: Vec<_> = kvs.iter().map(|(key, _)| (*key, H256([255; 32]))).collect(); + let key_updates: Vec<_> = kvs + .iter() + .map(|entry| entry.with_value(H256([255; 32]))) + .collect(); let key_inserts = generate_key_value_pairs(100..200); let mut tree = MerkleTree::new(db); @@ -300,7 +312,7 @@ fn test_root_hash_computing_with_key_updates(db: impl Database) { // Overwrite some `kvs` entries and add some new ones. let changed_kvs = kvs.iter_mut().enumerate().filter_map(|(i, kv)| { if i % 3 == 1 { - kv.1 = H256::from_low_u64_be((i + 100) as u64); + *kv = kv.with_value(H256::from_low_u64_be((i + 100) as u64)); return Some(*kv); } None @@ -361,12 +373,12 @@ fn root_hash_is_computed_correctly_with_key_updates() { fn proofs_are_computed_correctly_with_key_updates(updated_keys: usize) { const RNG_SEED: u64 = 1_234; - let (kvs, expected_hash) = &*KVS_AND_HASH; + let (kvs, expected_hash) = &*ENTRIES_AND_HASH; let mut rng = StdRng::seed_from_u64(RNG_SEED); let old_instructions: Vec<_> = kvs[..updated_keys] .iter() - .map(|(key, _)| (*key, TreeInstruction::Write(H256([255; 32])))) + .map(|entry| TreeInstruction::Write(entry.with_value(H256([255; 32])))) .collect(); // Move the updated keys to the random places in the `kvs` vector. 
let mut writes = convert_to_writes(kvs); @@ -386,11 +398,11 @@ fn proofs_are_computed_correctly_with_key_updates(updated_keys: usize) { assert_eq!(output.root_hash(), Some(*expected_hash)); output.verify_proofs(&Blake2Hasher, root_hash, &instructions); - let keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + let keys: Vec<_> = kvs.iter().map(|entry| entry.key).collect(); let proofs = tree.entries_with_proofs(1, &keys).unwrap(); - for ((key, value), proof) in kvs.iter().zip(proofs) { - assert_eq!(proof.base.value_hash, *value); - proof.verify(&Blake2Hasher, *key, *expected_hash); + for (entry, proof) in kvs.iter().zip(proofs) { + assert_eq!(proof.base, *entry); + proof.verify(&Blake2Hasher, *expected_hash); } } @@ -417,7 +429,11 @@ fn test_root_hash_equals_to_previous_implementation(db: &mut impl Database) { }) }); let values = (0..100).map(H256::from_low_u64_be); - let kvs: Vec<_> = keys.zip(values).collect(); + let kvs: Vec<_> = keys + .zip(values) + .enumerate() + .map(|(idx, (key, value))| TreeEntry::new(key, idx as u64 + 1, value)) + .collect(); let expected_hash = compute_tree_hash(kvs.iter().copied()); assert_eq!(expected_hash, PREV_IMPL_HASH); @@ -437,13 +453,13 @@ fn root_hash_equals_to_previous_implementation() { #[test_casing(7, [2, 3, 5, 10, 17, 28, 42])] fn range_proofs_with_multiple_existing_items(range_size: usize) { - let (kvs, expected_hash) = &*KVS_AND_HASH; + let (kvs, expected_hash) = &*ENTRIES_AND_HASH; assert!(range_size >= 2 && range_size <= kvs.len()); let mut tree = MerkleTree::new(PatchSet::default()); tree.extend(kvs.clone()); - let mut sorted_keys: Vec<_> = kvs.iter().map(|(key, _)| *key).collect(); + let mut sorted_keys: Vec<_> = kvs.iter().map(|entry| entry.key).collect(); sorted_keys.sort_unstable(); for start_idx in 0..(sorted_keys.len() - range_size) { @@ -460,10 +476,10 @@ fn range_proofs_with_multiple_existing_items(range_size: usize) { let other_entries = tree.entries(0, other_keys).unwrap(); let mut range = 
TreeRangeDigest::new(&Blake2Hasher, *first_key, &first_entry); - for (key, entry) in other_keys.iter().zip(other_entries) { - range.update(*key, entry); + for entry in other_entries { + range.update(entry); } - let range_hash = range.finalize(*last_key, &last_entry); + let range_hash = range.finalize(&last_entry); assert_eq!(range_hash, *expected_hash); } } @@ -479,7 +495,7 @@ fn range_proofs_with_random_ranges() { const RNG_SEED: u64 = 321; let mut rng = StdRng::seed_from_u64(RNG_SEED); - let (kvs, expected_hash) = &*KVS_AND_HASH; + let (kvs, expected_hash) = &*ENTRIES_AND_HASH; let mut tree = MerkleTree::new(PatchSet::default()); tree.extend(kvs.clone()); @@ -493,9 +509,9 @@ fn range_proofs_with_random_ranges() { } // Find out keys falling into the range. - let keys_in_range = kvs - .iter() - .filter_map(|&(key, _)| (key > start_key && key < end_key).then_some(key)); + let keys_in_range = kvs.iter().filter_map(|entry| { + (entry.key > start_key && entry.key < end_key).then_some(entry.key) + }); let mut keys_in_range: Vec<_> = keys_in_range.collect(); keys_in_range.sort_unstable(); println!("Proving range with {} keys", keys_in_range.len()); @@ -506,10 +522,10 @@ fn range_proofs_with_random_ranges() { let other_entries = tree.entries(0, &keys_in_range).unwrap(); let mut range = TreeRangeDigest::new(&Blake2Hasher, start_key, &first_entry); - for (key, entry) in keys_in_range.iter().zip(other_entries) { - range.update(*key, entry); + for entry in other_entries { + range.update(entry); } - let range_hash = range.finalize(end_key, &last_entry); + let range_hash = range.finalize(&last_entry); assert_eq!(range_hash, *expected_hash); } } @@ -633,7 +649,7 @@ mod rocksdb { fn tree_tags_mismatch() { let Harness { mut db, dir: _dir } = Harness::new(); let mut tree = MerkleTree::new(&mut db); - tree.extend(vec![(U256::zero(), H256::zero())]); + tree.extend(vec![TreeEntry::new(U256::zero(), 1, H256::zero())]); MerkleTree::with_hasher(&mut db, ()); } @@ -643,7 +659,7 @@ mod 
rocksdb { fn tree_tags_mismatch_with_cold_restart() { let Harness { db, dir } = Harness::new(); let mut tree = MerkleTree::new(db); - tree.extend(vec![(U256::zero(), H256::zero())]); + tree.extend(vec![TreeEntry::new(U256::zero(), 1, H256::zero())]); drop(tree); let db = RocksDBWrapper::new(dir.path()); diff --git a/core/lib/merkle_tree/tests/integration/recovery.rs b/core/lib/merkle_tree/tests/integration/recovery.rs index fda57f78851..6739e4ffe02 100644 --- a/core/lib/merkle_tree/tests/integration/recovery.rs +++ b/core/lib/merkle_tree/tests/integration/recovery.rs @@ -5,11 +5,10 @@ use test_casing::test_casing; use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ - recovery::{MerkleTreeRecovery, RecoveryEntry}, - Database, MerkleTree, PatchSet, PruneDatabase, ValueHash, + recovery::MerkleTreeRecovery, Database, MerkleTree, PatchSet, PruneDatabase, ValueHash, }; -use crate::common::{convert_to_writes, generate_key_value_pairs, TreeMap, KVS_AND_HASH}; +use crate::common::{convert_to_writes, generate_key_value_pairs, TreeMap, ENTRIES_AND_HASH}; #[derive(Debug, Clone, Copy)] enum RecoveryKind { @@ -23,16 +22,8 @@ impl RecoveryKind { #[test] fn recovery_basics() { - let (kvs, expected_hash) = &*KVS_AND_HASH; - let recovery_entries = kvs - .iter() - .enumerate() - .map(|(i, &(key, value))| RecoveryEntry { - key, - value, - leaf_index: i as u64 + 1, - }); - let mut recovery_entries: Vec<_> = recovery_entries.collect(); + let (kvs, expected_hash) = &*ENTRIES_AND_HASH; + let mut recovery_entries: Vec<_> = kvs.clone(); recovery_entries.sort_unstable_by_key(|entry| entry.key); let greatest_key = recovery_entries[99].key; @@ -44,20 +35,12 @@ fn recovery_basics() { assert_eq!(recovery.root_hash(), *expected_hash); let tree = recovery.finalize(); - tree.verify_consistency(recovered_version).unwrap(); + tree.verify_consistency(recovered_version, true).unwrap(); } fn test_recovery_in_chunks(mut db: impl PruneDatabase, kind: RecoveryKind, chunk_size: usize) 
{ - let (kvs, expected_hash) = &*KVS_AND_HASH; - let recovery_entries = kvs - .iter() - .enumerate() - .map(|(i, &(key, value))| RecoveryEntry { - key, - value, - leaf_index: i as u64 + 1, - }); - let mut recovery_entries: Vec<_> = recovery_entries.collect(); + let (kvs, expected_hash) = &*ENTRIES_AND_HASH; + let mut recovery_entries = kvs.clone(); if matches!(kind, RecoveryKind::Linear) { recovery_entries.sort_unstable_by_key(|entry| entry.key); } @@ -84,7 +67,7 @@ fn test_recovery_in_chunks(mut db: impl PruneDatabase, kind: RecoveryKind, chunk assert_eq!(recovery.root_hash(), *expected_hash); let mut tree = recovery.finalize(); - tree.verify_consistency(recovered_version).unwrap(); + tree.verify_consistency(recovered_version, true).unwrap(); // Check that new tree versions can be built and function as expected. test_tree_after_recovery(&mut tree, recovered_version, *expected_hash); } @@ -107,13 +90,13 @@ fn test_tree_after_recovery( let mut rng = StdRng::seed_from_u64(RNG_SEED); let mut kvs = generate_key_value_pairs(100..=150); let mut modified_kvs = generate_key_value_pairs(50..=100); - for (_, value) in &mut modified_kvs { - *value = ValueHash::repeat_byte(1); + for entry in &mut modified_kvs { + entry.value = ValueHash::repeat_byte(1); } + modified_kvs.shuffle(&mut rng); kvs.extend(modified_kvs); - kvs.shuffle(&mut rng); - let mut tree_map = TreeMap::new(&KVS_AND_HASH.0); + let mut tree_map = TreeMap::new(&ENTRIES_AND_HASH.0); let mut prev_root_hash = root_hash; for (i, chunk) in kvs.chunks(CHUNK_SIZE).enumerate() { tree_map.extend(chunk); @@ -129,7 +112,7 @@ fn test_tree_after_recovery( }; assert_eq!(new_root_hash, tree_map.root_hash()); - tree.verify_consistency(recovered_version + i as u64) + tree.verify_consistency(recovered_version + i as u64, true) .unwrap(); prev_root_hash = new_root_hash; } diff --git a/core/lib/zksync_core/src/api_server/tree/mod.rs b/core/lib/zksync_core/src/api_server/tree/mod.rs index 74dd3e5b70c..7b4c9086ac6 100644 --- 
a/core/lib/zksync_core/src/api_server/tree/mod.rs +++ b/core/lib/zksync_core/src/api_server/tree/mod.rs @@ -54,7 +54,7 @@ impl TreeEntryWithProof { let mut merkle_path = src.merkle_path; merkle_path.reverse(); // Use root-to-leaf enumeration direction as in Ethereum Self { - value: src.base.value_hash, + value: src.base.value, index: src.base.leaf_index, merkle_path, } diff --git a/core/lib/zksync_core/src/metadata_calculator/helpers.rs b/core/lib/zksync_core/src/metadata_calculator/helpers.rs index 32f39276a1e..9ae936febfe 100644 --- a/core/lib/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/lib/zksync_core/src/metadata_calculator/helpers.rs @@ -16,10 +16,10 @@ use zksync_dal::StorageProcessor; use zksync_health_check::{Health, HealthStatus}; use zksync_merkle_tree::{ domain::{TreeMetadata, ZkSyncTree, ZkSyncTreeReader}, - Key, MerkleTreeColumnFamily, NoVersionError, TreeEntryWithProof, + Key, MerkleTreeColumnFamily, NoVersionError, TreeEntryWithProof, TreeInstruction, }; use zksync_storage::{RocksDB, RocksDBOptions, StalledWritesRetries}; -use zksync_types::{block::L1BatchHeader, L1BatchNumber, StorageLog, H256}; +use zksync_types::{block::L1BatchHeader, L1BatchNumber, StorageKey, H256}; use super::metrics::{LoadChangesStage, TreeUpdateStage, METRICS}; @@ -147,7 +147,10 @@ impl AsyncTree { self.as_ref().root_hash() } - pub async fn process_l1_batch(&mut self, storage_logs: Vec) -> TreeMetadata { + pub async fn process_l1_batch( + &mut self, + storage_logs: Vec>, + ) -> TreeMetadata { let mut tree = self.inner.take().expect(Self::INCONSISTENT_MSG); let (tree, metadata) = tokio::task::spawn_blocking(move || { let metadata = tree.process_l1_batch(&storage_logs); @@ -242,7 +245,7 @@ impl Delayer { #[cfg_attr(test, derive(PartialEq))] pub(crate) struct L1BatchWithLogs { pub header: L1BatchHeader, - pub storage_logs: Vec, + pub storage_logs: Vec>, } impl L1BatchWithLogs { @@ -276,15 +279,22 @@ impl L1BatchWithLogs { .await; 
touched_slots_latency.observe_with_count(touched_slots.len()); + let leaf_indices_latency = METRICS.start_load_stage(LoadChangesStage::LoadLeafIndices); + let hashed_keys_for_writes: Vec<_> = + touched_slots.keys().map(StorageKey::hashed_key).collect(); + let l1_batches_for_initial_writes = storage + .storage_logs_dal() + .get_l1_batches_and_indices_for_initial_writes(&hashed_keys_for_writes) + .await; + leaf_indices_latency.observe_with_count(hashed_keys_for_writes.len()); + let mut storage_logs = BTreeMap::new(); for storage_key in protective_reads { touched_slots.remove(&storage_key); // ^ As per deduplication rules, all keys in `protective_reads` haven't *really* changed // in the considered L1 batch. Thus, we can remove them from `touched_slots` in order to simplify // their further processing. - - let log = StorageLog::new_read_log(storage_key, H256::zero()); - // ^ The tree doesn't use the read value, so we set it to zero. + let log = TreeInstruction::Read(storage_key); storage_logs.insert(storage_key, log); } tracing::debug!( @@ -292,45 +302,17 @@ impl L1BatchWithLogs { touched_slots.len() ); - // We don't want to update the tree with zero values which were never written to per storage log - // deduplication rules. If we write such values to the tree, it'd result in bogus tree hashes because - // new (bogus) leaf indices would be allocated for them. To filter out those values, it's sufficient - // to check when a `storage_key` was first written per `initial_writes` table. If this never occurred - // or occurred after the considered `l1_batch_number`, this means that the write must be ignored. - // - // Note that this approach doesn't filter out no-op writes of the same value, but this is fine; - // since no new leaf indices are allocated in the tree for them, such writes are no-op on the tree side as well. 
- let hashed_keys_for_zero_values: Vec<_> = touched_slots - .iter() - .filter(|(_, value)| { - // Only zero values are worth checking for initial writes; non-zero values are always - // written per deduplication rules. - value.is_zero() - }) - .map(|(key, _)| key.hashed_key()) - .collect(); - METRICS - .load_changes_zero_values - .observe(hashed_keys_for_zero_values.len()); - - let latency = METRICS.start_load_stage(LoadChangesStage::LoadInitialWritesForZeroValues); - let l1_batches_for_initial_writes = storage - .storage_logs_dal() - .get_l1_batches_and_indices_for_initial_writes(&hashed_keys_for_zero_values) - .await; - latency.observe_with_count(hashed_keys_for_zero_values.len()); - for (storage_key, value) in touched_slots { - let write_matters = if value.is_zero() { - let initial_write_batch_for_key = - l1_batches_for_initial_writes.get(&storage_key.hashed_key()); - initial_write_batch_for_key.map_or(false, |&(number, _)| number <= l1_batch_number) - } else { - true - }; - - if write_matters { - storage_logs.insert(storage_key, StorageLog::new_write_log(storage_key, value)); + if let Some(&(initial_write_batch_for_key, leaf_index)) = + l1_batches_for_initial_writes.get(&storage_key.hashed_key()) + { + // Filter out logs that correspond to deduplicated writes. 
+ if initial_write_batch_for_key <= l1_batch_number { + storage_logs.insert( + storage_key, + TreeInstruction::write(storage_key, leaf_index, value), + ); + } } } @@ -347,7 +329,7 @@ mod tests { use tempfile::TempDir; use zksync_dal::ConnectionPool; - use zksync_types::{proofs::PrepareBasicCircuitsJob, L2ChainId, StorageKey, StorageLogKind}; + use zksync_types::{proofs::PrepareBasicCircuitsJob, L2ChainId, StorageKey, StorageLog}; use super::*; use crate::{ @@ -386,6 +368,10 @@ mod tests { .storage_logs_dal() .get_previous_storage_values(&hashed_keys, l1_batch_number) .await; + let l1_batches_for_initial_writes = storage + .storage_logs_dal() + .get_l1_batches_and_indices_for_initial_writes(&hashed_keys) + .await; for storage_key in protective_reads { let previous_value = previous_values[&storage_key.hashed_key()].unwrap_or_default(); @@ -397,16 +383,17 @@ mod tests { ); } - storage_logs.insert( - storage_key, - StorageLog::new_read_log(storage_key, previous_value), - ); + storage_logs.insert(storage_key, TreeInstruction::Read(storage_key)); } for (storage_key, value) in touched_slots { let previous_value = previous_values[&storage_key.hashed_key()].unwrap_or_default(); if previous_value != value { - storage_logs.insert(storage_key, StorageLog::new_write_log(storage_key, value)); + let (_, leaf_index) = l1_batches_for_initial_writes[&storage_key.hashed_key()]; + storage_logs.insert( + storage_key, + TreeInstruction::write(storage_key, leaf_index, value), + ); } } @@ -608,7 +595,7 @@ mod tests { let read_logs_count = l1_batch_with_logs .storage_logs .iter() - .filter(|log| log.kind == StorageLogKind::Read) + .filter(|log| matches!(log, TreeInstruction::Read(_))) .count(); assert_eq!(read_logs_count, 7); diff --git a/core/lib/zksync_core/src/metadata_calculator/metrics.rs b/core/lib/zksync_core/src/metadata_calculator/metrics.rs index f2bedf47229..f8ef8f85b64 100644 --- a/core/lib/zksync_core/src/metadata_calculator/metrics.rs +++ 
b/core/lib/zksync_core/src/metadata_calculator/metrics.rs @@ -35,7 +35,7 @@ pub(super) enum LoadChangesStage { LoadL1BatchHeader, LoadProtectiveReads, LoadTouchedSlots, - LoadInitialWritesForZeroValues, + LoadLeafIndices, } /// Latency metric for a certain stage of the tree update. From 15d7eaf872e222338810243865cec9dff7f6e799 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 30 Nov 2023 17:17:05 +0200 Subject: [PATCH 074/115] feat(en): Support arbitrary genesis block for external nodes (#537) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Support non-zero genesis block specified in executor configuration. Check whether this block exists on initialization; validate its correspondence if it does, and persist consensus fields if it doesn't. ## Why ❔ This is necessary to support gossip-based syncing in practice; we likely won't back-sign all blocks in all envs. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- core/lib/types/src/block.rs | 10 +- core/lib/zksync_core/src/consensus/payload.rs | 2 +- .../zksync_core/src/sync_layer/external_io.rs | 31 ++- .../lib/zksync_core/src/sync_layer/fetcher.rs | 16 +- .../src/sync_layer/gossip/buffered/tests.rs | 2 + .../src/sync_layer/gossip/conversions.rs | 1 - .../zksync_core/src/sync_layer/gossip/mod.rs | 4 +- .../src/sync_layer/gossip/storage/mod.rs | 165 ++++++++++-- .../src/sync_layer/gossip/storage/tests.rs | 224 +++++++++++++++- .../src/sync_layer/gossip/tests.rs | 249 +++++++++++++++--- .../zksync_core/src/sync_layer/sync_action.rs | 4 +- core/lib/zksync_core/src/sync_layer/tests.rs | 13 +- 12 files changed, 611 insertions(+), 110 deletions(-) diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 80a4d131e21..b4026468868 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -89,7 +89,7 @@ pub struct MiniblockHeader { } /// Consensus-related L2 block (= miniblock) fields. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct ConsensusBlockFields { /// Hash of the previous consensus block. 
pub parent: validator::BlockHeaderHash, @@ -99,12 +99,14 @@ pub struct ConsensusBlockFields { impl ProtoFmt for ConsensusBlockFields { type Proto = crate::proto::ConsensusBlockFields; - fn read(r: &Self::Proto) -> anyhow::Result { + + fn read(proto: &Self::Proto) -> anyhow::Result { Ok(Self { - parent: read_required(&r.parent).context("parent")?, - justification: read_required(&r.justification).context("justification")?, + parent: read_required(&proto.parent).context("parent")?, + justification: read_required(&proto.justification).context("justification")?, }) } + fn build(&self) -> Self::Proto { Self::Proto { parent: Some(self.parent.build()), diff --git a/core/lib/zksync_core/src/consensus/payload.rs b/core/lib/zksync_core/src/consensus/payload.rs index 8d53fdf21f3..dbe276196b0 100644 --- a/core/lib/zksync_core/src/consensus/payload.rs +++ b/core/lib/zksync_core/src/consensus/payload.rs @@ -6,7 +6,7 @@ use zksync_types::api::en::SyncBlock; use zksync_types::{Address, L1BatchNumber, Transaction, H256}; /// L2 block (= miniblock) payload. 
-#[derive(Debug)] +#[derive(Debug, PartialEq)] pub(crate) struct Payload { pub hash: H256, pub l1_batch_number: L1BatchNumber, diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index dcc38334a99..8e3ca863072 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -1,4 +1,5 @@ use async_trait::async_trait; +use futures::future; use std::{ collections::HashMap, @@ -108,7 +109,6 @@ impl ExternalIO { async fn load_previous_l1_batch_hash(&self) -> U256 { let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); - let wait_latency = KEEPER_METRICS.wait_for_prev_hash_time.start(); let (hash, _) = extractors::wait_for_prev_l1_batch_params(&mut storage, self.current_l1_batch_number) @@ -117,6 +117,18 @@ impl ExternalIO { hash } + async fn load_previous_miniblock_hash(&self) -> H256 { + let prev_miniblock_number = self.current_miniblock_number - 1; + let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); + let header = storage + .blocks_dal() + .get_miniblock_header(prev_miniblock_number) + .await + .unwrap() + .unwrap_or_else(|| panic!("Miniblock #{prev_miniblock_number} is missing")); + header.hash + } + async fn load_base_system_contracts_by_version_id( &self, id: ProtocolVersionId, @@ -307,15 +319,20 @@ impl StateKeeperIO for ExternalIO { operator_address, protocol_version, first_miniblock_info: (miniblock_number, virtual_blocks), - prev_miniblock_hash, }) => { assert_eq!( number, self.current_l1_batch_number, "Batch number mismatch" ); - tracing::info!("Getting previous L1 batch hash"); - let previous_l1_batch_hash = self.load_previous_l1_batch_hash().await; - tracing::info!("Previous L1 batch hash: {previous_l1_batch_hash}"); + tracing::info!("Getting previous L1 batch hash and miniblock hash"); + let (previous_l1_batch_hash, previous_miniblock_hash) = future::join( + 
self.load_previous_l1_batch_hash(), + self.load_previous_miniblock_hash(), + ) + .await; + tracing::info!( + "Previous L1 batch hash: {previous_l1_batch_hash}, previous miniblock hash: {previous_miniblock_hash}" + ); let base_system_contracts = self .load_base_system_contracts_by_version_id(protocol_version) @@ -328,7 +345,7 @@ impl StateKeeperIO for ExternalIO { l1_gas_price, l2_fair_gas_price, miniblock_number, - prev_miniblock_hash, + previous_miniblock_hash, base_system_contracts, self.validation_computational_gas_limit, protocol_version, @@ -539,6 +556,8 @@ impl StateKeeperIO for ExternalIO { // Mimic the metric emitted by the main node to reuse existing Grafana charts. APP_METRICS.block_number[&BlockStage::Sealed].set(self.current_l1_batch_number.0.into()); + self.sync_state + .set_local_block(self.current_miniblock_number); self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. self.current_l1_batch_number += 1; Ok(()) diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index 4aabd163f21..9cdd7e64fd1 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -6,7 +6,7 @@ use std::time::Duration; use zksync_dal::StorageProcessor; use zksync_types::{ api::en::SyncBlock, block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber, - ProtocolVersionId, H256, + ProtocolVersionId, }; use zksync_web3_decl::jsonrpsee::core::Error as RpcError; @@ -29,7 +29,6 @@ pub(super) struct FetchedBlock { pub last_in_batch: bool, pub protocol_version: ProtocolVersionId, pub timestamp: u64, - pub hash: H256, pub l1_gas_price: u64, pub l2_fair_gas_price: u64, pub virtual_blocks: u32, @@ -38,15 +37,14 @@ pub(super) struct FetchedBlock { pub consensus: Option, } -impl FetchedBlock { - fn from_sync_block(block: SyncBlock) -> Self { +impl From for FetchedBlock { + fn from(block: SyncBlock) -> Self { Self { number: block.number, 
l1_batch_number: block.l1_batch_number, last_in_batch: block.last_in_batch, protocol_version: block.protocol_version, timestamp: block.timestamp, - hash: block.hash.unwrap_or_default(), l1_gas_price: block.l1_gas_price, l2_fair_gas_price: block.l2_fair_gas_price, virtual_blocks: block.virtual_blocks.unwrap_or(0), @@ -64,7 +62,6 @@ impl FetchedBlock { pub struct FetcherCursor { // Fields are public for testing purposes. pub(super) next_miniblock: MiniblockNumber, - pub(super) prev_miniblock_hash: H256, pub(super) l1_batch: L1BatchNumber, } @@ -93,7 +90,6 @@ impl FetcherCursor { // Miniblocks are always fully processed. let next_miniblock = last_miniblock_header.number + 1; - let prev_miniblock_hash = last_miniblock_header.hash; // Decide whether the next batch should be explicitly opened or not. let l1_batch = if was_new_batch_open { // No `OpenBatch` action needed. @@ -106,7 +102,6 @@ impl FetcherCursor { Ok(Self { next_miniblock, l1_batch, - prev_miniblock_hash, }) } @@ -136,7 +131,6 @@ impl FetcherCursor { protocol_version: block.protocol_version, // `block.virtual_blocks` can be `None` only for old VM versions where it's not used, so it's fine to provide any number. 
first_miniblock_info: (block.number, block.virtual_blocks), - prev_miniblock_hash: self.prev_miniblock_hash, }); FETCHER_METRICS.l1_batch[&L1BatchStage::Open].set(block.l1_batch_number.0.into()); self.l1_batch += 1; @@ -168,7 +162,6 @@ impl FetcherCursor { new_actions.push(SyncAction::SealMiniblock(block.consensus)); } self.next_miniblock += 1; - self.prev_miniblock_hash = block.hash; new_actions } @@ -280,8 +273,7 @@ impl MainNodeFetcher { request_latency.observe(); let block_number = block.number; - let fetched_block = FetchedBlock::from_sync_block(block); - let new_actions = self.cursor.advance(fetched_block); + let new_actions = self.cursor.advance(block.into()); tracing::info!( "New miniblock: {block_number} / {}", diff --git a/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs index de5ef8a88cb..62c81bca7ca 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs @@ -11,6 +11,7 @@ use zksync_concurrency::{ ctx::{self, channel}, scope, sync::{self, watch}, + testonly::abort_on_panic, time, }; use zksync_consensus_roles::validator::{BlockHeader, BlockNumber, FinalBlock, Payload}; @@ -131,6 +132,7 @@ async fn test_buffered_storage( block_interval: time::Duration, shuffle_blocks: impl FnOnce(&mut StdRng, &mut [FinalBlock]), ) { + abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); diff --git a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs index 8face4e6942..410c2bfe204 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs @@ -42,7 +42,6 @@ impl FetchedBlock { last_in_batch, protocol_version: ProtocolVersionId::latest(), // FIXME timestamp: payload.timestamp, - hash: payload.hash, l1_gas_price: payload.l1_gas_price, 
l2_fair_gas_price: payload.l2_fair_gas_price, virtual_blocks: payload.virtual_blocks, diff --git a/core/lib/zksync_core/src/sync_layer/gossip/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/mod.rs index 630ded95345..2fd9f46aabb 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/mod.rs @@ -67,7 +67,9 @@ async fn run_gossip_fetcher_inner( let cursor = FetcherCursor::new(&mut storage).await?; drop(storage); - let store = PostgresBlockStorage::new(pool, actions, cursor); + let store = + PostgresBlockStorage::new(ctx, pool, actions, cursor, &executor_config.genesis_block) + .await?; let buffered = Arc::new(Buffered::new(store)); let store = buffered.inner(); let executor = Executor::new(executor_config, node_key, buffered.clone()) diff --git a/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs index d4e95c9e2d4..a490147512e 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs @@ -13,15 +13,18 @@ use zksync_concurrency::{ use zksync_consensus_roles::validator::{BlockNumber, FinalBlock}; use zksync_consensus_storage::{BlockStore, StorageError, StorageResult}; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_types::{Address, MiniblockNumber}; +use zksync_types::{api::en::SyncBlock, block::ConsensusBlockFields, Address, MiniblockNumber}; #[cfg(test)] mod tests; use super::{buffered::ContiguousBlockStore, conversions::sync_block_to_consensus_block}; -use crate::sync_layer::{ - fetcher::{FetchedBlock, FetcherCursor}, - sync_action::{ActionQueueSender, SyncAction}, +use crate::{ + consensus, + sync_layer::{ + fetcher::{FetchedBlock, FetcherCursor}, + sync_action::{ActionQueueSender, SyncAction}, + }, }; #[derive(Debug)] @@ -67,6 +70,7 @@ impl CursorWithCachedBlock { #[derive(Debug)] pub(super) struct PostgresBlockStorage { pool: ConnectionPool, + 
first_block_number: MiniblockNumber, actions: ActionQueueSender, block_sender: watch::Sender, cursor: Mutex, @@ -74,16 +78,109 @@ pub(super) struct PostgresBlockStorage { impl PostgresBlockStorage { /// Creates a new storage handle. `pool` should have multiple connections to work efficiently. - pub fn new(pool: ConnectionPool, actions: ActionQueueSender, cursor: FetcherCursor) -> Self { + pub async fn new( + ctx: &ctx::Ctx, + pool: ConnectionPool, + actions: ActionQueueSender, + cursor: FetcherCursor, + genesis_block: &FinalBlock, + ) -> StorageResult { + let mut storage = ctx + .wait(pool.access_storage_tagged("sync_layer")) + .await? + .map_err(StorageError::Database)?; + Self::ensure_genesis_block(ctx, &mut storage, genesis_block).await?; + drop(storage); + + let first_block_number = u32::try_from(genesis_block.header.number.0) + .context("Block number overflow for genesis block") + .map_err(StorageError::Database)?; + let first_block_number = MiniblockNumber(first_block_number); + + Ok(Self::new_unchecked( + pool, + first_block_number, + actions, + cursor, + )) + } + + fn new_unchecked( + pool: ConnectionPool, + first_block_number: MiniblockNumber, + actions: ActionQueueSender, + cursor: FetcherCursor, + ) -> Self { let current_block_number = cursor.next_miniblock.0.saturating_sub(1).into(); Self { pool, + first_block_number, actions, block_sender: watch::channel(BlockNumber(current_block_number)).0, cursor: Mutex::new(cursor.into()), } } + async fn ensure_genesis_block( + ctx: &ctx::Ctx, + storage: &mut StorageProcessor<'_>, + genesis_block: &FinalBlock, + ) -> StorageResult<()> { + let block_number = u32::try_from(genesis_block.header.number.0) + .context("Block number overflow for genesis block") + .map_err(StorageError::Database)?; + let block = Self::sync_block(ctx, storage, MiniblockNumber(block_number)).await?; + let block = block + .with_context(|| { + format!("Genesis block #{block_number} (first block with consensus data) is not present in Postgres") 
+ }) + .map_err(StorageError::Database)?; + let actual_consensus_fields = block.consensus.clone(); + + // Some of the following checks are duplicated in `Executor` initialization, but it's necessary + // to run them if the genesis consensus block is not present locally. + let expected_payload = consensus::Payload::decode(&genesis_block.payload) + .context("Cannot decode genesis block payload") + .map_err(StorageError::Database)?; + let actual_payload: consensus::Payload = + block.try_into().map_err(StorageError::Database)?; + if actual_payload != expected_payload { + let err = anyhow::anyhow!( + "Genesis block payload from Postgres {actual_payload:?} does not match the configured one \ + {expected_payload:?}" + ); + return Err(StorageError::Database(err)); + } + + let expected_consensus_fields = ConsensusBlockFields { + parent: genesis_block.header.parent, + justification: genesis_block.justification.clone(), + }; + if let Some(actual_consensus_fields) = &actual_consensus_fields { + // While justifications may differ among nodes for an arbitrary block, we assume that + // the genesis block has a hardcoded justification. + if *actual_consensus_fields != expected_consensus_fields { + let err = anyhow::anyhow!( + "Genesis block consensus fields in Postgres {actual_consensus_fields:?} do not match \ + the configured ones {expected_consensus_fields:?}" + ); + return Err(StorageError::Database(err)); + } + } else { + tracing::info!( + "Postgres doesn't have consensus fields for genesis block; saving {expected_consensus_fields:?}" + ); + ctx.wait(storage.blocks_dal().set_miniblock_consensus_fields( + MiniblockNumber(block_number), + &expected_consensus_fields, + )) + .await? + .context("Failed saving consensus fields for genesis block") + .map_err(StorageError::Database)?; + } + Ok(()) + } + /// Runs background tasks for this store. 
This method **must** be spawned as a background task /// which should be running as long as the [`PostgresBlockStorage`] is in use; otherwise, /// it will function incorrectly. @@ -116,22 +213,28 @@ impl PostgresBlockStorage { .map_err(StorageError::Database) } + async fn sync_block( + ctx: &ctx::Ctx, + storage: &mut StorageProcessor<'_>, + number: MiniblockNumber, + ) -> StorageResult> { + let operator_address = Address::default(); // FIXME: where to get this address from? + ctx.wait( + storage + .sync_dal() + .sync_block(number, operator_address, true), + ) + .await? + .with_context(|| format!("Failed getting miniblock #{number} from Postgres")) + .map_err(StorageError::Database) + } + async fn block( ctx: &ctx::Ctx, storage: &mut StorageProcessor<'_>, number: MiniblockNumber, ) -> StorageResult> { - let operator_address = Address::default(); // FIXME: where to get this address from? - let Some(block) = ctx - .wait( - storage - .sync_dal() - .sync_block(number, operator_address, true), - ) - .await? - .with_context(|| format!("Failed getting miniblock #{number} from Postgres")) - .map_err(StorageError::Database)? - else { + let Some(block) = Self::sync_block(ctx, storage, number).await? else { return Ok(None); }; let block = sync_block_to_consensus_block(block).map_err(StorageError::Database)?; @@ -167,7 +270,7 @@ impl BlockStore for PostgresBlockStorage { async fn first_block(&self, ctx: &ctx::Ctx) -> StorageResult { let mut storage = self.storage(ctx).await?; - Self::block(ctx, &mut storage, MiniblockNumber(0)) + Self::block(ctx, &mut storage, self.first_block_number) .await?
.context("Genesis miniblock not present in Postgres") .map_err(StorageError::Database) @@ -182,19 +285,33 @@ impl BlockStore for PostgresBlockStorage { ctx: &ctx::Ctx, number: BlockNumber, ) -> StorageResult> { - let number = u32::try_from(number.0) - .context("block number is too large") - .map_err(StorageError::Database)?; + let Ok(number) = u32::try_from(number.0) else { + return Ok(None); + }; + let number = MiniblockNumber(number); + if number < self.first_block_number { + return Ok(None); + } let mut storage = self.storage(ctx).await?; - Self::block(ctx, &mut storage, MiniblockNumber(number)).await + Self::block(ctx, &mut storage, number).await } async fn missing_block_numbers( &self, - _ctx: &ctx::Ctx, - _range: ops::Range, + ctx: &ctx::Ctx, + range: ops::Range, ) -> StorageResult> { - Ok(vec![]) // The storage never has missing blocks by construction + let mut output = vec![]; + let first_block_number = u64::from(self.first_block_number.0); + let numbers_before_first_block = (range.start.0..first_block_number).map(BlockNumber); + output.extend(numbers_before_first_block); + + let last_block_number = self.sealed_miniblock_number(ctx).await?; + let numbers_after_last_block = (last_block_number.next().0..range.end.0).map(BlockNumber); + output.extend(numbers_after_last_block); + + // By design, no blocks are missing in the `first_block_number..=last_block_number` range. 
+ Ok(output) } fn subscribe_to_block_writes(&self) -> watch::Receiver { diff --git a/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs index 437c5188330..cfd14f78411 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs @@ -2,7 +2,8 @@ use rand::{thread_rng, Rng}; -use zksync_concurrency::scope; +use zksync_concurrency::{scope, testonly::abort_on_panic}; +use zksync_consensus_roles::validator; use zksync_types::L2ChainId; use super::*; @@ -11,7 +12,7 @@ use crate::{ sync_layer::{ gossip::tests::{ add_consensus_fields, assert_first_block_actions, assert_second_block_actions, - load_final_block, + block_payload, create_genesis_block, load_final_block, }, tests::run_state_keeper_with_multiple_miniblocks, ActionQueue, @@ -22,15 +23,21 @@ const TEST_TIMEOUT: time::Duration = time::Duration::seconds(10); #[tokio::test] async fn block_store_basics_for_postgres() { + abort_on_panic(); let pool = ConnectionPool::test_pool().await; run_state_keeper_with_multiple_miniblocks(pool.clone()).await; let mut storage = pool.access_storage().await.unwrap(); - add_consensus_fields(&mut storage, &thread_rng().gen(), 3).await; + add_consensus_fields(&mut storage, &thread_rng().gen(), 0..3).await; let cursor = FetcherCursor::new(&mut storage).await.unwrap(); drop(storage); let (actions_sender, _) = ActionQueue::new(); - let storage = PostgresBlockStorage::new(pool.clone(), actions_sender, cursor); + let storage = PostgresBlockStorage::new_unchecked( + pool.clone(), + MiniblockNumber(0), + actions_sender, + cursor, + ); let ctx = &ctx::test_root(&ctx::RealClock); let genesis_block = BlockStore::first_block(&storage, ctx).await.unwrap(); @@ -52,6 +59,7 @@ async fn block_store_basics_for_postgres() { #[tokio::test] async fn subscribing_to_block_updates_for_postgres() { + abort_on_panic(); let pool = ConnectionPool::test_pool().await; 
let mut storage = pool.access_storage().await.unwrap(); if storage.blocks_dal().is_genesis_needed().await.unwrap() { @@ -64,7 +72,12 @@ async fn subscribing_to_block_updates_for_postgres() { // `ContiguousBlockStore`), but for testing subscriptions this is fine. drop(storage); let (actions_sender, _) = ActionQueue::new(); - let storage = PostgresBlockStorage::new(pool.clone(), actions_sender, cursor); + let storage = PostgresBlockStorage::new_unchecked( + pool.clone(), + MiniblockNumber(0), + actions_sender, + cursor, + ); let mut subscriber = storage.subscribe_to_block_writes(); let ctx = &ctx::test_root(&ctx::RealClock); @@ -90,11 +103,12 @@ async fn subscribing_to_block_updates_for_postgres() { #[tokio::test] async fn processing_new_blocks() { + abort_on_panic(); let pool = ConnectionPool::test_pool().await; run_state_keeper_with_multiple_miniblocks(pool.clone()).await; let mut storage = pool.access_storage().await.unwrap(); - add_consensus_fields(&mut storage, &thread_rng().gen(), 3).await; + add_consensus_fields(&mut storage, &thread_rng().gen(), 0..3).await; let first_block = load_final_block(&mut storage, 1).await; let second_block = load_final_block(&mut storage, 2).await; storage @@ -110,7 +124,12 @@ async fn processing_new_blocks() { drop(storage); let (actions_sender, mut actions) = ActionQueue::new(); - let storage = PostgresBlockStorage::new(pool.clone(), actions_sender, cursor); + let storage = PostgresBlockStorage::new_unchecked( + pool.clone(), + MiniblockNumber(0), + actions_sender, + cursor, + ); let ctx = &ctx::test_root(&ctx::RealClock); let ctx = &ctx.with_timeout(TEST_TIMEOUT); storage @@ -125,3 +144,194 @@ async fn processing_new_blocks() { .unwrap(); assert_second_block_actions(&mut actions).await; } + +#[tokio::test] +async fn ensuring_consensus_fields_for_genesis_block() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let pool = ConnectionPool::test_pool().await; + let mut storage = 
pool.access_storage().await.unwrap(); + if storage.blocks_dal().is_genesis_needed().await.unwrap() { + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + } + let cursor = FetcherCursor::new(&mut storage).await.unwrap(); + let block_payload = block_payload(&mut storage, 0).await.encode(); + drop(storage); + + let validator_key = validator::SecretKey::generate(&mut ctx.rng()); + let genesis_block = create_genesis_block(&validator_key, 0, block_payload.clone()); + + let (actions_sender, _) = ActionQueue::new(); + PostgresBlockStorage::new(ctx, pool.clone(), actions_sender, cursor, &genesis_block) + .await + .unwrap(); + + // Check that the consensus fields are persisted for the genesis block. + let mut storage = pool.access_storage().await.unwrap(); + let sync_block = storage + .sync_dal() + .sync_block(MiniblockNumber(0), Address::default(), false) + .await + .unwrap() + .expect("No genesis block"); + assert!(sync_block.consensus.is_some()); + let cursor = FetcherCursor::new(&mut storage).await.unwrap(); + let other_cursor = FetcherCursor::new(&mut storage).await.unwrap(); + drop(storage); + + // Check that the storage can be initialized again. + let (actions_sender, _) = ActionQueue::new(); + PostgresBlockStorage::new(ctx, pool.clone(), actions_sender, cursor, &genesis_block) + .await + .unwrap(); + + // Create a genesis block with another validator. + let validator_key = validator::SecretKey::generate(&mut ctx.rng()); + let other_genesis_block = create_genesis_block(&validator_key, 0, block_payload); + + // Storage should not be able to initialize with other genesis block. 
+ let (actions_sender, _) = ActionQueue::new(); + PostgresBlockStorage::new( + ctx, + pool, + actions_sender, + other_cursor, + &other_genesis_block, + ) + .await + .unwrap_err(); +} + +#[tokio::test] +async fn genesis_block_payload_mismatch() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + if storage.blocks_dal().is_genesis_needed().await.unwrap() { + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + } + let cursor = FetcherCursor::new(&mut storage).await.unwrap(); + let other_cursor = FetcherCursor::new(&mut storage).await.unwrap(); + + let bogus_block_payload = validator::Payload(vec![]); + let validator_key = validator::SecretKey::generate(&mut ctx.rng()); + let genesis_block = create_genesis_block(&validator_key, 0, bogus_block_payload); + + let (actions_sender, _) = ActionQueue::new(); + PostgresBlockStorage::new(ctx, pool.clone(), actions_sender, cursor, &genesis_block) + .await + .unwrap_err(); + + let mut bogus_block_payload = block_payload(&mut storage, 0).await; + bogus_block_payload.timestamp += 1; + let genesis_block = create_genesis_block(&validator_key, 0, bogus_block_payload.encode()); + + let (actions_sender, _) = ActionQueue::new(); + PostgresBlockStorage::new( + ctx, + pool.clone(), + actions_sender, + other_cursor, + &genesis_block, + ) + .await + .unwrap_err(); +} + +#[tokio::test] +async fn missing_genesis_block() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + if storage.blocks_dal().is_genesis_needed().await.unwrap() { + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + } + let cursor = FetcherCursor::new(&mut storage).await.unwrap(); + let block_payload = block_payload(&mut 
storage, 0).await.encode(); + drop(storage); + + // Create a genesis block for the (non-existing) block #2. + let validator_key = validator::SecretKey::generate(&mut ctx.rng()); + let genesis_block = create_genesis_block(&validator_key, 2, block_payload.clone()); + + let (actions_sender, _) = ActionQueue::new(); + PostgresBlockStorage::new(ctx, pool, actions_sender, cursor, &genesis_block) + .await + .unwrap_err(); +} + +#[tokio::test] +async fn using_non_zero_genesis_block() { + abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let pool = ConnectionPool::test_pool().await; + run_state_keeper_with_multiple_miniblocks(pool.clone()).await; + + let mut storage = pool.access_storage().await.unwrap(); + let cursor = FetcherCursor::new(&mut storage).await.unwrap(); + let block_payload = block_payload(&mut storage, 2).await.encode(); + drop(storage); + + let validator_key = validator::SecretKey::generate(&mut ctx.rng()); + let genesis_block = create_genesis_block(&validator_key, 2, block_payload.clone()); + + let (actions_sender, _) = ActionQueue::new(); + let store = PostgresBlockStorage::new(ctx, pool, actions_sender, cursor, &genesis_block) + .await + .unwrap(); + + let head_block = store.head_block(ctx).await.unwrap(); + assert_eq!(head_block.header.number, BlockNumber(2)); + assert_eq!( + head_block.header.parent, + validator::BlockHeaderHash::from_bytes([0; 32]) + ); + let first_block = store.first_block(ctx).await.unwrap(); + assert_eq!(first_block.header.number, BlockNumber(2)); + let last_contiguous_block_number = store.last_contiguous_block_number(ctx).await.unwrap(); + assert_eq!(last_contiguous_block_number, BlockNumber(2)); + + let block = store.block(ctx, BlockNumber(2)).await.unwrap(); + assert_eq!(block, Some(head_block)); + for number in [0, 1, 3] { + let missing_block = store.block(ctx, BlockNumber(number)).await.unwrap(); + assert!(missing_block.is_none()); + } + + let missing_blocks = store + .missing_block_numbers(ctx, 
BlockNumber(0)..BlockNumber(5)) + .await + .unwrap(); + assert_eq!( + missing_blocks, + [ + BlockNumber(0), + BlockNumber(1), + BlockNumber(3), + BlockNumber(4) + ] + ); + let missing_blocks = store + .missing_block_numbers(ctx, BlockNumber(0)..BlockNumber(2)) + .await + .unwrap(); + assert_eq!(missing_blocks, [BlockNumber(0), BlockNumber(1)]); + let missing_blocks = store + .missing_block_numbers(ctx, BlockNumber(2)..BlockNumber(5)) + .await + .unwrap(); + assert_eq!(missing_blocks, [BlockNumber(3), BlockNumber(4)]); + let missing_blocks = store + .missing_block_numbers(ctx, BlockNumber(2)..BlockNumber(3)) + .await + .unwrap(); + assert_eq!(missing_blocks, []); +} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/tests.rs index ca3ce29f4d3..30597189f0b 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/tests.rs @@ -3,12 +3,16 @@ use assert_matches::assert_matches; use test_casing::{test_casing, Product}; -use zksync_concurrency::{ctx, scope, time}; +use std::ops; + +use zksync_concurrency::{ctx, scope, testonly::abort_on_panic, time}; use zksync_consensus_executor::testonly::FullValidatorConfig; use zksync_consensus_roles::validator::{self, FinalBlock}; use zksync_consensus_storage::{InMemoryStorage, WriteBlockStore}; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_types::{block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber}; +use zksync_types::{ + api::en::SyncBlock, block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber, H256, +}; use super::*; use crate::{ @@ -26,40 +30,49 @@ use crate::{ const CLOCK_SPEEDUP: i64 = 20; const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50 * CLOCK_SPEEDUP); +async fn load_sync_block(storage: &mut StorageProcessor<'_>, number: u32) -> SyncBlock { + storage + .sync_dal() + .sync_block(MiniblockNumber(number), Address::default(), true) + .await + 
.unwrap() + .unwrap_or_else(|| panic!("no sync block #{number}")) +} + /// Loads a block from the storage and converts it to a `FinalBlock`. pub(super) async fn load_final_block( storage: &mut StorageProcessor<'_>, number: u32, ) -> FinalBlock { - let sync_block = storage - .sync_dal() - .sync_block(MiniblockNumber(number), Address::repeat_byte(1), true) - .await - .unwrap() - .unwrap_or_else(|| panic!("no sync block #{number}")); + let sync_block = load_sync_block(storage, number).await; conversions::sync_block_to_consensus_block(sync_block).unwrap() } -pub async fn block_payload(storage: &mut StorageProcessor<'_>, number: u32) -> validator::Payload { - let sync_block = storage - .sync_dal() - .sync_block(MiniblockNumber(number), Address::repeat_byte(1), true) - .await - .unwrap() - .unwrap_or_else(|| panic!("no sync block #{number}")); - consensus::Payload::try_from(sync_block).unwrap().encode() +fn convert_sync_blocks(sync_blocks: Vec) -> Vec { + sync_blocks + .into_iter() + .map(|sync_block| conversions::sync_block_to_consensus_block(sync_block).unwrap()) + .collect() +} + +pub(super) async fn block_payload( + storage: &mut StorageProcessor<'_>, + number: u32, +) -> consensus::Payload { + let sync_block = load_sync_block(storage, number).await; + consensus::Payload::try_from(sync_block).unwrap() } /// Adds consensus information for the specified `count` of miniblocks, starting from the genesis. 
pub(super) async fn add_consensus_fields( storage: &mut StorageProcessor<'_>, validator_key: &validator::SecretKey, - count: u32, + block_numbers: ops::Range, ) { let mut prev_block_hash = validator::BlockHeaderHash::from_bytes([0; 32]); let validator_set = validator::ValidatorSet::new([validator_key.public()]).unwrap(); - for number in 0..count { - let payload = block_payload(storage, number).await; + for number in block_numbers { + let payload = block_payload(storage, number).await.encode(); let block_header = validator::BlockHeader { parent: prev_block_hash, number: validator::BlockNumber(number.into()), @@ -87,6 +100,33 @@ pub(super) async fn add_consensus_fields( } } +/// Creates a genesis block for the consensus with the specified number / payload authored by a single validator. +pub(super) fn create_genesis_block( + validator_key: &validator::SecretKey, + number: u64, + payload: validator::Payload, +) -> FinalBlock { + let block_header = validator::BlockHeader { + parent: validator::BlockHeaderHash::from_bytes([0; 32]), + number: validator::BlockNumber(number), + payload: payload.hash(), + }; + let validator_set = validator::ValidatorSet::new([validator_key.public()]).unwrap(); + let replica_commit = validator::ReplicaCommit { + protocol_version: validator::CURRENT_VERSION, + view: validator::ViewNumber(number), + proposal: block_header, + }; + let replica_commit = validator_key.sign_msg(replica_commit); + let justification = + validator::CommitQC::from(&[replica_commit], &validator_set).expect("Failed creating QC"); + FinalBlock { + header: block_header, + payload, + justification, + } +} + pub(super) async fn assert_first_block_actions(actions: &mut ActionQueue) -> Vec { let mut received_actions = vec![]; while !matches!(received_actions.last(), Some(SyncAction::SealMiniblock(_))) { @@ -137,20 +177,21 @@ pub(super) async fn assert_second_block_actions(actions: &mut ActionQueue) -> Ve #[test_casing(4, Product(([false, true], [false, true])))] #[tokio::test] 
async fn syncing_via_gossip_fetcher(delay_first_block: bool, delay_second_block: bool) { - zksync_concurrency::testonly::abort_on_panic(); + abort_on_panic(); let pool = ConnectionPool::test_pool().await; let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await; let mut storage = pool.access_storage().await.unwrap(); - let genesis_block_payload = block_payload(&mut storage, 0).await; + let genesis_block_payload = block_payload(&mut storage, 0).await.encode(); let ctx = &ctx::test_root(&ctx::AffineClock::new(CLOCK_SPEEDUP as f64)); let rng = &mut ctx.rng(); let mut validator = FullValidatorConfig::for_single_validator(rng, genesis_block_payload); let validator_set = validator.node_config.validators.clone(); let external_node = validator.connect_full_node(rng); - let (genesis_block, blocks) = - get_blocks_and_reset_storage(storage, &validator.validator_key).await; + let genesis_block = validator.node_config.genesis_block.clone(); + add_consensus_fields(&mut storage, &validator.validator_key, 0..3).await; + let blocks = convert_sync_blocks(reset_storage(storage).await); let [first_block, second_block] = blocks.as_slice() else { unreachable!("Unexpected blocks in storage: {blocks:?}"); }; @@ -228,21 +269,16 @@ async fn syncing_via_gossip_fetcher(delay_first_block: bool, delay_second_block: } } -async fn get_blocks_and_reset_storage( - mut storage: StorageProcessor<'_>, - validator_key: &validator::SecretKey, -) -> (FinalBlock, Vec) { +/// Returns the removed blocks. 
+async fn reset_storage(mut storage: StorageProcessor<'_>) -> Vec { let sealed_miniblock_number = storage .blocks_dal() .get_sealed_miniblock_number() .await .unwrap(); - add_consensus_fields(&mut storage, validator_key, sealed_miniblock_number.0 + 1).await; - let genesis_block = load_final_block(&mut storage, 0).await; - - let mut blocks = Vec::with_capacity(sealed_miniblock_number.0 as usize); + let mut blocks = vec![]; for number in 1..=sealed_miniblock_number.0 { - blocks.push(load_final_block(&mut storage, number).await); + blocks.push(load_sync_block(&mut storage, number).await); } storage @@ -259,29 +295,30 @@ async fn get_blocks_and_reset_storage( .delete_l1_batches(L1BatchNumber(0)) .await .unwrap(); - (genesis_block, blocks) + blocks } #[test_casing(4, [3, 2, 1, 0])] #[tokio::test] async fn syncing_via_gossip_fetcher_with_multiple_l1_batches(initial_block_count: usize) { assert!(initial_block_count <= 3); - zksync_concurrency::testonly::abort_on_panic(); + abort_on_panic(); let pool = ConnectionPool::test_pool().await; let tx_hashes = run_state_keeper_with_multiple_l1_batches(pool.clone()).await; let tx_hashes: Vec<_> = tx_hashes.iter().map(Vec::as_slice).collect(); let mut storage = pool.access_storage().await.unwrap(); - let genesis_block_payload = block_payload(&mut storage, 0).await; + let genesis_block_payload = block_payload(&mut storage, 0).await.encode(); let ctx = &ctx::test_root(&ctx::AffineClock::new(CLOCK_SPEEDUP as f64)); let rng = &mut ctx.rng(); let mut validator = FullValidatorConfig::for_single_validator(rng, genesis_block_payload); let validator_set = validator.node_config.validators.clone(); let external_node = validator.connect_full_node(rng); - let (genesis_block, blocks) = - get_blocks_and_reset_storage(storage, &validator.validator_key).await; + let genesis_block = validator.node_config.genesis_block.clone(); + add_consensus_fields(&mut storage, &validator.validator_key, 0..4).await; + let blocks = 
convert_sync_blocks(reset_storage(storage).await); assert_eq!(blocks.len(), 3); // 2 real + 1 fictive blocks tracing::trace!("Node storage reset"); let (initial_blocks, delayed_blocks) = blocks.split_at(initial_block_count); @@ -309,9 +346,8 @@ async fn syncing_via_gossip_fetcher_with_multiple_l1_batches(initial_block_count Ok(()) }); - let cloned_pool = pool.clone(); s.spawn_bg(async { - mock_l1_batch_hash_computation(cloned_pool, 1).await; + mock_l1_batch_hash_computation(pool.clone(), 1).await; Ok(()) }); s.spawn_bg(run_gossip_fetcher_inner( @@ -337,3 +373,138 @@ async fn syncing_via_gossip_fetcher_with_multiple_l1_batches(initial_block_count block.justification.verify(&validator_set, 1).unwrap(); } } + +#[test_casing(2, [1, 2])] +#[tokio::test] +async fn syncing_from_non_zero_block(first_block_number: u32) { + abort_on_panic(); + let pool = ConnectionPool::test_pool().await; + let tx_hashes = run_state_keeper_with_multiple_l1_batches(pool.clone()).await; + let tx_hashes: Vec<_> = tx_hashes.iter().map(Vec::as_slice).collect(); + + let mut storage = pool.access_storage().await.unwrap(); + let genesis_block_payload = block_payload(&mut storage, first_block_number) + .await + .encode(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(CLOCK_SPEEDUP as f64)); + let rng = &mut ctx.rng(); + let mut validator = + FullValidatorConfig::for_single_validator(rng, genesis_block_payload.clone()); + // Override the genesis block since it has an incorrect block number. 
+ let genesis_block = create_genesis_block( + &validator.validator_key, + first_block_number.into(), + genesis_block_payload, + ); + validator.node_config.genesis_block = genesis_block.clone(); + let validator_set = validator.node_config.validators.clone(); + let external_node = validator.connect_full_node(rng); + + add_consensus_fields( + &mut storage, + &validator.validator_key, + first_block_number..4, + ) + .await; + let mut initial_blocks = reset_storage(storage).await; + let delayed_blocks = initial_blocks.split_off(first_block_number as usize); + assert!(!initial_blocks.is_empty()); + assert!(!delayed_blocks.is_empty()); + let delayed_blocks = convert_sync_blocks(delayed_blocks); + + // Re-insert initial blocks to the storage. This allows to more precisely emulate node syncing + // (e.g., missing L1 batch relation for the latest blocks). + insert_sync_blocks(pool.clone(), initial_blocks, &tx_hashes).await; + tracing::trace!("Re-inserted blocks to node storage"); + + let validator_storage = Arc::new(InMemoryStorage::new(genesis_block)); + let validator = Executor::new( + validator.node_config, + validator.node_key, + validator_storage.clone(), + ) + .unwrap(); + + let tx_hashes = if first_block_number >= 2 { + &tx_hashes[1..] // Skip transactions in L1 batch #1, since they won't be executed + } else { + &tx_hashes + }; + let (actions_sender, actions) = ActionQueue::new(); + let state_keeper = StateKeeperHandles::new(pool.clone(), actions, tx_hashes).await; + scope::run!(ctx, |ctx, s| async { + s.spawn_bg(validator.run(ctx)); + s.spawn_bg(async { + for block in &delayed_blocks { + ctx.sleep(POLL_INTERVAL).await?; + validator_storage.put_block(ctx, block).await?; + } + Ok(()) + }); + + if first_block_number < 2 { + // L1 batch #1 will be sealed during the state keeper operation; we need to emulate + // computing metadata for it. 
+ s.spawn_bg(async { + mock_l1_batch_hash_computation(pool.clone(), 1).await; + Ok(()) + }); + } + + s.spawn_bg(run_gossip_fetcher_inner( + ctx, + pool.clone(), + actions_sender, + external_node.node_config, + external_node.node_key, + )); + + state_keeper + .wait(|state| state.get_local_block() == MiniblockNumber(3)) + .await; + Ok(()) + }) + .await + .unwrap(); + + // Check that received blocks have consensus fields persisted. + let mut storage = pool.access_storage().await.unwrap(); + for number in first_block_number..4 { + let block = load_final_block(&mut storage, number).await; + block.justification.verify(&validator_set, 1).unwrap(); + } +} + +async fn insert_sync_blocks(pool: ConnectionPool, blocks: Vec, tx_hashes: &[&[H256]]) { + let expected_block_number = blocks.last().expect("blocks cannot be empty").number; + let sealed_l1_batches = blocks + .iter() + .filter_map(|block| block.last_in_batch.then_some(block.l1_batch_number)); + let sealed_l1_batches: Vec<_> = sealed_l1_batches.collect(); + + let mut fetcher = FetcherCursor::new(&mut pool.access_storage().await.unwrap()) + .await + .unwrap(); + let (actions_sender, actions) = ActionQueue::new(); + let state_keeper = StateKeeperHandles::new(pool.clone(), actions, tx_hashes).await; + for block in blocks { + let block_actions = fetcher.advance(block.into()); + actions_sender.push_actions(block_actions).await; + } + + let hash_tasks: Vec<_> = sealed_l1_batches + .into_iter() + .map(|l1_batch_number| { + tokio::spawn(mock_l1_batch_hash_computation( + pool.clone(), + l1_batch_number.0, + )) + }) + .collect(); + state_keeper + .wait(|state| state.get_local_block() == expected_block_number) + .await; + for hash_task in hash_tasks { + hash_task.await.unwrap(); + } +} diff --git a/core/lib/zksync_core/src/sync_layer/sync_action.rs b/core/lib/zksync_core/src/sync_layer/sync_action.rs index b4f56999d4f..b278cb1c98e 100644 --- a/core/lib/zksync_core/src/sync_layer/sync_action.rs +++ 
b/core/lib/zksync_core/src/sync_layer/sync_action.rs @@ -2,7 +2,7 @@ use tokio::sync::mpsc; use zksync_types::{ block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, - Transaction, H256, + Transaction, }; use super::metrics::QUEUE_METRICS; @@ -137,7 +137,6 @@ pub(crate) enum SyncAction { protocol_version: ProtocolVersionId, // Miniblock number and virtual blocks count. first_miniblock_info: (MiniblockNumber, u32), - prev_miniblock_hash: H256, }, Miniblock { number: MiniblockNumber, @@ -180,7 +179,6 @@ mod tests { operator_address: Default::default(), protocol_version: ProtocolVersionId::latest(), first_miniblock_info: (1.into(), 1), - prev_miniblock_hash: H256::default(), } } diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 4a337bbf5dc..20bafc51cf6 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -137,7 +137,6 @@ fn open_l1_batch(number: u32, timestamp: u64, first_miniblock_number: u32) -> Sy operator_address: Default::default(), protocol_version: ProtocolVersionId::latest(), first_miniblock_info: (MiniblockNumber(first_miniblock_number), 1), - prev_miniblock_hash: H256::default(), } } @@ -404,7 +403,7 @@ pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number: let metadata = create_l1_batch_metadata(number); storage .blocks_dal() - .save_l1_batch_metadata(L1BatchNumber(1), &metadata, H256::zero(), false) + .save_l1_batch_metadata(L1BatchNumber(number), &metadata, H256::zero(), false) .await .unwrap(); break; @@ -574,15 +573,6 @@ async fn fetcher_with_real_server() { // Fill in transactions grouped in multiple miniblocks in the storage. 
let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await; let mut tx_hashes = VecDeque::from(tx_hashes); - let mut connection = pool.access_storage().await.unwrap(); - let genesis_miniblock_hash = connection - .blocks_dal() - .get_miniblock_header(MiniblockNumber(0)) - .await - .unwrap() - .expect("No genesis miniblock") - .hash; - drop(connection); // Start the API server. let network_config = NetworkConfig::for_tests(); @@ -598,7 +588,6 @@ async fn fetcher_with_real_server() { let client = ::json_rpc(&format!("http://{server_addr}/")).unwrap(); let fetcher_cursor = FetcherCursor { next_miniblock: MiniblockNumber(1), - prev_miniblock_hash: genesis_miniblock_hash, l1_batch: L1BatchNumber(0), }; let fetcher = fetcher_cursor.into_fetcher( From e05d955036c76a29f9b6e900872c69e20278e045 Mon Sep 17 00:00:00 2001 From: EmilLuta Date: Fri, 1 Dec 2023 11:10:25 +0100 Subject: [PATCH 075/115] fix(witness_generator): Disable BWIP dependency (#573) This revert is done to facilitate boojum upgrade on mainnet2. Without this, old provers would be halt and boojum upgrade could take longer than anticipated. `waiting_for_artifacts` forced witness to wait on BWIP run. `queued` makes them run instantly. - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. 
--------- Signed-off-by: Danil Co-authored-by: Danil --- core/lib/dal/sqlx-data.json | 30 +++++++++++------------ core/lib/dal/src/witness_generator_dal.rs | 2 +- spellcheck/era.dic | 1 + 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index 3776b4f84b3..2c958ce0394 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -7470,21 +7470,6 @@ }, "query": "SELECT number, timestamp, is_finished, l1_tx_count, l2_tx_count, fee_account_address, bloom, priority_ops_onchain_data, hash, parent_hash, commitment, compressed_write_logs, compressed_contracts, eth_prove_tx_id, eth_commit_tx_id, eth_execute_tx_id, merkle_root_hash, l2_to_l1_logs, l2_to_l1_messages, used_contract_hashes, compressed_initial_writes, compressed_repeated_writes, l2_l1_compressed_messages, l2_l1_merkle_root, l1_gas_price, l2_fair_gas_price, rollup_last_leaf_index, zkporter_is_available, bootloader_code_hash, default_aa_code_hash, base_fee_per_gas, aux_data_hash, pass_through_data_hash, meta_parameters_hash, protocol_version, compressed_state_diffs, system_logs, events_queue_commitment, bootloader_initial_content_commitment FROM l1_batches LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number WHERE eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL ORDER BY number LIMIT $1" }, - "8ff9d76b4791af1177231661847b6c8879ad625fd11c15de51a16c81d8712129": { - "describe": { - "columns": [], - "nullable": [], - "parameters": { - "Left": [ - "Int8", - "Bytea", - "Text", - "Int4" - ] - } - }, - "query": "INSERT INTO witness_inputs(l1_batch_number, merkle_tree_paths, merkel_tree_paths_blob_url, status, protocol_version, created_at, updated_at) VALUES ($1, $2, $3, 'waiting_for_artifacts', $4, now(), now()) ON CONFLICT (l1_batch_number) DO NOTHING" - }, "9051cc1a715e152afdd0c19739c76666b1a9b134e17601ef9fdf3dec5d2fc561": { "describe": { "columns": [ @@ -11182,6 +11167,21 @@ }, "query": "UPDATE 
l1_batches SET predicted_commit_gas_cost = $2, updated_at = now() WHERE number = $1" }, + "ec35fc5128cf59d19e6d65ed6d84fcc50fedce921405c4ce700dd2e08c990642": { + "describe": { + "columns": [], + "nullable": [], + "parameters": { + "Left": [ + "Int8", + "Bytea", + "Text", + "Int4" + ] + } + }, + "query": "INSERT INTO witness_inputs(l1_batch_number, merkle_tree_paths, merkel_tree_paths_blob_url, status, protocol_version, created_at, updated_at) VALUES ($1, $2, $3, 'queued', $4, now(), now()) ON CONFLICT (l1_batch_number) DO NOTHING" + }, "ed50c609371b4588964e29f8757c41973706710090a80eb025ec263ce3d019b4": { "describe": { "columns": [], diff --git a/core/lib/dal/src/witness_generator_dal.rs b/core/lib/dal/src/witness_generator_dal.rs index 983112ab35c..a8079a9dcce 100644 --- a/core/lib/dal/src/witness_generator_dal.rs +++ b/core/lib/dal/src/witness_generator_dal.rs @@ -728,7 +728,7 @@ impl WitnessGeneratorDal<'_, '_> { { sqlx::query!( "INSERT INTO witness_inputs(l1_batch_number, merkle_tree_paths, merkel_tree_paths_blob_url, status, protocol_version, created_at, updated_at) \ - VALUES ($1, $2, $3, 'waiting_for_artifacts', $4, now(), now()) \ + VALUES ($1, $2, $3, 'queued', $4, now(), now()) \ ON CONFLICT (l1_batch_number) DO NOTHING", block_number.0 as i64, // TODO(SMA-1476): remove the below column once blob is migrated to GCS. 
diff --git a/spellcheck/era.dic b/spellcheck/era.dic index 214efbcd595..666ee047fd2 100644 --- a/spellcheck/era.dic +++ b/spellcheck/era.dic @@ -262,6 +262,7 @@ sidechain sidechains tokenomics validator's +validator CHAINID PREVRANDAO ECDSA From 5994aaef62f43910629c5ec916799d6cd6ff0967 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Fri, 1 Dec 2023 12:12:27 +0100 Subject: [PATCH 076/115] chore(main): release core 18.4.0 (#560) :robot: I have created a release *beep* *boop* --- ## [18.4.0](https://github.com/matter-labs/zksync-era/compare/core-v18.3.1...core-v18.4.0) (2023-12-01) ### Features * adds spellchecker workflow, and corrects misspelled words ([#559](https://github.com/matter-labs/zksync-era/issues/559)) ([beac0a8](https://github.com/matter-labs/zksync-era/commit/beac0a85bb1535b05c395057171f197cd976bf82)) * **en:** Support arbitrary genesis block for external nodes ([#537](https://github.com/matter-labs/zksync-era/issues/537)) ([15d7eaf](https://github.com/matter-labs/zksync-era/commit/15d7eaf872e222338810243865cec9dff7f6e799)) * **merkle tree:** Remove enumeration index assignment from Merkle tree ([#551](https://github.com/matter-labs/zksync-era/issues/551)) ([e2c1b20](https://github.com/matter-labs/zksync-era/commit/e2c1b20e361e6ee2f5ac69cefe75d9c5575eb2f7)) * Restore commitment test in Boojum integration ([#539](https://github.com/matter-labs/zksync-era/issues/539)) ([06f510d](https://github.com/matter-labs/zksync-era/commit/06f510d00f855ddafaebb504f7ea799700221072)) ### Bug Fixes * Change no pending batches 404 error into a success response ([#279](https://github.com/matter-labs/zksync-era/issues/279)) ([e8fd805](https://github.com/matter-labs/zksync-era/commit/e8fd805c8be7980de7676bca87cfc2d445aab9e1)) * **vm:** Expose additional types and traits ([#563](https://github.com/matter-labs/zksync-era/issues/563)) 
([bd268ac](https://github.com/matter-labs/zksync-era/commit/bd268ac02bc3530c1d3247cb9496c3e13c2e52d9)) * **witness_generator:** Disable BWIP dependency ([#573](https://github.com/matter-labs/zksync-era/issues/573)) ([e05d955](https://github.com/matter-labs/zksync-era/commit/e05d955036c76a29f9b6e900872c69e20278e045)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index f5774af5944..9c7f15805a9 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "18.3.1", + "core": "18.4.0", "prover": "9.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index d34f9d4faf5..04f3f13e716 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,22 @@ # Changelog +## [18.4.0](https://github.com/matter-labs/zksync-era/compare/core-v18.3.1...core-v18.4.0) (2023-12-01) + + +### Features + +* adds spellchecker workflow, and corrects misspelled words ([#559](https://github.com/matter-labs/zksync-era/issues/559)) ([beac0a8](https://github.com/matter-labs/zksync-era/commit/beac0a85bb1535b05c395057171f197cd976bf82)) +* **en:** Support arbitrary genesis block for external nodes ([#537](https://github.com/matter-labs/zksync-era/issues/537)) ([15d7eaf](https://github.com/matter-labs/zksync-era/commit/15d7eaf872e222338810243865cec9dff7f6e799)) +* **merkle tree:** Remove enumeration index assignment from Merkle tree ([#551](https://github.com/matter-labs/zksync-era/issues/551)) ([e2c1b20](https://github.com/matter-labs/zksync-era/commit/e2c1b20e361e6ee2f5ac69cefe75d9c5575eb2f7)) +* Restore commitment test in Boojum integration 
([#539](https://github.com/matter-labs/zksync-era/issues/539)) ([06f510d](https://github.com/matter-labs/zksync-era/commit/06f510d00f855ddafaebb504f7ea799700221072)) + + +### Bug Fixes + +* Change no pending batches 404 error into a success response ([#279](https://github.com/matter-labs/zksync-era/issues/279)) ([e8fd805](https://github.com/matter-labs/zksync-era/commit/e8fd805c8be7980de7676bca87cfc2d445aab9e1)) +* **vm:** Expose additional types and traits ([#563](https://github.com/matter-labs/zksync-era/issues/563)) ([bd268ac](https://github.com/matter-labs/zksync-era/commit/bd268ac02bc3530c1d3247cb9496c3e13c2e52d9)) +* **witness_generator:** Disable BWIP dependency ([#573](https://github.com/matter-labs/zksync-era/issues/573)) ([e05d955](https://github.com/matter-labs/zksync-era/commit/e05d955036c76a29f9b6e900872c69e20278e045)) + ## [18.3.1](https://github.com/matter-labs/zksync-era/compare/core-v18.3.0...core-v18.3.1) (2023-11-28) From 0cd2c6b5ada568ffe01bce6d2dd8951457369141 Mon Sep 17 00:00:00 2001 From: Yury Akudovich Date: Fri, 1 Dec 2023 12:42:44 +0100 Subject: [PATCH 077/115] ci: Runs spellcheck in merge queue. (#574) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Runs spellcheck in merge queue. ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. 
--- .github/workflows/check-spelling.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check-spelling.yml b/.github/workflows/check-spelling.yml index 76fd6352c8e..9073401eab9 100644 --- a/.github/workflows/check-spelling.yml +++ b/.github/workflows/check-spelling.yml @@ -5,8 +5,9 @@ on: branches: - main pull_request: + merge_group: -env: +env: CARGO_TERM_COLOR: always jobs: @@ -17,7 +18,7 @@ jobs: uses: taiki-e/install-action@v2 with: tool: cargo-spellcheck - + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4 - name: Run cargo-spellcheck From 94a331952de64c02aeed6b0416b71ca7215ddece Mon Sep 17 00:00:00 2001 From: Karma <148863819+0xKarm@users.noreply.github.com> Date: Sat, 2 Dec 2023 05:11:48 +0800 Subject: [PATCH 078/115] chore: fix typo (#575) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - fix typo ## Why ❔ - fix typo ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. 
--------- Co-authored-by: Igor Aleksanov --- .../lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs index 0b6c7ebcfa8..0ab697f626f 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs @@ -91,7 +91,7 @@ pub(crate) fn get_debug_log( let msg = String::from_utf8(msg).expect("Invalid debug message"); let data = U256::from_big_endian(&data); - // For long data, it is better to use hex-encoding for greater readibility + // For long data, it is better to use hex-encoding for greater readability let data_str = if data > U256::from(u64::max_value()) { let mut bytes = [0u8; 32]; data.to_big_endian(&mut bytes); From 6e6e118c496c4a693f657da8e841a0bdf55b35db Mon Sep 17 00:00:00 2001 From: Todd <148772493+toddfil@users.noreply.github.com> Date: Sat, 2 Dec 2023 05:40:08 +0800 Subject: [PATCH 079/115] chore: fix typos in document (#577) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - fixed typo ## Why ❔ fix typos in document ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. 
--------- Co-authored-by: Igor Aleksanov --- .../multivm/src/versions/vm_virtual_blocks/utils/overhead.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs index 59b54888ee1..c17d619b464 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs @@ -23,7 +23,7 @@ pub fn derive_overhead( let gas_limit = U256::from(gas_limit); let encoded_len = U256::from(encoded_len); - // The MAX_TX_ERGS_LIMIT is formed in a way that may fullfills a single-instance circuits + // The MAX_TX_ERGS_LIMIT is formed in a way that may fulfills a single-instance circuits // if used in full. That is, within MAX_TX_ERGS_LIMIT it is possible to fully saturate all the single-instance // circuits. let overhead_for_single_instance_circuits = From c594323054f5243ca08d6db3f59e007f14e30f4f Mon Sep 17 00:00:00 2001 From: Santala <31094102+tranhoaison@users.noreply.github.com> Date: Sat, 2 Dec 2023 06:07:21 +0700 Subject: [PATCH 080/115] chore: Fix typos (#567) Hi, I have just resolve conflict #432 Co-authored-by: Igor Aleksanov --- core/lib/multivm/src/tracers/validator/mod.rs | 2 +- .../lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs | 2 +- .../src/versions/vm_1_3_2/oracles/tracer/validation.rs | 2 +- core/lib/multivm/src/versions/vm_1_3_2/tests/bootloader.rs | 2 +- core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs | 2 +- core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs | 2 +- core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs | 2 +- .../multivm/src/versions/vm_latest/bootloader_state/utils.rs | 2 +- .../multivm/src/versions/vm_latest/tests/require_eip712.rs | 2 +- core/lib/multivm/src/versions/vm_latest/tracers/utils.rs | 2 +- core/lib/multivm/src/versions/vm_latest/utils/overhead.rs | 2 +- 
core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs | 4 ++-- core/lib/multivm/src/versions/vm_m5/transaction_data.rs | 2 +- core/lib/multivm/src/versions/vm_m5/vm_instance.rs | 2 +- core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs | 2 +- .../multivm/src/versions/vm_m6/oracles/tracer/validation.rs | 2 +- core/lib/multivm/src/versions/vm_m6/transaction_data.rs | 2 +- core/lib/multivm/src/versions/vm_m6/vm_instance.rs | 2 +- core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs | 2 +- .../versions/vm_refunds_enhancement/bootloader_state/utils.rs | 2 +- .../versions/vm_refunds_enhancement/tests/require_eip712.rs | 2 +- .../src/versions/vm_refunds_enhancement/tracers/utils.rs | 2 +- .../src/versions/vm_refunds_enhancement/utils/overhead.rs | 2 +- .../src/versions/vm_virtual_blocks/bootloader_state/utils.rs | 2 +- .../src/versions/vm_virtual_blocks/tests/require_eip712.rs | 2 +- .../contracts/custom-account/custom-paymaster.sol | 2 +- etc/env/base/rust.toml | 2 +- etc/env/ext-node-docker.toml | 2 +- etc/env/ext-node.toml | 2 +- 29 files changed, 30 insertions(+), 30 deletions(-) diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index 26d3b0ad926..718edf1a964 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -104,7 +104,7 @@ impl ValidationTracer { return true; } - // The pair of MSG_VALUE_SIMULATOR_ADDRESS & L2_ETH_TOKEN_ADDRESS simulates the behavior of transfering ETH + // The pair of MSG_VALUE_SIMULATOR_ADDRESS & L2_ETH_TOKEN_ADDRESS simulates the behavior of transferring ETH // that is safe for the DDoS protection rules. 
if valid_eth_token_call(address, msg_sender) { return true; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs index 9c9e87c065d..3b3b99991ed 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs @@ -84,7 +84,7 @@ pub(crate) fn get_debug_log( let msg = String::from_utf8(msg).expect("Invalid debug message"); let data = U256::from_big_endian(&data); - // For long data, it is better to use hex-encoding for greater readibility + // For long data, it is better to use hex-encoding for greater readability let data_str = if data > U256::from(u64::max_value()) { let mut bytes = [0u8; 32]; data.to_big_endian(&mut bytes); diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs index ee1587df3b0..c9ee54f35ba 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs @@ -223,7 +223,7 @@ impl ValidationTracer { return true; } - // The pair of MSG_VALUE_SIMULATOR_ADDRESS & L2_ETH_TOKEN_ADDRESS simulates the behavior of transfering ETH + // The pair of MSG_VALUE_SIMULATOR_ADDRESS & L2_ETH_TOKEN_ADDRESS simulates the behavior of transferring ETH // that is safe for the DDoS protection rules. if valid_eth_token_call(address, msg_sender) { return true; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/tests/bootloader.rs index da9087afedd..b42c17363b0 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/tests/bootloader.rs @@ -2087,7 +2087,7 @@ // vm_test_env.get_eth_balance(&beneficiary), // U256::from(888000088) // ); -// // Make sure that the tokens were transfered from the AA account. 
+// // Make sure that the tokens were transferred from the AA account. // assert_eq!( // private_account_balance, // vm_test_env.get_eth_balance(&private_address) diff --git a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs index 2d9dd1cb7aa..d3a96dc06a7 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs @@ -242,7 +242,7 @@ pub fn derive_overhead( let gas_limit = U256::from(gas_limit); let encoded_len = U256::from(encoded_len); - // The MAX_TX_ERGS_LIMIT is formed in a way that may fullfills a single-instance circuits + // The MAX_TX_ERGS_LIMIT is formed in a way that may fulfills a single-instance circuits // if used in full. That is, within MAX_TX_ERGS_LIMIT it is possible to fully saturate all the single-instance // circuits. let overhead_for_single_instance_circuits = diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs index 3e157e74c02..8b7c416522e 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs @@ -85,7 +85,7 @@ pub struct VmExecutionResult { pub l2_to_l1_logs: Vec, pub return_data: Vec, - /// Value denoting the amount of gas spent withing VM invocation. + /// Value denoting the amount of gas spent within VM invocation. /// Note that return value represents the difference between the amount of gas /// available to VM before and after execution. 
/// diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs index d5d3cba4a23..c2ff035c669 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs @@ -589,7 +589,7 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( let encoding_length = encoded_tx.len(); memory.extend((tx_description_offset..tx_description_offset + encoding_length).zip(encoded_tx)); - // Note, +1 is moving for poitner + // Note, +1 is moving for pointer let compressed_bytecodes_offset = COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size; diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs index 78b98f0a404..7e76f3faeff 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs @@ -71,7 +71,7 @@ pub(super) fn apply_tx_to_memory( }; apply_l2_block(memory, &bootloader_l2_block, tx_index); - // Note, +1 is moving for poitner + // Note, +1 is moving for pointer let compressed_bytecodes_offset = COMPRESSED_BYTECODES_OFFSET + 1 + compressed_bytecodes_size; let encoded_compressed_bytecodes = diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index ad1d405a075..1ad6f351206 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -109,7 +109,7 @@ async fn test_require_eip712() { vm.get_eth_balance(beneficiary.address), U256::from(888000088) ); - // Make sure that the tokens were transfered from the AA account. + // Make sure that the tokens were transferred from the AA account. 
assert_eq!( private_account_balance, vm.get_eth_balance(private_account.address) diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs index c91d2f3ce0c..52ff84f8c3c 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs @@ -94,7 +94,7 @@ pub(crate) fn get_debug_log( let msg = String::from_utf8(msg).expect("Invalid debug message"); let data = U256::from_big_endian(&data); - // For long data, it is better to use hex-encoding for greater readibility + // For long data, it is better to use hex-encoding for greater readability let data_str = if data > U256::from(u64::max_value()) { let mut bytes = [0u8; 32]; data.to_big_endian(&mut bytes); diff --git a/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs b/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs index a4012e540ed..c977267db8f 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs @@ -23,7 +23,7 @@ pub fn derive_overhead( let gas_limit = U256::from(gas_limit); let encoded_len = U256::from(encoded_len); - // The MAX_TX_ERGS_LIMIT is formed in a way that may fullfills a single-instance circuits + // The MAX_TX_ERGS_LIMIT is formed in a way that may fulfills a single-instance circuits // if used in full. That is, within MAX_TX_ERGS_LIMIT it is possible to fully saturate all the single-instance // circuits. 
let overhead_for_single_instance_circuits = diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs b/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs index 96ba04e85aa..a9e3c32786a 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs @@ -306,7 +306,7 @@ impl ValidationTracer { return true; } - // The pair of MSG_VALUE_SIMULATOR_ADDRESS & L2_ETH_TOKEN_ADDRESS simulates the behavior of transfering ETH + // The pair of MSG_VALUE_SIMULATOR_ADDRESS & L2_ETH_TOKEN_ADDRESS simulates the behavior of transferring ETH // that is safe for the DDoS protection rules. if valid_eth_token_call(address, msg_sender) { return true; @@ -801,7 +801,7 @@ fn get_debug_log(state: &VmLocalStateData<'_>, memory: &SimpleMemory) -> String let msg = String::from_utf8(msg).expect("Invalid debug message"); let data = U256::from_big_endian(&data); - // For long data, it is better to use hex-encoding for greater readibility + // For long data, it is better to use hex-encoding for greater readability let data_str = if data > U256::from(u64::max_value()) { let mut bytes = [0u8; 32]; data.to_big_endian(&mut bytes); diff --git a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs index b749ff09275..819f22a5324 100644 --- a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs @@ -225,7 +225,7 @@ pub fn derive_overhead(gas_limit: u32, gas_price_per_pubdata: u32, encoded_len: let gas_price_per_pubdata = U256::from(gas_price_per_pubdata); let encoded_len = U256::from(encoded_len); - // The MAX_TX_ERGS_LIMIT is formed in a way that may fullfills a single-instance circuits + // The MAX_TX_ERGS_LIMIT is formed in a way that may fulfills a single-instance circuits // if used in full. That is, within MAX_TX_ERGS_LIMIT it is possible to fully saturate all the single-instance // circuits. 
let overhead_for_single_instance_circuits = diff --git a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs index e92305003c7..5638ed1c023 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs @@ -103,7 +103,7 @@ pub struct VmExecutionResult { pub l2_to_l1_logs: Vec, pub return_data: Vec, - /// Value denoting the amount of gas spent withing VM invocation. + /// Value denoting the amount of gas spent within VM invocation. /// Note that return value represents the difference between the amount of gas /// available to VM before and after execution. /// diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs index 87aa81d69db..b256575726a 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs @@ -84,7 +84,7 @@ pub(crate) fn get_debug_log( let msg = String::from_utf8(msg).expect("Invalid debug message"); let data = U256::from_big_endian(&data); - // For long data, it is better to use hex-encoding for greater readibility + // For long data, it is better to use hex-encoding for greater readability let data_str = if data > U256::from(u64::max_value()) { let mut bytes = [0u8; 32]; data.to_big_endian(&mut bytes); diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs index 4e55ad4db00..13a0badd442 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs @@ -238,7 +238,7 @@ impl ValidationTracer { return true; } - // The pair of MSG_VALUE_SIMULATOR_ADDRESS & L2_ETH_TOKEN_ADDRESS simulates the behavior of transfering ETH + // The pair of MSG_VALUE_SIMULATOR_ADDRESS & L2_ETH_TOKEN_ADDRESS simulates the behavior of 
transferring ETH // that is safe for the DDoS protection rules. if valid_eth_token_call(address, msg_sender) { return true; diff --git a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs index bdecb9bf454..6779ce95fc3 100644 --- a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs @@ -243,7 +243,7 @@ pub fn derive_overhead( let gas_limit = U256::from(gas_limit); let encoded_len = U256::from(encoded_len); - // The MAX_TX_ERGS_LIMIT is formed in a way that may fullfills a single-instance circuits + // The MAX_TX_ERGS_LIMIT is formed in a way that may fulfills a single-instance circuits // if used in full. That is, within MAX_TX_ERGS_LIMIT it is possible to fully saturate all the single-instance // circuits. let overhead_for_single_instance_circuits = diff --git a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs index 468dd3fc72d..f15adde2584 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs @@ -103,7 +103,7 @@ pub struct VmExecutionResult { pub l2_to_l1_logs: Vec, pub return_data: Vec, - /// Value denoting the amount of gas spent withing VM invocation. + /// Value denoting the amount of gas spent within VM invocation. /// Note that return value represents the difference between the amount of gas /// available to VM before and after execution. 
/// diff --git a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs index 306c0ffc6de..998f41275b4 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs @@ -766,7 +766,7 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( let encoding_length = encoded_tx.len(); memory.extend((tx_description_offset..tx_description_offset + encoding_length).zip(encoded_tx)); - // Note, +1 is moving for poitner + // Note, +1 is moving for pointer let compressed_bytecodes_offset = COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs index dbb3fa0dff2..fed5108d7f3 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs @@ -69,7 +69,7 @@ pub(super) fn apply_tx_to_memory( }; apply_l2_block(memory, &bootloader_l2_block, tx_index); - // Note, +1 is moving for poitner + // Note, +1 is moving for pointer let compressed_bytecodes_offset = COMPRESSED_BYTECODES_OFFSET + 1 + compressed_bytecodes_size; let encoded_compressed_bytecodes = diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs index 253a3463c53..03a704841b0 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs @@ -109,7 +109,7 @@ async fn test_require_eip712() { vm.get_eth_balance(beneficiary.address), U256::from(888000088) ); - // Make sure that the tokens were transfered from the AA account. 
+ // Make sure that the tokens were transferred from the AA account. assert_eq!( private_account_balance, vm.get_eth_balance(private_account.address) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs index a9170c5a442..3026afea007 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs @@ -94,7 +94,7 @@ pub(crate) fn get_debug_log( let msg = String::from_utf8(msg).expect("Invalid debug message"); let data = U256::from_big_endian(&data); - // For long data, it is better to use hex-encoding for greater readibility + // For long data, it is better to use hex-encoding for greater readability let data_str = if data > U256::from(u64::max_value()) { let mut bytes = [0u8; 32]; data.to_big_endian(&mut bytes); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs index cce2f2914e3..6c56515cfd7 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs @@ -23,7 +23,7 @@ pub fn derive_overhead( let gas_limit = U256::from(gas_limit); let encoded_len = U256::from(encoded_len); - // The MAX_TX_ERGS_LIMIT is formed in a way that may fullfills a single-instance circuits + // The MAX_TX_ERGS_LIMIT is formed in a way that may fulfills a single-instance circuits // if used in full. That is, within MAX_TX_ERGS_LIMIT it is possible to fully saturate all the single-instance // circuits. 
let overhead_for_single_instance_circuits = diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs index ffe0be2f03b..6e836ad201d 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs @@ -69,7 +69,7 @@ pub(super) fn apply_tx_to_memory( }; apply_l2_block(memory, &bootloader_l2_block, tx_index); - // Note, +1 is moving for poitner + // Note, +1 is moving for pointer let compressed_bytecodes_offset = COMPRESSED_BYTECODES_OFFSET + 1 + compressed_bytecodes_size; let encoded_compressed_bytecodes = diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs index 82c1a052792..988841e90ce 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs @@ -107,7 +107,7 @@ async fn test_require_eip712() { vm.get_eth_balance(beneficiary.address), U256::from(888000088) ); - // Make sure that the tokens were transfered from the AA account. + // Make sure that the tokens were transferred from the AA account. 
assert_eq!( private_account_balance, vm.get_eth_balance(private_account.address) diff --git a/core/tests/ts-integration/contracts/custom-account/custom-paymaster.sol b/core/tests/ts-integration/contracts/custom-account/custom-paymaster.sol index 164aee98518..e55f093cb78 100644 --- a/core/tests/ts-integration/contracts/custom-account/custom-paymaster.sol +++ b/core/tests/ts-integration/contracts/custom-account/custom-paymaster.sol @@ -63,7 +63,7 @@ contract CustomPaymaster is IPaymaster { bool success = _transaction.payToTheBootloader(); require(success, "Failed to transfer funds to the bootloader"); - // For now, refunds are not supported, so we just test the fact that the transfered context is correct + // For now, refunds are not supported, so we just test the fact that the transferred context is correct txCounter += 1; context = abi.encode(txCounter); } else { diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index 2842d3bbcaf..4c0ebb6ed05 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -2,7 +2,7 @@ # We don't provide the group name like `[rust]` here, because we don't want # these variables to be prefixed during the compiling. -# `RUST_LOG` environmnet variable for `env_logger` +# `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. RUST_LOG="""\ zksync_core=debug,\ diff --git a/etc/env/ext-node-docker.toml b/etc/env/ext-node-docker.toml index 4db60a3c19d..b14a35ffef1 100644 --- a/etc/env/ext-node-docker.toml +++ b/etc/env/ext-node-docker.toml @@ -33,7 +33,7 @@ bootloader_hash="0x0100038581be3d0e201b3cc45d151ef5cc59eb3a0f146ad44f0f72abf00b5 default_aa_hash="0x0100038dc66b69be75ec31653c64cb931678299b9b659472772b2550b703f41c" [rust] -# `RUST_LOG` environmnet variable for `env_logger` +# `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. 
log="""\ warn,\ diff --git a/etc/env/ext-node.toml b/etc/env/ext-node.toml index 697580d1938..61f74a87ce0 100644 --- a/etc/env/ext-node.toml +++ b/etc/env/ext-node.toml @@ -33,7 +33,7 @@ bootloader_hash="0x0100038581be3d0e201b3cc45d151ef5cc59eb3a0f146ad44f0f72abf00b5 default_aa_hash="0x0100038dc66b69be75ec31653c64cb931678299b9b659472772b2550b703f41c" [rust] -# `RUST_LOG` environmnet variable for `env_logger` +# `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. log="""\ warn,\ From 19c84ce624d53735133fa3b12c7f980e8c14260d Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Mon, 4 Dec 2023 11:51:17 +0200 Subject: [PATCH 081/115] feat: Add metric to CallTracer for calculating maximum depth of the calls (#535) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add metric to CallTracer for calculating maximum depth of the calls ## Why ❔ We need to know what our limits are. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- .../src/tracers/call_tracer/metrics.rs | 15 ++++++ .../multivm/src/tracers/call_tracer/mod.rs | 46 +++++++++++++++++++ .../src/tracers/call_tracer/vm_latest/mod.rs | 15 ++---- .../call_tracer/vm_refunds_enhancement/mod.rs | 16 +++---- .../call_tracer/vm_virtual_blocks/mod.rs | 15 ++---- 5 files changed, 77 insertions(+), 30 deletions(-) create mode 100644 core/lib/multivm/src/tracers/call_tracer/metrics.rs diff --git a/core/lib/multivm/src/tracers/call_tracer/metrics.rs b/core/lib/multivm/src/tracers/call_tracer/metrics.rs new file mode 100644 index 00000000000..b3d94464f50 --- /dev/null +++ b/core/lib/multivm/src/tracers/call_tracer/metrics.rs @@ -0,0 +1,15 @@ +use vise::{Buckets, Histogram, Metrics}; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "vm_call_tracer")] +pub struct CallMetrics { + /// Maximum call stack depth during the execution of the transaction. + #[metrics(buckets = Buckets::exponential(1.0..=64.0, 2.0))] + pub call_stack_depth: Histogram, + /// Maximum number of near calls during the execution of the transaction. 
+ #[metrics(buckets = Buckets::exponential(1.0..=64.0, 2.0))] + pub max_near_calls: Histogram, +} + +#[vise::register] +pub static CALL_METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/multivm/src/tracers/call_tracer/mod.rs b/core/lib/multivm/src/tracers/call_tracer/mod.rs index 90343a53bf6..90f15fb68d4 100644 --- a/core/lib/multivm/src/tracers/call_tracer/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/mod.rs @@ -1,7 +1,9 @@ +use crate::tracers::call_tracer::metrics::CALL_METRICS; use once_cell::sync::OnceCell; use std::sync::Arc; use zksync_types::vm_trace::Call; +mod metrics; pub mod vm_latest; pub mod vm_refunds_enhancement; pub mod vm_virtual_blocks; @@ -10,12 +12,23 @@ pub mod vm_virtual_blocks; pub struct CallTracer { stack: Vec, result: Arc>>, + + max_stack_depth: usize, + max_near_calls: usize, } #[derive(Debug, Clone)] struct FarcallAndNearCallCount { farcall: Call, near_calls_after: usize, + stack_depth_on_prefix: usize, +} + +impl Drop for CallTracer { + fn drop(&mut self) { + CALL_METRICS.call_stack_depth.observe(self.max_stack_depth); + CALL_METRICS.max_near_calls.observe(self.max_near_calls); + } } impl CallTracer { @@ -23,6 +36,8 @@ impl CallTracer { Self { stack: vec![], result, + max_stack_depth: 0, + max_near_calls: 0, } } @@ -38,4 +53,35 @@ impl CallTracer { let cell = self.result.as_ref(); cell.set(result).unwrap(); } + + fn push_call_and_update_stats(&mut self, farcall: Call, near_calls_after: usize) { + let stack_depth = self + .stack + .last() + .map(|x| x.stack_depth_on_prefix) + .unwrap_or(0); + + let depth_on_prefix = stack_depth + 1 + near_calls_after; + + let call = FarcallAndNearCallCount { + farcall, + near_calls_after, + stack_depth_on_prefix: depth_on_prefix, + }; + + self.stack.push(call); + + self.max_stack_depth = self.max_stack_depth.max(depth_on_prefix); + self.max_near_calls = self.max_near_calls.max(near_calls_after); + } + + fn increase_near_call_count(&mut self) { + if let Some(last) = 
self.stack.last_mut() { + last.near_calls_after += 1; + last.stack_depth_on_prefix += 1; + + self.max_near_calls = self.max_near_calls.max(last.near_calls_after); + self.max_stack_depth = self.max_stack_depth.max(last.stack_depth_on_prefix); + } + } } diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs index 2b6fc144bd4..f5f5c1077d3 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs @@ -15,7 +15,7 @@ use crate::interface::{ tracer::VmExecutionStopReason, traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, VmRevertReason, }; -use crate::tracers::call_tracer::{CallTracer, FarcallAndNearCallCount}; +use crate::tracers::call_tracer::CallTracer; use crate::vm_latest::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}; impl DynTracer> for CallTracer { @@ -28,9 +28,7 @@ impl DynTracer> for CallTracer { ) { match data.opcode.variant.opcode { Opcode::NearCall(_) => { - if let Some(last) = self.stack.last_mut() { - last.near_calls_after += 1; - } + self.increase_near_call_count(); } Opcode::FarCall(far_call) => { // We use parent gas for properly calculating gas used in the trace. 
@@ -51,10 +49,7 @@ impl DynTracer> for CallTracer { }; self.handle_far_call_op_code_latest(state, memory, &mut current_call); - self.stack.push(FarcallAndNearCallCount { - farcall: current_call, - near_calls_after: 0, - }); + self.push_call_and_update_stats(current_call, 0); } Opcode::Ret(ret_code) => { self.handle_ret_op_code_latest(state, memory, ret_code); @@ -187,7 +182,7 @@ impl CallTracer { if current_call.near_calls_after > 0 { current_call.near_calls_after -= 1; - self.stack.push(current_call); + self.push_call_and_update_stats(current_call.farcall, current_call.near_calls_after); return; } @@ -203,7 +198,7 @@ impl CallTracer { if let Some(parent_call) = self.stack.last_mut() { parent_call.farcall.calls.push(current_call.farcall); } else { - self.stack.push(current_call); + self.push_call_and_update_stats(current_call.farcall, current_call.near_calls_after); } } } diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs index 43dd363dcea..fab4ee0ff0f 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs @@ -15,7 +15,7 @@ use crate::interface::{ tracer::VmExecutionStopReason, traits::tracers::dyn_tracers::vm_1_3_3::DynTracer, VmRevertReason, }; -use crate::tracers::call_tracer::{CallTracer, FarcallAndNearCallCount}; +use crate::tracers::call_tracer::CallTracer; use crate::vm_refunds_enhancement::{ BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState, }; @@ -30,9 +30,7 @@ impl DynTracer> for CallTracer { ) { match data.opcode.variant.opcode { Opcode::NearCall(_) => { - if let Some(last) = self.stack.last_mut() { - last.near_calls_after += 1; - } + self.increase_near_call_count(); } Opcode::FarCall(far_call) => { // We use parent gas for properly calculating gas used in the trace. 
@@ -53,10 +51,8 @@ impl DynTracer> for CallTracer { }; self.handle_far_call_op_code_refunds_enhancement(state, memory, &mut current_call); - self.stack.push(FarcallAndNearCallCount { - farcall: current_call, - near_calls_after: 0, - }); + + self.push_call_and_update_stats(current_call, 0); } Opcode::Ret(ret_code) => { self.handle_ret_op_code_refunds_enhancement(state, memory, ret_code); @@ -189,7 +185,7 @@ impl CallTracer { if current_call.near_calls_after > 0 { current_call.near_calls_after -= 1; - self.stack.push(current_call); + self.push_call_and_update_stats(current_call.farcall, current_call.near_calls_after); return; } @@ -205,7 +201,7 @@ impl CallTracer { if let Some(parent_call) = self.stack.last_mut() { parent_call.farcall.calls.push(current_call.farcall); } else { - self.stack.push(current_call); + self.push_call_and_update_stats(current_call.farcall, current_call.near_calls_after); } } } diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs index c78593b40e7..631d4d2081c 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs @@ -12,7 +12,7 @@ use zksync_types::U256; use crate::interface::{ dyn_tracers::vm_1_3_3::DynTracer, VmExecutionResultAndLogs, VmRevertReason, }; -use crate::tracers::call_tracer::{CallTracer, FarcallAndNearCallCount}; +use crate::tracers::call_tracer::CallTracer; use crate::vm_virtual_blocks::{ ExecutionEndTracer, ExecutionProcessing, HistoryMode, SimpleMemory, VmTracer, }; @@ -27,9 +27,7 @@ impl DynTracer> for CallTracer { ) { match data.opcode.variant.opcode { Opcode::NearCall(_) => { - if let Some(last) = self.stack.last_mut() { - last.near_calls_after += 1; - } + self.increase_near_call_count(); } Opcode::FarCall(far_call) => { // We use parent gas for properly calculating gas used in the trace. 
@@ -50,10 +48,7 @@ impl DynTracer> for CallTracer { }; self.handle_far_call_op_code_virtual_blocks(state, data, memory, &mut current_call); - self.stack.push(FarcallAndNearCallCount { - farcall: current_call, - near_calls_after: 0, - }); + self.push_call_and_update_stats(current_call, 0); } Opcode::Ret(ret_code) => { self.handle_ret_op_code_virtual_blocks(state, data, memory, ret_code); @@ -187,7 +182,7 @@ impl CallTracer { if current_call.near_calls_after > 0 { current_call.near_calls_after -= 1; - self.stack.push(current_call); + self.push_call_and_update_stats(current_call.farcall, current_call.near_calls_after); return; } @@ -203,7 +198,7 @@ impl CallTracer { if let Some(parent_call) = self.stack.last_mut() { parent_call.farcall.calls.push(current_call.farcall); } else { - self.stack.push(current_call); + self.push_call_and_update_stats(current_call.farcall, current_call.near_calls_after); } } } From 58a4e6c4c22bd7f002ede1c6def0dc260706185e Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Mon, 4 Dec 2023 12:06:40 +0200 Subject: [PATCH 082/115] feat: Add various metrics to the Prover subsystems (#541) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ 1. Add various metrics to the Prover subsystems, especially: * oldest block, that wasn't sent to prover(`fri_prover.oldest_unprocessed_block`) * oldest block, that didn't go through basic/leaf/node aggregation levels (`fri_prover.oldest_unprocessed_block_by_round`) * how much time is spent on waiting for available prover to send data to (`prover_fri_witness_vector_generator.prover_waiting_time) * count for attempts to send data to prover (`prover_fri_witness_vector_generator.prover_attempts_count`) 2. Refactor metrics in prover to use vise. ## Why ❔ We have some metric coverage on the prover subsystem, but it's incomplete. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). 
- [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- core/lib/dal/sqlx-data.json | 74 +++++++++++++++++++ core/lib/dal/src/fri_proof_compressor_dal.rs | 16 ++++ core/lib/dal/src/fri_prover_dal.rs | 22 ++++++ core/lib/dal/src/proof_generation_dal.rs | 32 ++++++++ core/lib/types/src/proofs.rs | 11 +++ .../fri_proof_compressor_queue_monitor.rs | 20 +++++ .../house_keeper/fri_prover_queue_monitor.rs | 39 ++++++++++ .../zksync_core/src/witness_generator/mod.rs | 7 +- prover/Cargo.lock | 16 ++-- prover/circuit_synthesizer/Cargo.toml | 3 +- .../src/circuit_synthesizer.rs | 14 +--- prover/circuit_synthesizer/src/main.rs | 1 + prover/circuit_synthesizer/src/metrics.rs | 14 ++++ prover/proof_fri_compressor/Cargo.toml | 3 +- prover/proof_fri_compressor/src/compressor.rs | 24 +++--- prover/proof_fri_compressor/src/main.rs | 1 + prover/proof_fri_compressor/src/metrics.rs | 16 ++++ prover/prover/Cargo.toml | 3 +- prover/prover/src/main.rs | 1 + prover/prover/src/metrics.rs | 40 ++++++++++ prover/prover/src/prover.rs | 57 ++++---------- prover/prover/src/run.rs | 4 +- .../src/synthesized_circuit_provider.rs | 9 +-- prover/prover_fri/Cargo.toml | 3 +- .../src/gpu_prover_job_processor.rs | 24 +++--- prover/prover_fri/src/lib.rs | 1 + prover/prover_fri/src/main.rs | 2 + prover/prover_fri/src/metrics.rs | 43 +++++++++++ prover/prover_fri/src/prover_job_processor.rs | 41 +++++----- prover/prover_fri/src/socket_listener.rs | 18 ++--- prover/prover_fri/src/utils.rs | 16 ++-- prover/prover_fri_gateway/Cargo.toml | 3 +- .../src/api_data_fetcher.rs | 3 +- prover/prover_fri_gateway/src/main.rs | 1 + prover/prover_fri_gateway/src/metrics.rs | 11 +++ prover/prover_fri_utils/Cargo.toml | 3 +- prover/prover_fri_utils/src/lib.rs | 15 ++-- prover/prover_fri_utils/src/metrics.rs | 36 +++++++++ prover/witness_generator/Cargo.toml | 3 +- .../witness_generator/src/basic_circuits.rs 
| 31 ++++---- .../witness_generator/src/leaf_aggregation.rs | 35 ++++----- prover/witness_generator/src/lib.rs | 2 + prover/witness_generator/src/main.rs | 10 +-- prover/witness_generator/src/metrics.rs | 33 +++++++++ .../witness_generator/src/node_aggregation.rs | 36 ++++----- prover/witness_generator/src/scheduler.rs | 31 +++----- prover/witness_vector_generator/Cargo.toml | 3 +- .../witness_vector_generator/src/generator.rs | 20 ++--- prover/witness_vector_generator/src/lib.rs | 2 + prover/witness_vector_generator/src/main.rs | 1 + .../witness_vector_generator/src/metrics.rs | 18 +++++ 51 files changed, 623 insertions(+), 249 deletions(-) create mode 100644 prover/circuit_synthesizer/src/metrics.rs create mode 100644 prover/proof_fri_compressor/src/metrics.rs create mode 100644 prover/prover/src/metrics.rs create mode 100644 prover/prover_fri/src/metrics.rs create mode 100644 prover/prover_fri_gateway/src/metrics.rs create mode 100644 prover/prover_fri_utils/src/metrics.rs create mode 100644 prover/witness_generator/src/metrics.rs create mode 100644 prover/witness_vector_generator/src/metrics.rs diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index 2c958ce0394..9084adb61cd 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -2744,6 +2744,24 @@ }, "query": "UPDATE transactions\n SET in_mempool = TRUE\n FROM (\n SELECT hash FROM (\n SELECT hash\n FROM transactions\n WHERE miniblock_number IS NULL AND in_mempool = FALSE AND error IS NULL\n AND (is_priority = TRUE OR (max_fee_per_gas >= $2 and gas_per_pubdata_limit >= $3))\n AND tx_format != $4\n ORDER BY is_priority DESC, priority_op_id, received_at\n LIMIT $1\n ) as subquery1\n ORDER BY hash\n ) as subquery2\n WHERE transactions.hash = subquery2.hash\n RETURNING transactions.*" }, + "2cc57497090a97bcb453036f7b5e2139b590699aa1a2df4d6fd2b19e27e06251": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + 
"nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT l1_batch_number FROM proof_generation_details WHERE status <> 'generated' ORDER BY l1_batch_number ASC LIMIT 1" + }, "2e3f116ca05ae70b7c83ac550302194c91f57b69902ff8e42140fde732ae5e6a": { "describe": { "columns": [], @@ -3971,6 +3989,24 @@ }, "query": "VACUUM storage_logs" }, + "4860c1118485da8673963a260ded76eb8e13989936f9ab17e23687a1103132cb": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT l1_batch_number FROM proof_generation_details WHERE status = 'ready_to_be_proven' ORDER BY l1_batch_number ASC LIMIT 1" + }, "4ab8a25620b5400d836e1b847320d4e176629a27e1a6cb0666ab02bb55371769": { "describe": { "columns": [ @@ -8814,6 +8850,26 @@ }, "query": "SELECT MAX(operation_number) as \"max?\" FROM storage_logs WHERE miniblock_number = $1" }, + "a7c7e8f036404d24dc6bfa184a84b92d8f73ca034970481af34b6163e66dc59a": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [ + "Int2" + ] + } + }, + "query": "\n SELECT l1_batch_number \n FROM prover_jobs_fri \n WHERE status <> 'skipped'\n AND status <> 'successful'\n AND aggregation_round = $1 \n ORDER BY l1_batch_number ASC \n LIMIT 1\n " + }, "a8b32073a67ad77caab11e73a5cac5aa5b5382648ff95d6787a309eb3f64d434": { "describe": { "columns": [], @@ -11541,6 +11597,24 @@ }, "query": "\n UPDATE node_aggregation_witness_jobs\n SET status='queued'\n WHERE l1_batch_number IN\n (SELECT prover_jobs.l1_batch_number\n FROM prover_jobs\n JOIN node_aggregation_witness_jobs nawj ON prover_jobs.l1_batch_number = nawj.l1_batch_number\n WHERE nawj.status = 'waiting_for_proofs'\n AND prover_jobs.status = 'successful'\n AND prover_jobs.aggregation_round = 1\n GROUP BY prover_jobs.l1_batch_number, 
nawj.number_of_leaf_circuits\n HAVING COUNT(*) = nawj.number_of_leaf_circuits)\n RETURNING l1_batch_number;\n " }, + "f15f0848cfd830ec5d5b479fdcdd36c6a4439495b7680614ac1b0e4d73fb992f": { + "describe": { + "columns": [ + { + "name": "l1_batch_number", + "ordinal": 0, + "type_info": "Int8" + } + ], + "nullable": [ + false + ], + "parameters": { + "Left": [] + } + }, + "query": "SELECT l1_batch_number FROM proof_compression_jobs_fri WHERE status <> 'successful' ORDER BY l1_batch_number ASC LIMIT 1" + }, "f1defa140e20b9c250d3212602dc259c0a35598c2e69d1c42746a8fab6dd8d3e": { "describe": { "columns": [], diff --git a/core/lib/dal/src/fri_proof_compressor_dal.rs b/core/lib/dal/src/fri_proof_compressor_dal.rs index 97caf76ebce..b7f1d1921e9 100644 --- a/core/lib/dal/src/fri_proof_compressor_dal.rs +++ b/core/lib/dal/src/fri_proof_compressor_dal.rs @@ -206,6 +206,22 @@ impl FriProofCompressorDal<'_, '_> { } } + pub async fn get_oldest_not_compressed_batch(&mut self) -> Option { + let result: Option = sqlx::query!( + "SELECT l1_batch_number \ + FROM proof_compression_jobs_fri \ + WHERE status <> 'successful' \ + ORDER BY l1_batch_number ASC \ + LIMIT 1", + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + + result + } + pub async fn requeue_stuck_jobs( &mut self, processing_timeout: Duration, diff --git a/core/lib/dal/src/fri_prover_dal.rs b/core/lib/dal/src/fri_prover_dal.rs index 026cb783dd3..7878cbfa542 100644 --- a/core/lib/dal/src/fri_prover_dal.rs +++ b/core/lib/dal/src/fri_prover_dal.rs @@ -341,6 +341,28 @@ impl FriProverDal<'_, '_> { } } + pub async fn min_unproved_l1_batch_number_for_aggregation_round( + &mut self, + aggregation_round: AggregationRound, + ) -> Option { + sqlx::query!( + r#" + SELECT l1_batch_number + FROM prover_jobs_fri + WHERE status <> 'skipped' + AND status <> 'successful' + AND aggregation_round = $1 + ORDER BY l1_batch_number ASC + LIMIT 1 + "#, + aggregation_round as i16 + ) 
+ .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)) + } + pub async fn update_status(&mut self, id: u32, status: &str) { sqlx::query!( "UPDATE prover_jobs_fri \ diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index d5fd3079dc1..22db4463469 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -109,4 +109,36 @@ impl ProofGenerationDal<'_, '_> { .then_some(()) .ok_or(sqlx::Error::RowNotFound) } + + pub async fn get_oldest_unprocessed_batch(&mut self) -> Option { + let result: Option = sqlx::query!( + "SELECT l1_batch_number \ + FROM proof_generation_details \ + WHERE status = 'ready_to_be_proven' \ + ORDER BY l1_batch_number ASC \ + LIMIT 1", + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + + result + } + + pub async fn get_oldest_not_generated_batch(&mut self) -> Option { + let result: Option = sqlx::query!( + "SELECT l1_batch_number \ + FROM proof_generation_details \ + WHERE status <> 'generated' \ + ORDER BY l1_batch_number ASC \ + LIMIT 1", + ) + .fetch_optional(self.storage.conn()) + .await + .unwrap() + .map(|row| L1BatchNumber(row.l1_batch_number as u32)); + + result + } } diff --git a/core/lib/types/src/proofs.rs b/core/lib/types/src/proofs.rs index 28d25900231..b28b81b79fb 100644 --- a/core/lib/types/src/proofs.rs +++ b/core/lib/types/src/proofs.rs @@ -98,6 +98,17 @@ impl AggregationRound { } } +impl std::fmt::Display for AggregationRound { + fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + formatter.write_str(match self { + Self::BasicCircuits => "basic_circuits", + Self::LeafAggregation => "leaf_aggregation", + Self::NodeAggregation => "node_aggregation", + Self::Scheduler => "scheduler", + }) + } +} + impl FromStr for AggregationRound { type Err = String; diff --git 
a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs index 7a86dcf905f..769792b6a58 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs @@ -58,6 +58,26 @@ impl PeriodicJob for FriProofCompressorStatsReporter { stats.in_progress as f64, "type" => "in_progress" ); + + let oldest_not_compressed_batch = self + .pool + .access_storage() + .await + .unwrap() + .fri_proof_compressor_dal() + .get_oldest_not_compressed_batch() + .await; + + if let Some(l1_batch_number) = oldest_not_compressed_batch { + metrics::gauge!( + format!( + "prover_fri.{}.oldest_not_compressed_batch", + PROOF_COMPRESSOR_SERVICE_NAME + ), + l1_batch_number.0 as f64 + ); + } + Ok(()) } diff --git a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs index 9d3264b679e..129f9befbd5 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs @@ -82,6 +82,45 @@ impl PeriodicJob for FriProverStatsReporter { "circuit_id" => circuit_id.to_string(), "aggregation_round" => aggregation_round.to_string()); } + + // FIXME: refactor metrics here + + if let Some(l1_batch_number) = conn + .proof_generation_dal() + .get_oldest_unprocessed_batch() + .await + { + metrics::gauge!( + "fri_prover.oldest_unprocessed_batch", + l1_batch_number.0 as f64 + ) + } + + if let Some(l1_batch_number) = conn + .proof_generation_dal() + .get_oldest_not_generated_batch() + .await + { + metrics::gauge!( + "fri_prover.oldest_not_generated_batch", + l1_batch_number.0 as f64 + ) + } + + for aggregation_round in 0..2 { + if let Some(l1_batch_number) = conn + .fri_prover_jobs_dal() + 
.min_unproved_l1_batch_number_for_aggregation_round(aggregation_round.into()) + .await + { + metrics::gauge!( + "fri_prover.oldest_unprocessed_block_by_round", + l1_batch_number.0 as f64, + "aggregation_round" => aggregation_round.to_string() + ) + } + } + Ok(()) } diff --git a/core/lib/zksync_core/src/witness_generator/mod.rs b/core/lib/zksync_core/src/witness_generator/mod.rs index 5dcebadf6a8..18b23866056 100644 --- a/core/lib/zksync_core/src/witness_generator/mod.rs +++ b/core/lib/zksync_core/src/witness_generator/mod.rs @@ -66,12 +66,7 @@ impl From for StageLabel { impl fmt::Display for StageLabel { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str(match self.0 { - AggregationRound::BasicCircuits => "basic_circuits", - AggregationRound::LeafAggregation => "leaf_aggregation", - AggregationRound::NodeAggregation => "node_aggregation", - AggregationRound::Scheduler => "scheduler", - }) + self.0.fmt(formatter) } } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index d27b787084f..823b426d4cc 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7035,13 +7035,13 @@ dependencies = [ "ctrlc", "futures 0.3.29", "local-ip-address", - "metrics", "prometheus_exporter", "prover-service", "structopt", "thiserror", "tokio", "tracing", + "vise", "vlog", "zkevm_test_harness 1.3.3", "zksync_config", @@ -7279,11 +7279,11 @@ dependencies = [ "bincode", "ctrlc", "futures 0.3.29", - "metrics", "prometheus_exporter", "structopt", "tokio", "tracing", + "vise", "vk_setup_data_generator_server_fri", "vlog", "zkevm_test_harness 1.4.0", @@ -7345,7 +7345,6 @@ dependencies = [ "futures 0.3.29", "hex", "local-ip-address", - "metrics", "prometheus_exporter", "prover-service", "queues", @@ -7356,6 +7355,7 @@ dependencies = [ "thiserror", "tokio", "tracing", + "vise", "vlog", "zkevm_test_harness 1.3.3", "zksync_circuit_breaker", @@ -7380,12 +7380,12 @@ dependencies = [ "ctrlc", "futures 0.3.29", "local-ip-address", - "metrics", 
"prometheus_exporter", "serde", "shivini", "tokio", "tracing", + "vise", "vk_setup_data_generator_server_fri", "vlog", "zkevm_test_harness 1.4.0", @@ -7410,12 +7410,12 @@ dependencies = [ "ctrlc", "futures 0.3.29", "log", - "metrics", "prometheus_exporter", "reqwest", "serde", "tokio", "tracing", + "vise", "vlog", "zksync_config", "zksync_dal", @@ -7439,9 +7439,9 @@ dependencies = [ name = "zksync_prover_fri_utils" version = "0.1.0" dependencies = [ - "metrics", "serde", "tracing", + "vise", "zksync_config", "zksync_dal", "zksync_object_store", @@ -7610,7 +7610,6 @@ dependencies = [ "ctrlc", "futures 0.3.29", "hex", - "metrics", "multivm", "prometheus_exporter", "rand 0.8.5", @@ -7619,6 +7618,7 @@ dependencies = [ "structopt", "tokio", "tracing", + "vise", "vk_setup_data_generator_server_fri", "vlog", "zk_evm 1.4.0", @@ -7646,13 +7646,13 @@ dependencies = [ "bincode", "ctrlc", "futures 0.3.29", - "metrics", "prometheus_exporter", "queues", "serde", "structopt", "tokio", "tracing", + "vise", "vk_setup_data_generator_server_fri", "vlog", "zksync_config", diff --git a/prover/circuit_synthesizer/Cargo.toml b/prover/circuit_synthesizer/Cargo.toml index d8de49fb765..c6f70bd5c3b 100644 --- a/prover/circuit_synthesizer/Cargo.toml +++ b/prover/circuit_synthesizer/Cargo.toml @@ -8,6 +8,8 @@ name = "zksync_circuit_synthesizer" path = "src/main.rs" [dependencies] +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "dd05139b76ab0843443ab3ff730174942c825dae" } + zksync_dal = { path = "../../core/lib/dal" } zksync_types = { path = "../../core/lib/types" } zksync_queued_job_processor = { path = "../../core/lib/queued_job_processor" } @@ -34,5 +36,4 @@ tokio = { version = "1.23.0", features = ["full"] } futures = "0.3" ctrlc = { version = "3.1", features = ["termination"] } local-ip-address = "0.5.0" -metrics = "0.21" tracing = "0.1" diff --git a/prover/circuit_synthesizer/src/circuit_synthesizer.rs 
b/prover/circuit_synthesizer/src/circuit_synthesizer.rs index 1d68138cc60..55da03949a7 100644 --- a/prover/circuit_synthesizer/src/circuit_synthesizer.rs +++ b/prover/circuit_synthesizer/src/circuit_synthesizer.rs @@ -13,6 +13,7 @@ use zkevm_test_harness::bellman::plonk::better_better_cs::cs::Circuit; use zkevm_test_harness::pairing::bn256::Bn256; use zkevm_test_harness::witness::oracle::VmWitnessOracle; +use crate::metrics::METRICS; use zksync_config::configs::prover_group::ProverGroupConfig; use zksync_config::configs::CircuitSynthesizerConfig; use zksync_config::ProverConfigs; @@ -116,11 +117,7 @@ impl CircuitSynthesizer { "Finished circuit synthesis for circuit: {circuit_type} took {:?}", start_instant.elapsed() ); - metrics::histogram!( - "server.circuit_synthesizer.synthesize", - start_instant.elapsed(), - "circuit_type" => circuit_type, - ); + METRICS.synthesize[&circuit_type].observe(start_instant.elapsed()); // we don't perform assembly finalization here since it increases the assembly size significantly due to padding. 
Ok((assembly, circuit.numeric_circuit_type())) @@ -302,11 +299,8 @@ async fn handle_send_result( "Sent assembly of size: {blob_size_in_gb}GB successfully, took: {elapsed:?} \ for job: {job_id} by: {local_ip:?} to: {address:?}" ); - metrics::histogram!( - "server.circuit_synthesizer.blob_sending_time", - *elapsed, - "blob_size_in_gb" => blob_size_in_gb.to_string(), - ); + + METRICS.blob_sending_time[&blob_size_in_gb].observe(*elapsed); // endregion diff --git a/prover/circuit_synthesizer/src/main.rs b/prover/circuit_synthesizer/src/main.rs index 5789854e97a..5592885dcdd 100644 --- a/prover/circuit_synthesizer/src/main.rs +++ b/prover/circuit_synthesizer/src/main.rs @@ -16,6 +16,7 @@ use zksync_verification_key_server::get_cached_commitments; use crate::circuit_synthesizer::CircuitSynthesizer; mod circuit_synthesizer; +mod metrics; #[derive(Debug, StructOpt)] #[structopt(name = "TODO", about = "TODO")] diff --git a/prover/circuit_synthesizer/src/metrics.rs b/prover/circuit_synthesizer/src/metrics.rs new file mode 100644 index 00000000000..b9ee5b10c15 --- /dev/null +++ b/prover/circuit_synthesizer/src/metrics.rs @@ -0,0 +1,14 @@ +use std::time::Duration; +use vise::{Buckets, Histogram, LabeledFamily, Metrics}; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover_circuit_synthesizer")] +pub(crate) struct CircuitSynthesizerMetrics { + #[metrics(buckets = Buckets::LATENCIES, labels = ["blob_size_in_gb"])] + pub blob_sending_time: LabeledFamily>, + #[metrics(buckets = Buckets::LATENCIES, labels = ["circuit_type"])] + pub synthesize: LabeledFamily<&'static str, Histogram>, +} + +#[vise::register] +pub(crate) static METRICS: vise::Global = vise::Global::new(); diff --git a/prover/proof_fri_compressor/Cargo.toml b/prover/proof_fri_compressor/Cargo.toml index 45ffe756e19..80ee3789806 100644 --- a/prover/proof_fri_compressor/Cargo.toml +++ b/prover/proof_fri_compressor/Cargo.toml @@ -6,6 +6,8 @@ edition = "2021" # See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "dd05139b76ab0843443ab3ff730174942c825dae" } + zksync_types = { path = "../../core/lib/types" } zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } @@ -27,6 +29,5 @@ structopt = "0.3.26" tokio = { version = "1", features = ["time"] } futures = { version = "0.3", features = ["compat"] } ctrlc = { version = "3.1", features = ["termination"] } -metrics = "0.21" async-trait = "0.1" bincode = "1.0" diff --git a/prover/proof_fri_compressor/src/compressor.rs b/prover/proof_fri_compressor/src/compressor.rs index 9da71c83eac..f0f8efc6102 100644 --- a/prover/proof_fri_compressor/src/compressor.rs +++ b/prover/proof_fri_compressor/src/compressor.rs @@ -3,6 +3,7 @@ use async_trait::async_trait; use std::time::Instant; use tokio::task::JoinHandle; +use crate::metrics::METRICS; use zkevm_test_harness::proof_wrapper_utils::{wrap_proof, WrapperConfig}; use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; @@ -124,13 +125,13 @@ impl JobProcessor for ProofCompressor { "Started proof compression for L1 batch: {:?}", l1_batch_number ); - let started_at = Instant::now(); + let observer = METRICS.blob_fetch_time.start(); + let fri_proof: FriProofWrapper = self.blob_store.get(fri_proof_id) .await.with_context(|| format!("Failed to get fri proof from blob store for {l1_batch_number} with id {fri_proof_id}"))?; - metrics::histogram!( - "prover_fri.proof_fri_compressor.blob_fetch_time", - started_at.elapsed(), - ); + + observer.observe(); + let scheduler_proof = match fri_proof { FriProofWrapper::Base(_) => anyhow::bail!("Must be a scheduler proof not base layer"), FriProofWrapper::Recursive(proof) => proof, @@ -166,10 +167,7 @@ impl JobProcessor for ProofCompressor { started_at: Instant, artifacts: Proof>>, ) -> anyhow::Result<()> { - metrics::histogram!( - 
"prover_fri.proof_fri_compressor.compression_time", - started_at.elapsed(), - ); + METRICS.compression_time.observe(started_at.elapsed()); tracing::info!( "Finished fri proof compression for job: {job_id} took: {:?}", started_at.elapsed() @@ -192,10 +190,10 @@ impl JobProcessor for ProofCompressor { .put(job_id, &l1_batch_proof) .await .context("Failed to save converted l1_batch_proof")?; - metrics::histogram!( - "prover_fri.proof_fri_compressor.blob_save_time", - blob_save_started_at.elapsed(), - ); + METRICS + .blob_save_time + .observe(blob_save_started_at.elapsed()); + self.pool .access_storage() .await diff --git a/prover/proof_fri_compressor/src/main.rs b/prover/proof_fri_compressor/src/main.rs index b5b5ba672c5..c8396803339 100644 --- a/prover/proof_fri_compressor/src/main.rs +++ b/prover/proof_fri_compressor/src/main.rs @@ -16,6 +16,7 @@ use zksync_utils::wait_for_tasks::wait_for_tasks; use crate::compressor::ProofCompressor; mod compressor; +mod metrics; #[derive(Debug, StructOpt)] #[structopt( diff --git a/prover/proof_fri_compressor/src/metrics.rs b/prover/proof_fri_compressor/src/metrics.rs new file mode 100644 index 00000000000..5891da2f416 --- /dev/null +++ b/prover/proof_fri_compressor/src/metrics.rs @@ -0,0 +1,16 @@ +use std::time::Duration; +use vise::{Buckets, Histogram, Metrics}; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover_fri_proof_fri_compressor")] +pub(crate) struct ProofFriCompressorMetrics { + #[metrics(buckets = Buckets::LATENCIES)] + pub blob_fetch_time: Histogram, + #[metrics(buckets = Buckets::LATENCIES)] + pub compression_time: Histogram, + #[metrics(buckets = Buckets::LATENCIES)] + pub blob_save_time: Histogram, +} + +#[vise::register] +pub(crate) static METRICS: vise::Global = vise::Global::new(); diff --git a/prover/prover/Cargo.toml b/prover/prover/Cargo.toml index d0b66f3e3f7..03adce72daf 100644 --- a/prover/prover/Cargo.toml +++ b/prover/prover/Cargo.toml @@ -11,6 +11,8 @@ categories = ["cryptography"] publish = false 
# We don't want to publish our binaries. [dependencies] +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "dd05139b76ab0843443ab3ff730174942c825dae" } + zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } zksync_env_config = { path = "../../core/lib/env_config" } @@ -43,7 +45,6 @@ thiserror = "1.0" chrono = "0.4" serde_json = "1.0" ethabi = "18.0.0" -metrics = "0.21" hex = "0.4" serde = { version = "1.0", features = ["derive"] } bincode = "1.3.2" diff --git a/prover/prover/src/main.rs b/prover/prover/src/main.rs index 2c03c2f070d..56ac77336c2 100644 --- a/prover/prover/src/main.rs +++ b/prover/prover/src/main.rs @@ -2,6 +2,7 @@ #[cfg(feature = "gpu")] mod artifact_provider; +mod metrics; #[cfg(feature = "gpu")] mod prover; #[cfg(feature = "gpu")] diff --git a/prover/prover/src/metrics.rs b/prover/prover/src/metrics.rs new file mode 100644 index 00000000000..4544ae9bfa7 --- /dev/null +++ b/prover/prover/src/metrics.rs @@ -0,0 +1,40 @@ +use std::time::Duration; +use vise::{Buckets, Counter, Histogram, LabeledFamily, Metrics}; + +const PROVER_LATENCY_BUCKETS: Buckets = Buckets::values(&[ + 1.0, 10.0, 20.0, 40.0, 60.0, 120.0, 240.0, 360.0, 600.0, 1800.0, 3600.0, +]); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover")] +pub(crate) struct ProverMetrics { + #[metrics(buckets = PROVER_LATENCY_BUCKETS, labels = ["circuit_type"])] + pub proof_generation_time: LabeledFamily>, + #[metrics(buckets = PROVER_LATENCY_BUCKETS, labels = ["circuit_type"])] + pub circuit_synthesis_time: LabeledFamily>, + #[metrics(buckets = PROVER_LATENCY_BUCKETS, labels = ["circuit_type"])] + pub assembly_finalize_time: LabeledFamily>, + #[metrics(buckets = PROVER_LATENCY_BUCKETS, labels = ["circuit_type"])] + pub assembly_encoding_time: LabeledFamily>, + #[metrics(buckets = PROVER_LATENCY_BUCKETS, labels = ["circuit_type"])] + pub assembly_decoding_time: LabeledFamily>, + #[metrics(buckets = PROVER_LATENCY_BUCKETS, 
labels = ["circuit_type"])] + pub assembly_transferring_time: LabeledFamily>, + #[metrics(buckets = PROVER_LATENCY_BUCKETS, labels = ["circuit_type"])] + pub setup_load_time: LabeledFamily>, + #[metrics(labels = ["circuit_type"])] + pub setup_loading_cache_miss: LabeledFamily, + #[metrics(buckets = PROVER_LATENCY_BUCKETS)] + pub prover_wait_idle_time: Histogram, + #[metrics(buckets = PROVER_LATENCY_BUCKETS)] + pub setup_load_wait_idle_time: Histogram, + #[metrics(buckets = PROVER_LATENCY_BUCKETS)] + pub scheduler_wait_idle_time: Histogram, + #[metrics(buckets = PROVER_LATENCY_BUCKETS)] + pub download_time: Histogram, + #[metrics(buckets = PROVER_LATENCY_BUCKETS, labels = ["queue_capacity"])] + pub queue_free_slots: LabeledFamily>, +} + +#[vise::register] +pub(crate) static METRICS: vise::Global = vise::Global::new(); diff --git a/prover/prover/src/prover.rs b/prover/prover/src/prover.rs index 25e46938a1d..1885d815332 100644 --- a/prover/prover/src/prover.rs +++ b/prover/prover/src/prover.rs @@ -9,6 +9,7 @@ use tokio::runtime::Handle; use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncProof; use zkevm_test_harness::pairing::bn256::Bn256; +use crate::metrics::METRICS; use zksync_config::{PostgresConfig, ProverConfig}; use zksync_dal::ConnectionPool; use zksync_dal::StorageProcessor; @@ -64,11 +65,9 @@ impl ProverReporter { serialized.len() >> 10, duration, ); - metrics::histogram!( - "server.prover.proof_generation_time", - duration, - "circuit_type" => circuit_type, - ); + + METRICS.proof_generation_time[&circuit_type].observe(duration); + let job_id = job_id as u32; self.rt_handle.block_on(async { let mut connection = self.pool.access_storage().await.unwrap(); @@ -187,11 +186,7 @@ impl JobReporter for ProverReporter { circuit_type, duration, ); - metrics::histogram!( - "server.prover.circuit_synthesis_time", - duration, - "circuit_type" => circuit_type, - ); + METRICS.circuit_synthesis_time[&circuit_type].observe(duration); } 
JobResult::AssemblyFinalized(job_id, duration) => { @@ -202,11 +197,7 @@ impl JobReporter for ProverReporter { circuit_type, duration, ); - metrics::histogram!( - "server.prover.assembly_finalize_time", - duration, - "circuit_type" => circuit_type, - ); + METRICS.assembly_finalize_time[&circuit_type].observe(duration); } JobResult::SetupLoaded(job_id, duration, cache_miss) => { @@ -219,16 +210,8 @@ impl JobReporter for ProverReporter { duration, cache_miss ); - metrics::histogram!( - "server.prover.setup_load_time", - duration, - "circuit_type" => circuit_type.clone() - ); - metrics::counter!( - "server.prover.setup_loading_cache_miss", - 1, - "circuit_type" => circuit_type - ); + METRICS.setup_load_time[&circuit_type].observe(duration); + METRICS.setup_loading_cache_miss[&circuit_type].inc(); } JobResult::AssemblyEncoded(job_id, duration) => { @@ -239,11 +222,7 @@ impl JobReporter for ProverReporter { circuit_type, duration, ); - metrics::histogram!( - "server.prover.assembly_encoding_time", - duration, - "circuit_type" => circuit_type, - ); + METRICS.assembly_encoding_time[&circuit_type].observe(duration); } JobResult::AssemblyDecoded(job_id, duration) => { @@ -254,11 +233,7 @@ impl JobReporter for ProverReporter { circuit_type, duration, ); - metrics::histogram!( - "server.prover.assembly_decoding_time", - duration, - "circuit_type" => circuit_type, - ); + METRICS.assembly_decoding_time[&circuit_type].observe(duration); } JobResult::FailureWithDebugging(job_id, circuit_id, assembly, error) => { @@ -285,11 +260,7 @@ impl JobReporter for ProverReporter { circuit_type, duration, ); - metrics::histogram!( - "server.prover.assembly_transferring_time", - duration, - "circuit_type" => circuit_type, - ); + METRICS.assembly_transferring_time[&circuit_type].observe(duration); } JobResult::ProverWaitedIdle(prover_id, duration) => { @@ -298,17 +269,17 @@ impl JobReporter for ProverReporter { duration, prover_id ); - metrics::histogram!("server.prover.prover_wait_idle_time", 
duration,); + METRICS.prover_wait_idle_time.observe(duration); } JobResult::SetupLoaderWaitedIdle(duration) => { tracing::trace!("Setup load wait idle time: {:?}", duration); - metrics::histogram!("server.prover.setup_load_wait_wait_idle_time", duration,); + METRICS.setup_load_wait_idle_time.observe(duration); } JobResult::SchedulerWaitedIdle(duration) => { tracing::trace!("Scheduler wait idle time: {:?}", duration); - metrics::histogram!("server.prover.scheduler_wait_idle_time", duration,); + METRICS.scheduler_wait_idle_time.observe(duration); } } } diff --git a/prover/prover/src/run.rs b/prover/prover/src/run.rs index d54e8a873a7..9342cd554e6 100644 --- a/prover/prover/src/run.rs +++ b/prover/prover/src/run.rs @@ -20,6 +20,7 @@ use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; use zksync_utils::wait_for_tasks::wait_for_tasks; use crate::artifact_provider::ProverArtifactProvider; +use crate::metrics::METRICS; use crate::prover::ProverReporter; use crate::prover_params::ProverParams; use crate::socket_listener::incoming_socket_listener; @@ -148,8 +149,7 @@ pub async fn run() -> anyhow::Result<()> { &prover_config.initial_setup_key_path, &prover_config.key_download_url, ); - metrics::histogram!("server.prover.download_time", started_at.elapsed()); - + METRICS.download_time.observe(started_at.elapsed()); env::set_var("CRS_FILE", prover_config.initial_setup_key_path.clone()); // We don't have a graceful shutdown process for the prover, so `_stop_sender` is unused. // Though we still need to create a channel because circuit breaker expects `stop_receiver`. 
diff --git a/prover/prover/src/synthesized_circuit_provider.rs b/prover/prover/src/synthesized_circuit_provider.rs index c5016810a0c..3c6939dc6aa 100644 --- a/prover/prover/src/synthesized_circuit_provider.rs +++ b/prover/prover/src/synthesized_circuit_provider.rs @@ -6,6 +6,7 @@ use tokio::sync::Mutex; use prover_service::RemoteSynthesizer; use queues::{Buffer, IsQueue}; +use crate::metrics::METRICS; use tokio::runtime::Handle; use zksync_dal::ConnectionPool; use zksync_types::proofs::SocketAddress; @@ -69,11 +70,9 @@ impl RemoteSynthesizer for SynthesizedCircuitProvider { queue_free_slots, assembly_queue.capacity() ); - metrics::histogram!( - "server.prover.queue_free_slots", - queue_free_slots as f64, - "queue_capacity" => assembly_queue.capacity().to_string() - ); + METRICS.queue_free_slots[&assembly_queue.capacity().to_string()] + .observe(queue_free_slots); + Some(Box::new(Cursor::new(blob))) } Err(_) => None, diff --git a/prover/prover_fri/Cargo.toml b/prover/prover_fri/Cargo.toml index 314f034a9a6..1d641311c22 100644 --- a/prover/prover_fri/Cargo.toml +++ b/prover/prover_fri/Cargo.toml @@ -6,6 +6,8 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "dd05139b76ab0843443ab3ff730174942c825dae" } + zksync_types = { path = "../../core/lib/types" } zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } @@ -34,7 +36,6 @@ tracing = "0.1" tokio = { version = "1", features = ["time"] } futures = { version = "0.3", features = ["compat"] } ctrlc = { version = "3.1", features = ["termination"] } -metrics = "0.21" serde = { version = "1.0", features = ["derive"] } async-trait = "0.1" local-ip-address = "0.5.0" diff --git a/prover/prover_fri/src/gpu_prover_job_processor.rs b/prover/prover_fri/src/gpu_prover_job_processor.rs index ec022d419d4..9d7eda1202a 100644 --- 
a/prover/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/prover_fri/src/gpu_prover_job_processor.rs @@ -16,6 +16,7 @@ pub mod gpu_prover { use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerProof; use zksync_prover_fri_types::WitnessVectorArtifacts; + use crate::metrics::METRICS; use zksync_config::configs::fri_prover_group::FriProverGroupConfig; use zksync_config::configs::FriProverConfig; use zksync_dal::ConnectionPool; @@ -102,11 +103,10 @@ pub mod gpu_prover { let artifact: GoldilocksGpuProverSetupData = get_setup_data_for_circuit_type(key.clone()) .context("get_setup_data_for_circuit_type()")?; - metrics::histogram!( - "prover_fri.prover.gpu_setup_data_load_time", - started_at.elapsed(), - "circuit_type" => key.circuit_id.to_string(), - ); + + METRICS.gpu_setup_data_load_time[&key.circuit_id.to_string()] + .observe(started_at.elapsed()); + Arc::new(artifact) } }) @@ -161,11 +161,9 @@ pub mod gpu_prover { prover_job.job_id, started_at.elapsed() ); - metrics::histogram!( - "prover_fri.prover.gpu_proof_generation_time", - started_at.elapsed(), - "circuit_type" => circuit_id.to_string() - ); + METRICS.gpu_proof_generation_time[&circuit_id.to_string()] + .observe(started_at.elapsed()); + let proof = proof.into(); verify_proof( &prover_job.circuit_wrapper, @@ -258,10 +256,8 @@ pub mod gpu_prover { started_at: Instant, artifacts: Self::JobArtifacts, ) -> anyhow::Result<()> { - metrics::histogram!( - "prover_fri.prover.gpu_total_proving_time", - started_at.elapsed(), - ); + METRICS.gpu_total_proving_time.observe(started_at.elapsed()); + let mut storage_processor = self.prover_connection_pool.access_storage().await.unwrap(); save_proof( job_id, diff --git a/prover/prover_fri/src/lib.rs b/prover/prover_fri/src/lib.rs index 5fdb260d40d..8d57083ebd3 100644 --- a/prover/prover_fri/src/lib.rs +++ b/prover/prover_fri/src/lib.rs @@ -1,3 +1,4 @@ #![feature(generic_const_exprs)] +mod metrics; pub mod 
prover_job_processor; pub mod utils; diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs index 14b54350946..ab0994a3648 100644 --- a/prover/prover_fri/src/main.rs +++ b/prover/prover_fri/src/main.rs @@ -29,6 +29,8 @@ mod prover_job_processor; mod socket_listener; mod utils; +mod metrics; + async fn graceful_shutdown(port: u16) -> anyhow::Result> { let postgres_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?; let pool = ConnectionPool::singleton(postgres_config.prover_url()?) diff --git a/prover/prover_fri/src/metrics.rs b/prover/prover_fri/src/metrics.rs new file mode 100644 index 00000000000..27ddce54d6c --- /dev/null +++ b/prover/prover_fri/src/metrics.rs @@ -0,0 +1,43 @@ +use std::time::Duration; +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, LabeledFamily, Metrics}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] +pub(crate) struct CircuitLabels { + pub circuit_type: u8, + pub layer: Layer, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue)] +#[metrics(rename_all = "snake_case")] +pub(crate) enum Layer { + Recursive, + Base, +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover_fri_prover")] +pub(crate) struct ProverFriMetrics { + #[metrics(buckets = Buckets::LATENCIES, labels = ["circuit_type"])] + pub gpu_setup_data_load_time: LabeledFamily>, + #[metrics(buckets = Buckets::LATENCIES, labels = ["circuit_type"])] + pub gpu_proof_generation_time: LabeledFamily>, + #[metrics(buckets = Buckets::LATENCIES)] + pub gpu_total_proving_time: Histogram, + #[metrics(buckets = Buckets::LATENCIES, labels = ["circuit_type"])] + pub setup_data_load_time: LabeledFamily>, + #[metrics(buckets = Buckets::LATENCIES)] + pub proof_generation_time: Family>, + #[metrics(buckets = Buckets::LATENCIES, labels = ["circuit_type"])] + pub proof_verification_time: LabeledFamily>, + #[metrics(buckets = Buckets::LATENCIES)] + pub cpu_total_proving_time: Histogram, + 
#[metrics(buckets = Buckets::LATENCIES, labels = ["blob_size_in_gb"])] + pub witness_vector_blob_time: LabeledFamily>, + #[metrics(buckets = Buckets::LATENCIES, labels = ["circuit_type"])] + pub gpu_assembly_generation_time: LabeledFamily>, + #[metrics(buckets = Buckets::LATENCIES, labels = ["circuit_type"])] + pub blob_save_time: LabeledFamily>, +} + +#[vise::register] +pub(crate) static METRICS: vise::Global = vise::Global::new(); diff --git a/prover/prover_fri/src/prover_job_processor.rs b/prover/prover_fri/src/prover_job_processor.rs index d540771fd14..30beae5bc82 100644 --- a/prover/prover_fri/src/prover_job_processor.rs +++ b/prover/prover_fri/src/prover_job_processor.rs @@ -19,6 +19,7 @@ use zksync_prover_fri_types::circuit_definitions::{ use zkevm_test_harness::prover_utils::{prove_base_layer_circuit, prove_recursion_layer_circuit}; +use crate::metrics::{CircuitLabels, Layer, METRICS}; use zksync_config::configs::fri_prover_group::FriProverGroupConfig; use zksync_config::configs::FriProverConfig; use zksync_dal::ConnectionPool; @@ -90,11 +91,9 @@ impl Prover { let artifact: GoldilocksProverSetupData = get_cpu_setup_data_for_circuit_type(key.clone()) .context("get_cpu_setup_data_for_circuit_type()")?; - metrics::histogram!( - "prover_fri.prover.setup_data_load_time", - started_at.elapsed(), - "circuit_type" => key.circuit_id.to_string(), - ); + METRICS.gpu_setup_data_load_time[&key.circuit_id.to_string()] + .observe(started_at.elapsed()); + Arc::new(artifact) } }) @@ -137,12 +136,13 @@ impl Prover { &artifact.wits_hint, &artifact.finalization_hint, ); - metrics::histogram!( - "prover_fri.prover.proof_generation_time", - started_at.elapsed(), - "circuit_type" => circuit_id.to_string(), - "layer" => "recursive", - ); + + let label = CircuitLabels { + circuit_type: circuit_id, + layer: Layer::Recursive, + }; + METRICS.proof_generation_time[&label].observe(started_at.elapsed()); + verify_proof( &CircuitWrapper::Recursive(circuit), &proof, @@ -177,12 +177,13 @@ 
impl Prover { &artifact.wits_hint, &artifact.finalization_hint, ); - metrics::histogram!( - "prover_fri.prover.proof_generation_time", - started_at.elapsed(), - "circuit_type" => circuit_id.to_string(), - "layer" => "base", - ); + + let label = CircuitLabels { + circuit_type: circuit_id, + layer: Layer::Base, + }; + METRICS.proof_generation_time[&label].observe(started_at.elapsed()); + verify_proof(&CircuitWrapper::Base(circuit), &proof, &artifact.vk, job_id); FriProofWrapper::Base(ZkSyncBaseLayerProof::from_inner(circuit_id, proof)) } @@ -242,10 +243,8 @@ impl JobProcessor for Prover { started_at: Instant, artifacts: Self::JobArtifacts, ) -> anyhow::Result<()> { - metrics::histogram!( - "prover_fri.prover.cpu_total_proving_time", - started_at.elapsed(), - ); + METRICS.cpu_total_proving_time.observe(started_at.elapsed()); + let mut storage_processor = self.prover_connection_pool.access_storage().await.unwrap(); save_proof( job_id, diff --git a/prover/prover_fri/src/socket_listener.rs b/prover/prover_fri/src/socket_listener.rs index 45c5518a1c6..e9ecbd1e60b 100644 --- a/prover/prover_fri/src/socket_listener.rs +++ b/prover/prover_fri/src/socket_listener.rs @@ -12,6 +12,7 @@ pub mod gpu_socket_listener { get_finalization_hints, get_round_for_recursive_circuit_type, }; + use crate::metrics::METRICS; use crate::utils::{GpuProverJob, ProvingAssembly, SharedWitnessVectorQueue}; use anyhow::Context as _; use tokio::sync::watch; @@ -113,11 +114,10 @@ pub mod gpu_socket_listener { file_size_in_gb, started_at.elapsed().as_secs() ); - metrics::histogram!( - "prover_fri.prover_fri.witness_vector_blob_time", - started_at.elapsed(), - "blob_size_in_gb" => file_size_in_gb.to_string(), - ); + + METRICS.witness_vector_blob_time[&(file_size_in_gb as u64)] + .observe(started_at.elapsed()); + let witness_vector = bincode::deserialize::(&assembly) .context("Failed deserializing witness vector")?; let assembly = generate_assembly_for_repeated_proving( @@ -185,11 +185,9 @@ pub mod 
gpu_socket_listener { job_id, started_at.elapsed() ); - metrics::histogram!( - "prover_fri.prover.gpu_assembly_generation_time", - started_at.elapsed(), - "circuit_type" => circuit_id.to_string() - ); + + METRICS.gpu_assembly_generation_time[&circuit_id.to_string()].observe(started_at.elapsed()); + Ok(cs) } } diff --git a/prover/prover_fri/src/utils.rs b/prover/prover_fri/src/utils.rs index 5b0f8be04a4..c67ee9149f1 100644 --- a/prover/prover_fri/src/utils.rs +++ b/prover/prover_fri/src/utils.rs @@ -26,6 +26,7 @@ use zksync_prover_fri_types::{ }; use zksync_prover_fri_utils::get_base_layer_circuit_id_for_recursive_layer; +use crate::metrics::METRICS; use zksync_types::{basic_fri_types::CircuitIdRoundTuple, proofs::AggregationRound, L1BatchNumber}; pub type F = GoldilocksField; @@ -94,11 +95,8 @@ pub async fn save_proof( let blob_save_started_at = Instant::now(); let blob_url = blob_store.put(job_id, &proof).await.unwrap(); - metrics::histogram!( - "prover_fri.prover.blob_save_time", - blob_save_started_at.elapsed(), - "circuit_type" => circuit_type.to_string(), - ); + + METRICS.blob_save_time[&circuit_type.to_string()].observe(blob_save_started_at.elapsed()); let mut transaction = storage_processor.start_transaction().await.unwrap(); let job_metadata = transaction @@ -141,11 +139,9 @@ pub fn verify_proof( recursive_circuit.numeric_circuit_type(), ), }; - metrics::histogram!( - "prover_fri.prover.proof_verification_time", - started_at.elapsed(), - "circuit_type" => circuit_id.to_string(), - ); + + METRICS.proof_verification_time[&circuit_id.to_string()].observe(started_at.elapsed()); + if !is_valid { let msg = format!( "Failed to verify base layer proof for job-id: {job_id} circuit_type {circuit_id}" diff --git a/prover/prover_fri_gateway/Cargo.toml b/prover/prover_fri_gateway/Cargo.toml index 24e0a97bae7..198d32e5550 100644 --- a/prover/prover_fri_gateway/Cargo.toml +++ b/prover/prover_fri_gateway/Cargo.toml @@ -4,6 +4,8 @@ version = "0.1.0" edition = "2021" 
[dependencies] +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "dd05139b76ab0843443ab3ff730174942c825dae" } + zksync_types = { path = "../../core/lib/types" } zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } @@ -21,5 +23,4 @@ ctrlc = { version = "3.1", features = ["termination"] } async-trait = "0.1" futures = { version = "0.3", features = ["compat"] } serde = { version = "1.0", features = ["derive"] } -metrics = "0.21" log = "0.4.20" diff --git a/prover/prover_fri_gateway/src/api_data_fetcher.rs b/prover/prover_fri_gateway/src/api_data_fetcher.rs index a009f1783f2..339a7bec9e6 100644 --- a/prover/prover_fri_gateway/src/api_data_fetcher.rs +++ b/prover/prover_fri_gateway/src/api_data_fetcher.rs @@ -1,5 +1,6 @@ use std::time::Duration; +use crate::metrics::METRICS; use async_trait::async_trait; use reqwest::Client; use serde::{de::DeserializeOwned, Serialize}; @@ -70,7 +71,7 @@ impl PeriodicApiStruct { self.handle_response(job_id, response).await; } Err(err) => { - metrics::counter!("prover_fri.prover_fri_gateway.http_error", 1, "service_name" => Self::SERVICE_NAME); + METRICS.http_error[&Self::SERVICE_NAME].inc(); tracing::error!("HTTP request failed due to error: {}", err); } } diff --git a/prover/prover_fri_gateway/src/main.rs b/prover/prover_fri_gateway/src/main.rs index dd1570989c1..3a3f8b42ae0 100644 --- a/prover/prover_fri_gateway/src/main.rs +++ b/prover/prover_fri_gateway/src/main.rs @@ -12,6 +12,7 @@ use zksync_types::prover_server_api::{ProofGenerationDataRequest, SubmitProofReq use zksync_utils::wait_for_tasks::wait_for_tasks; mod api_data_fetcher; +mod metrics; mod proof_gen_data_fetcher; mod proof_submitter; diff --git a/prover/prover_fri_gateway/src/metrics.rs b/prover/prover_fri_gateway/src/metrics.rs new file mode 100644 index 00000000000..34d10ef9a79 --- /dev/null +++ b/prover/prover_fri_gateway/src/metrics.rs @@ -0,0 +1,11 @@ +use vise::{Counter, LabeledFamily, 
Metrics}; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover_fri_prover_fri_gateway")] +pub(crate) struct ProverFriGatewayMetrics { + #[metrics(labels = ["service_name"])] + pub http_error: LabeledFamily<&'static str, Counter>, +} + +#[vise::register] +pub(crate) static METRICS: vise::Global = vise::Global::new(); diff --git a/prover/prover_fri_utils/Cargo.toml b/prover/prover_fri_utils/Cargo.toml index fb2d729800c..b216f6307a7 100644 --- a/prover/prover_fri_utils/Cargo.toml +++ b/prover/prover_fri_utils/Cargo.toml @@ -6,6 +6,8 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "dd05139b76ab0843443ab3ff730174942c825dae" } + zksync_object_store = { path = "../../core/lib/object_store" } zksync_config = { path = "../../core/lib/config" } zksync_types = { path = "../../core/lib/types" } @@ -14,4 +16,3 @@ zksync_dal = { path = "../../core/lib/dal" } tracing = "0.1" serde = { version = "1.0", features = ["derive"] } -metrics = "0.21" diff --git a/prover/prover_fri_utils/src/lib.rs b/prover/prover_fri_utils/src/lib.rs index cbe6570d657..f0edcd07902 100644 --- a/prover/prover_fri_utils/src/lib.rs +++ b/prover/prover_fri_utils/src/lib.rs @@ -11,9 +11,11 @@ use zksync_prover_fri_types::{ get_current_pod_name, CircuitWrapper, ProverJob, ProverServiceDataKey, }; +use crate::metrics::{CircuitLabels, PROVER_FRI_UTILS_METRICS}; use zksync_types::proofs::AggregationRound; use zksync_types::protocol_version::L1VerifierConfig; +pub mod metrics; pub mod socket_utils; pub async fn fetch_next_circuit( @@ -61,12 +63,13 @@ pub async fn fetch_next_circuit( .get(circuit_key) .await .unwrap_or_else(|err| panic!("{err:?}")); - metrics::histogram!( - "prover_fri.prover.blob_fetch_time", - started_at.elapsed(), - "circuit_type" => prover_job.circuit_id.to_string(), - "aggregation_round" => format!("{:?}", 
prover_job.aggregation_round), - ); + + let label = CircuitLabels { + circuit_type: prover_job.circuit_id, + aggregation_round: prover_job.aggregation_round.into(), + }; + PROVER_FRI_UTILS_METRICS.blob_fetch_time[&label].observe(started_at.elapsed()); + let setup_data_key = ProverServiceDataKey { circuit_id: prover_job.circuit_id, round: prover_job.aggregation_round, diff --git a/prover/prover_fri_utils/src/metrics.rs b/prover/prover_fri_utils/src/metrics.rs new file mode 100644 index 00000000000..767e2c25fc1 --- /dev/null +++ b/prover/prover_fri_utils/src/metrics.rs @@ -0,0 +1,36 @@ +use std::time::Duration; +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; +use zksync_types::proofs::AggregationRound; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] +pub struct CircuitLabels { + pub circuit_type: u8, + pub aggregation_round: StageLabel, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "stage", format = "wit_gen_{}")] +pub struct StageLabel(AggregationRound); + +impl From for StageLabel { + fn from(round: AggregationRound) -> Self { + Self(round) + } +} + +impl std::fmt::Display for StageLabel { + fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(formatter) + } +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover_fri_prover")] +pub(crate) struct ProverFriUtilsMetrics { + #[metrics(buckets = Buckets::LATENCIES)] + pub blob_fetch_time: Family>, +} + +#[vise::register] +pub(crate) static PROVER_FRI_UTILS_METRICS: vise::Global = + vise::Global::new(); diff --git a/prover/witness_generator/Cargo.toml b/prover/witness_generator/Cargo.toml index a8dce0547b2..0d28734a70f 100644 --- a/prover/witness_generator/Cargo.toml +++ b/prover/witness_generator/Cargo.toml @@ -11,6 +11,8 @@ categories = ["cryptography"] publish = false # We don't want to publish our binaries. 
[dependencies] +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "dd05139b76ab0843443ab3ff730174942c825dae" } + zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } zksync_env_config = { path = "../../core/lib/env_config" } @@ -38,7 +40,6 @@ anyhow = "1.0" tracing = "0.1" tokio = { version = "1", features = ["time"] } futures = { version = "0.3", features = ["compat"] } -metrics = "0.21" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" async-trait = "0.1" diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 645e3672274..963ec034b26 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -23,6 +23,7 @@ use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::blo use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; use zksync_prover_fri_types::{AuxOutputWitnessWrapper, get_current_pod_name}; +use crate::metrics::WITNESS_GENERATOR_METRICS; use crate::storage_oracle::StorageOracle; use multivm::vm_latest::{ constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle, @@ -124,7 +125,7 @@ impl BasicWitnessGenerator { // We get value higher than `blocks_proving_percentage` with prob = `1 - blocks_proving_percentage`. // In this case job should be skipped. 
if threshold > blocks_proving_percentage && !shall_force_process_block { - metrics::counter!("server.witness_generator_fri.skipped_blocks", 1); + WITNESS_GENERATOR_METRICS.skipped_blocks.inc(); tracing::info!( "Skipping witness generation for block {}, blocks_proving_percentage: {}", block_number.0, @@ -146,7 +147,7 @@ impl BasicWitnessGenerator { } } - metrics::counter!("server.witness_generator_fri.sampled_blocks", 1); + WITNESS_GENERATOR_METRICS.sampled_blocks.inc(); tracing::info!( "Starting witness generation of type {:?} for block {}", AggregationRound::BasicCircuits, @@ -196,11 +197,10 @@ impl JobProcessor for BasicWitnessGenerator { ); let started_at = Instant::now(); let job = get_artifacts(block_number, &*self.object_store).await; - metrics::histogram!( - "prover_fri.witness_generation.blob_fetch_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::BasicCircuits), - ); + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); + Ok(Some((block_number, job))) } None => Ok(None), @@ -258,11 +258,10 @@ impl JobProcessor for BasicWitnessGenerator { self.config.shall_save_to_public_bucket, ) .await; - metrics::histogram!( - "prover_fri.witness_generation.blob_save_time", - blob_started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::BasicCircuits), - ); + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::BasicCircuits.into()] + .observe(blob_started_at.elapsed()); + update_database(&self.prover_connection_pool, started_at, job_id, blob_urls).await; Ok(()) } @@ -305,11 +304,9 @@ async fn process_basic_circuits_job( scheduler_witness, aux_output_witness, ) = generate_witness(object_store, config, connection_pool, witness_gen_input).await; - metrics::histogram!( - "prover_fri.witness_generation.witness_generation_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::BasicCircuits), - ); + 
WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); + tracing::info!( "Witness generation for block {} is complete in {:?}", block_number.0, diff --git a/prover/witness_generator/src/leaf_aggregation.rs b/prover/witness_generator/src/leaf_aggregation.rs index e31a44c42aa..d90520a19e6 100644 --- a/prover/witness_generator/src/leaf_aggregation.rs +++ b/prover/witness_generator/src/leaf_aggregation.rs @@ -18,6 +18,7 @@ use zksync_vk_setup_data_server_fri::{ get_base_layer_vk_for_circuit_type, get_recursive_layer_vk_for_circuit_type, }; +use crate::metrics::WITNESS_GENERATOR_METRICS; use crate::utils::{ load_proofs_for_job_ids, save_node_aggregations_artifacts, save_recursive_layer_prover_input_artifacts, ClosedFormInputWrapper, @@ -199,11 +200,10 @@ pub async fn prepare_leaf_aggregation_job( let started_at = Instant::now(); let closed_form_input = get_artifacts(&metadata, object_store).await; let proofs = load_proofs_for_job_ids(&metadata.prover_job_ids_for_proofs, object_store).await; - metrics::histogram!( - "prover_fri.witness_generation.blob_fetch_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::LeafAggregation), - ); + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + let started_at = Instant::now(); let base_vk = get_base_layer_vk_for_circuit_type(metadata.circuit_id) .context("get_base_layer_vk_for_circuit_type()")?; @@ -221,11 +221,10 @@ pub async fn prepare_leaf_aggregation_job( } } let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk); - metrics::histogram!( - "prover_fri.witness_generation.prepare_job_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::LeafAggregation), - ); + + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + 
Ok(LeafAggregationWitnessGeneratorJob { circuit_id: metadata.circuit_id, block_number: metadata.block_number, @@ -249,11 +248,9 @@ pub fn process_leaf_aggregation_job( let leaf_params = (circuit_id, job.leaf_params); let (aggregations, closed_form_inputs) = create_leaf_witnesses(subsets, job.proofs, job.base_vk, leaf_params); - metrics::histogram!( - "prover_fri.witness_generation.witness_generation_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::LeafAggregation), - ); + WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + tracing::info!( "Leaf witness generation for block {} with circuit id {}: is complete in {:?}.", job.block_number.0, @@ -379,11 +376,9 @@ async fn save_artifacts( None, ) .await; - metrics::histogram!( - "prover_fri.witness_generation.blob_save_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::LeafAggregation), - ); + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + BlobUrls { circuit_ids_and_urls, aggregations_urls, diff --git a/prover/witness_generator/src/lib.rs b/prover/witness_generator/src/lib.rs index f7f0fcb2642..567769c07b9 100644 --- a/prover/witness_generator/src/lib.rs +++ b/prover/witness_generator/src/lib.rs @@ -8,5 +8,7 @@ pub mod scheduler; mod storage_oracle; pub mod utils; +pub mod metrics; + #[cfg(test)] mod tests; diff --git a/prover/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs index e6226c40131..a8d0bda48f2 100644 --- a/prover/witness_generator/src/main.rs +++ b/prover/witness_generator/src/main.rs @@ -19,6 +19,7 @@ use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments; use crate::basic_circuits::BasicWitnessGenerator; use crate::leaf_aggregation::LeafAggregationWitnessGenerator; +use crate::metrics::SERVER_METRICS; use 
crate::node_aggregation::NodeAggregationWitnessGenerator; use crate::scheduler::SchedulerWitnessGenerator; @@ -28,6 +29,9 @@ mod node_aggregation; mod precalculated_merkle_paths_provider; mod scheduler; mod storage_oracle; + +mod metrics; + mod utils; #[derive(Debug, StructOpt)] @@ -225,11 +229,7 @@ async fn main() -> anyhow::Result<()> { round, started_at.elapsed() ); - metrics::gauge!( - "server.init.latency", - started_at.elapsed(), - "stage" => format!("fri_witness_generator_{:?}", round) - ); + SERVER_METRICS.init_latency[&(*round).into()].set(started_at.elapsed()); } let mut stop_signal_receiver = get_stop_signal_receiver(); diff --git a/prover/witness_generator/src/metrics.rs b/prover/witness_generator/src/metrics.rs new file mode 100644 index 00000000000..3bddefc00c4 --- /dev/null +++ b/prover/witness_generator/src/metrics.rs @@ -0,0 +1,33 @@ +use std::time::Duration; +use vise::{Buckets, Counter, Family, Gauge, Histogram, LabeledFamily, Metrics}; +use zksync_prover_fri_utils::metrics::StageLabel; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover_fri_witness_generator")] +pub(crate) struct WitnessGeneratorMetrics { + #[metrics(buckets = Buckets::LATENCIES)] + pub blob_fetch_time: Family>, + #[metrics(buckets = Buckets::LATENCIES)] + pub prepare_job_time: Family>, + #[metrics(buckets = Buckets::LATENCIES)] + pub witness_generation_time: Family>, + #[metrics(buckets = Buckets::LATENCIES)] + pub blob_save_time: Family>, + + pub sampled_blocks: Counter, + pub skipped_blocks: Counter, +} + +#[vise::register] +pub(crate) static WITNESS_GENERATOR_METRICS: vise::Global = + vise::Global::new(); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover")] +pub(crate) struct ServerMetrics { + #[metrics(labels = ["stage"])] + pub init_latency: LabeledFamily>, +} + +#[vise::register] +pub(crate) static SERVER_METRICS: vise::Global = vise::Global::new(); diff --git a/prover/witness_generator/src/node_aggregation.rs b/prover/witness_generator/src/node_aggregation.rs 
index 8349b1e18e9..1ae5a255197 100644 --- a/prover/witness_generator/src/node_aggregation.rs +++ b/prover/witness_generator/src/node_aggregation.rs @@ -16,6 +16,7 @@ use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::lea use zksync_vk_setup_data_server_fri::get_recursive_layer_vk_for_circuit_type; use zksync_vk_setup_data_server_fri::utils::get_leaf_vk_params; +use crate::metrics::WITNESS_GENERATOR_METRICS; use crate::utils::{ load_proofs_for_job_ids, save_node_aggregations_artifacts, save_recursive_layer_prover_input_artifacts, AggregationWrapper, @@ -108,11 +109,10 @@ impl NodeAggregationWitnessGenerator { node_vk_commitment, &job.all_leafs_layer_params, ); - metrics::histogram!( - "prover_fri.witness_generation.witness_generation_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::NodeAggregation), - ); + WITNESS_GENERATOR_METRICS.witness_generation_time + [&AggregationRound::NodeAggregation.into()] + .observe(started_at.elapsed()); + tracing::info!( "Node witness generation for block {} with circuit id {} at depth {} with {} next_aggregations jobs completed in {:?}.", job.block_number.0, @@ -228,11 +228,10 @@ pub async fn prepare_job( let started_at = Instant::now(); let artifacts = get_artifacts(&metadata, object_store).await; let proofs = load_proofs_for_job_ids(&metadata.prover_job_ids_for_proofs, object_store).await; - metrics::histogram!( - "prover_fri.witness_generation.blob_fetch_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::NodeAggregation), - ); + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::NodeAggregation.into()] + .observe(started_at.elapsed()); + let started_at = Instant::now(); let leaf_vk = get_recursive_layer_vk_for_circuit_type(metadata.circuit_id) .context("get_recursive_layer_vk_for_circuit_type")?; @@ -254,11 +253,9 @@ pub async fn prepare_job( } } - metrics::histogram!( - 
"prover_fri.witness_generation.job_preparation_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::NodeAggregation), - ); + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::NodeAggregation.into()] + .observe(started_at.elapsed()); + Ok(NodeAggregationWitnessGeneratorJob { circuit_id: metadata.circuit_id, block_number: metadata.block_number, @@ -376,11 +373,10 @@ async fn save_artifacts( Some(artifacts.circuit_id), ) .await; - metrics::histogram!( - "prover_fri.witness_generation.blob_save_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::NodeAggregation), - ); + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] + .observe(started_at.elapsed()); + BlobUrls { node_aggregations_url: aggregations_urls, circuit_ids_and_urls, diff --git a/prover/witness_generator/src/scheduler.rs b/prover/witness_generator/src/scheduler.rs index 50950c8e986..5036aa188ec 100644 --- a/prover/witness_generator/src/scheduler.rs +++ b/prover/witness_generator/src/scheduler.rs @@ -16,6 +16,7 @@ use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::Sch use zksync_vk_setup_data_server_fri::get_recursive_layer_vk_for_circuit_type; use zksync_vk_setup_data_server_fri::utils::get_leaf_vk_params; +use crate::metrics::WITNESS_GENERATOR_METRICS; use crate::utils::{load_proofs_for_job_ids, SchedulerPartialInputWrapper}; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::ConnectionPool; @@ -86,11 +87,8 @@ impl SchedulerWitnessGenerator { transcript_params: (), _marker: std::marker::PhantomData, }; - metrics::histogram!( - "prover_fri.witness_generation.witness_generation_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::Scheduler), - ); + WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::Scheduler.into()] + .observe(started_at.elapsed()); tracing::info!( "Scheduler generation 
for block {} is complete in {:?}", @@ -173,11 +171,8 @@ impl JobProcessor for SchedulerWitnessGenerator { .put(key, &CircuitWrapper::Recursive(artifacts.scheduler_circuit)) .await .unwrap(); - metrics::histogram!( - "prover_fri.witness_generation.blob_save_time", - blob_save_started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::Scheduler), - ); + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::Scheduler.into()] + .observe(blob_save_started_at.elapsed()); let mut prover_connection = self.prover_connection_pool.access_storage().await.unwrap(); let mut transaction = prover_connection.start_transaction().await.unwrap(); @@ -234,11 +229,9 @@ pub async fn prepare_job( ) -> anyhow::Result { let started_at = Instant::now(); let proofs = load_proofs_for_job_ids(&proof_job_ids, object_store).await; - metrics::histogram!( - "prover_fri.witness_generation.blob_fetch_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::Scheduler), - ); + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::Scheduler.into()] + .observe(started_at.elapsed()); + let mut recursive_proofs = vec![]; for wrapper in proofs { match wrapper { @@ -270,11 +263,9 @@ pub async fn prepare_job( .try_into() .unwrap(); scheduler_witness.leaf_layer_parameters = leaf_layer_params; - metrics::histogram!( - "prover_fri.witness_generation.prepare_job_time", - started_at.elapsed(), - "aggregation_round" => format!("{:?}", AggregationRound::Scheduler), - ); + + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::Scheduler.into()] + .observe(started_at.elapsed()); Ok(SchedulerWitnessGeneratorJob { block_number: l1_batch_number, diff --git a/prover/witness_vector_generator/Cargo.toml b/prover/witness_vector_generator/Cargo.toml index e8f94849e30..be5869fb1a4 100644 --- a/prover/witness_vector_generator/Cargo.toml +++ b/prover/witness_vector_generator/Cargo.toml @@ -6,6 +6,8 @@ edition = "2021" # See more keys and their 
definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "dd05139b76ab0843443ab3ff730174942c825dae" } + zksync_types = { path = "../../core/lib/types" } zksync_dal = { path = "../../core/lib/dal" } zksync_config = { path = "../../core/lib/config" } @@ -26,7 +28,6 @@ structopt = "0.3.26" tokio = { version = "1", features = ["time"] } futures = { version = "0.3", features = ["compat"] } ctrlc = { version = "3.1", features = ["termination"] } -metrics = "0.21" serde = { version = "1.0", features = ["derive"] } async-trait = "0.1" queues = "1.1.0" diff --git a/prover/witness_vector_generator/src/generator.rs b/prover/witness_vector_generator/src/generator.rs index 27e71b6cdec..ee17e0edaa7 100644 --- a/prover/witness_vector_generator/src/generator.rs +++ b/prover/witness_vector_generator/src/generator.rs @@ -4,6 +4,7 @@ use anyhow::Context as _; use async_trait::async_trait; use tokio::task::JoinHandle; +use crate::metrics::METRICS; use tokio::time::sleep; use zksync_config::configs::FriWitnessVectorGeneratorConfig; use zksync_dal::ConnectionPool; @@ -114,11 +115,11 @@ impl JobProcessor for WitnessVectorGenerator { started_at: Instant, artifacts: WitnessVectorArtifacts, ) -> anyhow::Result<()> { - metrics::histogram!( - "prover_fri.witness_vector_generator.gpu_witness_vector_generation_time", - started_at.elapsed(), - "circuit_type" => get_numeric_circuit_id(&artifacts.prover_job.circuit_wrapper).to_string(), - ); + let circuit_type = + get_numeric_circuit_id(&artifacts.prover_job.circuit_wrapper).to_string(); + + METRICS.gpu_witness_vector_generation_time[&circuit_type].observe(started_at.elapsed()); + tracing::info!( "Finished witness vector generation for job: {job_id} in zone: {:?} took: {:?}", self.zone, @@ -150,6 +151,8 @@ impl JobProcessor for WitnessVectorGenerator { handle_send_result(&result, job_id, &address, &self.pool, self.zone.clone()).await; 
if result.is_ok() { + METRICS.prover_waiting_time[&circuit_type].observe(now.elapsed()); + METRICS.prover_attempts_count[&circuit_type].observe(attempts as usize); return Ok(()); } @@ -204,11 +207,8 @@ async fn handle_send_result( "Sent assembly of size: {blob_size_in_mb}MB successfully, took: {elapsed:?} \ for job: {job_id} to: {address:?}" ); - metrics::histogram!( - "prover_fri.witness_vector_generator.blob_sending_time", - *elapsed, - "blob_size_in_mb" => blob_size_in_mb.to_string(), - ); + + METRICS.blob_sending_time[&blob_size_in_mb.to_string()].observe(*elapsed); pool.access_storage() .await diff --git a/prover/witness_vector_generator/src/lib.rs b/prover/witness_vector_generator/src/lib.rs index 365bdf1cc0d..d9d47d54897 100644 --- a/prover/witness_vector_generator/src/lib.rs +++ b/prover/witness_vector_generator/src/lib.rs @@ -1,3 +1,5 @@ #![feature(generic_const_exprs)] pub mod generator; + +pub mod metrics; diff --git a/prover/witness_vector_generator/src/main.rs b/prover/witness_vector_generator/src/main.rs index bcf95f60351..7c1aa8e0b89 100644 --- a/prover/witness_vector_generator/src/main.rs +++ b/prover/witness_vector_generator/src/main.rs @@ -20,6 +20,7 @@ use zksync_utils::wait_for_tasks::wait_for_tasks; use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments; mod generator; +mod metrics; #[derive(Debug, StructOpt)] #[structopt( diff --git a/prover/witness_vector_generator/src/metrics.rs b/prover/witness_vector_generator/src/metrics.rs new file mode 100644 index 00000000000..4bc11ff401b --- /dev/null +++ b/prover/witness_vector_generator/src/metrics.rs @@ -0,0 +1,18 @@ +use std::time::Duration; +use vise::{Buckets, Histogram, LabeledFamily, Metrics}; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover_fri_witness_vector_generator")] +pub(crate) struct WitnessVectorGeneratorMetrics { + #[metrics(buckets = Buckets::LATENCIES, labels = ["circuit_type"])] + pub gpu_witness_vector_generation_time: LabeledFamily>, + 
#[metrics(buckets = Buckets::LATENCIES, labels = ["circuit_type"])] + pub blob_sending_time: LabeledFamily>, + #[metrics(buckets = Buckets::LATENCIES, labels = ["circuit_type"])] + pub prover_waiting_time: LabeledFamily>, + #[metrics(buckets = Buckets::exponential(1.0..=64.0, 2.0), labels = ["circuit_type"])] + pub prover_attempts_count: LabeledFamily>, +} + +#[vise::register] +pub(crate) static METRICS: vise::Global = vise::Global::new(); From 90a085e524bce2422cf1681406e6b2546a524d70 Mon Sep 17 00:00:00 2001 From: gorden <148852660+gordera@users.noreply.github.com> Date: Mon, 4 Dec 2023 18:43:32 +0800 Subject: [PATCH 083/115] chore: fix wrong line (#592) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ fix wrong line ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --- docs/advanced/prover_keys.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/advanced/prover_keys.md b/docs/advanced/prover_keys.md index 6e127f431fc..8cf59067cf8 100644 --- a/docs/advanced/prover_keys.md +++ b/docs/advanced/prover_keys.md @@ -114,7 +114,7 @@ For SNARK circuits (like snark_wrapper), we use keccak as hash function. For STA friendly hash function (currently Poseidon2). 
[basic_circuit_list]: - https://github.com/matter-labs/era-zkevm_test_harness/blob/3cd647aa57fc2e1180bab53f7a3b61ec47502a46/circuit_definitions/src/circuit_definitions/base_layer/mod.rs#L80 + https://github.com/matter-labs/era-zkevm_test_harness/blob/3cd647aa57fc2e1180bab53f7a3b61ec47502a46/circuit_definitions/src/circuit_definitions/base_layer/mod.rs#L77 [recursive_circuit_list]: https://github.com/matter-labs/era-zkevm_test_harness/blob/3cd647aa57fc2e1180bab53f7a3b61ec47502a46/circuit_definitions/src/circuit_definitions/recursion_layer/mod.rs#L29 [verification_key_list]: From a08ba322eb1e0c9e66dd343c522eea8899d1a5b7 Mon Sep 17 00:00:00 2001 From: yilirong1992 <42017468+yilirong1992@users.noreply.github.com> Date: Mon, 4 Dec 2023 20:52:03 +0800 Subject: [PATCH 084/115] chore(docs): fixed docs typo (#588) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Hello, fixed typo ## Why ❔ - fixed typo ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --------- Co-authored-by: Roman Brodetski --- prover/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prover/README.md b/prover/README.md index 310913bf06b..c97cdaff2b8 100644 --- a/prover/README.md +++ b/prover/README.md @@ -29,7 +29,7 @@ feature flag). Only used in GPU proving mode. Prepares all the witness data using CPU, and then streams it to the prover_fri. 
-This is mosty used for resource efficiency (as machines with GPUs are more expensive, it allows us to run many +This is mostly used for resource efficiency (as machines with GPUs are more expensive, it allows us to run many witness_vector_generators, that can 'share' as single gpu based prover_fri). ### proof_fri_compressor From 8c634357fecfe97b6149af27c5627ebd450415c9 Mon Sep 17 00:00:00 2001 From: Salad <148864073+Saladerl@users.noreply.github.com> Date: Mon, 4 Dec 2023 20:52:26 +0800 Subject: [PATCH 085/115] chore(docs): fix typos in document (#589) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Hello, I corrected the typo. ## Why ❔ - fixed typo ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --------- Co-authored-by: Igor Aleksanov --- docs/advanced/prover.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/advanced/prover.md b/docs/advanced/prover.md index 6211f00dea7..71cb668bcdd 100644 --- a/docs/advanced/prover.md +++ b/docs/advanced/prover.md @@ -184,7 +184,7 @@ pub struct UInt32 { pub(crate) variable: Variable, } impl CSAllocatable for UInt32 { - // So the 'witness' type (concrete value) for U32 is u32 - no surprsise ;-) + // So the 'witness' type (concrete value) for U32 is u32 - no surprises ;-) type Witness = u32; ... 
} From 659d0e4718b8eba324aeb50718d36ecd5bdeab02 Mon Sep 17 00:00:00 2001 From: min <52465594+yilimin999@users.noreply.github.com> Date: Mon, 4 Dec 2023 20:52:27 +0800 Subject: [PATCH 086/115] chore: fix typo (#587) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ fixed typos ## Why ❔ fixed typos ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --------- Co-authored-by: Igor Aleksanov --- docs/advanced/how_l2_messaging_works.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/advanced/how_l2_messaging_works.md b/docs/advanced/how_l2_messaging_works.md index c62fe4afc5b..dbf0abbff8d 100644 --- a/docs/advanced/how_l2_messaging_works.md +++ b/docs/advanced/how_l2_messaging_works.md @@ -177,7 +177,7 @@ return actualRootHash == calculatedRootHash; ## Summary -In this article, we've travelled through a vast array of topics: from a user contract dispatching a message to L1 by +In this article, we've traveled through a vast array of topics: from a user contract dispatching a message to L1 by invoking a system contract, to this message's hash making its way all the way to the VM via special opcodes. We've also explored how it's ultimately included in the execution results (as part of QueryLogs), gathered by the State Keeper, and transmitted to L1 for final verification. 
From ad4f9ab29c2a48982585f93350c9ef38cbec985a Mon Sep 17 00:00:00 2001 From: Jean <148654781+oxJean@users.noreply.github.com> Date: Mon, 4 Dec 2023 20:56:22 +0800 Subject: [PATCH 087/115] chore(docs): fix broken link (#590) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ fixed broken link ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --------- Co-authored-by: Igor Aleksanov --- docs/advanced/how_l2_messaging_works.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/advanced/how_l2_messaging_works.md b/docs/advanced/how_l2_messaging_works.md index dbf0abbff8d..147f55dee06 100644 --- a/docs/advanced/how_l2_messaging_works.md +++ b/docs/advanced/how_l2_messaging_works.md @@ -194,7 +194,7 @@ transmitted to L1 for final verification. 
[vm_execution_result]: https://github.com/matter-labs/zksync-era/blob/43d7bd587a84b1b4489f4c6a4169ccb90e0df467/core/lib/vm/src/vm.rs#L81 [log_queries]: - https://github.com/matter-labs/zk_evm_abstractions/blob/839721a4ae2093c5c0aa8ffd49758f32ecd172ed/src/queries.rs#L30C2-L30C2 + https://github.com/matter-labs/era-zk_evm_abstractions/blob/15a2af404902d5f10352e3d1fac693cc395fcff9/src/queries.rs#L30C2-L30C2 [aux_bytes]: https://github.com/matter-labs/zkevm_opcode_defs/blob/780ce4129a95ab9a68abf0d60c156ee8df6008c2/src/system_params.rs#L37C39-L37C39 [event_sink]: From ccd13ce88ff52c3135d794c6f92bec3b16f2210f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Mon, 4 Dec 2023 14:44:35 +0100 Subject: [PATCH 088/115] feat: faster and less noisy zk fmt (#513) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ I've added caching to prettier and changed so that noisy output about changed files is redirected to /dev/null. `zk fmt` is 3 times faster after those changes ## Why ❔ `zk fmt` output was too verbose and we didn't use cache ## Checklist - [X] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [X] Code has been formatted via `zk fmt` and `zk lint`. 
--- infrastructure/zk/src/fmt.ts | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/infrastructure/zk/src/fmt.ts b/infrastructure/zk/src/fmt.ts index 4bb46dba760..fa8b5e79691 100644 --- a/infrastructure/zk/src/fmt.ts +++ b/infrastructure/zk/src/fmt.ts @@ -4,6 +4,10 @@ import * as utils from './utils'; const EXTENSIONS = ['ts', 'md', 'sol', 'js']; const CONFIG_PATH = 'etc/prettier-config'; +function prettierFlags(phaseName: string) { + phaseName = phaseName.replace('/', '-').replace('.', ''); + return ` --cache --cache-location node_modules/.cache/prettier/.prettier-cache-${phaseName}`; +} export async function prettier(extension: string, check: boolean = false) { if (!EXTENSIONS.includes(extension)) { throw new Error('Unsupported extension'); @@ -17,19 +21,31 @@ export async function prettier(extension: string, check: boolean = false) { return; } - await utils.spawn(`yarn --silent prettier --config ${CONFIG_PATH}/${extension}.js --${command} ${files}`); + await utils.spawn( + `yarn --silent prettier --config ${CONFIG_PATH}/${extension}.js --${command} ${files} ${prettierFlags( + extension + )} ${check ? '' : '> /dev/null'}` + ); +} + +async function prettierContracts(check: boolean, directory: string) { + await utils.spawn( + `yarn --silent --cwd ${directory} prettier:${check ? 'check' : 'fix'} ${prettierFlags(directory)} ${ + check ? '' : '> /dev/null' + }` + ); } async function prettierL1Contracts(check: boolean = false) { - await utils.spawn(`yarn --silent --cwd contracts/ethereum prettier:${check ? 'check' : 'fix'}`); + await prettierContracts(check, 'contracts/ethereum'); } async function prettierL2Contracts(check: boolean = false) { - await utils.spawn(`yarn --silent --cwd contracts/zksync prettier:${check ? 
'check' : 'fix'}`); + await prettierContracts(check, 'contracts/zksync'); } async function prettierSystemContracts(check: boolean = false) { - await utils.spawn(`yarn --silent --cwd etc/system-contracts prettier:${check ? 'check' : 'fix'}`); + await prettierContracts(check, 'etc/system-contracts'); } export async function rustfmt(check: boolean = false) { From aebb70d09ed3225ac9fba996e90d36a743813566 Mon Sep 17 00:00:00 2001 From: penghuarong <42017444+penghuarong@users.noreply.github.com> Date: Tue, 5 Dec 2023 01:06:22 +0800 Subject: [PATCH 089/115] chore: the errors in the document have been correct (#583) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ the errors in the document have been correct ## Why ❔ the errors in the document have been correct ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --------- Co-authored-by: Igor Aleksanov --- docs/setup-dev.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/setup-dev.md b/docs/setup-dev.md index cb42a2b1c7c..b33a3848769 100644 --- a/docs/setup-dev.md +++ b/docs/setup-dev.md @@ -20,7 +20,7 @@ If you are a NixOS user or would like to have a reproducible environment, skip t Install `docker`. It is recommended to follow the instructions from the [official site](https://docs.docker.com/install/). -Note: currently official site proposes using Docker Desktop for linux, which is a GUI tool with plenty of quirks. If you +Note: currently official site proposes using Docker Desktop for Linux, which is a GUI tool with plenty of quirks. 
If you want to only have CLI tool, you need the `docker-ce` package and you can follow [this guide](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-on-ubuntu-20-04) for Ubuntu. From 445a39ba51131edbff260a9d737079ffcf4d7f48 Mon Sep 17 00:00:00 2001 From: umi <66466781+yiliminiqihang@users.noreply.github.com> Date: Tue, 5 Dec 2023 17:09:54 +0800 Subject: [PATCH 090/115] chore(docs): the errors in the document have been corrected. (#461) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - the errors in the document have been corrected. ## Why ❔ - the errors in the document have been corrected. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com> From c8295221fc1443727f449714a5ec06240d668473 Mon Sep 17 00:00:00 2001 From: Doll <148654386+Dollyerls@users.noreply.github.com> Date: Tue, 5 Dec 2023 17:28:41 +0800 Subject: [PATCH 091/115] chore: update document (#601) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - update document ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. 
--------- Co-authored-by: Igor Aleksanov Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com> --- docs/advanced/bytecode_compression.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/advanced/bytecode_compression.md b/docs/advanced/bytecode_compression.md index 2c9e42acd5d..0742da6db80 100644 --- a/docs/advanced/bytecode_compression.md +++ b/docs/advanced/bytecode_compression.md @@ -2,7 +2,7 @@ ## Overview -As we are a rollup - all the bytecodes that contracts use in our chain must be copied into L1 (so that the chain can be +As we are a rollup - all the bytecodes that contracts used in our chain must be copied into L1 (so that the chain can be reconstructed from L1 if needed). Given the want/need to cutdown on space used, bytecode is compressed prior to being posted to L1. At a high level @@ -31,7 +31,7 @@ Dictionary would be: 3 -> 0xC (count: 1) ``` -Note that '1' maps to '0xD', as it occurs twice, and first occurrence is earlier than first occurrence of 0xB, that also +Note that '1' maps to '0xD', as it occurs twice, and first occurrence is earlier than first occurence of 0xB, that also occurs twice. Compressed bytecode: @@ -99,10 +99,10 @@ with no change to the underlying algorithm. ### Verification & Publication The function `publishCompressBytecode` takes in both the original `_bytecode` and the `_rawCompressedData` , the latter -of which comes from the output of the server’s compression algorithm. Looping over the encoded data, derived from -`_rawCompressedData` , the corresponding chunks are pulled from the dictionary and compared to the original byte code, -reverting if there is a mismatch. After the encoded data has been verified, it is published to L1 and marked accordingly -within the `KnownCodesStorage` contract. +of which comes from the server’s compression algorithm output. 
Looping over the encoded data, derived from +`_rawCompressedData` , the corresponding chunks are retrieved from the dictionary and compared to the original byte +code, reverting if there is a mismatch. After the encoded data has been verified, it is published to L1 and marked +accordingly within the `KnownCodesStorage` contract. Pseudo-code implementation: From 714a8905d407de36a906a4b6d464ec2cab6eb3e8 Mon Sep 17 00:00:00 2001 From: gorden <148852660+gordera@users.noreply.github.com> Date: Tue, 5 Dec 2023 17:29:18 +0800 Subject: [PATCH 092/115] chore: fixed typos in documentation (#603) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - fixed typos in documentation ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --------- Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com> --- docs/advanced/pubdata.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/advanced/pubdata.md b/docs/advanced/pubdata.md index d34033d0a5e..2f5cdb25132 100644 --- a/docs/advanced/pubdata.md +++ b/docs/advanced/pubdata.md @@ -194,4 +194,4 @@ the writes will be repeated ones. Given the structure above, there is a tool, created by the [Equilibrium Team](https://equilibrium.co/) that solely uses L1 pubdata for reconstructing the state and verifying that the state root on L1 can be created using pubdata. A link to the repo can be found [here](https://github.com/eqlabs/zksync-state-reconstruct). The way the tool works is by parsing -out all the L1 pubdata for an executed batch, compaing the state roots after each batch is processed. 
+out all the L1 pubdata for an executed batch, comparing the state roots after each batch is processed. From 8f92aaea33f8dd9818075c3b92d6cfd3127a97fa Mon Sep 17 00:00:00 2001 From: Jean <148654781+oxJean@users.noreply.github.com> Date: Tue, 5 Dec 2023 18:09:53 +0800 Subject: [PATCH 093/115] chore: remove incorrect branch prompts (#594) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ remove incorrect branch prompts ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --------- Co-authored-by: Igor Aleksanov --- docs/advanced/gas_and_fees.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/advanced/gas_and_fees.md b/docs/advanced/gas_and_fees.md index b8f0e531e98..0f10f0ef1d6 100644 --- a/docs/advanced/gas_and_fees.md +++ b/docs/advanced/gas_and_fees.md @@ -127,5 +127,5 @@ There are a few reasons why refunds might be 'larger' on zkSync (i.e., why we mi https://github.com/matter-labs/zksync-era/blob/main/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs#L30 'gas_adjuster' [get_txs_fee_in_wei]: - https://github.com/matter-labs/zksync-era/blob/d590b3f0965a23eb0011779aab829d86d4fdc1d1/core/bin/zksync_core/src/api_server/tx_sender/mod.rs#L450 + https://github.com/matter-labs/zksync-era/blob/714a8905d407de36a906a4b6d464ec2cab6eb3e8/core/lib/zksync_core/src/api_server/tx_sender/mod.rs#L656 'get_txs_fee_in_wei' From 56776f929f547b1a91c5b70f89e87ef7dc25c65a Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 5 Dec 2023 12:47:12 +0200 Subject: [PATCH 094/115] fix: Sync protocol version between consensus and server blocks (#568) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit ## What ❔ Aligns the protocol version for consensus blocks with that of `SyncBlock`s. ## Why ❔ Required for gossip-based block syncing to work correctly. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --- Cargo.lock | 23 +++--- core/lib/types/Cargo.toml | 6 +- core/lib/zksync_core/Cargo.toml | 12 +-- .../src/sync_layer/gossip/conversions.rs | 16 +++- .../zksync_core/src/sync_layer/gossip/mod.rs | 5 +- .../src/sync_layer/gossip/tests.rs | 77 ++++++++++++------- prover/Cargo.lock | 13 ++-- spellcheck/era.dic | 1 + 8 files changed, 96 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 073f25b71bb..5b7b6cb3424 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8514,7 +8514,7 @@ dependencies = [ [[package]] name = "zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "once_cell", @@ -8541,7 +8541,7 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "once_cell", @@ -8561,7 +8561,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = 
"0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "blst", @@ -8579,7 +8579,7 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "prost", @@ -8601,7 +8601,7 @@ dependencies = [ [[package]] name = "zksync_consensus_network" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "async-trait", @@ -8625,7 +8625,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "bit-vec", @@ -8633,6 +8633,7 @@ dependencies = [ "prost", "rand 0.8.5", "serde", + "thiserror", "tracing", "zksync_concurrency", "zksync_consensus_crypto", @@ -8644,7 +8645,7 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" version = "0.1.0" -source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "async-trait", @@ -8661,7 +8662,7 @@ dependencies = [ [[package]] name = "zksync_consensus_sync_blocks" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "thiserror", @@ -8676,7 +8677,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "thiserror", "zksync_concurrency", @@ -9002,7 +9003,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "bit-vec", @@ -9020,7 +9021,7 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = 
"git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "heck 0.4.1", diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 6bf130bc70c..3e81701a04a 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -23,8 +23,8 @@ codegen = { git = "https://github.com/matter-labs/solidity_plonk_verifier.git", zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" } zk_evm_1_4_0 = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.0", package = "zk_evm" } zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } anyhow = "1.0.75" chrono = { version = "0.4", features = ["serde"] } @@ -55,4 +55,4 @@ tokio = { version = "1", features = ["rt", "macros"] } serde_with = { version = "1", features = ["hex"] } [build-dependencies] -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } diff --git a/core/lib/zksync_core/Cargo.toml b/core/lib/zksync_core/Cargo.toml index 
2bccff98ae9..2d313a21367 100644 --- a/core/lib/zksync_core/Cargo.toml +++ b/core/lib/zksync_core/Cargo.toml @@ -40,11 +40,11 @@ vlog = { path = "../vlog" } multivm = { path = "../multivm" } # Consensus dependenices -zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } -zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } -zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } +zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } +zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } +zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } prost = "0.12.1" serde = { version = "1.0", features = ["derive"] } @@ -98,4 +98,4 @@ tempfile = "3.0.2" test-casing = "0.1.2" [build-dependencies] -zksync_protobuf_build = { version = "0.1.0", git = 
"https://github.com/matter-labs/era-consensus.git", rev = "ed71b2e817c980a2daffef6a01885219e1dc6fa0" } +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } diff --git a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs index 410c2bfe204..ee2286917c1 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs @@ -12,6 +12,14 @@ use crate::{consensus, sync_layer::fetcher::FetchedBlock}; pub(super) fn sync_block_to_consensus_block(mut block: SyncBlock) -> anyhow::Result { let number = BlockNumber(block.number.0.into()); let consensus = block.consensus.take().context("Missing consensus fields")?; + let consensus_protocol_version = consensus.justification.message.protocol_version.as_u32(); + let block_protocol_version = block.protocol_version as u32; + anyhow::ensure!( + consensus_protocol_version == block_protocol_version, + "Protocol version for justification ({consensus_protocol_version}) differs from \ + SyncBlock.protocol_version={block_protocol_version}" + ); + let payload: consensus::Payload = block.try_into().context("Missing `SyncBlock` data")?; let payload = payload.encode(); let header = BlockHeader { @@ -36,11 +44,17 @@ impl FetchedBlock { let payload = consensus::Payload::decode(&block.payload) .context("Failed deserializing block payload")?; + let protocol_version = block.justification.message.protocol_version; + let protocol_version = + u16::try_from(protocol_version.as_u32()).context("Invalid protocol version")?; + let protocol_version = ProtocolVersionId::try_from(protocol_version) + .with_context(|| format!("Unsupported protocol version: {protocol_version}"))?; + Ok(Self { number: MiniblockNumber(number), l1_batch_number: payload.l1_batch_number, last_in_batch, - protocol_version: ProtocolVersionId::latest(), // 
FIXME + protocol_version, timestamp: payload.timestamp, l1_gas_price: payload.l1_gas_price, l2_fair_gas_price: payload.l2_fair_gas_price, diff --git a/core/lib/zksync_core/src/sync_layer/gossip/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/mod.rs index 2fd9f46aabb..2ec9ca5b60e 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/mod.rs @@ -72,10 +72,11 @@ async fn run_gossip_fetcher_inner( .await?; let buffered = Arc::new(Buffered::new(store)); let store = buffered.inner(); - let executor = Executor::new(executor_config, node_key, buffered.clone()) - .context("Node executor misconfiguration")?; scope::run!(ctx, |ctx, s| async { + let executor = Executor::new(ctx, executor_config, node_key, buffered.clone()) + .await + .context("Node executor misconfiguration")?; s.spawn_bg(async { store .run_background_tasks(ctx) diff --git a/core/lib/zksync_core/src/sync_layer/gossip/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/tests.rs index 30597189f0b..d8775d3637c 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/tests.rs @@ -11,7 +11,8 @@ use zksync_consensus_roles::validator::{self, FinalBlock}; use zksync_consensus_storage::{InMemoryStorage, WriteBlockStore}; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::{ - api::en::SyncBlock, block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber, H256, + api::en::SyncBlock, block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber, + ProtocolVersionId, H256, }; use super::*; @@ -63,12 +64,19 @@ pub(super) async fn block_payload( consensus::Payload::try_from(sync_block).unwrap() } +fn latest_protocol_version() -> validator::ProtocolVersion { + (ProtocolVersionId::latest() as u32) + .try_into() + .expect("latest protocol version is invalid") +} + /// Adds consensus information for the specified `count` of miniblocks, starting from the genesis. 
pub(super) async fn add_consensus_fields( storage: &mut StorageProcessor<'_>, validator_key: &validator::SecretKey, block_numbers: ops::Range, ) { + let protocol_version = latest_protocol_version(); let mut prev_block_hash = validator::BlockHeaderHash::from_bytes([0; 32]); let validator_set = validator::ValidatorSet::new([validator_key.public()]).unwrap(); for number in block_numbers { @@ -79,7 +87,7 @@ pub(super) async fn add_consensus_fields( payload: payload.hash(), }; let replica_commit = validator::ReplicaCommit { - protocol_version: validator::CURRENT_VERSION, + protocol_version, view: validator::ViewNumber(number.into()), proposal: block_header, }; @@ -113,7 +121,7 @@ pub(super) fn create_genesis_block( }; let validator_set = validator::ValidatorSet::new([validator_key.public()]).unwrap(); let replica_commit = validator::ReplicaCommit { - protocol_version: validator::CURRENT_VERSION, + protocol_version: latest_protocol_version(), view: validator::ViewNumber(number), proposal: block_header, }; @@ -182,10 +190,12 @@ async fn syncing_via_gossip_fetcher(delay_first_block: bool, delay_second_block: let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await; let mut storage = pool.access_storage().await.unwrap(); + let protocol_version = latest_protocol_version(); let genesis_block_payload = block_payload(&mut storage, 0).await.encode(); let ctx = &ctx::test_root(&ctx::AffineClock::new(CLOCK_SPEEDUP as f64)); let rng = &mut ctx.rng(); - let mut validator = FullValidatorConfig::for_single_validator(rng, genesis_block_payload); + let mut validator = + FullValidatorConfig::for_single_validator(rng, protocol_version, genesis_block_payload); let validator_set = validator.node_config.validators.clone(); let external_node = validator.connect_full_node(rng); @@ -207,19 +217,21 @@ async fn syncing_via_gossip_fetcher(delay_first_block: bool, delay_second_block: .unwrap(); } } - let validator = Executor::new( - validator.node_config, - validator.node_key, 
- validator_storage.clone(), - ) - .unwrap(); - // ^ We intentionally do not run consensus on the validator node, since it'll produce blocks - // with payloads that cannot be parsed by the external node. let (actions_sender, mut actions) = ActionQueue::new(); let (keeper_actions_sender, keeper_actions) = ActionQueue::new(); let state_keeper = StateKeeperHandles::new(pool.clone(), keeper_actions, &[&tx_hashes]).await; scope::run!(ctx, |ctx, s| async { + let validator = Executor::new( + ctx, + validator.node_config, + validator.node_key, + validator_storage.clone(), + ) + .await?; + // ^ We intentionally do not run consensus on the validator node, since it'll produce blocks + // with payloads that cannot be parsed by the external node. + s.spawn_bg(validator.run(ctx)); s.spawn_bg(run_gossip_fetcher_inner( ctx, @@ -309,10 +321,12 @@ async fn syncing_via_gossip_fetcher_with_multiple_l1_batches(initial_block_count let tx_hashes: Vec<_> = tx_hashes.iter().map(Vec::as_slice).collect(); let mut storage = pool.access_storage().await.unwrap(); + let protocol_version = latest_protocol_version(); let genesis_block_payload = block_payload(&mut storage, 0).await.encode(); let ctx = &ctx::test_root(&ctx::AffineClock::new(CLOCK_SPEEDUP as f64)); let rng = &mut ctx.rng(); - let mut validator = FullValidatorConfig::for_single_validator(rng, genesis_block_payload); + let mut validator = + FullValidatorConfig::for_single_validator(rng, protocol_version, genesis_block_payload); let validator_set = validator.node_config.validators.clone(); let external_node = validator.connect_full_node(rng); @@ -327,16 +341,18 @@ async fn syncing_via_gossip_fetcher_with_multiple_l1_batches(initial_block_count for block in initial_blocks { validator_storage.put_block(ctx, block).await.unwrap(); } - let validator = Executor::new( - validator.node_config, - validator.node_key, - validator_storage.clone(), - ) - .unwrap(); let (actions_sender, actions) = ActionQueue::new(); let state_keeper = 
StateKeeperHandles::new(pool.clone(), actions, &tx_hashes).await; scope::run!(ctx, |ctx, s| async { + let validator = Executor::new( + ctx, + validator.node_config, + validator.node_key, + validator_storage.clone(), + ) + .await?; + s.spawn_bg(validator.run(ctx)); s.spawn_bg(async { for block in delayed_blocks { @@ -388,8 +404,12 @@ async fn syncing_from_non_zero_block(first_block_number: u32) { .encode(); let ctx = &ctx::test_root(&ctx::AffineClock::new(CLOCK_SPEEDUP as f64)); let rng = &mut ctx.rng(); - let mut validator = - FullValidatorConfig::for_single_validator(rng, genesis_block_payload.clone()); + let protocol_version = latest_protocol_version(); + let mut validator = FullValidatorConfig::for_single_validator( + rng, + protocol_version, + genesis_block_payload.clone(), + ); // Override the genesis block since it has an incorrect block number. let genesis_block = create_genesis_block( &validator.validator_key, @@ -418,13 +438,6 @@ async fn syncing_from_non_zero_block(first_block_number: u32) { tracing::trace!("Re-inserted blocks to node storage"); let validator_storage = Arc::new(InMemoryStorage::new(genesis_block)); - let validator = Executor::new( - validator.node_config, - validator.node_key, - validator_storage.clone(), - ) - .unwrap(); - let tx_hashes = if first_block_number >= 2 { &tx_hashes[1..] 
// Skip transactions in L1 batch #1, since they won't be executed } else { @@ -433,6 +446,14 @@ async fn syncing_from_non_zero_block(first_block_number: u32) { let (actions_sender, actions) = ActionQueue::new(); let state_keeper = StateKeeperHandles::new(pool.clone(), actions, tx_hashes).await; scope::run!(ctx, |ctx, s| async { + let validator = Executor::new( + ctx, + validator.node_config, + validator.node_key, + validator_storage.clone(), + ) + .await?; + s.spawn_bg(validator.run(ctx)); s.spawn_bg(async { for block in &delayed_blocks { diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 823b426d4cc..95ff42d7052 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7059,7 +7059,7 @@ dependencies = [ [[package]] name = "zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "once_cell", @@ -7086,7 +7086,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "blst", @@ -7104,7 +7104,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", 
"bit-vec", @@ -7112,6 +7112,7 @@ dependencies = [ "prost", "rand 0.8.5", "serde", + "thiserror", "tracing", "zksync_concurrency", "zksync_consensus_crypto", @@ -7123,7 +7124,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "thiserror", "zksync_concurrency", @@ -7301,7 +7302,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "bit-vec", @@ -7319,7 +7320,7 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=ed71b2e817c980a2daffef6a01885219e1dc6fa0#ed71b2e817c980a2daffef6a01885219e1dc6fa0" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=da015d4c94b19962bc11622b6cc256e214256555#da015d4c94b19962bc11622b6cc256e214256555" dependencies = [ "anyhow", "heck 0.4.1", diff --git a/spellcheck/era.dic b/spellcheck/era.dic index 666ee047fd2..13dd303a3dc 100644 --- a/spellcheck/era.dic +++ b/spellcheck/era.dic @@ -261,6 +261,7 @@ blockchains sidechain sidechains tokenomics +validator validator's validator CHAINID From c6786190a0b98f308b84f40f84ffff442ae1f63b Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Tue, 5 Dec 2023 12:59:45 +0100 Subject: [PATCH 095/115] chore(main): release core 18.5.0 (#593) :robot: I have 
created a release *beep* *boop* --- ## [18.5.0](https://github.com/matter-labs/zksync-era/compare/core-v18.4.0...core-v18.5.0) (2023-12-05) ### Features * Add metric to CallTracer for calculating maximum depth of the calls ([#535](https://github.com/matter-labs/zksync-era/issues/535)) ([19c84ce](https://github.com/matter-labs/zksync-era/commit/19c84ce624d53735133fa3b12c7f980e8c14260d)) * Add various metrics to the Prover subsystems ([#541](https://github.com/matter-labs/zksync-era/issues/541)) ([58a4e6c](https://github.com/matter-labs/zksync-era/commit/58a4e6c4c22bd7f002ede1c6def0dc260706185e)) ### Bug Fixes * Sync protocol version between consensus and server blocks ([#568](https://github.com/matter-labs/zksync-era/issues/568)) ([56776f9](https://github.com/matter-labs/zksync-era/commit/56776f929f547b1a91c5b70f89e87ef7dc25c65a)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
--- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 9c7f15805a9..315ef45f25d 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "18.4.0", + "core": "18.5.0", "prover": "9.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 04f3f13e716..cc48a8b8c8d 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [18.5.0](https://github.com/matter-labs/zksync-era/compare/core-v18.4.0...core-v18.5.0) (2023-12-05) + + +### Features + +* Add metric to CallTracer for calculating maximum depth of the calls ([#535](https://github.com/matter-labs/zksync-era/issues/535)) ([19c84ce](https://github.com/matter-labs/zksync-era/commit/19c84ce624d53735133fa3b12c7f980e8c14260d)) +* Add various metrics to the Prover subsystems ([#541](https://github.com/matter-labs/zksync-era/issues/541)) ([58a4e6c](https://github.com/matter-labs/zksync-era/commit/58a4e6c4c22bd7f002ede1c6def0dc260706185e)) + + +### Bug Fixes + +* Sync protocol version between consensus and server blocks ([#568](https://github.com/matter-labs/zksync-era/issues/568)) ([56776f9](https://github.com/matter-labs/zksync-era/commit/56776f929f547b1a91c5b70f89e87ef7dc25c65a)) + ## [18.4.0](https://github.com/matter-labs/zksync-era/compare/core-v18.3.1...core-v18.4.0) (2023-12-01) From 8ace0bea93bdc7eac0babb165b2e186b4d98aabd Mon Sep 17 00:00:00 2001 From: Jack <87960263+ylmin@users.noreply.github.com> Date: Tue, 5 Dec 2023 20:05:06 +0800 Subject: [PATCH 096/115] chore: fix link (#576) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ fix link ## Why ❔ - fix link ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). 
- [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --------- Co-authored-by: Igor Aleksanov Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com> --- docs/advanced/how_call_works.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/advanced/how_call_works.md b/docs/advanced/how_call_works.md index cf9adc12f68..4ba4859c410 100644 --- a/docs/advanced/how_call_works.md +++ b/docs/advanced/how_call_works.md @@ -115,7 +115,7 @@ In this article, we covered the 'life of a call' from the RPC to the inner worki https://github.com/matter-labs/zksync-era/blob/main/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs 'execution sandbox' [vm_code]: - https://github.com/matter-labs/zksync-2-dev/blob/dc3b3d6b055c558b0e1a76ef5de3184291489d9f/core/lib/vm/src/vm.rs#L544 + https://github.com/matter-labs/zksync-era/blob/ccd13ce88ff52c3135d794c6f92bec3b16f2210f/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs#L108 'vm code' [bootloader_code]: https://github.com/matter-labs/era-system-contracts/blob/93a375ef6ccfe0181a248cb712c88a1babe1f119/bootloader/bootloader.yul From e71ee349009ba42289e08ec352fc92e9e78c18bb Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Tue, 5 Dec 2023 13:24:26 +0100 Subject: [PATCH 097/115] chore(main): release prover 9.1.0 (#460) :robot: I have created a release *beep* *boop* --- ## [9.1.0](https://github.com/matter-labs/zksync-era/compare/prover-v9.0.0...prover-v9.1.0) (2023-12-05) ### Features * Add various metrics to the Prover subsystems ([#541](https://github.com/matter-labs/zksync-era/issues/541)) ([58a4e6c](https://github.com/matter-labs/zksync-era/commit/58a4e6c4c22bd7f002ede1c6def0dc260706185e)) * adds spellchecker 
workflow, and corrects misspelled words ([#559](https://github.com/matter-labs/zksync-era/issues/559)) ([beac0a8](https://github.com/matter-labs/zksync-era/commit/beac0a85bb1535b05c395057171f197cd976bf82)) * **dal:** Do not load config from env in DAL crate ([#444](https://github.com/matter-labs/zksync-era/issues/444)) ([3fe1bb2](https://github.com/matter-labs/zksync-era/commit/3fe1bb21f8d33557353f447811ca86c60f1fe51a)) * **en:** Implement gossip fetcher ([#371](https://github.com/matter-labs/zksync-era/issues/371)) ([a49b61d](https://github.com/matter-labs/zksync-era/commit/a49b61d7769f9dd7b4cbc4905f8f8a23abfb541c)) * **hyperchain:** Adding prover related commands to zk stack ([#440](https://github.com/matter-labs/zksync-era/issues/440)) ([580cada](https://github.com/matter-labs/zksync-era/commit/580cada003bdfe2fff686a1fc3ce001b4959aa4d)) * **job-processor:** report attempts metrics ([#448](https://github.com/matter-labs/zksync-era/issues/448)) ([ab31f03](https://github.com/matter-labs/zksync-era/commit/ab31f031dfcaa7ddf296786ddccb78e8edd2d3c5)) * **merkle tree:** Allow random-order tree recovery ([#485](https://github.com/matter-labs/zksync-era/issues/485)) ([146e4cf](https://github.com/matter-labs/zksync-era/commit/146e4cf2f8d890ff0a8d33229e224442e14be437)) * **witness-generator:** add logs to leaf aggregation job ([#542](https://github.com/matter-labs/zksync-era/issues/542)) ([7e95a3a](https://github.com/matter-labs/zksync-era/commit/7e95a3a66ea48be7b6059d34630e22c503399bdf)) ### Bug Fixes * Change no pending batches 404 error into a success response ([#279](https://github.com/matter-labs/zksync-era/issues/279)) ([e8fd805](https://github.com/matter-labs/zksync-era/commit/e8fd805c8be7980de7676bca87cfc2d445aab9e1)) * **ci:** Use the same nightly rust ([#530](https://github.com/matter-labs/zksync-era/issues/530)) ([67ef133](https://github.com/matter-labs/zksync-era/commit/67ef1339d42786efbeb83c22fac99f3bf5dd4380)) * **crypto:** update shivini to switch to era-cuda 
([#469](https://github.com/matter-labs/zksync-era/issues/469)) ([38bb482](https://github.com/matter-labs/zksync-era/commit/38bb4823c7b5e0e651d9f531feede66c24afd19f)) * Sync protocol version between consensus and server blocks ([#568](https://github.com/matter-labs/zksync-era/issues/568)) ([56776f9](https://github.com/matter-labs/zksync-era/commit/56776f929f547b1a91c5b70f89e87ef7dc25c65a)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). Co-authored-by: Artem Makhortov <13339874+artmakh@users.noreply.github.com> --- .github/release-please/manifest.json | 2 +- prover/CHANGELOG.md | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 315ef45f25d..e52e2f0a537 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", "core": "18.5.0", - "prover": "9.0.0" + "prover": "9.1.0" } diff --git a/prover/CHANGELOG.md b/prover/CHANGELOG.md index ea18d81dbcc..7c3b072b147 100644 --- a/prover/CHANGELOG.md +++ b/prover/CHANGELOG.md @@ -1,5 +1,27 @@ # Changelog +## [9.1.0](https://github.com/matter-labs/zksync-era/compare/prover-v9.0.0...prover-v9.1.0) (2023-12-05) + + +### Features + +* Add various metrics to the Prover subsystems ([#541](https://github.com/matter-labs/zksync-era/issues/541)) ([58a4e6c](https://github.com/matter-labs/zksync-era/commit/58a4e6c4c22bd7f002ede1c6def0dc260706185e)) +* adds spellchecker workflow, and corrects misspelled words ([#559](https://github.com/matter-labs/zksync-era/issues/559)) ([beac0a8](https://github.com/matter-labs/zksync-era/commit/beac0a85bb1535b05c395057171f197cd976bf82)) +* **dal:** Do not load config from env in DAL crate ([#444](https://github.com/matter-labs/zksync-era/issues/444)) 
([3fe1bb2](https://github.com/matter-labs/zksync-era/commit/3fe1bb21f8d33557353f447811ca86c60f1fe51a)) +* **en:** Implement gossip fetcher ([#371](https://github.com/matter-labs/zksync-era/issues/371)) ([a49b61d](https://github.com/matter-labs/zksync-era/commit/a49b61d7769f9dd7b4cbc4905f8f8a23abfb541c)) +* **hyperchain:** Adding prover related commands to zk stack ([#440](https://github.com/matter-labs/zksync-era/issues/440)) ([580cada](https://github.com/matter-labs/zksync-era/commit/580cada003bdfe2fff686a1fc3ce001b4959aa4d)) +* **job-processor:** report attempts metrics ([#448](https://github.com/matter-labs/zksync-era/issues/448)) ([ab31f03](https://github.com/matter-labs/zksync-era/commit/ab31f031dfcaa7ddf296786ddccb78e8edd2d3c5)) +* **merkle tree:** Allow random-order tree recovery ([#485](https://github.com/matter-labs/zksync-era/issues/485)) ([146e4cf](https://github.com/matter-labs/zksync-era/commit/146e4cf2f8d890ff0a8d33229e224442e14be437)) +* **witness-generator:** add logs to leaf aggregation job ([#542](https://github.com/matter-labs/zksync-era/issues/542)) ([7e95a3a](https://github.com/matter-labs/zksync-era/commit/7e95a3a66ea48be7b6059d34630e22c503399bdf)) + + +### Bug Fixes + +* Change no pending batches 404 error into a success response ([#279](https://github.com/matter-labs/zksync-era/issues/279)) ([e8fd805](https://github.com/matter-labs/zksync-era/commit/e8fd805c8be7980de7676bca87cfc2d445aab9e1)) +* **ci:** Use the same nightly rust ([#530](https://github.com/matter-labs/zksync-era/issues/530)) ([67ef133](https://github.com/matter-labs/zksync-era/commit/67ef1339d42786efbeb83c22fac99f3bf5dd4380)) +* **crypto:** update shivini to switch to era-cuda ([#469](https://github.com/matter-labs/zksync-era/issues/469)) ([38bb482](https://github.com/matter-labs/zksync-era/commit/38bb4823c7b5e0e651d9f531feede66c24afd19f)) +* Sync protocol version between consensus and server blocks ([#568](https://github.com/matter-labs/zksync-era/issues/568)) 
([56776f9](https://github.com/matter-labs/zksync-era/commit/56776f929f547b1a91c5b70f89e87ef7dc25c65a)) + ## [9.0.0](https://github.com/matter-labs/zksync-era/compare/prover-v8.1.0...prover-v9.0.0) (2023-11-09) From e2e94ff59deffda11f45d19d8075ed98a0590780 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Tue, 5 Dec 2023 14:27:32 +0100 Subject: [PATCH 098/115] chore(main): release prover 10.0.0 (#608) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit :robot: I have created a release *beep* *boop* --- ## [10.0.0](https://github.com/matter-labs/zksync-era/compare/prover-v9.1.0...prover-v10.0.0) (2023-12-05) ### ⚠ BREAKING CHANGES * boojum integration ([#112](https://github.com/matter-labs/zksync-era/issues/112)) * Update to protocol version 17 ([#384](https://github.com/matter-labs/zksync-era/issues/384)) ### Features * Add various metrics to the Prover subsystems ([#541](https://github.com/matter-labs/zksync-era/issues/541)) ([58a4e6c](https://github.com/matter-labs/zksync-era/commit/58a4e6c4c22bd7f002ede1c6def0dc260706185e)) * adds spellchecker workflow, and corrects misspelled words ([#559](https://github.com/matter-labs/zksync-era/issues/559)) ([beac0a8](https://github.com/matter-labs/zksync-era/commit/beac0a85bb1535b05c395057171f197cd976bf82)) * boojum integration ([#112](https://github.com/matter-labs/zksync-era/issues/112)) ([e76d346](https://github.com/matter-labs/zksync-era/commit/e76d346d02ded771dea380aa8240da32119d7198)) * **boojum:** Adding README to prover directory ([#189](https://github.com/matter-labs/zksync-era/issues/189)) ([c175033](https://github.com/matter-labs/zksync-era/commit/c175033b48a8da4969d88b6850dd0247c4004794)) * **config:** Extract everything not related to the env config from zksync_config crate ([#245](https://github.com/matter-labs/zksync-era/issues/245)) 
([42c64e9](https://github.com/matter-labs/zksync-era/commit/42c64e91e13b6b37619f1459f927fa046ef01097)) * **core:** Split config definitions and deserialization ([#414](https://github.com/matter-labs/zksync-era/issues/414)) ([c7c6b32](https://github.com/matter-labs/zksync-era/commit/c7c6b321a63dbcc7f1af045aa7416e697beab08f)) * **dal:** Do not load config from env in DAL crate ([#444](https://github.com/matter-labs/zksync-era/issues/444)) ([3fe1bb2](https://github.com/matter-labs/zksync-era/commit/3fe1bb21f8d33557353f447811ca86c60f1fe51a)) * **en:** Implement gossip fetcher ([#371](https://github.com/matter-labs/zksync-era/issues/371)) ([a49b61d](https://github.com/matter-labs/zksync-era/commit/a49b61d7769f9dd7b4cbc4905f8f8a23abfb541c)) * **fri-prover:** In witness - panic if protocol version is not available ([#192](https://github.com/matter-labs/zksync-era/issues/192)) ([0403749](https://github.com/matter-labs/zksync-era/commit/040374900656c854a7b9de32e5dbaf47c1c47889)) * **hyperchain:** Adding prover related commands to zk stack ([#440](https://github.com/matter-labs/zksync-era/issues/440)) ([580cada](https://github.com/matter-labs/zksync-era/commit/580cada003bdfe2fff686a1fc3ce001b4959aa4d)) * **job-processor:** report attempts metrics ([#448](https://github.com/matter-labs/zksync-era/issues/448)) ([ab31f03](https://github.com/matter-labs/zksync-era/commit/ab31f031dfcaa7ddf296786ddccb78e8edd2d3c5)) * **merkle tree:** Allow random-order tree recovery ([#485](https://github.com/matter-labs/zksync-era/issues/485)) ([146e4cf](https://github.com/matter-labs/zksync-era/commit/146e4cf2f8d890ff0a8d33229e224442e14be437)) * **merkle tree:** Snapshot recovery for Merkle tree ([#163](https://github.com/matter-labs/zksync-era/issues/163)) ([9e20703](https://github.com/matter-labs/zksync-era/commit/9e2070380e6720d84563a14a2246fc18fdb1f8f9)) * Rewrite server binary to use `vise` metrics ([#120](https://github.com/matter-labs/zksync-era/issues/120)) 
([26ee1fb](https://github.com/matter-labs/zksync-era/commit/26ee1fbb16cbd7c4fad334cbc6804e7d779029b6)) * Update to protocol version 17 ([#384](https://github.com/matter-labs/zksync-era/issues/384)) ([ba271a5](https://github.com/matter-labs/zksync-era/commit/ba271a5f34d64d04c0135b8811685b80f26a8c32)) * **vm:** Move all vm versions to the one crate ([#249](https://github.com/matter-labs/zksync-era/issues/249)) ([e3fb489](https://github.com/matter-labs/zksync-era/commit/e3fb4894d08aa98a84e64eaa95b51001055cf911)) * **witness-generator:** add logs to leaf aggregation job ([#542](https://github.com/matter-labs/zksync-era/issues/542)) ([7e95a3a](https://github.com/matter-labs/zksync-era/commit/7e95a3a66ea48be7b6059d34630e22c503399bdf)) ### Bug Fixes * Change no pending batches 404 error into a success response ([#279](https://github.com/matter-labs/zksync-era/issues/279)) ([e8fd805](https://github.com/matter-labs/zksync-era/commit/e8fd805c8be7980de7676bca87cfc2d445aab9e1)) * **ci:** Use the same nightly rust ([#530](https://github.com/matter-labs/zksync-era/issues/530)) ([67ef133](https://github.com/matter-labs/zksync-era/commit/67ef1339d42786efbeb83c22fac99f3bf5dd4380)) * **crypto:** update deps to include circuit fixes ([#402](https://github.com/matter-labs/zksync-era/issues/402)) ([4c82015](https://github.com/matter-labs/zksync-era/commit/4c820150714dfb01c304c43e27f217f17deba449)) * **crypto:** update shivini to switch to era-cuda ([#469](https://github.com/matter-labs/zksync-era/issues/469)) ([38bb482](https://github.com/matter-labs/zksync-era/commit/38bb4823c7b5e0e651d9f531feede66c24afd19f)) * **crypto:** update snark-vk to be used in server and update args for proof wrapping ([#240](https://github.com/matter-labs/zksync-era/issues/240)) ([4a5c54c](https://github.com/matter-labs/zksync-era/commit/4a5c54c48bbc100c29fa719c4b1dc3535743003d)) * **docs:** Add links to setup-data keys ([#360](https://github.com/matter-labs/zksync-era/issues/360)) 
([1d4fe69](https://github.com/matter-labs/zksync-era/commit/1d4fe697e4e98a8e64642cde4fe202338ce5ec61)) * **path:** update gpu prover setup data path to remove extra gpu suffix ([#454](https://github.com/matter-labs/zksync-era/issues/454)) ([2e145c1](https://github.com/matter-labs/zksync-era/commit/2e145c192b348b2756acf61fac5bfe0ca5a6575f)) * **prover-fri:** Update setup loading for node agg circuit ([#323](https://github.com/matter-labs/zksync-era/issues/323)) ([d1034b0](https://github.com/matter-labs/zksync-era/commit/d1034b05754219b603508ef79c114d908c94c1e9)) * **prover-logging:** tasks_allowed_to_finish set to true for 1 off jobs ([#227](https://github.com/matter-labs/zksync-era/issues/227)) ([0fac66f](https://github.com/matter-labs/zksync-era/commit/0fac66f5ff86cc801ea0bb6f9272cb397cd03a95)) * Sync protocol version between consensus and server blocks ([#568](https://github.com/matter-labs/zksync-era/issues/568)) ([56776f9](https://github.com/matter-labs/zksync-era/commit/56776f929f547b1a91c5b70f89e87ef7dc25c65a)) * Update prover to use the correct storage oracle ([#446](https://github.com/matter-labs/zksync-era/issues/446)) ([835dd82](https://github.com/matter-labs/zksync-era/commit/835dd828ef5610a446ec8c456e4df1def0e213ab)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
--- .github/release-please/manifest.json | 2 +- prover/CHANGELOG.md | 43 ++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index e52e2f0a537..4d3b72a795f 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", "core": "18.5.0", - "prover": "9.1.0" + "prover": "10.0.0" } diff --git a/prover/CHANGELOG.md b/prover/CHANGELOG.md index 7c3b072b147..3038c52081a 100644 --- a/prover/CHANGELOG.md +++ b/prover/CHANGELOG.md @@ -1,5 +1,48 @@ # Changelog +## [10.0.0](https://github.com/matter-labs/zksync-era/compare/prover-v9.1.0...prover-v10.0.0) (2023-12-05) + + +### ⚠ BREAKING CHANGES + +* boojum integration ([#112](https://github.com/matter-labs/zksync-era/issues/112)) +* Update to protocol version 17 ([#384](https://github.com/matter-labs/zksync-era/issues/384)) + +### Features + +* Add various metrics to the Prover subsystems ([#541](https://github.com/matter-labs/zksync-era/issues/541)) ([58a4e6c](https://github.com/matter-labs/zksync-era/commit/58a4e6c4c22bd7f002ede1c6def0dc260706185e)) +* adds spellchecker workflow, and corrects misspelled words ([#559](https://github.com/matter-labs/zksync-era/issues/559)) ([beac0a8](https://github.com/matter-labs/zksync-era/commit/beac0a85bb1535b05c395057171f197cd976bf82)) +* boojum integration ([#112](https://github.com/matter-labs/zksync-era/issues/112)) ([e76d346](https://github.com/matter-labs/zksync-era/commit/e76d346d02ded771dea380aa8240da32119d7198)) +* **boojum:** Adding README to prover directory ([#189](https://github.com/matter-labs/zksync-era/issues/189)) ([c175033](https://github.com/matter-labs/zksync-era/commit/c175033b48a8da4969d88b6850dd0247c4004794)) +* **config:** Extract everything not related to the env config from zksync_config crate ([#245](https://github.com/matter-labs/zksync-era/issues/245)) 
([42c64e9](https://github.com/matter-labs/zksync-era/commit/42c64e91e13b6b37619f1459f927fa046ef01097)) +* **core:** Split config definitions and deserialization ([#414](https://github.com/matter-labs/zksync-era/issues/414)) ([c7c6b32](https://github.com/matter-labs/zksync-era/commit/c7c6b321a63dbcc7f1af045aa7416e697beab08f)) +* **dal:** Do not load config from env in DAL crate ([#444](https://github.com/matter-labs/zksync-era/issues/444)) ([3fe1bb2](https://github.com/matter-labs/zksync-era/commit/3fe1bb21f8d33557353f447811ca86c60f1fe51a)) +* **en:** Implement gossip fetcher ([#371](https://github.com/matter-labs/zksync-era/issues/371)) ([a49b61d](https://github.com/matter-labs/zksync-era/commit/a49b61d7769f9dd7b4cbc4905f8f8a23abfb541c)) +* **fri-prover:** In witness - panic if protocol version is not available ([#192](https://github.com/matter-labs/zksync-era/issues/192)) ([0403749](https://github.com/matter-labs/zksync-era/commit/040374900656c854a7b9de32e5dbaf47c1c47889)) +* **hyperchain:** Adding prover related commands to zk stack ([#440](https://github.com/matter-labs/zksync-era/issues/440)) ([580cada](https://github.com/matter-labs/zksync-era/commit/580cada003bdfe2fff686a1fc3ce001b4959aa4d)) +* **job-processor:** report attempts metrics ([#448](https://github.com/matter-labs/zksync-era/issues/448)) ([ab31f03](https://github.com/matter-labs/zksync-era/commit/ab31f031dfcaa7ddf296786ddccb78e8edd2d3c5)) +* **merkle tree:** Allow random-order tree recovery ([#485](https://github.com/matter-labs/zksync-era/issues/485)) ([146e4cf](https://github.com/matter-labs/zksync-era/commit/146e4cf2f8d890ff0a8d33229e224442e14be437)) +* **merkle tree:** Snapshot recovery for Merkle tree ([#163](https://github.com/matter-labs/zksync-era/issues/163)) ([9e20703](https://github.com/matter-labs/zksync-era/commit/9e2070380e6720d84563a14a2246fc18fdb1f8f9)) +* Rewrite server binary to use `vise` metrics ([#120](https://github.com/matter-labs/zksync-era/issues/120)) 
([26ee1fb](https://github.com/matter-labs/zksync-era/commit/26ee1fbb16cbd7c4fad334cbc6804e7d779029b6)) +* Update to protocol version 17 ([#384](https://github.com/matter-labs/zksync-era/issues/384)) ([ba271a5](https://github.com/matter-labs/zksync-era/commit/ba271a5f34d64d04c0135b8811685b80f26a8c32)) +* **vm:** Move all vm versions to the one crate ([#249](https://github.com/matter-labs/zksync-era/issues/249)) ([e3fb489](https://github.com/matter-labs/zksync-era/commit/e3fb4894d08aa98a84e64eaa95b51001055cf911)) +* **witness-generator:** add logs to leaf aggregation job ([#542](https://github.com/matter-labs/zksync-era/issues/542)) ([7e95a3a](https://github.com/matter-labs/zksync-era/commit/7e95a3a66ea48be7b6059d34630e22c503399bdf)) + + +### Bug Fixes + +* Change no pending batches 404 error into a success response ([#279](https://github.com/matter-labs/zksync-era/issues/279)) ([e8fd805](https://github.com/matter-labs/zksync-era/commit/e8fd805c8be7980de7676bca87cfc2d445aab9e1)) +* **ci:** Use the same nightly rust ([#530](https://github.com/matter-labs/zksync-era/issues/530)) ([67ef133](https://github.com/matter-labs/zksync-era/commit/67ef1339d42786efbeb83c22fac99f3bf5dd4380)) +* **crypto:** update deps to include circuit fixes ([#402](https://github.com/matter-labs/zksync-era/issues/402)) ([4c82015](https://github.com/matter-labs/zksync-era/commit/4c820150714dfb01c304c43e27f217f17deba449)) +* **crypto:** update shivini to switch to era-cuda ([#469](https://github.com/matter-labs/zksync-era/issues/469)) ([38bb482](https://github.com/matter-labs/zksync-era/commit/38bb4823c7b5e0e651d9f531feede66c24afd19f)) +* **crypto:** update snark-vk to be used in server and update args for proof wrapping ([#240](https://github.com/matter-labs/zksync-era/issues/240)) ([4a5c54c](https://github.com/matter-labs/zksync-era/commit/4a5c54c48bbc100c29fa719c4b1dc3535743003d)) +* **docs:** Add links to setup-data keys ([#360](https://github.com/matter-labs/zksync-era/issues/360)) 
([1d4fe69](https://github.com/matter-labs/zksync-era/commit/1d4fe697e4e98a8e64642cde4fe202338ce5ec61)) +* **path:** update gpu prover setup data path to remove extra gpu suffix ([#454](https://github.com/matter-labs/zksync-era/issues/454)) ([2e145c1](https://github.com/matter-labs/zksync-era/commit/2e145c192b348b2756acf61fac5bfe0ca5a6575f)) +* **prover-fri:** Update setup loading for node agg circuit ([#323](https://github.com/matter-labs/zksync-era/issues/323)) ([d1034b0](https://github.com/matter-labs/zksync-era/commit/d1034b05754219b603508ef79c114d908c94c1e9)) +* **prover-logging:** tasks_allowed_to_finish set to true for 1 off jobs ([#227](https://github.com/matter-labs/zksync-era/issues/227)) ([0fac66f](https://github.com/matter-labs/zksync-era/commit/0fac66f5ff86cc801ea0bb6f9272cb397cd03a95)) +* Sync protocol version between consensus and server blocks ([#568](https://github.com/matter-labs/zksync-era/issues/568)) ([56776f9](https://github.com/matter-labs/zksync-era/commit/56776f929f547b1a91c5b70f89e87ef7dc25c65a)) +* Update prover to use the correct storage oracle ([#446](https://github.com/matter-labs/zksync-era/issues/446)) ([835dd82](https://github.com/matter-labs/zksync-era/commit/835dd828ef5610a446ec8c456e4df1def0e213ab)) + ## [9.1.0](https://github.com/matter-labs/zksync-era/compare/prover-v9.0.0...prover-v9.1.0) (2023-12-05) From 83d5a2a869177d4f512fe709b54f09c2269db477 Mon Sep 17 00:00:00 2001 From: pompon0 Date: Tue, 5 Dec 2023 18:06:51 +0300 Subject: [PATCH 099/115] refactor: Removed protobuf encoding from zksync_types (#562) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Removed protobuf encoding from zksync_types. ## Why ❔ To make zksync_types have less dependencies. 
--- Cargo.lock | 5 +- core/lib/dal/Cargo.toml | 10 +- core/lib/{types => dal}/build.rs | 4 +- core/lib/dal/src/blocks_dal.rs | 5 +- core/lib/dal/src/models/mod.rs | 1 + .../src => dal/src/models}/proto/mod.proto | 2 +- core/lib/dal/src/models/proto/mod.rs | 2 + core/lib/dal/src/models/storage_sync.rs | 146 ++++++++++++++---- core/lib/dal/src/sync_dal.rs | 6 +- core/lib/types/Cargo.toml | 3 - core/lib/types/src/api/en.rs | 8 +- core/lib/types/src/block.rs | 42 ----- core/lib/types/src/lib.rs | 2 - core/lib/types/src/proto/mod.rs | 2 - .../lib/zksync_core/src/sync_layer/fetcher.rs | 25 +-- .../src/sync_layer/gossip/conversions.rs | 15 +- .../src/sync_layer/gossip/storage/mod.rs | 8 +- .../src/sync_layer/gossip/tests.rs | 6 +- .../zksync_core/src/sync_layer/sync_action.rs | 7 +- 19 files changed, 181 insertions(+), 118 deletions(-) rename core/lib/{types => dal}/build.rs (77%) rename core/lib/{types/src => dal/src/models}/proto/mod.proto (90%) create mode 100644 core/lib/dal/src/models/proto/mod.rs delete mode 100644 core/lib/types/src/proto/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 5b7b6cb3424..3fbf3deb35e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8828,6 +8828,7 @@ dependencies = [ "itertools", "num 0.3.1", "once_cell", + "prost", "rand 0.8.5", "serde", "serde_json", @@ -8838,8 +8839,11 @@ dependencies = [ "tracing", "url", "vise", + "zksync_consensus_roles", "zksync_contracts", "zksync_health_check", + "zksync_protobuf", + "zksync_protobuf_build", "zksync_system_constants", "zksync_types", "zksync_utils", @@ -9158,7 +9162,6 @@ dependencies = [ "num_enum", "once_cell", "parity-crypto", - "prost", "rlp", "secp256k1 0.27.0", "serde", diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index 616e8a32a9e..d73837cdc29 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_dal" version = "0.1.0" -edition = "2018" +edition = "2021" authors = ["The Matter Labs Team "] homepage = "https://zksync.io/" 
repository = "https://github.com/matter-labs/zksync-era" @@ -9,6 +9,8 @@ license = "MIT OR Apache-2.0" keywords = ["blockchain", "zksync"] categories = ["cryptography"] +links = "zksync_dal_proto" + [dependencies] vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "dd05139b76ab0843443ab3ff730174942c825dae" } zksync_utils = { path = "../utils" } @@ -16,11 +18,14 @@ zksync_system_constants = { path = "../constants" } zksync_contracts = { path = "../contracts" } zksync_types = { path = "../types" } zksync_health_check = { path = "../health_check" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } itertools = "0.10.1" thiserror = "1.0" anyhow = "1.0" url = "2" +prost = "0.12.1" rand = "0.8" tokio = { version = "1", features = ["full"] } sqlx = { version = "0.5.13", default-features = false, features = [ @@ -46,3 +51,6 @@ tracing = "0.1" [dev-dependencies] assert_matches = "1.5.0" + +[build-dependencies] +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "da015d4c94b19962bc11622b6cc256e214256555" } diff --git a/core/lib/types/build.rs b/core/lib/dal/build.rs similarity index 77% rename from core/lib/types/build.rs rename to core/lib/dal/build.rs index 464a905e47a..66896b3e02f 100644 --- a/core/lib/types/build.rs +++ b/core/lib/dal/build.rs @@ -1,8 +1,8 @@ //! Generates rust code from protobufs. 
fn main() { zksync_protobuf_build::Config { - input_root: "src/proto".into(), - proto_root: "zksync/types".into(), + input_root: "src/models/proto".into(), + proto_root: "zksync/dal".into(), dependencies: vec!["::zksync_consensus_roles::proto".parse().unwrap()], protobuf_crate: "::zksync_protobuf".parse().unwrap(), is_public: true, diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index ca5018ae51e..c60d52e197b 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -10,12 +10,13 @@ use sqlx::Row; use zksync_types::{ aggregated_operations::AggregatedActionType, - block::{BlockGasCount, ConsensusBlockFields, L1BatchHeader, MiniblockHeader}, + block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, commitment::{L1BatchMetadata, L1BatchWithMetadata}, Address, L1BatchNumber, LogQuery, MiniblockNumber, ProtocolVersionId, H256, MAX_GAS_PER_PUBDATA_BYTE, U256, }; +pub use crate::models::storage_sync::ConsensusBlockFields; use crate::{ instrument::InstrumentExt, models::storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageMiniblockHeader}, @@ -405,7 +406,7 @@ impl BlocksDal<'_, '_> { let result = sqlx::query!( "UPDATE miniblocks SET consensus = $2 WHERE number = $1", miniblock_number.0 as i64, - serde_json::to_value(consensus).unwrap(), + zksync_protobuf::serde::serialize(consensus, serde_json::value::Serializer).unwrap(), ) .execute(self.storage.conn()) .await?; diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index f6ebb6fc781..4e3e0853991 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -1,3 +1,4 @@ +mod proto; pub mod storage_block; pub mod storage_eth_tx; pub mod storage_event; diff --git a/core/lib/types/src/proto/mod.proto b/core/lib/dal/src/models/proto/mod.proto similarity index 90% rename from core/lib/types/src/proto/mod.proto rename to core/lib/dal/src/models/proto/mod.proto index 2fc03e285d3..b817d35e032 100644 --- 
a/core/lib/types/src/proto/mod.proto +++ b/core/lib/dal/src/models/proto/mod.proto @@ -1,6 +1,6 @@ syntax = "proto3"; -package zksync.types; +package zksync.dal; import "zksync/roles/validator.proto"; diff --git a/core/lib/dal/src/models/proto/mod.rs b/core/lib/dal/src/models/proto/mod.rs new file mode 100644 index 00000000000..29f7c04d5d6 --- /dev/null +++ b/core/lib/dal/src/models/proto/mod.rs @@ -0,0 +1,2 @@ +#![allow(warnings)] +include!(concat!(env!("OUT_DIR"), "/src/models/proto/gen.rs")); diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index b49cfd98acc..3415cb9b264 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -1,8 +1,9 @@ -use std::convert::TryInto; - +use anyhow::Context as _; +use zksync_consensus_roles::validator; use zksync_contracts::BaseSystemContractsHashes; -use zksync_types::api::en::SyncBlock; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction, H256}; +use zksync_protobuf::{read_required, ProtoFmt}; +use zksync_types::api::en; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction, H160, H256}; #[derive(Debug, Clone, sqlx::FromRow)] pub struct StorageSyncBlock { @@ -24,45 +25,126 @@ pub struct StorageSyncBlock { pub consensus: Option, } +fn parse_h256(bytes: &[u8]) -> anyhow::Result { + Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into()) +} + +fn parse_h160(bytes: &[u8]) -> anyhow::Result { + Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into()) +} + impl StorageSyncBlock { pub(crate) fn into_sync_block( self, current_operator_address: Address, transactions: Option>, - ) -> SyncBlock { - let number = self.number; - - SyncBlock { - number: MiniblockNumber(self.number as u32), - l1_batch_number: L1BatchNumber(self.l1_batch_number as u32), + ) -> anyhow::Result { + Ok(en::SyncBlock { + number: MiniblockNumber(self.number.try_into().context("number")?), + l1_batch_number: L1BatchNumber( + 
self.l1_batch_number.try_into().context("l1_batch_number")?, + ), last_in_batch: self .last_batch_miniblock - .map(|n| n == number) + .map(|n| n == self.number) .unwrap_or(false), - timestamp: self.timestamp as u64, - root_hash: self.root_hash.as_deref().map(H256::from_slice), - l1_gas_price: self.l1_gas_price as u64, - l2_fair_gas_price: self.l2_fair_gas_price as u64, - // TODO (SMA-1635): Make these filed non optional in database + timestamp: self.timestamp.try_into().context("timestamp")?, + root_hash: self + .root_hash + .map(|h| parse_h256(&h)) + .transpose() + .context("root_hash")?, + l1_gas_price: self.l1_gas_price.try_into().context("l1_gas_price")?, + l2_fair_gas_price: self + .l2_fair_gas_price + .try_into() + .context("l2_fair_gas_price")?, + // TODO (SMA-1635): Make these fields non optional in database base_system_contracts_hashes: BaseSystemContractsHashes { - bootloader: self - .bootloader_code_hash - .map(|bootloader_code_hash| H256::from_slice(&bootloader_code_hash)) - .expect("Should not be none"), - default_aa: self - .default_aa_code_hash - .map(|default_aa_code_hash| H256::from_slice(&default_aa_code_hash)) - .expect("Should not be none"), + bootloader: parse_h256( + &self + .bootloader_code_hash + .context("bootloader_code_hash should not be none")?, + ) + .context("bootloader_code_hash")?, + default_aa: parse_h256( + &self + .default_aa_code_hash + .context("default_aa_code_hash should not be none")?, + ) + .context("default_aa_code_hash")?, + }, + operator_address: match self.fee_account_address { + Some(addr) => parse_h160(&addr).context("fee_account_address")?, + None => current_operator_address, }, - operator_address: self - .fee_account_address - .map(|fee_account_address| Address::from_slice(&fee_account_address)) - .unwrap_or(current_operator_address), transactions, - virtual_blocks: Some(self.virtual_blocks as u32), - hash: Some(H256::from_slice(&self.hash)), - protocol_version: (self.protocol_version as u16).try_into().unwrap(), - 
consensus: self.consensus.map(|v| serde_json::from_value(v).unwrap()), + virtual_blocks: Some(self.virtual_blocks.try_into().context("virtual_blocks")?), + hash: Some(parse_h256(&self.hash).context("hash")?), + protocol_version: u16::try_from(self.protocol_version) + .context("protocol_version")? + .try_into() + .context("protocol_version")?, + consensus: match self.consensus { + None => None, + Some(v) => { + let v: ConsensusBlockFields = + zksync_protobuf::serde::deserialize(v).context("consensus")?; + Some(v.encode()) + } + }, + }) + } +} + +/// Consensus-related L2 block (= miniblock) fields. +#[derive(Debug, Clone, PartialEq)] +pub struct ConsensusBlockFields { + /// Hash of the previous consensus block. + pub parent: validator::BlockHeaderHash, + /// Quorum certificate for the block. + pub justification: validator::CommitQC, +} + +impl ConsensusBlockFields { + pub fn encode(&self) -> en::ConsensusBlockFields { + en::ConsensusBlockFields(zksync_protobuf::encode(self).into()) + } + pub fn decode(x: &en::ConsensusBlockFields) -> anyhow::Result { + zksync_protobuf::decode(&x.0 .0) + } +} + +impl ProtoFmt for ConsensusBlockFields { + type Proto = crate::models::proto::ConsensusBlockFields; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + parent: read_required(&r.parent).context("parent")?, + justification: read_required(&r.justification).context("justification")?, + }) + } + fn build(&self) -> Self::Proto { + Self::Proto { + parent: Some(self.parent.build()), + justification: Some(self.justification.build()), } } } + +#[cfg(test)] +mod tests { + use super::ConsensusBlockFields; + use rand::Rng; + use zksync_consensus_roles::validator; + + #[tokio::test] + async fn encode_decode() { + let rng = &mut rand::thread_rng(); + let block = rng.gen::(); + let want = ConsensusBlockFields { + parent: block.header.parent, + justification: block.justification, + }; + assert_eq!(want, ConsensusBlockFields::decode(&want.encode()).unwrap()); + } +} diff --git 
a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 7b7c1359414..991469db469 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -4,7 +4,7 @@ use crate::{ instrument::InstrumentExt, metrics::MethodLatency, models::{storage_sync::StorageSyncBlock, storage_transaction::StorageTransaction}, - SqlxError, StorageProcessor, + StorageProcessor, }; /// DAL subset dedicated to the EN synchronization. @@ -19,7 +19,7 @@ impl SyncDal<'_, '_> { block_number: MiniblockNumber, current_operator_address: Address, include_transactions: bool, - ) -> Result, SqlxError> { + ) -> anyhow::Result> { let latency = MethodLatency::new("sync_dal_sync_block"); let storage_block_details = sqlx::query_as!( StorageSyncBlock, @@ -65,7 +65,7 @@ impl SyncDal<'_, '_> { } else { None }; - Some(storage_block_details.into_sync_block(current_operator_address, transactions)) + Some(storage_block_details.into_sync_block(current_operator_address, transactions)?) } else { None }; diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 3e81701a04a..aef8ce52d3a 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -10,8 +10,6 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] readme = "README.md" -links = "zksync_types_proto" - [dependencies] zksync_system_constants = { path = "../constants" } zksync_utils = { path = "../utils" } @@ -30,7 +28,6 @@ anyhow = "1.0.75" chrono = { version = "0.4", features = ["serde"] } num = { version = "0.3.1", features = ["serde"] } once_cell = "1.7" -prost = "0.12.1" rlp = "0.5" serde = "1.0.90" serde_json = "1.0.0" diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index aa3d2955e2e..c1899c2bf3c 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -5,7 +5,13 @@ use zk_evm::ethereum_types::Address; use zksync_basic_types::{L1BatchNumber, MiniblockNumber, H256}; use zksync_contracts::BaseSystemContractsHashes; -use 
crate::{block::ConsensusBlockFields, ProtocolVersionId}; +use crate::ProtocolVersionId; + +/// Protobuf-encoded consensus-related L2 block (= miniblock) fields. +/// See `zksync_dal::models::storage_sync::ConsensusBlockFields`. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(transparent)] +pub struct ConsensusBlockFields(pub zksync_basic_types::Bytes); /// Representation of the L2 block, as needed for the EN synchronization. /// This structure has several fields that describe *L1 batch* rather than diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index b4026468868..e61a56d2c91 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -1,13 +1,10 @@ -use anyhow::Context as _; use serde::{Deserialize, Serialize}; use zksync_system_constants::SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER; use std::{fmt, ops}; use zksync_basic_types::{H2048, H256, U256}; -use zksync_consensus_roles::validator; use zksync_contracts::BaseSystemContractsHashes; -use zksync_protobuf::{read_required, ProtoFmt}; use crate::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, @@ -88,45 +85,6 @@ pub struct MiniblockHeader { pub virtual_blocks: u32, } -/// Consensus-related L2 block (= miniblock) fields. -#[derive(Debug, Clone, PartialEq)] -pub struct ConsensusBlockFields { - /// Hash of the previous consensus block. - pub parent: validator::BlockHeaderHash, - /// Quorum certificate for the block. 
- pub justification: validator::CommitQC, -} - -impl ProtoFmt for ConsensusBlockFields { - type Proto = crate::proto::ConsensusBlockFields; - - fn read(proto: &Self::Proto) -> anyhow::Result { - Ok(Self { - parent: read_required(&proto.parent).context("parent")?, - justification: read_required(&proto.justification).context("justification")?, - }) - } - - fn build(&self) -> Self::Proto { - Self::Proto { - parent: Some(self.parent.build()), - justification: Some(self.justification.build()), - } - } -} - -impl Serialize for ConsensusBlockFields { - fn serialize(&self, s: S) -> Result { - zksync_protobuf::serde::serialize(self, s) - } -} - -impl<'de> Deserialize<'de> for ConsensusBlockFields { - fn deserialize>(d: D) -> Result { - zksync_protobuf::serde::deserialize(d) - } -} - /// Data needed to execute a miniblock in the VM. #[derive(Debug)] pub struct MiniblockExecutionData { diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 22904eb71b8..4715a2f86da 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -62,8 +62,6 @@ pub mod utils; pub mod vk_transform; pub mod vm_version; -mod proto; - /// Denotes the first byte of the special zkSync's EIP-712-signed transaction. 
pub const EIP_712_TX_TYPE: u8 = 0x71; diff --git a/core/lib/types/src/proto/mod.rs b/core/lib/types/src/proto/mod.rs deleted file mode 100644 index 660bf4c5b4c..00000000000 --- a/core/lib/types/src/proto/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -#![allow(warnings)] -include!(concat!(env!("OUT_DIR"), "/src/proto/gen.rs")); diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index 9cdd7e64fd1..3adbc8920bf 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -3,10 +3,9 @@ use tokio::sync::watch; use std::time::Duration; -use zksync_dal::StorageProcessor; +use zksync_dal::{blocks_dal::ConsensusBlockFields, StorageProcessor}; use zksync_types::{ - api::en::SyncBlock, block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber, - ProtocolVersionId, + api::en::SyncBlock, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, }; use zksync_web3_decl::jsonrpsee::core::Error as RpcError; @@ -37,9 +36,10 @@ pub(super) struct FetchedBlock { pub consensus: Option, } -impl From for FetchedBlock { - fn from(block: SyncBlock) -> Self { - Self { +impl TryFrom for FetchedBlock { + type Error = anyhow::Error; + fn try_from(block: SyncBlock) -> anyhow::Result { + Ok(Self { number: block.number, l1_batch_number: block.l1_batch_number, last_in_batch: block.last_in_batch, @@ -51,9 +51,14 @@ impl From for FetchedBlock { operator_address: block.operator_address, transactions: block .transactions - .expect("Transactions are always requested"), - consensus: block.consensus, - } + .context("Transactions are always requested")?, + consensus: block + .consensus + .as_ref() + .map(ConsensusBlockFields::decode) + .transpose() + .context("ConsensusBlockFields::decode()")?, + }) } } @@ -273,7 +278,7 @@ impl MainNodeFetcher { request_latency.observe(); let block_number = block.number; - let new_actions = self.cursor.advance(block.into()); + let new_actions = 
self.cursor.advance(block.try_into()?); tracing::info!( "New miniblock: {block_number} / {}", diff --git a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs index ee2286917c1..00c6c651452 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs @@ -1,17 +1,20 @@ //! Conversion logic between server and consensus types. - use anyhow::Context as _; use zksync_consensus_roles::validator::{BlockHeader, BlockNumber, FinalBlock}; -use zksync_types::{ - api::en::SyncBlock, block::ConsensusBlockFields, MiniblockNumber, ProtocolVersionId, -}; +use zksync_dal::blocks_dal::ConsensusBlockFields; +use zksync_types::{api::en, MiniblockNumber, ProtocolVersionId}; use crate::{consensus, sync_layer::fetcher::FetchedBlock}; -pub(super) fn sync_block_to_consensus_block(mut block: SyncBlock) -> anyhow::Result { +pub(super) fn sync_block_to_consensus_block(block: en::SyncBlock) -> anyhow::Result { let number = BlockNumber(block.number.0.into()); - let consensus = block.consensus.take().context("Missing consensus fields")?; + let consensus = block + .consensus + .as_ref() + .context("Missing consensus fields")?; + let consensus = + ConsensusBlockFields::decode(consensus).context("ConsensusBlockFields::decode()")?; let consensus_protocol_version = consensus.justification.message.protocol_version.as_u32(); let block_protocol_version = block.protocol_version as u32; anyhow::ensure!( diff --git a/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs index a490147512e..db36f71d35c 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs @@ -12,8 +12,9 @@ use zksync_concurrency::{ }; use zksync_consensus_roles::validator::{BlockNumber, FinalBlock}; use zksync_consensus_storage::{BlockStore, StorageError, 
StorageResult}; +use zksync_dal::blocks_dal::ConsensusBlockFields; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_types::{api::en::SyncBlock, block::ConsensusBlockFields, Address, MiniblockNumber}; +use zksync_types::{api::en::SyncBlock, Address, MiniblockNumber}; #[cfg(test)] mod tests; @@ -157,9 +158,12 @@ impl PostgresBlockStorage { justification: genesis_block.justification.clone(), }; if let Some(actual_consensus_fields) = &actual_consensus_fields { + let actual_consensus_fields = ConsensusBlockFields::decode(actual_consensus_fields) + .context("ConsensusBlockFields::decode()") + .map_err(StorageError::Database)?; // While justifications may differ among nodes for an arbitrary block, we assume that // the genesis block has a hardcoded justification. - if *actual_consensus_fields != expected_consensus_fields { + if actual_consensus_fields != expected_consensus_fields { let err = anyhow::anyhow!( "Genesis block consensus fields in Postgres {actual_consensus_fields:?} do not match \ the configured ones {expected_consensus_fields:?}" diff --git a/core/lib/zksync_core/src/sync_layer/gossip/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/tests.rs index d8775d3637c..ddb97484968 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/tests.rs @@ -9,10 +9,10 @@ use zksync_concurrency::{ctx, scope, testonly::abort_on_panic, time}; use zksync_consensus_executor::testonly::FullValidatorConfig; use zksync_consensus_roles::validator::{self, FinalBlock}; use zksync_consensus_storage::{InMemoryStorage, WriteBlockStore}; +use zksync_dal::blocks_dal::ConsensusBlockFields; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::{ - api::en::SyncBlock, block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber, - ProtocolVersionId, H256, + api::en::SyncBlock, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, }; use super::*; @@ -509,7 +509,7 @@ async fn 
insert_sync_blocks(pool: ConnectionPool, blocks: Vec, tx_has let (actions_sender, actions) = ActionQueue::new(); let state_keeper = StateKeeperHandles::new(pool.clone(), actions, tx_hashes).await; for block in blocks { - let block_actions = fetcher.advance(block.into()); + let block_actions = fetcher.advance(block.try_into().unwrap()); actions_sender.push_actions(block_actions).await; } diff --git a/core/lib/zksync_core/src/sync_layer/sync_action.rs b/core/lib/zksync_core/src/sync_layer/sync_action.rs index b278cb1c98e..994676def49 100644 --- a/core/lib/zksync_core/src/sync_layer/sync_action.rs +++ b/core/lib/zksync_core/src/sync_layer/sync_action.rs @@ -1,9 +1,6 @@ use tokio::sync::mpsc; - -use zksync_types::{ - block::ConsensusBlockFields, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, - Transaction, -}; +use zksync_dal::blocks_dal::ConsensusBlockFields; +use zksync_types::{Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction}; use super::metrics::QUEUE_METRICS; From f6a69e025979a1283964ff69508543ed7606e391 Mon Sep 17 00:00:00 2001 From: Zijing Zhang <50045289+pluveto@users.noreply.github.com> Date: Tue, 5 Dec 2023 23:49:57 +0800 Subject: [PATCH 100/115] fix: use powers array in plonkSetup function (#508) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR modifies the `plonkSetup` function in `run.ts` to use the `powers` array when downloading key files. ## Why ❔ Previously, the function forget to use the argument values `powers`. Now, it will download keys for any powers specified in the `powers` array. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [NA] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--------- Co-authored-by: Igor Aleksanov --- infrastructure/zk/src/run/run.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/infrastructure/zk/src/run/run.ts b/infrastructure/zk/src/run/run.ts index 3e13bd5a222..3f17fb3e54c 100644 --- a/infrastructure/zk/src/run/run.ts +++ b/infrastructure/zk/src/run/run.ts @@ -70,7 +70,8 @@ export async function plonkSetup(powers?: number[]) { const URL = 'https://storage.googleapis.com/universal-setup'; fs.mkdirSync('keys/setup', { recursive: true }); process.chdir('keys/setup'); - for (let power = 20; power <= 26; power++) { + for (let i = 0; i < powers.length; i++) { + const power = powers[i]; if (!fs.existsSync(`setup_2^${power}.key`)) { await utils.spawn(`curl -LO ${URL}/setup_2^${power}.key`); await utils.sleep(1); From aeaaecb54b6bd3f173727531418dc242357b2aee Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Tue, 5 Dec 2023 18:46:48 +0200 Subject: [PATCH 101/115] fix: Fix database connections in house keeper (#610) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Use correct connections for databases in house keeper. ## Why ❔ Databases are divided in 2 on mainnet and testnet ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. 
--- .../src/house_keeper/fri_prover_queue_monitor.rs | 9 +++++++-- core/lib/zksync_core/src/lib.rs | 14 +++++++++----- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs index 129f9befbd5..635d630fe1a 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_prover_queue_monitor.rs @@ -7,6 +7,7 @@ use zksync_prover_utils::periodic_job::PeriodicJob; pub struct FriProverStatsReporter { reporting_interval_ms: u64, prover_connection_pool: ConnectionPool, + db_connection_pool: ConnectionPool, config: FriProverGroupConfig, } @@ -14,11 +15,13 @@ impl FriProverStatsReporter { pub fn new( reporting_interval_ms: u64, prover_connection_pool: ConnectionPool, + db_connection_pool: ConnectionPool, config: FriProverGroupConfig, ) -> Self { Self { reporting_interval_ms, prover_connection_pool, + db_connection_pool, config, } } @@ -85,7 +88,9 @@ impl PeriodicJob for FriProverStatsReporter { // FIXME: refactor metrics here - if let Some(l1_batch_number) = conn + let mut db_conn = self.db_connection_pool.access_storage().await.unwrap(); + + if let Some(l1_batch_number) = db_conn .proof_generation_dal() .get_oldest_unprocessed_batch() .await @@ -96,7 +101,7 @@ impl PeriodicJob for FriProverStatsReporter { ) } - if let Some(l1_batch_number) = conn + if let Some(l1_batch_number) = db_conn .proof_generation_dal() .get_oldest_not_generated_batch() .await diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 3b00d7cffda..2389d576173 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -1088,13 +1088,16 @@ async fn add_house_keeper_to_task_futures( .clone() .context("house_keeper_config")?; let postgres_config = configs.postgres_config.clone().context("postgres_config")?; - let connection_pool = 
ConnectionPool::singleton(postgres_config.replica_url()?) - .build() - .await - .context("failed to build a connection pool")?; + let connection_pool = ConnectionPool::builder( + postgres_config.replica_url()?, + postgres_config.max_connections()?, + ) + .build() + .await + .context("failed to build a connection pool")?; let l1_batch_metrics_reporter = L1BatchMetricsReporter::new( house_keeper_config.l1_batch_metrics_reporting_interval_ms, - connection_pool, + connection_pool.clone(), ); let prover_connection_pool = ConnectionPool::builder( @@ -1192,6 +1195,7 @@ async fn add_house_keeper_to_task_futures( let fri_prover_stats_reporter = FriProverStatsReporter::new( house_keeper_config.fri_prover_stats_reporting_interval_ms, prover_connection_pool.clone(), + connection_pool.clone(), fri_prover_group_config, ); task_futures.push(tokio::spawn(fri_prover_stats_reporter.run())); From b65fedd6894497a4c9fbf38d558ccfaca535d1d2 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Tue, 5 Dec 2023 19:06:24 +0200 Subject: [PATCH 102/115] feat(contract-verifier): Support verification for zksolc v1.3.17 (#606) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adds support for zksolc v1.3.17 to contract-verifier. ## Why ❔ Contract-verifier should support latest version ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. 
--- .github/workflows/ci-core-reusable.yml | 48 +++++++++---------- core/tests/ts-integration/hardhat.config.ts | 6 +-- .../ts-integration/scripts/compile-yul.ts | 2 +- .../tests/api/contract-verification.test.ts | 6 +-- docker/contract-verifier/Dockerfile | 6 ++- 5 files changed, 36 insertions(+), 32 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 7ad0e54074c..341a37a2c8c 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -113,20 +113,20 @@ jobs: run: | sudo apt update && sudo apt install wget -y - mkdir -p $(pwd)/etc/solc-bin/0.8.21 - wget https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.21%2Bcommit.d9974bed - mv solc-linux-amd64-v0.8.21+commit.d9974bed $(pwd)/etc/solc-bin/0.8.21/solc - chmod +x $(pwd)/etc/solc-bin/0.8.21/solc + mkdir -p $(pwd)/etc/solc-bin/0.8.23 + wget https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.23%2Bcommit.f704f362 + mv solc-linux-amd64-v0.8.23+commit.f704f362 $(pwd)/etc/solc-bin/0.8.23/solc + chmod +x $(pwd)/etc/solc-bin/0.8.23/solc - mkdir -p $(pwd)/etc/zksolc-bin/v1.3.16 - wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.16 - mv zksolc-linux-amd64-musl-v1.3.16 $(pwd)/etc/zksolc-bin/v1.3.16/zksolc - chmod +x $(pwd)/etc/zksolc-bin/v1.3.16/zksolc + mkdir -p $(pwd)/etc/zksolc-bin/v1.3.17 + wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.17 + mv zksolc-linux-amd64-musl-v1.3.17 $(pwd)/etc/zksolc-bin/v1.3.17/zksolc + chmod +x $(pwd)/etc/zksolc-bin/v1.3.17/zksolc - mkdir -p $(pwd)/etc/vyper-bin/0.3.3 - wget -O vyper0.3.3 https://github.com/vyperlang/vyper/releases/download/v0.3.3/vyper.0.3.3%2Bcommit.48e326f0.linux - mv vyper0.3.3 $(pwd)/etc/vyper-bin/0.3.3/vyper - chmod +x $(pwd)/etc/vyper-bin/0.3.3/vyper + mkdir -p $(pwd)/etc/vyper-bin/0.3.10 + wget -O vyper0.3.10 
https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10%2Bcommit.91361694.linux + mv vyper0.3.10 $(pwd)/etc/vyper-bin/0.3.10/vyper + chmod +x $(pwd)/etc/vyper-bin/0.3.10/vyper mkdir -p $(pwd)/etc/zkvyper-bin/v1.3.13 wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-v1.3.13 @@ -211,20 +211,20 @@ jobs: run: | sudo apt update && sudo apt install wget -y - mkdir -p $(pwd)/etc/solc-bin/0.8.21 - wget https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.21%2Bcommit.d9974bed - mv solc-linux-amd64-v0.8.21+commit.d9974bed $(pwd)/etc/solc-bin/0.8.21/solc - chmod +x $(pwd)/etc/solc-bin/0.8.21/solc + mkdir -p $(pwd)/etc/solc-bin/0.8.23 + wget https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.23%2Bcommit.f704f362 + mv solc-linux-amd64-v0.8.23+commit.f704f362 $(pwd)/etc/solc-bin/0.8.23/solc + chmod +x $(pwd)/etc/solc-bin/0.8.23/solc - mkdir -p $(pwd)/etc/zksolc-bin/v1.3.16 - wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.16 - mv zksolc-linux-amd64-musl-v1.3.16 $(pwd)/etc/zksolc-bin/v1.3.16/zksolc - chmod +x $(pwd)/etc/zksolc-bin/v1.3.16/zksolc + mkdir -p $(pwd)/etc/zksolc-bin/v1.3.17 + wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.17 + mv zksolc-linux-amd64-musl-v1.3.17 $(pwd)/etc/zksolc-bin/v1.3.17/zksolc + chmod +x $(pwd)/etc/zksolc-bin/v1.3.17/zksolc - mkdir -p $(pwd)/etc/vyper-bin/0.3.3 - wget -O vyper0.3.3 https://github.com/vyperlang/vyper/releases/download/v0.3.3/vyper.0.3.3%2Bcommit.48e326f0.linux - mv vyper0.3.3 $(pwd)/etc/vyper-bin/0.3.3/vyper - chmod +x $(pwd)/etc/vyper-bin/0.3.3/vyper + mkdir -p $(pwd)/etc/vyper-bin/0.3.10 + wget -O vyper0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10%2Bcommit.91361694.linux + mv vyper0.3.10 $(pwd)/etc/vyper-bin/0.3.10/vyper + chmod +x $(pwd)/etc/vyper-bin/0.3.10/vyper mkdir -p 
$(pwd)/etc/zkvyper-bin/v1.3.11 wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-v1.3.11 diff --git a/core/tests/ts-integration/hardhat.config.ts b/core/tests/ts-integration/hardhat.config.ts index 166feea91d9..4840e46e82b 100644 --- a/core/tests/ts-integration/hardhat.config.ts +++ b/core/tests/ts-integration/hardhat.config.ts @@ -4,7 +4,7 @@ import '@matterlabs/hardhat-zksync-vyper'; export default { zksolc: { - version: '1.3.16', + version: '1.3.17', compilerSource: 'binary', settings: { isSystem: true @@ -20,9 +20,9 @@ export default { } }, solidity: { - version: '0.8.21' + version: '0.8.23' }, vyper: { - version: '0.3.3' + version: '0.3.10' } }; diff --git a/core/tests/ts-integration/scripts/compile-yul.ts b/core/tests/ts-integration/scripts/compile-yul.ts index 26f779878ae..f92c259723c 100644 --- a/core/tests/ts-integration/scripts/compile-yul.ts +++ b/core/tests/ts-integration/scripts/compile-yul.ts @@ -7,7 +7,7 @@ import { getZksolcUrl, saltFromUrl } from '@matterlabs/hardhat-zksync-solc'; import { getCompilersDir } from 'hardhat/internal/util/global-dir'; import path from 'path'; -const COMPILER_VERSION = '1.3.16'; +const COMPILER_VERSION = '1.3.17'; const IS_COMPILER_PRE_RELEASE = false; async function compilerLocation(): Promise { diff --git a/core/tests/ts-integration/tests/api/contract-verification.test.ts b/core/tests/ts-integration/tests/api/contract-verification.test.ts index cfda8a81074..984361fb7b3 100644 --- a/core/tests/ts-integration/tests/api/contract-verification.test.ts +++ b/core/tests/ts-integration/tests/api/contract-verification.test.ts @@ -9,11 +9,11 @@ import { sleep } from 'zksync-web3/build/src/utils'; // Regular expression to match ISO dates. 
const DATE_REGEX = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?/; -const ZKSOLC_VERSION = 'v1.3.16'; -const SOLC_VERSION = '0.8.21'; +const ZKSOLC_VERSION = 'v1.3.17'; +const SOLC_VERSION = '0.8.23'; const ZKVYPER_VERSION = 'v1.3.13'; -const VYPER_VERSION = '0.3.3'; +const VYPER_VERSION = '0.3.10'; type HttpMethod = 'POST' | 'GET'; diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 1f244b38906..21ce6946489 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -24,7 +24,7 @@ RUN apt-get update && apt-get install -y curl libpq5 ca-certificates wget python # install zksolc 1.3.x RUN skip_versions="v1.3.12 v1.3.15" && \ - for VERSION in $(seq -f "v1.3.%g" 0 16); do \ + for VERSION in $(seq -f "v1.3.%g" 0 17); do \ if echo " $skip_versions " | grep -q -w " $VERSION "; then \ continue; \ fi; \ @@ -53,6 +53,10 @@ RUN mkdir -p /etc/vyper-bin/0.3.9 \ && wget -O vyper0.3.9 https://github.com/vyperlang/vyper/releases/download/v0.3.9/vyper.0.3.9%2Bcommit.66b96705.linux \ && mv vyper0.3.9 /etc/vyper-bin/0.3.9/vyper \ && chmod +x /etc/vyper-bin/0.3.9/vyper +RUN mkdir -p /etc/vyper-bin/0.3.10 \ + && wget -O vyper0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10%2Bcommit.91361694.linux \ + && mv vyper0.3.10 /etc/vyper-bin/0.3.10/vyper \ + && chmod +x /etc/vyper-bin/0.3.10/vyper COPY --from=builder /usr/src/zksync/target/release/zksync_contract_verifier /usr/bin/ COPY etc/system-contracts/bootloader/build/artifacts/ /etc/system-contracts/bootloader/build/artifacts/ From b7007a3ab5499634ab17b03efcbfa096d55408f8 Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Tue, 5 Dec 2023 18:45:01 +0100 Subject: [PATCH 103/115] chore(main): release core 18.6.0 (#613) :robot: I have created a release *beep* *boop* --- ## [18.6.0](https://github.com/matter-labs/zksync-era/compare/core-v18.5.0...core-v18.6.0) (2023-12-05) ### 
Features * **contract-verifier:** Support verification for zksolc v1.3.17 ([#606](https://github.com/matter-labs/zksync-era/issues/606)) ([b65fedd](https://github.com/matter-labs/zksync-era/commit/b65fedd6894497a4c9fbf38d558ccfaca535d1d2)) ### Bug Fixes * Fix database connections in house keeper ([#610](https://github.com/matter-labs/zksync-era/issues/610)) ([aeaaecb](https://github.com/matter-labs/zksync-era/commit/aeaaecb54b6bd3f173727531418dc242357b2aee)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). --- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 4d3b72a795f..bce64523644 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "18.5.0", + "core": "18.6.0", "prover": "10.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index cc48a8b8c8d..41439ca4651 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,17 @@ # Changelog +## [18.6.0](https://github.com/matter-labs/zksync-era/compare/core-v18.5.0...core-v18.6.0) (2023-12-05) + + +### Features + +* **contract-verifier:** Support verification for zksolc v1.3.17 ([#606](https://github.com/matter-labs/zksync-era/issues/606)) ([b65fedd](https://github.com/matter-labs/zksync-era/commit/b65fedd6894497a4c9fbf38d558ccfaca535d1d2)) + + +### Bug Fixes + +* Fix database connections in house keeper ([#610](https://github.com/matter-labs/zksync-era/issues/610)) ([aeaaecb](https://github.com/matter-labs/zksync-era/commit/aeaaecb54b6bd3f173727531418dc242357b2aee)) + ## [18.5.0](https://github.com/matter-labs/zksync-era/compare/core-v18.4.0...core-v18.5.0) (2023-12-05) From 684e933fdd177eb323ad75df0accef8821dd0fe1 
Mon Sep 17 00:00:00 2001 From: Stanislav Bezkorovainyi Date: Wed, 6 Dec 2023 09:26:54 +0100 Subject: [PATCH 104/115] chore: Mainnet upgrade calldata (#564) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Includes mainnet upgrade preparation as well as some minor fixes for the upgrade tool ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: koloz --- contracts | 2 +- .../1699353977-boojum/mainnet2/crypto.json | 11 + .../1699353977-boojum/mainnet2/facetCuts.json | 177 ++++++++++ .../1699353977-boojum/mainnet2/facets.json | 18 + .../1699353977-boojum/mainnet2/l2Upgrade.json | 323 ++++++++++++++++++ .../mainnet2/transactions.json | 235 +++++++++++++ infrastructure/protocol-upgrade/README.md | 2 +- .../protocol-upgrade/src/crypto/crypto.ts | 2 +- 8 files changed, 767 insertions(+), 3 deletions(-) create mode 100644 etc/upgrades/1699353977-boojum/mainnet2/crypto.json create mode 100644 etc/upgrades/1699353977-boojum/mainnet2/facetCuts.json create mode 100644 etc/upgrades/1699353977-boojum/mainnet2/facets.json create mode 100644 etc/upgrades/1699353977-boojum/mainnet2/l2Upgrade.json create mode 100644 etc/upgrades/1699353977-boojum/mainnet2/transactions.json diff --git a/contracts b/contracts index 3e2bee96e41..a8429e8ec10 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 3e2bee96e412bac7c0a58c4b919837b59e9af36e +Subproject commit a8429e8ec10cb43edef1b1e8bb9b4b480d09222d diff --git a/etc/upgrades/1699353977-boojum/mainnet2/crypto.json b/etc/upgrades/1699353977-boojum/mainnet2/crypto.json new file mode 100644 index 00000000000..e9d65005395 --- /dev/null +++ b/etc/upgrades/1699353977-boojum/mainnet2/crypto.json @@ -0,0 +1,11 @@ +{ + "verifier": { + 
"address": "0xB465882F67d236DcC0D090F78ebb0d838e9719D8", + "txHash": "0xf623007b5e569800c688b84d2549cba86e0780c1814a8b586ed93deb131337e0" + }, + "keys": { + "recursionNodeLevelVkHash": "0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080", + "recursionLeafLevelVkHash": "0x14628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/mainnet2/facetCuts.json b/etc/upgrades/1699353977-boojum/mainnet2/facetCuts.json new file mode 100644 index 00000000000..90dfba4d42e --- /dev/null +++ b/etc/upgrades/1699353977-boojum/mainnet2/facetCuts.json @@ -0,0 +1,177 @@ +[ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x73fb9297", + "0x36d4eb84", + "0x27ae4c16", + "0x0551448c", + "0x8043760a", + "0xbeda4b12", + "0x17338945", + "0x587809c7" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0xfe10226d", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x1b60e626", + "0xe39d3bff", + "0x33ce93fe", + "0x0ef240a0", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0xa39980a0", + "0x46657fe9", + "0x18e3a941", + "0x3db920ce", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": 
[ + "0x0c4dd810", + "0xce9dcf16", + "0x7739cbe7", + "0xa9a2d18a" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xe58bb639", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x409560DE546e057ce5bD5dB487EdF2bB5E785baB", + "selectors": [ + "0x0e18b681", + "0xe58bb639", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x17338945" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0xF3ACF6a03ea4a914B78Ec788624B25ceC37c14A4", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x63b5EC36B09384fFA7106A80Ec7cfdFCa521fD08", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0x9e3Fa34a10619fEDd7aE40A3fb86FA515fcfd269", + "selectors": [ + "0x701f58c5", + "0xc3d93e7c", + "0x7f61885c", + "0x97c09d34" + ], + "action": 0, + "isFreezable": true + } +] \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/mainnet2/facets.json b/etc/upgrades/1699353977-boojum/mainnet2/facets.json new file mode 100644 index 00000000000..12f1457c603 --- /dev/null +++ b/etc/upgrades/1699353977-boojum/mainnet2/facets.json @@ -0,0 +1,18 @@ +{ + "ExecutorFacet": { + 
"address": "0x9e3Fa34a10619fEDd7aE40A3fb86FA515fcfd269", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "AdminFacet": { + "address": "0x409560DE546e057ce5bD5dB487EdF2bB5E785baB", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "GettersFacet": { + "address": "0xF3ACF6a03ea4a914B78Ec788624B25ceC37c14A4", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "MailboxFacet": { + "address": "0x63b5EC36B09384fFA7106A80Ec7cfdFCa521fD08", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/mainnet2/l2Upgrade.json b/etc/upgrades/1699353977-boojum/mainnet2/l2Upgrade.json new file mode 100644 index 00000000000..19977b5cc2a --- /dev/null +++ b/etc/upgrades/1699353977-boojum/mainnet2/l2Upgrade.json @@ -0,0 +1,323 @@ +{ + "systemContracts": [ + { + "name": "EmptyContract", + "bytecodeHashes": [ + "0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7" + ], + "address": "0x0000000000000000000000000000000000000000" + }, + { + "name": "Ecrecover", + "bytecodeHashes": [ + "0x010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c" + ], + "address": "0x0000000000000000000000000000000000000001" + }, + { + "name": "SHA256", + "bytecodeHashes": [ + "0x010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d" + ], + "address": "0x0000000000000000000000000000000000000002" + }, + { + "name": "EcAdd", + "bytecodeHashes": [ + "0x010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d433" + ], + "address": "0x0000000000000000000000000000000000000006" + }, + { + "name": "EcMul", + "bytecodeHashes": [ + "0x0100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba571350675" + ], + "address": "0x0000000000000000000000000000000000000007" + }, + { + "name": "EmptyContract", + "bytecodeHashes": [ + 
"0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7" + ], + "address": "0x0000000000000000000000000000000000008001" + }, + { + "name": "AccountCodeStorage", + "bytecodeHashes": [ + "0x0100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c" + ], + "address": "0x0000000000000000000000000000000000008002" + }, + { + "name": "NonceHolder", + "bytecodeHashes": [ + "0x0100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd470" + ], + "address": "0x0000000000000000000000000000000000008003" + }, + { + "name": "KnownCodesStorage", + "bytecodeHashes": [ + "0x0100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e" + ], + "address": "0x0000000000000000000000000000000000008004" + }, + { + "name": "ImmutableSimulator", + "bytecodeHashes": [ + "0x01000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c5" + ], + "address": "0x0000000000000000000000000000000000008005" + }, + { + "name": "ContractDeployer", + "bytecodeHashes": [ + "0x010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb4212" + ], + "address": "0x0000000000000000000000000000000000008006" + }, + { + "name": "L1Messenger", + "bytecodeHashes": [ + "0x01000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa" + ], + "address": "0x0000000000000000000000000000000000008008" + }, + { + "name": "MsgValueSimulator", + "bytecodeHashes": [ + "0x0100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb" + ], + "address": "0x0000000000000000000000000000000000008009" + }, + { + "name": "L2EthToken", + "bytecodeHashes": [ + "0x01000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3" + ], + "address": "0x000000000000000000000000000000000000800a" + }, + { + "name": "SystemContext", + "bytecodeHashes": [ + "0x0100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436" + ], + "address": "0x000000000000000000000000000000000000800b" + }, + { + "name": "BootloaderUtilities", + "bytecodeHashes": [ + 
"0x010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0" + ], + "address": "0x000000000000000000000000000000000000800c" + }, + { + "name": "EventWriter", + "bytecodeHashes": [ + "0x01000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339" + ], + "address": "0x000000000000000000000000000000000000800d" + }, + { + "name": "Compressor", + "bytecodeHashes": [ + "0x010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496" + ], + "address": "0x000000000000000000000000000000000000800e" + }, + { + "name": "ComplexUpgrader", + "bytecodeHashes": [ + "0x0100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc" + ], + "address": "0x000000000000000000000000000000000000800f" + }, + { + "name": "Keccak256", + "bytecodeHashes": [ + "0x0100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a89" + ], + "address": "0x0000000000000000000000000000000000008010" + } + ], + "defaultAA": { + "name": "DefaultAccount", + "bytecodeHashes": [ + "0x01000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d" + ] + }, + "bootloader": { + "name": "Bootloader", + "bytecodeHashes": [ + "0x01000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b88" + ] + }, + "forcedDeployments": [ + { + "bytecodeHash": "0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7", + "newAddress": "0x0000000000000000000000000000000000000000", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c", + "newAddress": "0x0000000000000000000000000000000000000001", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d", + "newAddress": "0x0000000000000000000000000000000000000002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d433", + "newAddress": 
"0x0000000000000000000000000000000000000006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba571350675", + "newAddress": "0x0000000000000000000000000000000000000007", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c7", + "newAddress": "0x0000000000000000000000000000000000008001", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c", + "newAddress": "0x0000000000000000000000000000000000008002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd470", + "newAddress": "0x0000000000000000000000000000000000008003", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e", + "newAddress": "0x0000000000000000000000000000000000008004", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c5", + "newAddress": "0x0000000000000000000000000000000000008005", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb4212", + "newAddress": "0x0000000000000000000000000000000000008006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa", + "newAddress": "0x0000000000000000000000000000000000008008", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb", + "newAddress": "0x0000000000000000000000000000000000008009", 
+ "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3", + "newAddress": "0x000000000000000000000000000000000000800a", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436", + "newAddress": "0x000000000000000000000000000000000000800b", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0", + "newAddress": "0x000000000000000000000000000000000000800c", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339", + "newAddress": "0x000000000000000000000000000000000000800d", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496", + "newAddress": "0x000000000000000000000000000000000000800e", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc", + "newAddress": "0x000000000000000000000000000000000000800f", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a89", + "newAddress": "0x0000000000000000000000000000000000008010", + "value": 0, + "input": "0x", + "callConstructor": false + } + ], + "forcedDeploymentCalldata": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c000000000000000000000000000000000000000000000000000000000000800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
0000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f
0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "calldata": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c000000000000000000000000000000000000000000000000000000000000800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
0000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f
0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "tx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": "18", + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c000000000000000000000000000000000000000000000000000000000000800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
0000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f
0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + } +} \ No newline at end of file diff --git a/etc/upgrades/1699353977-boojum/mainnet2/transactions.json b/etc/upgrades/1699353977-boojum/mainnet2/transactions.json new file mode 100644 index 00000000000..cc1554ffeea --- /dev/null +++ b/etc/upgrades/1699353977-boojum/mainnet2/transactions.json @@ -0,0 +1,235 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": "18", + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": 
"0xe9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c000000000000000000000000000000000000000000000000000000000000800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
0000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f
0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x01000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b88", + "defaultAccountHash": "0x01000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d", + "verifier": "0xB465882F67d236DcC0D090F78ebb0d838e9719D8", + "verifierParams": { + "recursionNodeLevelVkHash": "0x5a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d080", + "recursionLeafLevelVkHash": "0x14628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x656d9c18" + }, + "factoryDeps": [], + "newProtocolVersion": "18", + "newAllowList": 
"0x0C0dC1171258694635AA50cec5845aC1031cA6d7" + }, + "l1upgradeCalldata": "0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c000000000000000000000000000000000000000000000000000000000656d9c1800000000000000000000000000000000000000000000000000000000000000120000000000000000000000000c0dc1171258694635aa50cec5845ac1031ca6d700000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa20000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000
1460000000000000000000000000000000000000000000000000000000000000148000000000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000
000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec
703d0c56f87615964017739def4ab1ee606b8ec6458c00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa000000000000000000000000000000000000
00000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0x567e1B57A80a7F048A7402191F96C62730e30dB2", + "protocolVersion": "18", + "diamondUpgradeProposalId": { + "type": 
"BigNumber", + "hex": "0x0b" + }, + "upgradeTimestamp": "1701682200", + "transparentUpgrade": { + "facetCuts": [ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x73fb9297", + "0x36d4eb84", + "0x27ae4c16", + "0x0551448c", + "0x8043760a", + "0xbeda4b12", + "0x17338945", + "0x587809c7" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0xfe10226d", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x1b60e626", + "0xe39d3bff", + "0x33ce93fe", + "0x0ef240a0", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0xa39980a0", + "0x46657fe9", + "0x18e3a941", + "0x3db920ce", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0c4dd810", + "0xce9dcf16", + "0x7739cbe7", + "0xa9a2d18a" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0xe58bb639", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x409560DE546e057ce5bD5dB487EdF2bB5E785baB", + "selectors": [ + "0x0e18b681", + "0xe58bb639", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0xf235757f", + "0x1cc5d103", + "0xbe6f11cf", + "0x4623c91d", + "0x17338945" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": 
"0xF3ACF6a03ea4a914B78Ec788624B25ceC37c14A4", + "selectors": [ + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0xa7cd63b7", + "0x79823c9a", + "0x4fc07d75", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0x8665b150", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x63b5EC36B09384fFA7106A80Ec7cfdFCa521fD08", + "selectors": [ + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0x9e3Fa34a10619fEDd7aE40A3fb86FA515fcfd269", + "selectors": [ + "0x701f58c5", + "0xc3d93e7c", + "0x7f61885c", + "0x97c09d34" + ], + "action": 0, + "isFreezable": true + } + ], + "initAddress": "0x567e1B57A80a7F048A7402191F96C62730e30dB2", + "initCalldata": 
"0x1ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c000000000000000000000000000000000000000000000000000000000656d9c1800000000000000000000000000000000000000000000000000000000000000120000000000000000000000000c0dc1171258694635aa50cec5845ac1031ca6d700000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000001460000000000000000000000000000000000000000000000000000000000000148000000
000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca
2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c00000000000000000000000000000
000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa0000000000000000000000000000000000000000000000000000000000008008000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "proposeTransparentUpgradeCalldata": 
"0x8043760a0000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b0000000000000000000000000000000000000000000000000000000000000060000000000000000000000000567e1b57a80a7f048a7402191f96c62730e30db200000000000000000000000000000000000000000000000000000000000015400000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000007c000000000000000000000000000000000000000000000000000000000000009200000000000000000000000000000000000000000000000000000000000000a400000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000d60000000000000000000000000000000000000000000000000000000000000124000000000000000000000000000000000000000000000000000000000000013a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000873fb92970000000000000000000000000000000000000000000000000000000036d4eb840000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000000551448c000000000000000000000000000000000000000000000000000000008043760a00000000000000000000000000000000000000000000000000000000beda4b12000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000587809c70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000023cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b700000000000000000000000000000000000000000000000000000000fe10226d0000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b7000000000000000000000000000000000000000000000000000000001b60e62600000000000000000000000000000000000000000000000000000000e39d3bff0000000000000000000000000000000000000000000000000000000033ce93fe000000000000000000000000000000000000000000000000000000000ef240a000000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc500000000000000000000000000000000000000000000000000000000a39980a00000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a941000000000000000000000000000000000000000000000000000000003db920ce0000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d70000000000000
0000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000040c4dd81000000000000000000000000000000000000000000000000000000000ce9dcf16000000000000000000000000000000000000000000000000000000007739cbe700000000000000000000000000000000000000000000000000000000a9a2d18a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000005e58bb63900000000000000000000000000000
000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d00000000000000000000000000000000000000000000000000000000000000000000000000000000409560de546e057ce5bd5db487edf2bb5e785bab000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000a0e18b68100000000000000000000000000000000000000000000000000000000e58bb63900000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000000000000000000000000000f3acf6a03ea4a914b78ec788624b25cec37c14a40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000022cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b70000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d75000000000000000000000000000000000000000000000
00000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d0000000000000000000000000000000000000000000000000000000000000000000000000000000063b5ec36b09384ffa7106a80ec7cfdfca521fd0800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000
000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb672419000000000000000000000000000000000000000000000000000000000000000000000000000000009e3fa34a10619fedd7ae40a3fb86fa515fcfd2690000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000017041ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c000000000000000000000000000000000000000000000000000000000656d9c1800000000000000000000000000000000000000000000000000000000000000120000000000000000000000000c0dc1171258694635aa50cec5845ac1031ca6d70000000000000
0000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa200000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000001460000000000000000000000000000000000000000000000000000000000000148000000000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c00000000000000000000000000000000000000000000000000000000000000880000000000000000000000
00000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec6458c00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e0000000000000000000000000000000000000000000000000000000000008004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000
000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01
c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "executeUpgradeCalldata": "0x36d4eb84000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000567e1b57a80a7f048a7402191f96c62730e30db200000000000000000000000000000000000000000000000000000000000015400000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000007c000000000000000000000000000000000000000000000000000000000000009200000000000000000000000000000000000000000000000000000000000000a400000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000d60000000000000000000000000000000000000000000000000000000000000124000000000000000000000000000000000000000000000000000000000000013a0000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000873fb92970000000000000000000000000000000000000000000000000000000036d4eb840000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000000551448c000000000000000000000000000000000000000000000000000000008043760a00000000000000000000000000000000000000000000000000000000beda4b12000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000587809c70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000023cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b700000000000000000000000000000000000000000000000000000000fe10226d0000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000
00000000000000000000000000000000000001b60e62600000000000000000000000000000000000000000000000000000000e39d3bff0000000000000000000000000000000000000000000000000000000033ce93fe000000000000000000000000000000000000000000000000000000000ef240a000000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc500000000000000000000000000000000000000000000000000000000a39980a00000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a941000000000000000000000000000000000000000000000000000000003db920ce0000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000040c4dd81000000000000000000000000000000000000000000000000000000000ce9dcf16000000000000000000000000000000000000000000000000000000007739cbe700000000000000000000000000000000000000000000000000000000a9a2d18a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000005e58bb63900000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d00000000000000000000000000000000000000000000000000000000000000000000000000000000409560de546e057ce5bd5db487edf2bb5e785bab000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000a0e18b68100000000000000000000000000000000000000000000000000000000e58bb63900000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf500000000000000000000000000000000000000000000000000000000f235757f000000000000000000000000000000000000000000000000000000001cc5d103000000000000000000000000000000000000000000000000000
00000be6f11cf000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000000000000000000000000000f3acf6a03ea4a914b78ec788624b25cec37c14a40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000022cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed62700000000000000000000000000000000000000000000000000000000a7cd63b70000000000000000000000000000000000000000000000000000000079823c9a000000000000000000000000000000000000000000000000000000004fc07d7500000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da000000000000000000000000000000000000000000000000000000008665b15000000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe9000
0000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d0000000000000000000000000000000000000000000000000000000000000000000000000000000063b5ec36b09384ffa7106a80ec7cfdfca521fd0800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000066c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb672419000000000000000000000000000000000000000000000000000000000000000000000000000000009e3fa34a10619fedd7ae40a3fb86fa515fcfd2690000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000004701f58c500000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000007f61885c0000000000000000000000000000000000000000000000000000000097c09d3400000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000017041ed824a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000168001000983d4ac4f797cf5c077e022f72284969b13248c2a8e9846f574bdeb5b8801000651c5ae96f2aab07d720439e42491bb44c6384015e3a08e32620a4d582d000000000000000000000000b465882f67d236dcc0d090f78ebb0d838e9719d85a3ef282b21e12fe1f4438e5bb158fc5060b160559c5158c6389d62d9fe3d08014628525c227822148e718ca1138acfc6d25e759e19452455d89f7f610c3dcb8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000016a000000000000000000000000000000000000000000000000000000000000016c000000000000000000000000000000000000000000000000000000000656d9c1800000000000000000000000000000000000000000000000000000000000000120000000000000000000000000c0dc1171258694635aa50cec5845ac1031ca6d700000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa20000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000146000000000000000000000000000000000000
0000000000000000000000000148000000000000000000000000000000000000000000000000000000000000014a000000000000000000000000000000000000000000000000000000000000014c000000000000000000000000000000000000000000000000000000000000011c4e9f18c170000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001400000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000340000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000000005800000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000007c0000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000009400000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000dc00000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000f40000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010c001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000
000000000000000000000010000114daca2ff44f27d543b8ef67d885bfed09a74ba9cb25f5912dd3d739c00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000178d93b2d7d6448866009892223caf018a8e8dbcf090c2b9053a285f8d00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000c5a85a372f441ac693210a18e683b530bed875fdcab2f7e101b057d43300000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100013759b40792c2c3d033990e992e5508263c15252eb2d9bfbba57135067500000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000007271e9710c356751295d83a25ffec94be2b4ada01ec1fa04c7cd6f2c700000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100009bc0511159b5ec703d0c56f87615964017739def4ab1ee606b8ec
6458c00000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100012fa73fa922dd9fabb40d3275ce80396eff6ccf1b452c928c17d98bd47000000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100008b0ca6c6f277035366e99407fbb4b01e743e80b7d24dea5a3d647b423e00000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000047a3c40e3f4eb98f14967f141452ae602d8723a10975dc33960911d8c500000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010006091341955c8f76409de00549fb00b275166b5a0d0d7b82cbd629bb421200000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000301c943edb65f5a0b8cdd806218b8ecf25c022720fe3afe6951f202f3fa000000000000000000000000000000000000000000000000000000000000800800000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006fa1591d93fcc4a25e9340ad11d0e825904cd1842b8f7255701e1aacbb00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000139b506af2b02225838c5a33e30ace701b44b210a422eedab7dd31c28a3000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100023ba65021e4689dd1755f82108214a1f25150d439fe58c55cdb1f376436000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010009759cab4fa9e6ca0784746e1df600ff523f0f90c1e94191755cab4b2ed0000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000019642d87621fdd82cf65aa9146486c9256d5f8849af9a37c78ef519339000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001b72874590239af612f65d50a35975299f88de022493fe7f0a190e79496000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005bfc0443349233459892b51e9f67e27ac828d44d9c7cba8c8285fd66bc000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100001fb52ca33668d01c230a1c3b13ede90fe2e37d77222410e9f183cb7a8900000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/infrastructure/protocol-upgrade/README.md 
b/infrastructure/protocol-upgrade/README.md index 6636dff8b0f..caa14236f8b 100644 --- a/infrastructure/protocol-upgrade/README.md +++ b/infrastructure/protocol-upgrade/README.md @@ -167,7 +167,7 @@ $ zk f yarn start l2-transaction force-deployment-calldata \ To deploy a new verifier, use the following command: ```bash -$ zk f yarn start crypto deploy-verifier +$ zk f yarn start crypto deploy-verifier \ --private-key \ --l1rpc \ --gas-price \ diff --git a/infrastructure/protocol-upgrade/src/crypto/crypto.ts b/infrastructure/protocol-upgrade/src/crypto/crypto.ts index 8c7d666473f..df7aa6bd44b 100644 --- a/infrastructure/protocol-upgrade/src/crypto/crypto.ts +++ b/infrastructure/protocol-upgrade/src/crypto/crypto.ts @@ -58,7 +58,7 @@ command command .command('deploy-verifier') - .option('--l1Rpc ') + .option('--l1rpc ') .option('--private-key ') .option('--create2-address ') .option('--nonce ') From 2de48256ebe028c6f95eb32e75339d8b05472c0f Mon Sep 17 00:00:00 2001 From: zksync-admin-bot2 <91326834+zksync-admin-bot2@users.noreply.github.com> Date: Wed, 6 Dec 2023 11:38:12 +0200 Subject: [PATCH 105/115] chore: Update generated Prover FRI CPU setup-data keys from branch main (#609) "Update generated Prover FRI CPU setup-data keys from branch main" --- prover/setup-data-cpu-keys.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/prover/setup-data-cpu-keys.json b/prover/setup-data-cpu-keys.json index 98a15d88038..1a0427e5346 100644 --- a/prover/setup-data-cpu-keys.json +++ b/prover/setup-data-cpu-keys.json @@ -1,5 +1,5 @@ { - "us": "gs://matterlabs-setup-data-us/fb5e9fd/", - "europe": "gs://matterlabs-setup-data-europe/fb5e9fd/", - "asia": "gs://matterlabs-setup-data-asia/fb5e9fd/" + "us": "gs://matterlabs-setup-data-us/e2e94ff/", + "europe": "gs://matterlabs-setup-data-europe/e2e94ff/", + "asia": "gs://matterlabs-setup-data-asia/e2e94ff/" } From 5cf7210dc77bb615944352f23ed39fad324b914f Mon Sep 17 00:00:00 2001 From: perekopskiy 
<53865202+perekopskiy@users.noreply.github.com> Date: Wed, 6 Dec 2023 11:51:40 +0200 Subject: [PATCH 106/115] perf(external-node): Use async miniblock sealing in external IO (#611) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ External IO uses async miniblock sealing. ## Why ❔ Execution of transactions and miniblock sealing (writing data to postgres) happen in parallel so the perfomance is better. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --- core/bin/external_node/src/config/mod.rs | 9 +++ core/bin/external_node/src/main.rs | 17 +++- .../src/state_keeper/io/mempool.rs | 3 + .../zksync_core/src/state_keeper/io/mod.rs | 11 +-- .../src/state_keeper/io/seal_logic.rs | 55 ++++++++++++- .../src/state_keeper/io/tests/mod.rs | 12 +++ .../zksync_core/src/state_keeper/metrics.rs | 7 +- core/lib/zksync_core/src/state_keeper/mod.rs | 6 +- .../src/state_keeper/updates/mod.rs | 10 +++ .../zksync_core/src/sync_layer/external_io.rs | 78 ++++--------------- core/lib/zksync_core/src/sync_layer/tests.rs | 6 +- spellcheck/era.dic | 1 + 12 files changed, 132 insertions(+), 83 deletions(-) diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 3f26a334ea3..c116201b91d 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -190,6 +190,11 @@ pub struct OptionalENConfig { /// Number of keys that is processed by enum_index migration in State Keeper each L1 batch. 
#[serde(default = "OptionalENConfig::default_enum_index_migration_chunk_size")] pub enum_index_migration_chunk_size: usize, + /// Capacity of the queue for asynchronous miniblock sealing. Once this many miniblocks are queued, + /// sealing will block until some of the miniblocks from the queue are processed. + /// 0 means that sealing is synchronous; this is mostly useful for performance comparison, testing etc. + #[serde(default = "OptionalENConfig::default_miniblock_seal_queue_capacity")] + pub miniblock_seal_queue_capacity: usize, } impl OptionalENConfig { @@ -288,6 +293,10 @@ impl OptionalENConfig { 5000 } + const fn default_miniblock_seal_queue_capacity() -> usize { + 10 + } + pub fn polling_interval(&self) -> Duration { Duration::from_millis(self.polling_interval) } diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 52f3353dc07..6324b0599a6 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -22,7 +22,10 @@ use zksync_core::{ }, reorg_detector::ReorgDetector, setup_sigint_handler, - state_keeper::{L1BatchExecutorBuilder, MainBatchExecutorBuilder, ZkSyncStateKeeper}, + state_keeper::{ + L1BatchExecutorBuilder, MainBatchExecutorBuilder, MiniblockSealer, MiniblockSealerHandle, + ZkSyncStateKeeper, + }, sync_layer::{ batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, fetcher::FetcherCursor, genesis::perform_genesis_if_needed, ActionQueue, MainNodeClient, SyncState, @@ -47,6 +50,7 @@ async fn build_state_keeper( connection_pool: ConnectionPool, sync_state: SyncState, l2_erc20_bridge_addr: Address, + miniblock_sealer_handle: MiniblockSealerHandle, stop_receiver: watch::Receiver, chain_id: L2ChainId, ) -> ZkSyncStateKeeper { @@ -73,6 +77,7 @@ async fn build_state_keeper( let main_node_client = ::json_rpc(&main_node_url) .expect("Failed creating JSON-RPC client for main node"); let io = ExternalIO::new( + miniblock_sealer_handle, connection_pool, action_queue, sync_state, 
@@ -106,6 +111,14 @@ async fn init_tasks( let sync_state = SyncState::new(); let (action_queue_sender, action_queue) = ActionQueue::new(); + + let mut task_handles = vec![]; + let (miniblock_sealer, miniblock_sealer_handle) = MiniblockSealer::new( + connection_pool.clone(), + config.optional.miniblock_seal_queue_capacity, + ); + task_handles.push(tokio::spawn(miniblock_sealer.run())); + let state_keeper = build_state_keeper( action_queue, config.required.state_cache_path.clone(), @@ -113,6 +126,7 @@ async fn init_tasks( connection_pool.clone(), sync_state.clone(), config.remote.l2_erc20_bridge_addr, + miniblock_sealer_handle, stop_receiver.clone(), config.remote.l2_chain_id, ) @@ -271,7 +285,6 @@ async fn init_tasks( healthchecks, ); - let mut task_handles = vec![]; if let Some(port) = config.optional.prometheus_port { let prometheus_task = PrometheusExporterConfig::pull(port).run(stop_receiver.clone()); task_handles.push(tokio::spawn(prometheus_task)); diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index f10ad87580c..1d3ad506df6 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -274,6 +274,8 @@ where self.current_l1_batch_number, self.current_miniblock_number, self.l2_erc20_bridge_addr, + None, + false, ); self.miniblock_sealer_handle.submit(command).await; self.current_miniblock_number += 1; @@ -323,6 +325,7 @@ where l1_batch_env, finished_batch, self.l2_erc20_bridge_addr, + None, ) .await; self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. 
diff --git a/core/lib/zksync_core/src/state_keeper/io/mod.rs b/core/lib/zksync_core/src/state_keeper/io/mod.rs index 858c46b2e70..313c363418d 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mod.rs @@ -130,7 +130,7 @@ struct Completable { /// Handle for [`MiniblockSealer`] allowing to submit [`MiniblockSealCommand`]s. #[derive(Debug)] -pub(crate) struct MiniblockSealerHandle { +pub struct MiniblockSealerHandle { commands_sender: mpsc::Sender>, latest_completion_receiver: Option>, // If true, `submit()` will wait for the operation to complete. @@ -144,7 +144,7 @@ impl MiniblockSealerHandle { /// /// If there are currently too many unprocessed commands, this method will wait until /// enough of them are processed (i.e., there is back pressure). - pub async fn submit(&mut self, command: MiniblockSealCommand) { + pub(crate) async fn submit(&mut self, command: MiniblockSealCommand) { let miniblock_number = command.miniblock_number; tracing::debug!( "Enqueuing sealing command for miniblock #{miniblock_number} with #{} txs (L1 batch #{})", @@ -209,7 +209,7 @@ impl MiniblockSealerHandle { /// Component responsible for sealing miniblocks (i.e., storing their data to Postgres). #[derive(Debug)] -pub(crate) struct MiniblockSealer { +pub struct MiniblockSealer { pool: ConnectionPool, is_sync: bool, // Weak sender handle to get queue capacity stats. @@ -220,10 +220,7 @@ pub(crate) struct MiniblockSealer { impl MiniblockSealer { /// Creates a sealer that will use the provided Postgres connection and will have the specified /// `command_capacity` for unprocessed sealing commands. 
- pub(crate) fn new( - pool: ConnectionPool, - mut command_capacity: usize, - ) -> (Self, MiniblockSealerHandle) { + pub fn new(pool: ConnectionPool, mut command_capacity: usize) -> (Self, MiniblockSealerHandle) { let is_sync = command_capacity == 0; command_capacity = command_capacity.max(1); diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs index ca2dc641909..4501be62f78 100644 --- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs @@ -8,6 +8,7 @@ use std::{ }; use multivm::interface::{FinishedL1Batch, L1BatchEnv}; +use zksync_dal::blocks_dal::ConsensusBlockFields; use zksync_dal::StorageProcessor; use zksync_system_constants::ACCOUNT_CODE_STORAGE_ADDRESS; use zksync_types::{ @@ -18,14 +19,18 @@ use zksync_types::{ use zksync_types::{ block::{L1BatchHeader, MiniblockHeader}, event::{extract_added_tokens, extract_long_l2_to_l1_messages}, + l1::L1Tx, + l2::L2Tx, + protocol_version::ProtocolUpgradeTx, storage_writes_deduplicator::{ModifiedSlot, StorageWritesDeduplicator}, tx::{ tx_execution_info::DeduplicatedWritesMetrics, IncludedTxLocation, TransactionExecutionResult, }, zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, - AccountTreeId, Address, ExecuteTransactionCommon, L1BatchNumber, LogQuery, MiniblockNumber, - StorageKey, StorageLog, StorageLogQuery, StorageValue, Transaction, VmEvent, H256, + AccountTreeId, Address, ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, LogQuery, + MiniblockNumber, StorageKey, StorageLog, StorageLogQuery, StorageValue, Transaction, VmEvent, + H256, }; // TODO (SMA-1206): use seconds instead of milliseconds. 
use zksync_utils::{h256_to_u256, time::millis_since_epoch, u256_to_h256}; @@ -50,6 +55,7 @@ impl UpdatesManager { l1_batch_env: &L1BatchEnv, finished_batch: FinishedL1Batch, l2_erc20_bridge_addr: Address, + consensus: Option, ) { let started_at = Instant::now(); let progress = L1_BATCH_METRICS.start(L1BatchSealStage::VmFinalization); @@ -63,6 +69,8 @@ impl UpdatesManager { l1_batch_env.number, current_miniblock_number, l2_erc20_bridge_addr, + consensus, + false, // fictive miniblocks don't have txs, so it's fine to pass `false` here. ); miniblock_command.seal_inner(&mut transaction, true).await; progress.observe(None); @@ -274,6 +282,36 @@ impl MiniblockSealCommand { async fn seal_inner(&self, storage: &mut StorageProcessor<'_>, is_fictive: bool) { self.assert_valid_miniblock(is_fictive); + let mut transaction = storage.start_transaction().await.unwrap(); + if self.pre_insert_txs { + let progress = MINIBLOCK_METRICS.start(MiniblockSealStage::PreInsertTxs, is_fictive); + for tx in &self.miniblock.executed_transactions { + if let Ok(l1_tx) = L1Tx::try_from(tx.transaction.clone()) { + let l1_block_number = L1BlockNumber(l1_tx.common_data.eth_block as u32); + transaction + .transactions_dal() + .insert_transaction_l1(l1_tx, l1_block_number) + .await; + } else if let Ok(l2_tx) = L2Tx::try_from(tx.transaction.clone()) { + // Using `Default` for execution metrics should be OK here, since this data is not used on the EN. 
+ transaction + .transactions_dal() + .insert_transaction_l2(l2_tx, Default::default()) + .await; + } else if let Ok(protocol_system_upgrade_tx) = + ProtocolUpgradeTx::try_from(tx.transaction.clone()) + { + transaction + .transactions_dal() + .insert_system_transaction(protocol_system_upgrade_tx) + .await; + } else { + unreachable!("Transaction {:?} is neither L1 nor L2", tx.transaction); + } + } + progress.observe(Some(self.miniblock.executed_transactions.len())); + } + let l1_batch_number = self.l1_batch_number; let miniblock_number = self.miniblock_number; let started_at = Instant::now(); @@ -291,7 +329,6 @@ impl MiniblockSealCommand { event_count = self.miniblock.events.len() ); - let mut transaction = storage.start_transaction().await.unwrap(); let miniblock_header = MiniblockHeader { number: miniblock_number, timestamp: self.miniblock.timestamp, @@ -404,6 +441,18 @@ impl MiniblockSealCommand { .await; progress.observe(user_l2_to_l1_log_count); + let progress = MINIBLOCK_METRICS.start(MiniblockSealStage::InsertConsensus, is_fictive); + // We want to add miniblock consensus fields atomically with the miniblock data so that we + // don't need to deal with corner cases (e.g., a miniblock w/o consensus fields). 
+ if let Some(consensus) = &self.consensus { + transaction + .blocks_dal() + .set_miniblock_consensus_fields(self.miniblock_number, consensus) + .await + .unwrap(); + } + progress.observe(None); + let progress = MINIBLOCK_METRICS.start(MiniblockSealStage::CommitMiniblock, is_fictive); let current_l2_virtual_block_info = transaction .storage_dal() diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 0c13a7a614b..2b924554f27 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -245,6 +245,8 @@ async fn processing_storage_logs_when_sealing_miniblock() { base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), l2_erc20_bridge_addr: Address::default(), + consensus: None, + pre_insert_txs: false, }; let mut conn = connection_pool .access_storage_tagged("state_keeper") @@ -321,6 +323,8 @@ async fn processing_events_when_sealing_miniblock() { base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), l2_erc20_bridge_addr: Address::default(), + consensus: None, + pre_insert_txs: false, }; let mut conn = pool.access_storage_tagged("state_keeper").await.unwrap(); conn.protocol_versions_dal() @@ -434,6 +438,8 @@ async fn miniblock_sealer_handle_blocking() { L1BatchNumber(1), MiniblockNumber(1), Address::default(), + None, + false, ); sealer_handle.submit(seal_command).await; @@ -442,6 +448,8 @@ async fn miniblock_sealer_handle_blocking() { L1BatchNumber(1), MiniblockNumber(2), Address::default(), + None, + false, ); { let submit_future = sealer_handle.submit(seal_command); @@ -470,6 +478,8 @@ async fn miniblock_sealer_handle_blocking() { L1BatchNumber(2), MiniblockNumber(3), Address::default(), + None, + false, ); sealer_handle.submit(seal_command).await; let command = 
sealer.commands_receiver.recv().await.unwrap(); @@ -489,6 +499,8 @@ async fn miniblock_sealer_handle_parallel_processing() { L1BatchNumber(1), MiniblockNumber(i), Address::default(), + None, + false, ); sealer_handle.submit(seal_command).await; } diff --git a/core/lib/zksync_core/src/state_keeper/metrics.rs b/core/lib/zksync_core/src/state_keeper/metrics.rs index f3f43324320..72b89c4a2b8 100644 --- a/core/lib/zksync_core/src/state_keeper/metrics.rs +++ b/core/lib/zksync_core/src/state_keeper/metrics.rs @@ -168,7 +168,6 @@ pub(super) enum L1BatchSealStage { FilterWrittenSlots, InsertInitialWrites, CommitL1Batch, - ExternalNodeStoreTransactions, } /// Buckets for positive integer, not-so-large values (e.g., initial writes count). @@ -221,10 +220,6 @@ impl L1BatchMetrics { latency_per_unit: &self.sealed_entity_per_unit[&stage], } } - - pub(crate) fn start_storing_on_en(&self) -> LatencyObserver<'_> { - self.sealed_time_stage[&L1BatchSealStage::ExternalNodeStoreTransactions].start() - } } #[vise::register] @@ -241,6 +236,7 @@ pub(super) enum MiniblockQueueStage { #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue)] #[metrics(rename_all = "snake_case")] pub(super) enum MiniblockSealStage { + PreInsertTxs, InsertMiniblockHeader, MarkTransactionsInMiniblock, InsertStorageLogs, @@ -253,6 +249,7 @@ pub(super) enum MiniblockSealStage { InsertEvents, ExtractL2ToL1Logs, InsertL2ToL1Logs, + InsertConsensus, CommitMiniblock, } diff --git a/core/lib/zksync_core/src/state_keeper/mod.rs b/core/lib/zksync_core/src/state_keeper/mod.rs index bdc1f90e206..5ec395267df 100644 --- a/core/lib/zksync_core/src/state_keeper/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/mod.rs @@ -24,14 +24,14 @@ pub(crate) mod updates; pub use self::{ batch_executor::{L1BatchExecutorBuilder, MainBatchExecutorBuilder}, + io::{MiniblockSealer, MiniblockSealerHandle}, keeper::ZkSyncStateKeeper, }; pub(crate) use self::{ - io::MiniblockSealer, mempool_actor::MempoolFetcher, 
seal_criteria::ConditionalSealer, - types::MempoolGuard, + mempool_actor::MempoolFetcher, seal_criteria::ConditionalSealer, types::MempoolGuard, }; -use self::io::{MempoolIO, MiniblockSealerHandle}; +use self::io::MempoolIO; use crate::l1_gas_price::L1GasPriceProvider; #[allow(clippy::too_many_arguments)] diff --git a/core/lib/zksync_core/src/state_keeper/updates/mod.rs b/core/lib/zksync_core/src/state_keeper/updates/mod.rs index dc72893e703..3f09f7be30b 100644 --- a/core/lib/zksync_core/src/state_keeper/updates/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/updates/mod.rs @@ -1,6 +1,7 @@ use multivm::interface::{L1BatchEnv, VmExecutionResultAndLogs}; use zksync_contracts::BaseSystemContractsHashes; +use zksync_dal::blocks_dal::ConsensusBlockFields; use zksync_types::vm_trace::Call; use zksync_types::{ block::BlockGasCount, storage_writes_deduplicator::StorageWritesDeduplicator, @@ -81,6 +82,8 @@ impl UpdatesManager { l1_batch_number: L1BatchNumber, miniblock_number: MiniblockNumber, l2_erc20_bridge_addr: Address, + consensus: Option, + pre_insert_txs: bool, ) -> MiniblockSealCommand { MiniblockSealCommand { l1_batch_number, @@ -93,6 +96,8 @@ impl UpdatesManager { base_system_contracts_hashes: self.base_system_contract_hashes, protocol_version: Some(self.protocol_version), l2_erc20_bridge_addr, + consensus, + pre_insert_txs, } } @@ -172,6 +177,11 @@ pub(crate) struct MiniblockSealCommand { pub base_system_contracts_hashes: BaseSystemContractsHashes, pub protocol_version: Option, pub l2_erc20_bridge_addr: Address, + pub consensus: Option, + /// Whether transactions should be pre-inserted to DB. + /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB + /// before they are included into miniblocks. 
+ pub pre_insert_txs: bool, } #[cfg(test)] diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index 8e3ca863072..4e870b95674 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -1,20 +1,14 @@ use async_trait::async_trait; use futures::future; -use std::{ - collections::HashMap, - convert::{TryFrom, TryInto}, - iter::FromIterator, - time::Duration, -}; +use std::{collections::HashMap, convert::TryInto, iter::FromIterator, time::Duration}; use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::ConnectionPool; use zksync_types::{ - ethabi::Address, l1::L1Tx, l2::L2Tx, protocol_version::ProtocolUpgradeTx, - witness_block_state::WitnessBlockState, L1BatchNumber, L1BlockNumber, L2ChainId, - MiniblockNumber, ProtocolVersionId, Transaction, H256, U256, + ethabi::Address, protocol_version::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, + L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, U256, }; use zksync_utils::{be_words_to_bytes, bytes_to_be_words}; @@ -29,9 +23,9 @@ use crate::{ extractors, io::{ common::{l1_batch_params, load_pending_batch, poll_iters}, - MiniblockParams, PendingBatchData, StateKeeperIO, + MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, }, - metrics::{KEEPER_METRICS, L1_BATCH_METRICS}, + metrics::KEEPER_METRICS, seal_criteria::IoSealCriteria, updates::UpdatesManager, }, @@ -48,6 +42,7 @@ const POLL_INTERVAL: Duration = Duration::from_millis(100); /// to the one in the mempool IO (which is used in the main node). 
#[derive(Debug)] pub struct ExternalIO { + miniblock_sealer_handle: MiniblockSealerHandle, pool: ConnectionPool, current_l1_batch_number: L1BatchNumber, @@ -64,7 +59,9 @@ pub struct ExternalIO { } impl ExternalIO { + #[allow(clippy::too_many_arguments)] pub async fn new( + miniblock_sealer_handle: MiniblockSealerHandle, pool: ConnectionPool, actions: ActionQueue, sync_state: SyncState, @@ -95,6 +92,7 @@ impl ExternalIO { sync_state.set_local_block(last_miniblock_number); Self { + miniblock_sealer_handle, pool, current_l1_batch_number: last_sealed_l1_batch_header.number + 1, current_miniblock_number: last_miniblock_number + 1, @@ -459,56 +457,15 @@ impl StateKeeperIO for ExternalIO { panic!("State keeper requested to seal miniblock, but the next action is {action:?}"); }; - let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); - let mut transaction = storage.start_transaction().await.unwrap(); - - let store_latency = L1_BATCH_METRICS.start_storing_on_en(); - // We don't store the transactions in the database until they're executed to not overcomplicate the state - // recovery on restart. So we have to store them here. - for tx in &updates_manager.miniblock.executed_transactions { - if let Ok(l1_tx) = L1Tx::try_from(tx.transaction.clone()) { - let l1_block_number = L1BlockNumber(l1_tx.common_data.eth_block as u32); - transaction - .transactions_dal() - .insert_transaction_l1(l1_tx, l1_block_number) - .await; - } else if let Ok(l2_tx) = L2Tx::try_from(tx.transaction.clone()) { - // Using `Default` for execution metrics should be OK here, since this data is not used on the EN. 
- transaction - .transactions_dal() - .insert_transaction_l2(l2_tx, Default::default()) - .await; - } else if let Ok(protocol_system_upgrade_tx) = - ProtocolUpgradeTx::try_from(tx.transaction.clone()) - { - transaction - .transactions_dal() - .insert_system_transaction(protocol_system_upgrade_tx) - .await; - } else { - unreachable!("Transaction {:?} is neither L1 nor L2", tx.transaction); - } - } - store_latency.observe(); - // Now transactions are stored, and we may mark them as executed. let command = updates_manager.seal_miniblock_command( self.current_l1_batch_number, self.current_miniblock_number, self.l2_erc20_bridge_addr, + consensus, + true, ); - command.seal(&mut transaction).await; - - // We want to add miniblock consensus fields atomically with the miniblock data so that we - // don't need to deal with corner cases (e.g., a miniblock w/o consensus fields). - if let Some(consensus) = &consensus { - transaction - .blocks_dal() - .set_miniblock_consensus_fields(self.current_miniblock_number, consensus) - .await - .unwrap(); - } - transaction.commit().await.unwrap(); + self.miniblock_sealer_handle.submit(command).await; self.sync_state .set_local_block(self.current_miniblock_number); @@ -531,6 +488,9 @@ impl StateKeeperIO for ExternalIO { ); }; + // We cannot start sealing an L1 batch until we've sealed all miniblocks included in it. 
+ self.miniblock_sealer_handle.wait_for_all_commands().await; + let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); let mut transaction = storage.start_transaction().await.unwrap(); updates_manager @@ -540,15 +500,9 @@ impl StateKeeperIO for ExternalIO { l1_batch_env, finished_batch, self.l2_erc20_bridge_addr, + consensus, ) .await; - if let Some(consensus) = &consensus { - transaction - .blocks_dal() - .set_miniblock_consensus_fields(self.current_miniblock_number, consensus) - .await - .unwrap(); - } transaction.commit().await.unwrap(); tracing::info!("Batch {} is sealed", self.current_l1_batch_number); diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 20bafc51cf6..10582c7d9f9 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -22,7 +22,7 @@ use crate::{ genesis::{ensure_genesis_state, GenesisParams}, state_keeper::{ tests::{create_l1_batch_metadata, create_l2_transaction, TestBatchExecutorBuilder}, - ZkSyncStateKeeper, + MiniblockSealer, ZkSyncStateKeeper, }, }; @@ -156,7 +156,11 @@ impl StateKeeperHandles { ensure_genesis(&mut pool.access_storage().await.unwrap()).await; let sync_state = SyncState::new(); + let (miniblock_sealer, miniblock_sealer_handle) = MiniblockSealer::new(pool.clone(), 5); + tokio::spawn(miniblock_sealer.run()); + let io = ExternalIO::new( + miniblock_sealer_handle, pool, actions, sync_state.clone(), diff --git a/spellcheck/era.dic b/spellcheck/era.dic index 13dd303a3dc..e56162fcf02 100644 --- a/spellcheck/era.dic +++ b/spellcheck/era.dic @@ -276,6 +276,7 @@ versa blake2 AR16MT Preimages +EN's // Names Vyper From 66e76b5f6baa7a7e7bea434376aeaa9f42701898 Mon Sep 17 00:00:00 2001 From: Jean <148654781+oxJean@users.noreply.github.com> Date: Wed, 6 Dec 2023 17:56:20 +0800 Subject: [PATCH 107/115] chore: fix document path (#615) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit ## What ❔ fix document path ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --------- Co-authored-by: Igor Aleksanov --- docs/advanced/how_call_works.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/advanced/how_call_works.md b/docs/advanced/how_call_works.md index 4ba4859c410..178a95b3239 100644 --- a/docs/advanced/how_call_works.md +++ b/docs/advanced/how_call_works.md @@ -69,7 +69,8 @@ opcodes similar to EVM, but operates on registers rather than a stack. We have t 'pure rust' without circuits (in the zk_evm repository), and the other has circuits (in the sync_vm repository). In this example, the api server uses the 'zk_evm' implementation without circuits. -Most of the code that the server uses to interact with the VM is in [core/lib/vm/src/vm.rs][vm_code]. +Most of the code that the server uses to interact with the VM is in +[core/lib/multivm/src/versions/vm_latest/implementation/execution.rs][vm_code]. In this line, we're calling self.state.cycle(), which executes a single VM instruction. You can see that we do a lot of things around this, such as executing multiple tracers after each instruction. This allows us to debug and provide From 66af65029428d38a80ccfca29a0b5eecb1938e13 Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Wed, 6 Dec 2023 15:31:06 +0400 Subject: [PATCH 108/115] chore: Remove era-reviewers from codeowners (#618) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Removes era-reviewers group from codeowners. ## Why ❔ - Too noisy. - We have internal processes for that anyways. 
## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --- CODEOWNERS | 1 - 1 file changed, 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index 8cde1cc1ade..eea7f1fa137 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,4 +1,3 @@ -* @matter-labs/era-reviewers .github/release-please/** @RomanBrodetski @perekopskiy @Deniallugo @popzxc **/CHANGELOG.md @RomanBrodetski @perekopskiy @Deniallugo @popzxc CODEOWNERS @RomanBrodetski @perekopskiy @Deniallugo @popzxc From c4dc1e1d5b0e88cf22679aea46287b5725fcc9bc Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Wed, 6 Dec 2023 13:33:28 +0100 Subject: [PATCH 109/115] chore(main): release core 18.6.1 (#616) :robot: I have created a release *beep* *boop* --- ## [18.6.1](https://github.com/matter-labs/zksync-era/compare/core-v18.6.0...core-v18.6.1) (2023-12-06) ### Performance Improvements * **external-node:** Use async miniblock sealing in external IO ([#611](https://github.com/matter-labs/zksync-era/issues/611)) ([5cf7210](https://github.com/matter-labs/zksync-era/commit/5cf7210dc77bb615944352f23ed39fad324b914f)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
--- .github/release-please/manifest.json | 2 +- core/CHANGELOG.md | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index bce64523644..5fa24197fc7 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { "sdk/zksync-rs": "0.4.0", - "core": "18.6.0", + "core": "18.6.1", "prover": "10.0.0" } diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 41439ca4651..77773c81ba7 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## [18.6.1](https://github.com/matter-labs/zksync-era/compare/core-v18.6.0...core-v18.6.1) (2023-12-06) + + +### Performance Improvements + +* **external-node:** Use async miniblock sealing in external IO ([#611](https://github.com/matter-labs/zksync-era/issues/611)) ([5cf7210](https://github.com/matter-labs/zksync-era/commit/5cf7210dc77bb615944352f23ed39fad324b914f)) + ## [18.6.0](https://github.com/matter-labs/zksync-era/compare/core-v18.5.0...core-v18.6.0) (2023-12-05) From c7d4315fcc2711b7a6435c1414f61896d85ede8f Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Wed, 6 Dec 2023 19:45:54 +0700 Subject: [PATCH 110/115] feat(hyperchain-wizard): zkStack CLI GPU support (#612) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Support for creating zk hyperchain via zk cli with GPU-based provers ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [X] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. 
--------- Co-authored-by: Igor Borodin --- docker-compose-zkstack-common.yml | 35 ++++++ ...=> docker-compose-hyperchain-template.hbs} | 81 +++++++++++--- infrastructure/zk/src/hyperchain_wizard.ts | 104 +++++++++++++----- infrastructure/zk/src/prover_setup.ts | 72 ++++++++---- infrastructure/zk/src/up.ts | 8 +- 5 files changed, 230 insertions(+), 70 deletions(-) create mode 100644 docker-compose-zkstack-common.yml rename etc/hyperchains/{docker-compose-hyperchain-template => docker-compose-hyperchain-template.hbs} (70%) diff --git a/docker-compose-zkstack-common.yml b/docker-compose-zkstack-common.yml new file mode 100644 index 00000000000..5d92de5d31d --- /dev/null +++ b/docker-compose-zkstack-common.yml @@ -0,0 +1,35 @@ +version: '3.2' +networks: + zkstack: + driver: bridge +services: + geth: + image: "matterlabs/geth:latest" + ports: + - "127.0.0.1:8545:8545" + - "127.0.0.1:8546:8546" + volumes: + - type: bind + source: ./volumes/geth + target: /var/lib/geth/data + networks: + - zkstack + container_name: geth + postgres: + image: "postgres:14" + container_name: postgres + ports: + - "127.0.0.1:5432:5432" + volumes: + - type: bind + source: ./volumes/postgres + target: /var/lib/postgresql/data + environment: + # We bind only to 127.0.0.1, so setting insecure password is acceptable here + - POSTGRES_PASSWORD=notsecurepassword + command: + - "postgres" + - "-c" + - "max_connections=1000" + networks: + - zkstack diff --git a/etc/hyperchains/docker-compose-hyperchain-template b/etc/hyperchains/docker-compose-hyperchain-template.hbs similarity index 70% rename from etc/hyperchains/docker-compose-hyperchain-template rename to etc/hyperchains/docker-compose-hyperchain-template.hbs index 00cb0ebc2a7..a1b4f92e3ab 100644 --- a/etc/hyperchains/docker-compose-hyperchain-template +++ b/etc/hyperchains/docker-compose-hyperchain-template.hbs @@ -1,13 +1,14 @@ version: '3.2' networks: - zkstack: - driver: bridge + zksync-era_zkstack: + external: true volumes: artifacts: 
services: zkstack-core: + container_name: zkstack-core networks: - - zkstack + - zksync-era_zkstack image: {{orgName}}/server-v2:latest command: ["--components", "tree_new,eth,data_fetcher,state_keeper,housekeeper,proof_data_handler"] healthcheck: @@ -24,20 +25,23 @@ services: - "3320:3320" # proof_data_handler api volumes: - artifacts:{{artifactsPath}} + zkstack-apis: networks: - - zkstack + - zksync-era_zkstack image: {{orgName}}/server-v2:latest command: ["--components", "http_api,ws_api"] env_file: - {{envFilePath}} environment: ZKSYNC_HOME: / + FRI_PROVER_GATEWAY_API_URL: http://zkstack-core:3320 ports: # assumes default ports in .env - "3071:3071" # health - "3312:3312" # prometheus metrics # we need a separate metrics port for each component - "3050:3050" # http_api - "3051:3051" # ws_api + {{#if hasProver}} # System requirements for CPU proving: # ~16+ CPU cores @@ -46,7 +50,7 @@ services: # - (PRO-47): Figure out how to properly set metrics ports for each service in env zkstack-prover-fri-gateway: networks: - - zkstack + - zksync-era_zkstack image: matterlabs/prover-fri-gateway:latest depends_on: zkstack-core: @@ -62,7 +66,7 @@ services: - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} zkstack-witness-generator-basic-circuits: networks: - - zkstack + - zksync-era_zkstack image: matterlabs/witness-generator:latest command: ["--round", "basic_circuits"] env_file: @@ -72,9 +76,10 @@ services: volumes: - artifacts:{{artifactsPath}} - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} + zkstack-witness-generator-leaf-aggregation: networks: - - zkstack + - zksync-era_zkstack image: matterlabs/witness-generator:latest command: ["--round", "leaf_aggregation"] env_file: @@ -84,9 +89,10 @@ services: volumes: - artifacts:{{artifactsPath}} - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} + zkstack-witness-generator-node-aggregation: networks: - - zkstack + - zksync-era_zkstack image: matterlabs/witness-generator:latest command: ["--round", "node_aggregation"] 
env_file: @@ -96,9 +102,10 @@ services: volumes: - artifacts:{{artifactsPath}} - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} + zkstack-witness-generator-scheduler: networks: - - zkstack + - zksync-era_zkstack image: matterlabs/witness-generator:latest command: ["--round", "scheduler"] env_file: @@ -108,10 +115,11 @@ services: volumes: - artifacts:{{artifactsPath}} - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} - zkstack-prover-fri: + + zkstack-proof-fri-compressor: networks: - - zkstack - image: matterlabs/prover-fri:latest + - zksync-era_zkstack + image: matterlabs/proof-fri-compressor:latest env_file: - {{envFilePath}} # ports: # assumes default ports in .env @@ -119,16 +127,57 @@ services: volumes: - artifacts:{{artifactsPath}} - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} - zkstack-proof-fri-compressor: + witness-vector-generator: networks: - - zkstack - image: matterlabs/proof-fri-compressor:latest + - zksync-era_zkstack + image: matterlabs/witness-vector-generator:latest + restart: always env_file: - {{envFilePath}} + deploy: + mode: replicated + replicas: {{witnessVectorGensCount}} # ports: # assumes default ports in .env # - "3312:3312" # prometheus metrics volumes: - artifacts:{{artifactsPath}} - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} {{/if}} - \ No newline at end of file + {{#ifAnd hasProver hasCPUProver}} + zkstack-prover-cpu-fri: + networks: + - zksync-era_zkstack + image: matterlabs/prover-fri:latest + env_file: + - {{envFilePath}} + # - "3312:3312" # prometheus metrics + volumes: + - artifacts:{{artifactsPath}} + - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} + {{/ifAnd}} + {{#ifAnd hasProver hasGPUProver}} + zkstack-prover-gpu-fri: + networks: + - zksync-era_zkstack + {{#if needBuildProver}} + build: # Needed for anything that is not NVIDIA CUDA_ARCH 89 + dockerfile: ./docker/prover-gpu-fri/Dockerfile + args: + CUDA_ARCH: {{cudaArch}} + {{else}} + image: matterlabs/prover-gpu-fri:latest # Only works for NVIDIA 
CUDA_ARCH 89 + {{/if}} + env_file: + - {{envFilePath}} + # - "3312:3312" # prometheus metrics + volumes: + - artifacts:{{artifactsPath}} + - {{proverSetupArtifacts}}:{{proverSetupArtifacts}} + security_opt: # HACK: Might work on vanilla Ubuntu distros without this + - seccomp:unconfined + deploy: + resources: + reservations: + devices: + - capabilities: [gpu] + {{/ifAnd}} diff --git a/infrastructure/zk/src/hyperchain_wizard.ts b/infrastructure/zk/src/hyperchain_wizard.ts index 1fdc191ec92..05997127ca8 100644 --- a/infrastructure/zk/src/hyperchain_wizard.ts +++ b/infrastructure/zk/src/hyperchain_wizard.ts @@ -30,7 +30,7 @@ enum BaseNetwork { enum ProverTypeOption { NONE = 'No (this hyperchain is for testing purposes only)', CPU = 'Yes - With a CPU implementation', - GPU = 'Yes - With a GPU implementation (Coming soon)' + GPU = 'Yes - With a GPU implementation' } export interface BasePromptOptions { @@ -43,6 +43,9 @@ export interface BasePromptOptions { skip?: ((state: object) => boolean | Promise) | boolean; } +// PLA:681 +let isLocalhost = false; + // An init command that allows configuring and spinning up a new hyperchain network. 
async function initHyperchain() { await announced('Initializing hyperchain creation', setupConfiguration()); @@ -70,6 +73,15 @@ async function initHyperchain() { await init(initArgs); + // if we used matterlabs/geth network, we need custom ENV file for hyperchain compose parts + // This breaks `zk status prover` command, but neccessary for working in isolated docker-network + // TODO: Think about better implementation + // PLA:681 + if (isLocalhost) { + wrapEnvModify('ETH_CLIENT_WEB3_URL', 'http://geth:8545'); + wrapEnvModify('DATABASE_URL', 'postgres://postgres:notsecurepassword@postgres:5432/zksync_local'); + } + env.mergeInitToEnv(); console.log(announce(`\nYour hyperchain configuration is available at ${process.env.ENV_FILE}\n`)); @@ -251,9 +263,12 @@ async function setHyperchainMetadata() { feeReceiverAddress = keyResults.feeReceiver; } } else { + // PLA:681 + isLocalhost = true; l1Rpc = 'http://localhost:8545'; l1Id = 9; - databaseUrl = 'postgres://postgres@localhost/zksync_local'; + databaseUrl = 'postgres://postgres:notsecurepassword@localhost:5432/zksync_local'; + wrapEnvModify('DATABASE_URL', databaseUrl); const richWalletsRaw = await fetch( 'https://raw.githubusercontent.com/matter-labs/local-setup/main/rich-wallets.json' @@ -267,7 +282,7 @@ async function setHyperchainMetadata() { feeReceiver = undefined; feeReceiverAddress = richWallets[3].address; - await up(); + await up('docker-compose-zkstack-common.yml'); await announced('Ensuring databases are up', db.wait()); } @@ -318,7 +333,8 @@ async function setHyperchainMetadata() { await compileConfig(environment); env.set(environment); - + // TODO: Generate url for data-compressor with selected region or fix env variable for keys location + // PLA-595 wrapEnvModify('DATABASE_URL', databaseUrl); wrapEnvModify('ETH_CLIENT_CHAIN_ID', l1Id.toString()); wrapEnvModify('ETH_CLIENT_WEB3_URL', l1Rpc); @@ -362,23 +378,6 @@ async function setupHyperchainProver() { proverType = proverResults.prover; - if (proverType 
=== ProverTypeOption.GPU) { - const gpuQuestions: BasePromptOptions[] = [ - { - message: 'GPU prover is not yet available. Do you want to use the CPU implementation?', - name: 'prover', - type: 'confirm', - required: true - } - ]; - - const gpuResults: any = await enquirer.prompt(gpuQuestions); - - if (gpuResults.prover) { - proverType = ProverTypeOption.CPU; - } - } - switch (proverType) { case ProverTypeOption.NONE: wrapEnvModify('ETH_SENDER_SENDER_PROOF_SENDING_MODE', 'SkipEveryProof'); @@ -396,9 +395,12 @@ function printAddressInfo(name: string, address: string) { } async function initializeTestERC20s() { + // TODO: For now selecting NO breaks server-core deployment, should be always YES or create empty-mock file for v2-core + // PLA-595 const questions: BasePromptOptions[] = [ { - message: 'Do you want to deploy some test ERC20s to your hyperchain (only use on testing scenarios)?', + message: + 'Do you want to deploy some test ERC20s to your hyperchain? NB: Temporary broken, always select YES', name: 'deployERC20s', type: 'confirm' } @@ -410,7 +412,7 @@ async function initializeTestERC20s() { wrapEnvModify('DEPLOY_TEST_TOKENS', 'true'); console.log( warning( - `The addresses for the tokens will be available at the /etc/tokens/${getEnv( + `The addresses for the generated test ECR20 tokens will be available at the /etc/tokens/${getEnv( process.env.CHAIN_ETH_NETWORK! 
)}.json file.` ) @@ -474,7 +476,7 @@ async function initializeWethTokenForHyperchain() { async function startServer() { const YES_DEFAULT = 'Yes (default components)'; const YES_CUSTOM = 'Yes (custom components)'; - const NO = 'Not right now'; + const NO = 'Not right now (you can now configure prover, generate docker files, or just run the server later)'; const questions: BasePromptOptions[] = [ { @@ -651,10 +653,8 @@ async function generateDockerImages(cmd: Command) { async function _generateDockerImages(_orgName?: string) { console.log(warning(`\nThis process will build the docker images and it can take a while. Please be patient.\n`)); - const envName = await selectHyperchainConfiguration(); env.set(envName); - const orgName = _orgName || envName; await docker.customBuildForHyperchain('server-v2', orgName); @@ -662,7 +662,12 @@ async function _generateDockerImages(_orgName?: string) { console.log(warning(`\nDocker image for server created: Server image: ${orgName}/server-v2:latest\n`)); let hasProver = false; + let hasGPUProver = false; + let hasCPUProver = false; + let needBuildProver = false; let artifactsPath, proverSetupArtifacts; + let witnessVectorGensCount = 0; + let cudaArch = ''; if (process.env.ETH_SENDER_SENDER_PROOF_SENDING_MODE !== 'SkipEveryProof') { hasProver = true; @@ -672,9 +677,27 @@ async function _generateDockerImages(_orgName?: string) { } if (process.env.PROVER_TYPE === ProverType.GPU) { - throw new Error('GPU prover configuration not available yet'); + hasGPUProver = true; + const cudaArchPrompt: BasePromptOptions[] = [ + { + message: + 'What is your GPU Compute Capability version? You can find it in table here - https://en.wikipedia.org/wiki/CUDA#GPUs_supported. Input only 2 numbers withous dot, e.g. 
if you have RTX 3070 -> Compute Capability 8.6 -> Answer is 86', + name: 'cudaArch', + type: 'input', + required: true + } + ]; + const cudaRes: any = await enquirer.prompt(cudaArchPrompt); + cudaArch = cudaRes.cudaArch; + } else { + hasCPUProver = true; } + // TODO: Make this param configurable + // We need to generate at least 4 witnes-vector-generators per prover, but it can be less, and can be more + // PLA-683 + witnessVectorGensCount = 4; + // For Now use only the public images. Too soon to allow prover to be customized // await docker.customBuildForHyperchain('witness-generator', orgName); // await docker.customBuildForHyperchain('witness-vector-generator', orgName); @@ -689,15 +712,38 @@ async function _generateDockerImages(_orgName?: string) { // } } + // TODO: Autodetect version via nvidia-smi + // We have precompiled GPU prover image only for CUDA arch 89 aka ADA, all others need to be re-build + // PLA-682 + if (process.env.PROVER_TYPE === ProverType.GPU && cudaArch != '89') { + needBuildProver = true; + } + const composeArgs = { envFilePath: `./etc/env/${envName}.env`, orgName, hasProver, artifactsPath, - proverSetupArtifacts + proverSetupArtifacts, + hasGPUProver, + hasCPUProver, + cudaArch, + needBuildProver, + witnessVectorGensCount }; - const templateFileName = './etc/hyperchains/docker-compose-hyperchain-template'; + // Creating simple handlebars helper "if (foo AND bar)" to reduce copypaste in compose template + Handlebars.registerHelper( + 'ifAnd', + function (this: boolean, a: boolean, b: boolean, options: Handlebars.HelperOptions) { + if (a && b) { + return options.fn(this); + } + return options.inverse(this); + } + ); + + const templateFileName = './etc/hyperchains/docker-compose-hyperchain-template.hbs'; const templateString = fs.existsSync(templateFileName) && fs.readFileSync(templateFileName).toString().trim(); const template = Handlebars.compile(templateString); const result = template(composeArgs); diff --git 
a/infrastructure/zk/src/prover_setup.ts b/infrastructure/zk/src/prover_setup.ts index b438eea055d..d1de98166d0 100644 --- a/infrastructure/zk/src/prover_setup.ts +++ b/infrastructure/zk/src/prover_setup.ts @@ -12,6 +12,12 @@ export enum ProverType { GPU = 'gpu' } +enum KeysRegionOption { + US = 'us', + EU = 'europe', + ASIA = 'asia' +} + export async function setupProver(proverType: ProverType) { // avoid doing work if receives the wrong param from the CLI if (proverType == ProverType.GPU || proverType == ProverType.CPU) { @@ -37,14 +43,16 @@ export async function setupProver(proverType: ProverType) { } } -async function downloadCSR(proverType: ProverType) { +async function downloadCSR(proverType: ProverType, region: string) { const currentEnv = env.get(); fs.mkdirSync(`${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/`, { recursive: true }); process.chdir(`${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/`); console.log(chalk.yellow('Downloading ceremony (CSR) file')); - await utils.spawn('wget -c https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2^24.key'); + await utils.spawn( + `wget -q --show-progress -c https://storage.googleapis.com/matterlabs-setup-keys-${region}/setup-keys/setup_2^24.key` + ); await utils.sleep(1); process.chdir(process.env.ZKSYNC_HOME as string); wrapEnvModify('CRS_FILE', `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/`); @@ -53,6 +61,8 @@ async function downloadCSR(proverType: ProverType) { async function setupProverKeys(proverType: ProverType) { const DOWNLOAD = 'Download default keys'; const GENERATE = 'Generate locally'; + let keysRegion = ''; + const questions: BasePromptOptions[] = [ { message: @@ -65,9 +75,20 @@ async function setupProverKeys(proverType: ProverType) { const results: any = await enquirer.prompt(questions); - await downloadCSR(proverType); + const proverKeysQuestions: BasePromptOptions[] = [ 
+ { + message: 'From which s3 region download ceremony (CSR) file and/or Prover Keys?', + name: 'proverKeys', + type: 'select', + required: true, + choices: [KeysRegionOption.US, KeysRegionOption.EU, KeysRegionOption.ASIA] + } + ]; + const proverKeysResults: any = await enquirer.prompt(proverKeysQuestions); + keysRegion = proverKeysResults.proverKeys; + await downloadCSR(proverType, keysRegion); if (results.proverKeys == DOWNLOAD) { - await downloadDefaultSetupKeys(proverType); + await downloadDefaultSetupKeys(proverType, keysRegion); } else { await generateAllSetupData(proverType); } @@ -165,17 +186,20 @@ async function generateSetupDataForRecursiveLayers(proverType: ProverType) { async function generateSetupData(isBaseLayer: boolean, proverType: ProverType) { const currentEnv = env.get(); - fs.mkdirSync(`${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/`, { - recursive: true - }); - process.chdir(`${process.env.ZKSYNC_HOME}/prover`); - await utils.spawn( - `for i in {1..${isBaseLayer ? '13' : '15'}}; do zk f cargo run ${ - proverType == ProverType.GPU ? '--features "gpu"' : '' - } --release --bin zksync_setup_data_generator_fri -- --numeric-circuit $i ${ + + const proverKeysDir = `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/`; + fs.mkdirSync(proverKeysDir, { recursive: true }); + const proverDir = `${process.env.ZKSYNC_HOME}/prover`; + process.chdir(proverDir); + const range = isBaseLayer ? 13 : 15; + const gpuFeatureFlag = proverType == ProverType.GPU ? '--features "gpu"' : ''; + for (let i = 1; i <= range; i++) { + const spawnCommand = `zk f cargo run ${gpuFeatureFlag} --release --bin zksync_setup_data_generator_fri -- --numeric-circuit ${i} ${ isBaseLayer ? 
'--is_base_layer' : '' - }; done` - ); + }`; + await utils.spawn(spawnCommand); + } + process.chdir(process.env.ZKSYNC_HOME as string); } @@ -184,7 +208,7 @@ async function generateAllSetupData(proverType: ProverType) { await generateSetupDataForRecursiveLayers(proverType); } -async function downloadDefaultSetupKeys(proverType: ProverType, region: 'us' | 'asia' | 'europe' = 'us') { +async function downloadDefaultSetupKeys(proverType: ProverType, region: string) { const proverKeysUrls = require(`${process.env.ZKSYNC_HOME}/prover/setup-data-${proverType}-keys.json`); const currentEnv = env.get(); await downloadFilesFromGCP( @@ -216,14 +240,16 @@ async function downloadFilesFromGCP(gcpUri: string, destination: string): Promis fs.mkdirSync(destination, { recursive: true }); process.chdir(destination); - const length = files.length; - for (const index in files) { - console.log(chalk.yellow(`Downloading file ${Number(index) + 1} of ${length}`)); - const file = files[index]; - await utils.spawn(`wget -c ${file}`); - await utils.sleep(1); - console.log(``); - } + // Download all files in parallel + await Promise.all( + files.map((file, index) => { + return (async () => { + console.log(chalk.yellow(`Downloading file ${index + 1} of ${files.length}`)); + await utils.spawn(`wget -q --show-progress -c "${file}"`); + await utils.sleep(1); + })(); + }) + ); process.chdir(process.env.ZKSYNC_HOME as string); } diff --git a/infrastructure/zk/src/up.ts b/infrastructure/zk/src/up.ts index 5057f4ca9d3..94be874dc38 100644 --- a/infrastructure/zk/src/up.ts +++ b/infrastructure/zk/src/up.ts @@ -8,9 +8,13 @@ function createVolumes() { fs.mkdirSync(`${process.env.ZKSYNC_HOME}/volumes/postgres`, { recursive: true }); } -export async function up() { +export async function up(composeFile?: string) { createVolumes(); - await utils.spawn('docker-compose up -d geth postgres'); + if (composeFile) { + await utils.spawn(`docker compose -f ${composeFile} up -d geth postgres`); + } else { + await 
utils.spawn('docker compose up -d geth postgres'); + } } export const command = new Command('up').description('start development containers').action(up); From ec5907b70ff7d868a05b685a1641d96dc4fa9d69 Mon Sep 17 00:00:00 2001 From: Lech <88630083+Artemka374@users.noreply.github.com> Date: Wed, 6 Dec 2023 16:57:06 +0200 Subject: [PATCH 111/115] fix: Cursor not moving correctly after poll in `get_filter_changes` (#546) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ When polling filter changes, add 1 to actual from_block value ## Why ❔ Otherwise, last block that was included in poll will be included to the next one. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Fedor Sakharov --- core/lib/dal/sqlx-data.json | 54 +++++++++---------- core/lib/dal/src/blocks_web3_dal.rs | 6 +-- core/lib/dal/src/events_web3_dal.rs | 13 ++--- core/lib/types/src/api/mod.rs | 2 +- .../src/api_server/web3/namespaces/eth.rs | 45 ++++++++++------ .../src/api_server/web3/namespaces/zks.rs | 2 +- .../zksync_core/src/api_server/web3/state.rs | 3 +- .../src/api_server/web3/tests/mod.rs | 24 ++++----- 8 files changed, 76 insertions(+), 73 deletions(-) diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json index 9084adb61cd..2d1773482ea 100644 --- a/core/lib/dal/sqlx-data.json +++ b/core/lib/dal/sqlx-data.json @@ -1928,6 +1928,33 @@ }, "query": "SELECT * from prover_jobs where id=$1" }, + "2044947d6d29f29cda508b2160c39f74a8bfd524afa2ffc20a98ae039bc86ed7": { + "describe": { + "columns": [ + { + "name": "number", + "ordinal": 0, + "type_info": "Int8" + }, + { + "name": "hash", + "ordinal": 1, + "type_info": "Bytea" + } + ], + "nullable": [ + false, + false + ], + "parameters": { + "Left": [ 
+ "Int8", + "Int8" + ] + } + }, + "query": "SELECT number, hash FROM miniblocks WHERE number >= $1 ORDER BY number ASC LIMIT $2" + }, "20b22fd457417e9a72f5941887448f9a11b97b449db4759da0b9d368ce93996b": { "describe": { "columns": [ @@ -9492,33 +9519,6 @@ }, "query": "\n SELECT id, circuit_input_blob_url FROM prover_jobs\n WHERE status='successful'\n AND circuit_input_blob_url is NOT NULL\n AND updated_at < NOW() - INTERVAL '30 days'\n LIMIT $1;\n " }, - "b479b7d3334f8d4566c294a44e2adb282fbc66a87be5c248c65211c2a8a07db0": { - "describe": { - "columns": [ - { - "name": "number", - "ordinal": 0, - "type_info": "Int8" - }, - { - "name": "hash", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - } - }, - "query": "SELECT number, hash FROM miniblocks WHERE number > $1 ORDER BY number ASC LIMIT $2" - }, "b4a3c902646725188f7c79ebac992cdce5896fc6fcc9f485c0cba9d90c4c982c": { "describe": { "columns": [ diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index e42a645966f..0c2a8b4e188 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -161,15 +161,15 @@ impl BlocksWeb3Dal<'_, '_> { })) } - /// Returns hashes of blocks with numbers greater than `from_block` and the number of the last block. - pub async fn get_block_hashes_after( + /// Returns hashes of blocks with numbers starting from `from_block` and the number of the last block. 
+ pub async fn get_block_hashes_since( &mut self, from_block: MiniblockNumber, limit: usize, ) -> sqlx::Result<(Vec, Option)> { let rows = sqlx::query!( "SELECT number, hash FROM miniblocks \ - WHERE number > $1 \ + WHERE number >= $1 \ ORDER BY number ASC \ LIMIT $2", from_block.0 as i64, diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs index 82a65c18444..7cdf2dba646 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -6,9 +6,7 @@ use zksync_types::{ }; use crate::{ - instrument::InstrumentExt, - models::{storage_block::web3_block_number_to_sql, storage_event::StorageWeb3Log}, - SqlxError, StorageProcessor, + instrument::InstrumentExt, models::storage_event::StorageWeb3Log, SqlxError, StorageProcessor, }; #[derive(Debug)] @@ -119,10 +117,8 @@ impl EventsWeb3Dal<'_, '_> { let mut where_sql = format!("(miniblock_number >= {})", filter.from_block.0 as i64); - if let Some(to_block) = filter.to_block { - let block_sql = web3_block_number_to_sql(to_block); - where_sql += &format!(" AND (miniblock_number <= {})", block_sql); - } + where_sql += &format!(" AND (miniblock_number <= {})", filter.to_block.0 as i64); + if !filter.addresses.is_empty() { where_sql += &format!(" AND (address = ANY(${}))", arg_index); arg_index += 1; @@ -172,7 +168,6 @@ impl EventsWeb3Dal<'_, '_> { #[cfg(test)] mod tests { - use zksync_types::api::BlockNumber; use zksync_types::{Address, H256}; use super::*; @@ -185,7 +180,7 @@ mod tests { let events_web3_dal = EventsWeb3Dal { storage }; let filter = GetLogsFilter { from_block: MiniblockNumber(100), - to_block: Some(BlockNumber::Number(200.into())), + to_block: MiniblockNumber(200), addresses: vec![Address::from_low_u64_be(123)], topics: vec![(0, vec![H256::from_low_u64_be(456)])], }; diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 24ac74ab335..1ad54ce6d1a 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ 
-548,7 +548,7 @@ pub struct TransactionDetails { #[derive(Debug, Clone)] pub struct GetLogsFilter { pub from_block: MiniblockNumber, - pub to_block: Option, + pub to_block: MiniblockNumber, pub addresses: Vec
, pub topics: Vec<(u32, Vec)>, } diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs index 4cabb8e15da..0aa9255c3db 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs @@ -564,7 +564,7 @@ impl EthNamespace { .installed_filters .lock() .await - .add(TypedFilter::Blocks(last_block_number)); + .add(TypedFilter::Blocks(last_block_number + 1)); method_latency.observe(); Ok(idx) } @@ -773,14 +773,19 @@ impl EthNamespace { .map_err(|err| internal_error(METHOD_NAME, err))?; let (block_hashes, last_block_number) = conn .blocks_web3_dal() - .get_block_hashes_after(*from_block, self.state.api_config.req_entities_limit) + .get_block_hashes_since(*from_block, self.state.api_config.req_entities_limit) .await .map_err(|err| internal_error(METHOD_NAME, err))?; - *from_block = last_block_number.unwrap_or(*from_block); + + *from_block = match last_block_number { + Some(last_block_number) => last_block_number + 1, + None => *from_block, + }; + FilterChanges::Hashes(block_hashes) } - TypedFilter::PendingTransactions(from_timestamp) => { + TypedFilter::PendingTransactions(from_timestamp_excluded) => { let mut conn = self .state .connection_pool @@ -790,12 +795,14 @@ impl EthNamespace { let (tx_hashes, last_timestamp) = conn .transactions_web3_dal() .get_pending_txs_hashes_after( - *from_timestamp, + *from_timestamp_excluded, Some(self.state.api_config.req_entities_limit), ) .await .map_err(|err| internal_error(METHOD_NAME, err))?; - *from_timestamp = last_timestamp.unwrap_or(*from_timestamp); + + *from_timestamp_excluded = last_timestamp.unwrap_or(*from_timestamp_excluded); + FilterChanges::Hashes(tx_hashes) } @@ -816,16 +823,26 @@ impl EthNamespace { } else { vec![] }; + + let mut to_block = self + .state + .resolve_filter_block_number(filter.to_block) + .await?; + + if matches!(filter.to_block, 
Some(BlockNumber::Number(_))) { + to_block = to_block.min( + self.state + .resolve_filter_block_number(Some(BlockNumber::Latest)) + .await?, + ); + } + let get_logs_filter = GetLogsFilter { from_block: *from_block, - to_block: filter.to_block, + to_block, addresses, topics, }; - let to_block = self - .state - .resolve_filter_block_number(filter.to_block) - .await?; let mut storage = self .state @@ -859,11 +876,7 @@ impl EthNamespace { .get_logs(get_logs_filter, i32::MAX as usize) .await .map_err(|err| internal_error(METHOD_NAME, err))?; - *from_block = logs - .last() - .map(|log| MiniblockNumber(log.block_number.unwrap().as_u32())) - .unwrap_or(*from_block); - // FIXME: why is `from_block` not updated? + *from_block = to_block + 1; FilterChanges::Logs(logs) } }; diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 7f38c6afc52..9e3a90dde04 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -283,7 +283,7 @@ impl ZksNamespace { .get_logs( GetLogsFilter { from_block: first_miniblock_of_l1_batch, - to_block: Some(block_number.0.into()), + to_block: block_number, addresses: vec![L1_MESSENGER_ADDRESS], topics: vec![(2, vec![address_to_h256(&sender)]), (3, vec![msg])], }, diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index 0463d482320..ea52b2ae61c 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -505,9 +505,10 @@ impl RpcState { .enumerate() .filter_map(|(idx, topics)| topics.map(|topics| (idx as u32 + 1, topics.0))) .collect(); + let get_logs_filter = GetLogsFilter { from_block, - to_block: filter.to_block, + to_block, addresses, topics, }; diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs 
index 12bb6481213..1bb14df52fa 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -386,10 +386,10 @@ impl HttpTest for LogFilterChanges { assert_logs_match(&topics_logs, &[events[1], events[3]]); let new_all_logs = client.get_filter_changes(all_logs_filter_id).await?; - let FilterChanges::Logs(new_all_logs) = new_all_logs else { + let FilterChanges::Hashes(new_all_logs) = new_all_logs else { panic!("Unexpected getFilterChanges output: {:?}", new_all_logs); }; - assert_eq!(new_all_logs, all_logs); // FIXME(#546): update test after behavior is fixed + assert!(new_all_logs.is_empty()); Ok(()) } } @@ -458,11 +458,10 @@ impl HttpTest for LogFilterChangesWithBlockBoundaries { }; assert_logs_match(&lower_bound_logs, &new_events); - // FIXME(#546): update test after behavior is fixed let new_upper_bound_logs = client.get_filter_changes(upper_bound_filter_id).await?; - assert_eq!(new_upper_bound_logs, FilterChanges::Logs(upper_bound_logs)); + assert_matches!(new_upper_bound_logs, FilterChanges::Hashes(hashes) if hashes.is_empty()); let new_bounded_logs = client.get_filter_changes(bounded_filter_id).await?; - assert_eq!(new_bounded_logs, FilterChanges::Logs(bounded_logs)); + assert_matches!(new_bounded_logs, FilterChanges::Hashes(hashes) if hashes.is_empty()); // Add miniblock #3. It should not be picked up by the bounded and upper bound filters, // and should be picked up by the lower bound filter. 
@@ -472,27 +471,22 @@ impl HttpTest for LogFilterChangesWithBlockBoundaries { let new_events: Vec<_> = new_events.iter().collect(); let bounded_logs = client.get_filter_changes(bounded_filter_id).await?; - let FilterChanges::Logs(bounded_logs) = bounded_logs else { + let FilterChanges::Hashes(bounded_logs) = bounded_logs else { panic!("Unexpected getFilterChanges output: {:?}", bounded_logs); }; - assert!(bounded_logs - .iter() - .all(|log| log.block_number.unwrap() < 3.into())); + assert!(bounded_logs.is_empty()); let upper_bound_logs = client.get_filter_changes(upper_bound_filter_id).await?; - let FilterChanges::Logs(upper_bound_logs) = upper_bound_logs else { + let FilterChanges::Hashes(upper_bound_logs) = upper_bound_logs else { panic!("Unexpected getFilterChanges output: {:?}", upper_bound_logs); }; - assert!(upper_bound_logs - .iter() - .all(|log| log.block_number.unwrap() < 3.into())); + assert!(upper_bound_logs.is_empty()); let lower_bound_logs = client.get_filter_changes(lower_bound_filter_id).await?; let FilterChanges::Logs(lower_bound_logs) = lower_bound_logs else { panic!("Unexpected getFilterChanges output: {:?}", lower_bound_logs); }; - let start_idx = lower_bound_logs.len() - 4; - assert_logs_match(&lower_bound_logs[start_idx..], &new_events); + assert_logs_match(&lower_bound_logs, &new_events); Ok(()) } } From 8a8cad6ce62f2d34bb34adcd956f6920c08f94b8 Mon Sep 17 00:00:00 2001 From: Dustin Brickwood Date: Wed, 6 Dec 2023 12:35:47 -0600 Subject: [PATCH 112/115] fix: update google cloud dependencies that do not depend on rsa (#622) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR updates the dependencies of `google-cloud-storage` and `google-cloud-auth`. 
The changes are as follows: - From google-cloud-storage = "0.12.0" to google-cloud-storage = "0.15.0" - From google-cloud-auth = "0.11.0" to google-cloud-auth = "0.13.0" Relevant google-cloud changes: https://github.com/yoshidan/google-cloud-rust/pull/217 ## Why ❔ The primary reason for these updates is to address a security vulnerability associated with the `rsa` crate, as indicated by a recent `cargo-deny` check. The vulnerability (Marvin Attack, RUSTSEC-2023-0071) was detected in rsa v0.6.1, which is a dependency of `google-cloud-storage v0.12.0`. By updating to `google-cloud-storage v0.15.0`, we eliminate the use of the `rsa` crate, as the newer version of `google-cloud-storage` does not depend on it. Similarly, `google-cloud-auth` is updated for compatibility. Cargo deny output: ``` error[vulnerability]: Marvin Attack: potential key recovery through timing sidechannels ┌─ /Users/dustinbrickwood/Documents/dev/dut/forks/foundry-zksync/Cargo.lock:759:1 │ 759 │ rsa 0.6.1 registry+https://github.com/rust-lang/crates.io-index │ --------------------------------------------------------------- security vulnerability detected │ = ID: RUSTSEC-2023-0071 = Advisory: https://rustsec.org/advisories/RUSTSEC-2023-0071 = ### Impact Due to a non-constant-time implementation, information about the private key is leaked through timing information which is observable over the network. An attacker may be able to use that information to recover the key. ### Patches No patch is yet available, however work is underway to migrate to a fully constant-time implementation. ### Workarounds The only currently available workaround is to avoid using the `rsa` crate in settings where attackers are able to observe timing information, e.g. local use on a non-compromised computer is fine. ### References This vulnerability was discovered as part of the "[Marvin Attack]", which revealed several implementations of RSA including OpenSSL had not properly mitigated timing sidechannel attacks. 
[Marvin Attack]: https://people.redhat.com/~hkario/marvin/ = Announcement: https://github.com/RustCrypto/RSA/issues/19#issuecomment-1822995643 = Solution: No safe upgrade is available! = rsa v0.6.1 └── google-cloud-storage v0.12.0 └── zksync_object_store v0.1.0 ├── zksync_core v0.1.0 │ └── era_test_node v0.1.0-alpha.12 │ └── era_revm v0.0.1-alpha │ ├── foundry-common v0.2.0 │ │ ├── anvil v0.2.0 │ │ │ ├── (dev) forge v0.2.0 │ │ │ └── (dev) zkforge v0.2.0 │ │ ├── cast v0.2.0 │ │ ├── chisel v0.2.0 │ │ ├── forge v0.2.0 (*) │ │ ├── foundry-cli v0.2.0 │ │ │ ├── cast v0.2.0 (*) │ │ │ ├── chisel v0.2.0 (*) │ │ │ ├── forge v0.2.0 (*) │ │ │ ├── zkcast v0.2.0 │ │ │ │ └── zkforge v0.2.0 (*) │ │ │ └── zkforge v0.2.0 (*) │ │ ├── foundry-debugger v0.2.0 │ │ │ ├── forge v0.2.0 (*) │ │ │ ├── foundry-cli v0.2.0 (*) │ │ │ └── zkforge v0.2.0 (*) │ │ ├── foundry-evm v0.2.0 │ │ │ ├── anvil v0.2.0 (*) │ │ │ ├── anvil-core v0.2.0 │ │ │ │ └── anvil v0.2.0 (*) │ │ │ ├── cast v0.2.0 (*) │ │ │ ├── chisel v0.2.0 (*) │ │ │ ├── forge v0.2.0 (*) │ │ │ ├── foundry-cli v0.2.0 (*) │ │ │ ├── foundry-debugger v0.2.0 (*) │ │ │ ├── zkcast v0.2.0 (*) │ │ │ └── zkforge v0.2.0 (*) │ │ ├── foundry-test-utils v0.2.0 │ │ │ ├── (dev) cast v0.2.0 (*) │ │ │ ├── (dev) forge v0.2.0 (*) │ │ │ ├── (dev) zkcast v0.2.0 (*) │ │ │ └── (dev) zkforge v0.2.0 (*) │ │ ├── (dev) foundry-utils v0.2.0 │ │ │ ├── anvil v0.2.0 (*) │ │ │ ├── anvil-core v0.2.0 (*) │ │ │ ├── cast v0.2.0 (*) │ │ │ ├── chisel v0.2.0 (*) │ │ │ ├── forge v0.2.0 (*) │ │ │ ├── forge-doc v0.2.0 │ │ │ │ ├── forge v0.2.0 (*) │ │ │ │ └── zkforge v0.2.0 (*) │ │ │ ├── foundry-cli v0.2.0 (*) │ │ │ ├── foundry-debugger v0.2.0 (*) │ │ │ ├── (dev) foundry-evm v0.2.0 (*) │ │ │ ├── foundry-test-utils v0.2.0 (*) │ │ │ ├── zkcast v0.2.0 (*) │ │ │ └── zkforge v0.2.0 (*) │ │ ├── zkcast v0.2.0 (*) │ │ └── zkforge v0.2.0 (*) │ └── foundry-evm v0.2.0 (*) └── zksync_prover_utils v0.1.0 ├── zksync_core v0.1.0 (*) └── zksync_verification_key_generator_and_server v0.1.0 └── 
zksync_core v0.1.0 (*) ``` ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --- Cargo.lock | 161 +++++++++---------------------- core/lib/object_store/Cargo.toml | 4 +- deny.toml | 1 - 3 files changed, 47 insertions(+), 119 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3fbf3deb35e..ec650188c8a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1436,12 +1436,6 @@ dependencies = [ "windows-sys 0.45.0", ] -[[package]] -name = "const-oid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" - [[package]] name = "const-oid" version = "0.9.5" @@ -1748,16 +1742,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-bigint" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" -dependencies = [ - "generic-array 0.14.7", - "subtle", -] - [[package]] name = "crypto-bigint" version = "0.4.9" @@ -1948,24 +1932,13 @@ dependencies = [ "uuid", ] -[[package]] -name = "der" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" -dependencies = [ - "const-oid 0.7.1", - "crypto-bigint 0.3.2", - "pem-rfc7468", -] - [[package]] name = "der" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ - 
"const-oid 0.9.5", + "const-oid", "zeroize", ] @@ -1975,7 +1948,8 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ - "const-oid 0.9.5", + "const-oid", + "pem-rfc7468", "zeroize", ] @@ -2797,9 +2771,9 @@ dependencies = [ [[package]] name = "google-cloud-auth" -version = "0.11.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644f40175857d0b8d7b6cad6cd9594284da5041387fa2ddff30ab6d8faef65eb" +checksum = "af1087f1fbd2dd3f58c17c7574ddd99cd61cbbbc2c4dc81114b8687209b196cb" dependencies = [ "async-trait", "base64 0.21.5", @@ -2819,9 +2793,9 @@ dependencies = [ [[package]] name = "google-cloud-metadata" -version = "0.3.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" +checksum = "cc279bfb50487d7bcd900e8688406475fc750fe474a835b2ab9ade9eb1fc90e2" dependencies = [ "reqwest", "thiserror", @@ -2830,11 +2804,12 @@ dependencies = [ [[package]] name = "google-cloud-storage" -version = "0.12.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "215abab97e07d144428425509c1dad07e57ea72b84b21bcdb6a8a5f12a5c4932" +checksum = "ac04b29849ebdeb9fb008988cc1c4d1f0c9d121b4c7f1ddeb8061df124580e93" dependencies = [ "async-stream", + "async-trait", "base64 0.21.5", "bytes 1.5.0", "futures-util", @@ -2844,10 +2819,10 @@ dependencies = [ "hex", "once_cell", "percent-encoding", + "pkcs8 0.10.2", "regex", "reqwest", - "ring", - "rsa", + "ring 0.17.7", "serde", "serde_json", "sha2 0.10.8", @@ -3722,7 +3697,7 @@ checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" dependencies = [ "base64 0.21.5", "pem", - "ring", + "ring 0.16.20", "serde", "serde_json", "simple_asn1", @@ -3770,9 +3745,6 @@ name = "lazy_static" version = "1.4.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -dependencies = [ - "spin", -] [[package]] name = "lazycell" @@ -4398,23 +4370,6 @@ dependencies = [ "serde", ] -[[package]] -name = "num-bigint-dig" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" -dependencies = [ - "byteorder", - "lazy_static", - "libm", - "num-integer", - "num-iter", - "num-traits", - "rand 0.8.5", - "smallvec", - "zeroize", -] - [[package]] name = "num-complex" version = "0.3.1" @@ -4906,9 +4861,9 @@ dependencies = [ [[package]] name = "pem-rfc7468" -version = "0.3.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" dependencies = [ "base64ct", ] @@ -5006,28 +4961,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs1" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320" -dependencies = [ - "der 0.5.1", - "pkcs8 0.8.0", - "zeroize", -] - -[[package]] -name = "pkcs8" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der 0.5.1", - "spki 0.5.4", - "zeroize", -] - [[package]] name = "pkcs8" version = "0.9.0" @@ -5882,12 +5815,26 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", - "untrusted", + "spin 0.5.2", + "untrusted 0.7.1", "web-sys", "winapi 0.3.9", ] +[[package]] +name = "ring" +version = "0.17.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +dependencies = [ + "cc", + "getrandom 0.2.10", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.48.0", +] + [[package]] name = "ripemd160" version = "0.9.1" @@ -5931,26 +5878,6 @@ dependencies = [ "zksync_storage", ] -[[package]] -name = "rsa" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" -dependencies = [ - "byteorder", - "digest 0.10.7", - "num-bigint-dig", - "num-integer", - "num-iter", - "num-traits", - "pkcs1", - "pkcs8 0.8.0", - "rand_core 0.6.4", - "smallvec", - "subtle", - "zeroize", -] - [[package]] name = "rustc-demangle" version = "0.1.23" @@ -5998,7 +5925,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", - "ring", + "ring 0.16.20", "rustls-webpki", "sct", ] @@ -6030,8 +5957,8 @@ version = "0.101.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c7d5dece342910d9ba34d259310cae3e0154b873b35408b787b59bce53d34fe" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -6101,8 +6028,8 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ - "ring", - "untrusted", + "ring 0.16.20", + "untrusted 0.7.1", ] [[package]] @@ -6680,14 +6607,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] -name = "spki" -version = "0.5.4" +name = "spin" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" -dependencies = [ - "base64ct", - "der 0.5.1", -] +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "spki" @@ -7638,6 +7561,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "ureq" version = "2.8.0" diff --git a/core/lib/object_store/Cargo.toml b/core/lib/object_store/Cargo.toml index 941674d6e50..20f52a995a8 100644 --- a/core/lib/object_store/Cargo.toml +++ b/core/lib/object_store/Cargo.toml @@ -17,8 +17,8 @@ zksync_types = { path = "../types" } anyhow = "1.0" async-trait = "0.1" bincode = "1" -google-cloud-storage = "0.12.0" -google-cloud-auth = "0.11.0" +google-cloud-storage = "0.15.0" +google-cloud-auth = "0.13.0" http = "0.2.9" tokio = { version = "1.21.2", features = ["full"] } tracing = "0.1" diff --git a/deny.toml b/deny.toml index 7fa3c835088..b50b165b72f 100644 --- a/deny.toml +++ b/deny.toml @@ -8,7 +8,6 @@ yanked = "warn" notice = "warn" ignore = [ "RUSTSEC-2023-0018", - "RUSTSEC-2023-0071" ] [licenses] From 43c09644254e574ad06e49040f3ca6c2a811f866 Mon Sep 17 00:00:00 2001 From: Thomas Knauth Date: Thu, 7 Dec 2023 08:26:47 +0100 Subject: [PATCH 113/115] docs: Include command to create rich L2 wallets. (#569) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Improve documentation by including the command to create rich L2 wallets. ## Why ❔ Save other people time figuring out the exact invocation. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [] Tests for the changes have been added / updated. 
- [x] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. - [ ] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --- infrastructure/local-setup-preparation/README.md | 6 ++++++ infrastructure/local-setup-preparation/src/index.ts | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/infrastructure/local-setup-preparation/README.md b/infrastructure/local-setup-preparation/README.md index 6fbbb7b3f37..fe59f930bf4 100644 --- a/infrastructure/local-setup-preparation/README.md +++ b/infrastructure/local-setup-preparation/README.md @@ -2,3 +2,9 @@ This project contains scripts that should be executed when preparing the zkSync local setup used by outside developers, e.g. deposit ETH to some of the test accounts. + +With the server running (`zk server`), execute the following from `$ZKSYNC_HOME` to fund the L2 wallets + +``` +zk f bash -c 'cd infrastructure/local-setup-preparation ; yarn start' +``` diff --git a/infrastructure/local-setup-preparation/src/index.ts b/infrastructure/local-setup-preparation/src/index.ts index 585df599f82..435cc26aa38 100644 --- a/infrastructure/local-setup-preparation/src/index.ts +++ b/infrastructure/local-setup-preparation/src/index.ts @@ -35,7 +35,7 @@ async function depositWithRichAccounts() { }; const balance = await wallet.getBalance(); - console.log(`Wallet balance is ${ethers.utils.formatEther(balance)} ETH`); + console.log(`Wallet ${wallet.address} balance is ${ethers.utils.formatEther(balance)} ETH`); // TODO: Currently we're providing zero as an operator fee, which works right now, // but will be changed in the future. 
From 53a6bcf8d4f51aa3363665fc93bdbd4dceb6041a Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 7 Dec 2023 10:50:04 +0200 Subject: [PATCH 114/115] chore: Enforce uniform import structure (#617) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ...using `zk fmt` command by suppling relevant command-line args to rustfmt. These args work on stable Rust (at least for now) despite being unstable. ## Why ❔ More structured imports are easier to read. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. --- core/bin/block_reverter/src/main.rs | 8 +- core/bin/contract-verifier/src/main.rs | 5 +- core/bin/contract-verifier/src/verifier.rs | 19 +-- .../bin/contract-verifier/src/zksolc_utils.rs | 6 +- .../contract-verifier/src/zkvyper_utils.rs | 6 +- core/bin/external_node/src/config/mod.rs | 8 +- core/bin/external_node/src/main.rs | 7 +- .../src/main.rs | 5 +- core/bin/rocksdb_util/src/main.rs | 4 +- .../src/consistency.rs | 1 - .../storage_logs_dedup_migration/src/main.rs | 1 - .../src/intrinsic_costs.rs | 8 +- .../system-constants-generator/src/main.rs | 21 +-- .../system-constants-generator/src/utils.rs | 25 ++-- .../src/json_to_binary_vk_converter.rs | 4 +- .../src/lib.rs | 44 +++--- .../src/main.rs | 15 +- .../src/tests.rs | 14 +- core/bin/verified_sources_fetcher/src/main.rs | 1 + core/bin/zksync_server/src/main.rs | 11 +- core/lib/basic_types/src/lib.rs | 31 +++-- core/lib/circuit_breaker/src/l1_txs.rs | 3 +- core/lib/circuit_breaker/src/lib.rs | 1 - core/lib/config/src/configs/api.rs | 6 +- core/lib/config/src/configs/chain.rs | 10 +- .../config/src/configs/contract_verifier.rs | 3 +- core/lib/config/src/configs/database.rs | 4 +- core/lib/config/src/configs/eth_sender.rs | 4 +- core/lib/config/src/configs/eth_watch.rs | 
3 +- core/lib/config/src/configs/fetcher.rs | 3 +- .../src/configs/fri_proof_compressor.rs | 3 +- core/lib/config/src/configs/fri_prover.rs | 3 +- .../config/src/configs/fri_prover_gateway.rs | 3 +- .../config/src/configs/fri_prover_group.rs | 2 +- core/lib/config/src/configs/mod.rs | 31 +++-- .../config/src/configs/proof_data_handler.rs | 3 +- core/lib/config/src/configs/utils.rs | 4 +- core/lib/contracts/src/lib.rs | 11 +- core/lib/contracts/src/test_contracts.rs | 6 +- core/lib/crypto/src/hasher/blake2.rs | 2 +- core/lib/crypto/src/hasher/keccak.rs | 3 +- core/lib/crypto/src/hasher/sha256.rs | 2 +- .../src/basic_witness_input_producer_dal.rs | 12 +- core/lib/dal/src/blocks_dal.rs | 1 - core/lib/dal/src/blocks_web3_dal.rs | 16 ++- core/lib/dal/src/connection/holder.rs | 5 +- core/lib/dal/src/connection/mod.rs | 10 +- core/lib/dal/src/contract_verification_dal.rs | 14 +- core/lib/dal/src/eth_sender_dal.rs | 23 ++-- core/lib/dal/src/events_dal.rs | 9 +- core/lib/dal/src/events_web3_dal.rs | 1 - core/lib/dal/src/fri_gpu_prover_queue_dal.rs | 4 +- core/lib/dal/src/fri_proof_compressor_dal.rs | 20 +-- core/lib/dal/src/fri_protocol_versions_dal.rs | 3 +- .../fri_scheduler_dependency_tracker_dal.rs | 3 +- core/lib/dal/src/fri_witness_generator_dal.rs | 8 +- core/lib/dal/src/gpu_prover_queue_dal.rs | 7 +- core/lib/dal/src/healthcheck.rs | 1 - core/lib/dal/src/instrument.rs | 4 +- core/lib/dal/src/lib.rs | 60 +++----- core/lib/dal/src/metrics.rs | 4 +- core/lib/dal/src/models/storage_block.rs | 1 - core/lib/dal/src/models/storage_eth_tx.rs | 11 +- .../src/models/storage_protocol_version.rs | 4 +- .../dal/src/models/storage_prover_job_info.rs | 15 +- core/lib/dal/src/models/storage_sync.rs | 6 +- core/lib/dal/src/models/storage_token.rs | 1 - .../lib/dal/src/models/storage_transaction.rs | 31 +++-- .../models/storage_verification_request.rs | 10 +- .../src/models/storage_witness_job_info.rs | 14 +- core/lib/dal/src/proof_generation_dal.rs | 5 +- 
core/lib/dal/src/protocol_versions_dal.rs | 7 +- .../lib/dal/src/protocol_versions_web3_dal.rs | 3 +- core/lib/dal/src/prover_dal.rs | 3 +- core/lib/dal/src/storage_dal.rs | 6 +- core/lib/dal/src/storage_logs_dal.rs | 12 +- core/lib/dal/src/storage_logs_dedup_dal.rs | 6 +- core/lib/dal/src/tests/mod.rs | 20 +-- core/lib/dal/src/time_utils.rs | 4 +- core/lib/dal/src/tokens_dal.rs | 3 +- core/lib/dal/src/tokens_web3_dal.rs | 5 +- core/lib/dal/src/transactions_dal.rs | 7 +- core/lib/dal/src/transactions_web3_dal.rs | 18 +-- core/lib/dal/src/witness_generator_dal.rs | 21 ++- core/lib/env_config/src/alerts.rs | 3 +- core/lib/env_config/src/api.rs | 4 +- core/lib/env_config/src/chain.rs | 3 +- core/lib/env_config/src/contracts.rs | 3 +- core/lib/env_config/src/database.rs | 3 +- .../env_config/src/fri_proof_compressor.rs | 3 +- core/lib/env_config/src/test_utils.rs | 3 +- core/lib/env_config/src/utils.rs | 3 +- core/lib/eth_client/src/clients/http/mod.rs | 10 +- core/lib/eth_client/src/clients/http/query.rs | 14 +- .../eth_client/src/clients/http/signing.rs | 29 ++-- core/lib/eth_client/src/clients/mock.rs | 13 +- core/lib/eth_client/src/lib.rs | 9 +- core/lib/eth_signer/src/json_rpc_signer.rs | 25 ++-- core/lib/eth_signer/src/lib.rs | 9 +- core/lib/eth_signer/src/pk_signer.rs | 14 +- core/lib/eth_signer/src/raw_ethereum_tx.rs | 12 +- core/lib/health_check/src/lib.rs | 9 +- core/lib/mempool/src/mempool_store.rs | 3 +- core/lib/mempool/src/tests.rs | 22 +-- core/lib/mempool/src/types.rs | 8 +- .../lib/merkle_tree/examples/loadtest/main.rs | 13 +- core/lib/merkle_tree/examples/recovery.rs | 5 +- core/lib/merkle_tree/src/consistency.rs | 10 +- core/lib/merkle_tree/src/domain.rs | 14 +- core/lib/merkle_tree/src/errors.rs | 3 +- core/lib/merkle_tree/src/hasher/mod.rs | 13 +- core/lib/merkle_tree/src/hasher/nodes.rs | 3 +- core/lib/merkle_tree/src/lib.rs | 34 ++--- core/lib/merkle_tree/src/metrics.rs | 3 +- core/lib/merkle_tree/src/recovery.rs | 3 +- 
core/lib/merkle_tree/src/storage/mod.rs | 17 ++- core/lib/merkle_tree/src/storage/patch.rs | 4 +- core/lib/merkle_tree/src/storage/rocksdb.rs | 10 +- .../merkle_tree/src/storage/serialization.rs | 3 +- core/lib/merkle_tree/src/storage/tests.rs | 8 +- core/lib/merkle_tree/src/types/internal.rs | 3 +- core/lib/merkle_tree/src/types/mod.rs | 4 +- .../merkle_tree/tests/integration/common.rs | 3 +- .../tests/integration/consistency.rs | 2 +- .../merkle_tree/tests/integration/domain.rs | 5 +- .../tests/integration/merkle_tree.rs | 13 +- .../merkle_tree/tests/integration/recovery.rs | 3 +- core/lib/mini_merkle_tree/benches/tree.rs | 1 - core/lib/mini_merkle_tree/src/lib.rs | 4 +- core/lib/multivm/src/glue/tracers/mod.rs | 5 +- .../src/glue/types/vm/block_context_mode.rs | 3 +- .../src/glue/types/vm/vm_block_result.rs | 10 +- .../glue/types/vm/vm_tx_execution_result.rs | 7 +- .../traits/tracers/dyn_tracers/vm_1_3_3.rs | 6 +- .../traits/tracers/dyn_tracers/vm_1_4_0.rs | 6 +- core/lib/multivm/src/interface/traits/vm.rs | 24 ++-- .../src/interface/types/errors/halt.rs | 3 +- .../types/errors/tx_revert_reason.rs | 4 +- .../interface/types/inputs/l1_batch_env.rs | 2 +- .../types/outputs/execution_result.rs | 13 +- .../types/outputs/execution_state.rs | 6 +- .../src/interface/types/outputs/mod.rs | 15 +- core/lib/multivm/src/lib.rs | 23 ++-- .../multivm/src/tracers/call_tracer/mod.rs | 6 +- .../src/tracers/call_tracer/vm_latest/mod.rs | 19 +-- .../call_tracer/vm_refunds_enhancement/mod.rs | 21 +-- .../call_tracer/vm_virtual_blocks/mod.rs | 29 ++-- .../multivm/src/tracers/multivm_dispatcher.rs | 3 +- .../storage_invocation/vm_latest/mod.rs | 17 ++- .../vm_refunds_enhancement/mod.rs | 15 +- .../vm_virtual_blocks/mod.rs | 15 +- core/lib/multivm/src/tracers/validator/mod.rs | 15 +- .../multivm/src/tracers/validator/types.rs | 8 +- .../src/tracers/validator/vm_latest/mod.rs | 34 ++--- .../validator/vm_refunds_enhancement/mod.rs | 33 +++-- .../validator/vm_virtual_blocks/mod.rs | 30 
++-- .../vm_1_3_2/errors/vm_revert_reason.rs | 6 +- .../src/versions/vm_1_3_2/event_sink.rs | 10 +- .../src/versions/vm_1_3_2/history_recorder.rs | 6 +- .../multivm/src/versions/vm_1_3_2/memory.rs | 22 +-- core/lib/multivm/src/versions/vm_1_3_2/mod.rs | 31 +++-- .../src/versions/vm_1_3_2/oracle_tools.rs | 17 ++- .../versions/vm_1_3_2/oracles/decommitter.rs | 18 ++- .../src/versions/vm_1_3_2/oracles/mod.rs | 7 +- .../versions/vm_1_3_2/oracles/precompile.rs | 7 +- .../src/versions/vm_1_3_2/oracles/storage.rs | 19 ++- .../vm_1_3_2/oracles/tracer/bootloader.rs | 16 ++- .../versions/vm_1_3_2/oracles/tracer/call.rs | 36 ++--- .../versions/vm_1_3_2/oracles/tracer/mod.rs | 26 ++-- .../vm_1_3_2/oracles/tracer/one_tx.rs | 20 +-- .../oracles/tracer/transaction_result.rs | 18 +-- .../versions/vm_1_3_2/oracles/tracer/utils.rs | 21 +-- .../vm_1_3_2/oracles/tracer/validation.rs | 24 ++-- .../src/versions/vm_1_3_2/pubdata_utils.rs | 16 ++- .../multivm/src/versions/vm_1_3_2/refunds.rs | 11 +- .../src/versions/vm_1_3_2/test_utils.rs | 6 +- .../src/versions/vm_1_3_2/transaction_data.rs | 14 +- .../multivm/src/versions/vm_1_3_2/utils.rs | 13 +- core/lib/multivm/src/versions/vm_1_3_2/vm.rs | 33 +++-- .../src/versions/vm_1_3_2/vm_instance.rs | 81 ++++++----- .../versions/vm_1_3_2/vm_with_bootloader.rs | 6 +- .../vm_latest/bootloader_state/l2_block.rs | 12 +- .../vm_latest/bootloader_state/state.rs | 26 ++-- .../versions/vm_latest/bootloader_state/tx.rs | 3 +- .../vm_latest/bootloader_state/utils.rs | 27 ++-- .../src/versions/vm_latest/constants.rs | 8 +- .../vm_latest/implementation/bytecode.rs | 11 +- .../vm_latest/implementation/execution.rs | 22 +-- .../versions/vm_latest/implementation/gas.rs | 7 +- .../versions/vm_latest/implementation/logs.rs | 20 +-- .../vm_latest/implementation/snapshots.rs | 3 +- .../vm_latest/implementation/statistics.rs | 10 +- .../versions/vm_latest/implementation/tx.rs | 17 ++- .../lib/multivm/src/versions/vm_latest/mod.rs | 41 +++--- 
.../versions/vm_latest/old_vm/event_sink.rs | 12 +- .../vm_latest/old_vm/history_recorder.rs | 8 +- .../src/versions/vm_latest/old_vm/memory.rs | 24 ++-- .../vm_latest/old_vm/oracles/decommitter.rs | 22 ++- .../vm_latest/old_vm/oracles/precompile.rs | 7 +- .../src/versions/vm_latest/old_vm/utils.rs | 19 ++- .../src/versions/vm_latest/oracles/storage.rs | 28 ++-- .../versions/vm_latest/tests/bootloader.rs | 18 +-- .../vm_latest/tests/bytecode_publishing.rs | 14 +- .../versions/vm_latest/tests/call_tracer.rs | 22 ++- .../versions/vm_latest/tests/default_aa.rs | 22 ++- .../src/versions/vm_latest/tests/gas_limit.rs | 16 +-- .../vm_latest/tests/get_used_contracts.rs | 22 +-- .../vm_latest/tests/is_write_initial.rs | 14 +- .../vm_latest/tests/l1_tx_execution.rs | 25 ++-- .../src/versions/vm_latest/tests/l2_blocks.rs | 28 ++-- .../versions/vm_latest/tests/nonce_holder.rs | 21 ++- .../src/versions/vm_latest/tests/refunds.rs | 17 ++- .../vm_latest/tests/require_eip712.rs | 26 ++-- .../src/versions/vm_latest/tests/rollbacks.rs | 29 ++-- .../vm_latest/tests/simple_execution.rs | 10 +- .../vm_latest/tests/tester/inner_state.rs | 18 ++- .../tests/tester/transaction_test_info.rs | 12 +- .../vm_latest/tests/tester/vm_tester.rs | 41 +++--- .../tests/tracing_execution_error.rs | 15 +- .../src/versions/vm_latest/tests/upgrade.rs | 36 ++--- .../src/versions/vm_latest/tests/utils.rs | 15 +- .../vm_latest/tracers/default_tracers.rs | 47 ++++--- .../versions/vm_latest/tracers/dispatcher.rs | 15 +- .../vm_latest/tracers/pubdata_tracer.rs | 34 ++--- .../src/versions/vm_latest/tracers/refunds.rs | 44 +++--- .../vm_latest/tracers/result_tracer.rs | 30 ++-- .../src/versions/vm_latest/tracers/traits.rs | 17 ++- .../src/versions/vm_latest/tracers/utils.rs | 22 +-- .../vm_latest/types/internals/pubdata.rs | 2 +- .../types/internals/transaction_data.rs | 21 +-- .../vm_latest/types/internals/vm_state.rs | 42 +++--- .../src/versions/vm_latest/types/l1_batch.rs | 3 +- 
.../src/versions/vm_latest/utils/l2_blocks.rs | 10 +- .../src/versions/vm_latest/utils/logs.rs | 7 +- .../src/versions/vm_latest/utils/overhead.rs | 10 +- .../vm_latest/utils/transaction_encoding.rs | 3 +- core/lib/multivm/src/versions/vm_latest/vm.rs | 32 +++-- .../versions/vm_m5/errors/vm_revert_reason.rs | 6 +- .../multivm/src/versions/vm_m5/event_sink.rs | 7 +- .../src/versions/vm_m5/history_recorder.rs | 5 +- core/lib/multivm/src/versions/vm_m5/memory.rs | 18 ++- core/lib/multivm/src/versions/vm_m5/mod.rs | 27 ++-- .../src/versions/vm_m5/oracle_tools.rs | 19 +-- .../src/versions/vm_m5/oracles/decommitter.rs | 19 ++- .../multivm/src/versions/vm_m5/oracles/mod.rs | 7 +- .../src/versions/vm_m5/oracles/precompile.rs | 7 +- .../src/versions/vm_m5/oracles/storage.rs | 29 ++-- .../src/versions/vm_m5/oracles/tracer.rs | 23 ++-- .../src/versions/vm_m5/pubdata_utils.rs | 23 ++-- .../lib/multivm/src/versions/vm_m5/refunds.rs | 12 +- .../lib/multivm/src/versions/vm_m5/storage.rs | 5 +- .../multivm/src/versions/vm_m5/test_utils.rs | 11 +- .../src/versions/vm_m5/transaction_data.rs | 15 +- core/lib/multivm/src/versions/vm_m5/utils.rs | 10 +- core/lib/multivm/src/versions/vm_m5/vm.rs | 36 ++--- .../multivm/src/versions/vm_m5/vm_instance.rs | 83 ++++++----- .../src/versions/vm_m5/vm_with_bootloader.rs | 6 +- .../versions/vm_m6/errors/vm_revert_reason.rs | 6 +- .../multivm/src/versions/vm_m6/event_sink.rs | 12 +- .../src/versions/vm_m6/history_recorder.rs | 5 +- core/lib/multivm/src/versions/vm_m6/memory.rs | 22 +-- .../src/versions/vm_m6/oracle_tools.rs | 20 +-- .../src/versions/vm_m6/oracles/decommitter.rs | 18 +-- .../multivm/src/versions/vm_m6/oracles/mod.rs | 7 +- .../src/versions/vm_m6/oracles/precompile.rs | 7 +- .../src/versions/vm_m6/oracles/storage.rs | 28 ++-- .../vm_m6/oracles/tracer/bootloader.rs | 16 ++- .../src/versions/vm_m6/oracles/tracer/call.rs | 42 +++--- .../src/versions/vm_m6/oracles/tracer/mod.rs | 26 ++-- .../versions/vm_m6/oracles/tracer/one_tx.rs | 20 
+-- .../oracles/tracer/transaction_result.rs | 18 +-- .../versions/vm_m6/oracles/tracer/utils.rs | 21 +-- .../vm_m6/oracles/tracer/validation.rs | 31 ++--- .../src/versions/vm_m6/pubdata_utils.rs | 23 ++-- .../lib/multivm/src/versions/vm_m6/refunds.rs | 13 +- .../lib/multivm/src/versions/vm_m6/storage.rs | 5 +- .../multivm/src/versions/vm_m6/test_utils.rs | 9 +- .../src/versions/vm_m6/transaction_data.rs | 17 ++- core/lib/multivm/src/versions/vm_m6/utils.rs | 18 +-- core/lib/multivm/src/versions/vm_m6/vm.rs | 33 ++--- .../multivm/src/versions/vm_m6/vm_instance.rs | 87 ++++++------ .../src/versions/vm_m6/vm_with_bootloader.rs | 3 +- .../bootloader_state/l2_block.rs | 12 +- .../bootloader_state/state.rs | 23 ++-- .../bootloader_state/tx.rs | 3 +- .../bootloader_state/utils.rs | 23 ++-- .../vm_refunds_enhancement/constants.rs | 8 +- .../implementation/bytecode.rs | 11 +- .../implementation/execution.rs | 21 +-- .../implementation/gas.rs | 7 +- .../implementation/logs.rs | 20 +-- .../implementation/snapshots.rs | 11 +- .../implementation/statistics.rs | 10 +- .../implementation/tx.rs | 19 +-- .../versions/vm_refunds_enhancement/mod.rs | 37 +++-- .../old_vm/event_sink.rs | 10 +- .../old_vm/history_recorder.rs | 8 +- .../vm_refunds_enhancement/old_vm/memory.rs | 24 ++-- .../old_vm/oracles/decommitter.rs | 22 ++- .../old_vm/oracles/precompile.rs | 7 +- .../vm_refunds_enhancement/old_vm/utils.rs | 19 ++- .../vm_refunds_enhancement/oracles/storage.rs | 25 ++-- .../tracers/default_tracers.rs | 38 ++--- .../tracers/dispatcher.rs | 15 +- .../vm_refunds_enhancement/tracers/refunds.rs | 37 +++-- .../tracers/result_tracer.rs | 32 +++-- .../vm_refunds_enhancement/tracers/traits.rs | 17 ++- .../vm_refunds_enhancement/tracers/utils.rs | 17 +-- .../types/internals/transaction_data.rs | 21 +-- .../types/internals/vm_state.rs | 42 +++--- .../vm_refunds_enhancement/types/l1_batch.rs | 3 +- .../vm_refunds_enhancement/utils/l2_blocks.rs | 10 +- .../vm_refunds_enhancement/utils/overhead.rs 
| 10 +- .../utils/transaction_encoding.rs | 3 +- .../src/versions/vm_refunds_enhancement/vm.rs | 28 ++-- .../bootloader_state/l2_block.rs | 12 +- .../bootloader_state/state.rs | 25 ++-- .../vm_virtual_blocks/bootloader_state/tx.rs | 3 +- .../bootloader_state/utils.rs | 23 ++-- .../versions/vm_virtual_blocks/constants.rs | 8 +- .../implementation/bytecode.rs | 11 +- .../implementation/execution.rs | 24 ++-- .../vm_virtual_blocks/implementation/gas.rs | 7 +- .../vm_virtual_blocks/implementation/logs.rs | 20 +-- .../implementation/snapshots.rs | 9 +- .../implementation/statistics.rs | 10 +- .../vm_virtual_blocks/implementation/tx.rs | 19 +-- .../src/versions/vm_virtual_blocks/mod.rs | 38 +++-- .../vm_virtual_blocks/old_vm/event_sink.rs | 10 +- .../old_vm/history_recorder.rs | 8 +- .../vm_virtual_blocks/old_vm/memory.rs | 24 ++-- .../old_vm/oracles/decommitter.rs | 22 ++- .../old_vm/oracles/precompile.rs | 7 +- .../old_vm/oracles/storage.rs | 20 ++- .../vm_virtual_blocks/old_vm/utils.rs | 19 ++- .../tracers/default_tracers.rs | 42 +++--- .../vm_virtual_blocks/tracers/dispatcher.rs | 19 +-- .../vm_virtual_blocks/tracers/refunds.rs | 40 +++--- .../tracers/result_tracer.rs | 38 ++--- .../vm_virtual_blocks/tracers/traits.rs | 17 ++- .../vm_virtual_blocks/tracers/utils.rs | 22 +-- .../types/internals/transaction_data.rs | 21 +-- .../types/internals/vm_state.rs | 42 +++--- .../vm_virtual_blocks/types/l1_batch_env.rs | 3 +- .../vm_virtual_blocks/utils/l2_blocks.rs | 10 +- .../vm_virtual_blocks/utils/overhead.rs | 10 +- .../utils/transaction_encoding.rs | 3 +- .../src/versions/vm_virtual_blocks/vm.rs | 31 +++-- core/lib/multivm/src/vm_instance.rs | 17 +-- core/lib/object_store/src/file.rs | 4 +- core/lib/object_store/src/gcs.rs | 18 +-- core/lib/object_store/src/metrics.rs | 4 +- core/lib/object_store/src/mock.rs | 4 +- core/lib/object_store/src/objects.rs | 8 +- core/lib/object_store/src/raw.rs | 6 +- core/lib/object_store/tests/integration.rs | 1 - 
core/lib/prometheus_exporter/src/lib.rs | 4 +- .../lib/prover_utils/src/gcs_proof_fetcher.rs | 3 +- core/lib/prover_utils/src/region_fetcher.rs | 7 +- .../prover_utils/src/vk_commitment_helper.rs | 3 +- core/lib/queued_job_processor/src/lib.rs | 14 +- core/lib/state/src/cache/metrics.rs | 4 +- core/lib/state/src/in_memory.rs | 3 +- core/lib/state/src/postgres/metrics.rs | 4 +- core/lib/state/src/postgres/mod.rs | 11 +- core/lib/state/src/postgres/tests.rs | 7 +- core/lib/state/src/rocksdb/metrics.rs | 4 +- core/lib/state/src/rocksdb/mod.rs | 10 +- core/lib/state/src/shadow_storage.rs | 2 +- core/lib/state/src/storage_view.rs | 10 +- core/lib/state/src/test_utils.rs | 4 +- core/lib/state/src/witness.rs | 3 +- core/lib/storage/src/db.rs | 10 +- core/lib/storage/src/metrics.rs | 6 +- core/lib/test_account/src/lib.rs | 18 +-- core/lib/types/src/aggregated_operations.rs | 12 +- core/lib/types/src/api/mod.rs | 11 +- core/lib/types/src/block.rs | 5 +- core/lib/types/src/circuit.rs | 3 +- core/lib/types/src/commitment.rs | 18 +-- .../types/src/contract_verification_api.rs | 3 +- core/lib/types/src/eth_sender.rs | 3 +- core/lib/types/src/event.rs | 15 +- core/lib/types/src/l1/mod.rs | 5 +- core/lib/types/src/l2/mod.rs | 28 ++-- core/lib/types/src/l2_to_l1_log.rs | 7 +- core/lib/types/src/lib.rs | 11 +- .../lib/types/src/priority_op_onchain_data.rs | 4 +- core/lib/types/src/proofs.rs | 28 ++-- core/lib/types/src/protocol_version.rs | 12 +- core/lib/types/src/prover_server_api/mod.rs | 9 +- core/lib/types/src/storage/log.rs | 3 +- .../types/src/storage/witness_block_state.rs | 6 +- .../types/src/storage/writes/compression.rs | 3 +- core/lib/types/src/storage/writes/mod.rs | 11 +- .../types/src/storage_writes_deduplicator.rs | 11 +- core/lib/types/src/system_contracts.rs | 2 +- core/lib/types/src/transaction_request.rs | 13 +- core/lib/types/src/tx/execute.rs | 3 +- core/lib/types/src/tx/mod.rs | 11 +- .../eip712_signature/member_types.rs | 9 +- 
.../eip712_signature/struct_builder.rs | 3 +- .../tx/primitives/eip712_signature/tests.rs | 21 ++- .../eip712_signature/typed_structure.rs | 8 +- .../tx/primitives/eip712_signature/utils.rs | 3 +- .../src/tx/primitives/packed_eth_signature.rs | 7 +- core/lib/types/src/tx/tx_execution_info.rs | 7 +- core/lib/types/src/utils.rs | 13 +- core/lib/types/src/vk_transform.rs | 4 +- core/lib/types/src/vm_trace.rs | 12 +- core/lib/utils/src/bytecode.rs | 10 +- core/lib/utils/src/convert.rs | 9 +- core/lib/utils/src/http_with_retries.rs | 3 +- core/lib/utils/src/misc.rs | 3 +- core/lib/vlog/src/lib.rs | 12 +- core/lib/web3_decl/src/namespaces/debug.rs | 8 +- core/lib/web3_decl/src/namespaces/eth.rs | 16 +-- core/lib/web3_decl/src/namespaces/mod.rs | 13 +- core/lib/web3_decl/src/namespaces/zks.rs | 1 - core/lib/web3_decl/src/types.rs | 10 +- .../contract_verification/api_decl.rs | 1 - .../contract_verification/api_impl.rs | 1 - .../contract_verification/metrics.rs | 4 +- .../api_server/contract_verification/mod.rs | 10 +- .../src/api_server/execution_sandbox/apply.rs | 10 +- .../src/api_server/execution_sandbox/error.rs | 3 +- .../api_server/execution_sandbox/execute.rs | 12 +- .../src/api_server/execution_sandbox/mod.rs | 23 ++-- .../api_server/execution_sandbox/tracers.rs | 8 +- .../api_server/execution_sandbox/validate.rs | 14 +- .../execution_sandbox/vm_metrics.rs | 11 +- .../zksync_core/src/api_server/healthcheck.rs | 5 +- .../src/api_server/tree/metrics.rs | 4 +- .../zksync_core/src/api_server/tree/mod.rs | 11 +- .../zksync_core/src/api_server/tree/tests.rs | 3 +- .../src/api_server/tx_sender/mod.rs | 48 +++---- .../src/api_server/tx_sender/proxy.rs | 2 +- .../src/api_server/tx_sender/result.rs | 11 +- .../batch_limiter_middleware.rs | 4 +- .../api_server/web3/backend_jsonrpc/error.rs | 4 +- .../web3/backend_jsonrpc/namespaces/debug.rs | 8 +- .../web3/backend_jsonrpc/namespaces/en.rs | 6 - .../web3/backend_jsonrpc/namespaces/eth.rs | 12 +- 
.../web3/backend_jsonrpc/namespaces/net.rs | 6 - .../web3/backend_jsonrpc/namespaces/zks.rs | 11 +- .../web3/backend_jsonrpc/pub_sub.rs | 4 +- .../api_server/web3/backend_jsonrpsee/mod.rs | 7 +- .../web3/backend_jsonrpsee/namespaces/zks.rs | 3 +- .../src/api_server/web3/metrics.rs | 12 +- .../zksync_core/src/api_server/web3/mod.rs | 46 ++++--- .../src/api_server/web3/namespaces/debug.rs | 30 ++-- .../src/api_server/web3/namespaces/en.rs | 2 +- .../src/api_server/web3/namespaces/zks.rs | 11 +- .../zksync_core/src/api_server/web3/pubsub.rs | 5 +- .../zksync_core/src/api_server/web3/state.rs | 5 +- .../src/api_server/web3/tests/mod.rs | 9 +- .../src/api_server/web3/tests/ws.rs | 1 - .../src/basic_witness_input_producer/mod.rs | 22 ++- .../vm_interactions.rs | 13 +- .../lib/zksync_core/src/block_reverter/mod.rs | 27 ++-- core/lib/zksync_core/src/consensus/payload.rs | 4 +- core/lib/zksync_core/src/data_fetchers/mod.rs | 3 +- .../src/data_fetchers/token_list/mock.rs | 6 +- .../src/data_fetchers/token_list/mod.rs | 4 +- .../src/data_fetchers/token_list/one_inch.rs | 4 +- .../data_fetchers/token_price/coingecko.rs | 4 +- .../src/data_fetchers/token_price/mock.rs | 3 +- .../src/data_fetchers/token_price/mod.rs | 7 +- .../src/eth_sender/eth_tx_aggregator.rs | 15 +- .../src/eth_sender/eth_tx_manager.rs | 9 +- .../lib/zksync_core/src/eth_sender/metrics.rs | 3 +- .../src/eth_sender/publish_criterion.rs | 5 +- core/lib/zksync_core/src/eth_sender/tests.rs | 11 +- .../event_processors/governance_upgrades.rs | 13 +- .../src/eth_watch/event_processors/mod.rs | 3 +- .../eth_watch/event_processors/upgrades.rs | 1 + core/lib/zksync_core/src/eth_watch/metrics.rs | 4 +- core/lib/zksync_core/src/eth_watch/mod.rs | 15 +- core/lib/zksync_core/src/eth_watch/tests.rs | 10 +- core/lib/zksync_core/src/genesis.rs | 10 +- .../src/house_keeper/blocks_state_reporter.rs | 1 - .../fri_proof_compressor_queue_monitor.rs | 3 +- .../fri_scheduler_circuit_queuer.rs | 1 - 
.../fri_witness_generator_queue_monitor.rs | 3 +- .../house_keeper/gpu_prover_queue_monitor.rs | 1 - .../house_keeper/prover_job_retry_manager.rs | 1 - .../src/house_keeper/prover_queue_monitor.rs | 4 +- ...waiting_to_queued_fri_witness_job_mover.rs | 1 - .../waiting_to_queued_witness_job_mover.rs | 1 - .../witness_generator_queue_monitor.rs | 3 +- .../src/l1_gas_price/gas_adjuster/mod.rs | 9 +- .../src/l1_gas_price/gas_adjuster/tests.rs | 7 +- .../src/l1_gas_price/main_node_fetcher.rs | 1 - core/lib/zksync_core/src/l1_gas_price/mod.rs | 3 +- .../zksync_core/src/l1_gas_price/singleton.rs | 12 +- core/lib/zksync_core/src/lib.rs | 89 ++++++------ .../src/metadata_calculator/helpers.rs | 8 +- .../src/metadata_calculator/metrics.rs | 5 +- .../src/metadata_calculator/mod.rs | 15 +- .../src/metadata_calculator/tests.rs | 5 +- .../src/metadata_calculator/updater.rs | 5 +- core/lib/zksync_core/src/metrics.rs | 3 +- .../zksync_core/src/proof_data_handler/mod.rs | 9 +- .../proof_data_handler/request_processor.rs | 23 ++-- .../lib/zksync_core/src/reorg_detector/mod.rs | 6 +- .../src/state_keeper/batch_executor/mod.rs | 22 ++- .../state_keeper/batch_executor/tests/mod.rs | 6 +- .../batch_executor/tests/tester.rs | 18 +-- .../src/state_keeper/extractors.rs | 3 +- .../zksync_core/src/state_keeper/io/common.rs | 6 +- .../src/state_keeper/io/mempool.rs | 11 +- .../zksync_core/src/state_keeper/io/mod.rs | 13 +- .../src/state_keeper/io/seal_logic.rs | 15 +- .../src/state_keeper/io/tests/mod.rs | 12 +- .../src/state_keeper/io/tests/tester.rs | 7 +- .../zksync_core/src/state_keeper/keeper.rs | 11 +- .../src/state_keeper/mempool_actor.rs | 3 +- .../zksync_core/src/state_keeper/metrics.rs | 9 +- core/lib/zksync_core/src/state_keeper/mod.rs | 28 ++-- .../criteria/geometry_seal_criteria.rs | 3 +- .../zksync_core/src/state_keeper/tests/mod.rs | 35 ++--- .../src/state_keeper/tests/tester.rs | 15 +- .../state_keeper/updates/l1_batch_updates.rs | 13 +- 
.../state_keeper/updates/miniblock_updates.rs | 20 +-- .../src/state_keeper/updates/mod.rs | 13 +- .../src/sync_layer/batch_status_updater.rs | 5 +- core/lib/zksync_core/src/sync_layer/client.rs | 5 +- .../zksync_core/src/sync_layer/external_io.rs | 5 +- .../lib/zksync_core/src/sync_layer/fetcher.rs | 5 +- .../lib/zksync_core/src/sync_layer/genesis.rs | 1 - .../src/sync_layer/gossip/buffered/mod.rs | 9 +- .../src/sync_layer/gossip/buffered/tests.rs | 5 +- .../src/sync_layer/gossip/conversions.rs | 2 +- .../src/sync_layer/gossip/metrics.rs | 4 +- .../zksync_core/src/sync_layer/gossip/mod.rs | 11 +- .../src/sync_layer/gossip/storage/mod.rs | 14 +- .../src/sync_layer/gossip/storage/tests.rs | 1 - .../src/sync_layer/gossip/tests.rs | 8 +- .../lib/zksync_core/src/sync_layer/metrics.rs | 4 +- core/lib/zksync_core/src/sync_layer/tests.rs | 5 +- .../src/witness_generator/basic_circuits.rs | 15 +- .../src/witness_generator/leaf_aggregation.rs | 13 +- .../zksync_core/src/witness_generator/mod.rs | 3 +- .../src/witness_generator/node_aggregation.rs | 6 +- .../precalculated_merkle_paths_provider.rs | 14 +- .../src/witness_generator/scheduler.rs | 3 +- .../src/witness_generator/storage_oracle.rs | 6 +- .../src/witness_generator/tests.rs | 10 +- .../src/witness_generator/utils.rs | 12 +- .../src/checker.rs | 9 +- .../src/config.rs | 3 +- .../src/divergence.rs | 1 + .../src/helpers.rs | 7 +- .../cross_external_nodes_checker/src/main.rs | 13 +- .../src/pubsub_checker.rs | 14 +- .../src/account/api_request_executor.rs | 1 - core/tests/loadnext/src/account/mod.rs | 9 +- .../loadnext/src/account/pubsub_executor.rs | 6 +- .../src/account/tx_command_executor.rs | 10 +- core/tests/loadnext/src/account_pool.rs | 1 - core/tests/loadnext/src/command/api.rs | 1 - core/tests/loadnext/src/command/tx_command.rs | 1 - core/tests/loadnext/src/config.rs | 9 +- core/tests/loadnext/src/corrupted_tx.rs | 20 +-- core/tests/loadnext/src/executor.rs | 21 +-- core/tests/loadnext/src/fs_utils.rs | 11 +- 
core/tests/loadnext/src/main.rs | 8 +- core/tests/loadnext/src/report.rs | 2 +- .../loadnext/src/report_collector/mod.rs | 4 +- .../operation_results_collector.rs | 4 +- core/tests/loadnext/src/rng.rs | 1 - core/tests/loadnext/src/utils.rs | 1 + .../vm-benchmark/benches/diy_benchmark.rs | 3 +- core/tests/vm-benchmark/harness/src/lib.rs | 14 +- .../vm-benchmark/src/compare_iai_results.rs | 5 +- core/tests/vm-benchmark/src/find_slowest.rs | 1 + .../src/iai_results_to_prometheus.rs | 1 + .../tests/vm-benchmark/src/with_prometheus.rs | 3 +- infrastructure/zk/src/fmt.ts | 7 +- prover/Cargo.lock | 130 +++--------------- .../src/circuit_synthesizer.rs | 42 +++--- prover/circuit_synthesizer/src/main.rs | 3 +- prover/circuit_synthesizer/src/metrics.rs | 1 + prover/proof_fri_compressor/src/compressor.rs | 39 +++--- prover/proof_fri_compressor/src/main.rs | 10 +- prover/proof_fri_compressor/src/metrics.rs | 1 + prover/prover/src/artifact_provider.rs | 8 +- prover/prover/src/metrics.rs | 1 + prover/prover/src/prover.rs | 14 +- prover/prover/src/prover_params.rs | 1 - prover/prover/src/run.rs | 18 ++- prover/prover/src/socket_listener.rs | 15 +- .../src/synthesized_circuit_provider.rs | 14 +- .../src/gpu_prover_job_processor.rs | 51 +++---- prover/prover_fri/src/main.rs | 33 +++-- prover/prover_fri/src/metrics.rs | 1 + prover/prover_fri/src/prover_job_processor.rs | 46 +++---- prover/prover_fri/src/socket_listener.rs | 27 ++-- prover/prover_fri/src/utils.rs | 36 ++--- prover/prover_fri/tests/basic_test.rs | 11 +- .../src/api_data_fetcher.rs | 6 +- prover/prover_fri_gateway/src/main.rs | 8 +- .../src/proof_gen_data_fetcher.rs | 1 - .../prover_fri_gateway/src/proof_submitter.rs | 7 +- prover/prover_fri_types/src/lib.rs | 32 ++--- prover/prover_fri_utils/src/lib.rs | 17 ++- prover/prover_fri_utils/src/metrics.rs | 1 + prover/prover_fri_utils/src/socket_utils.rs | 12 +- .../setup_key_generator_and_server/src/lib.rs | 26 ++-- .../src/main.rs | 11 +- .../src/commitment_utils.rs | 19 
++- .../src/lib.rs | 93 +++++++------ .../src/main.rs | 20 +-- .../src/setup_data_generator.rs | 40 +++--- .../src/tests.rs | 12 +- .../src/utils.rs | 113 ++++++++------- .../src/vk_generator.rs | 27 ++-- .../witness_generator/src/basic_circuits.rs | 65 +++++---- .../witness_generator/src/leaf_aggregation.rs | 56 ++++---- prover/witness_generator/src/main.rs | 26 ++-- prover/witness_generator/src/metrics.rs | 1 + .../witness_generator/src/node_aggregation.rs | 48 ++++--- .../precalculated_merkle_paths_provider.rs | 11 +- prover/witness_generator/src/scheduler.rs | 48 ++++--- .../witness_generator/src/storage_oracle.rs | 6 +- prover/witness_generator/src/tests.rs | 12 +- prover/witness_generator/src/utils.rs | 36 ++--- prover/witness_generator/tests/basic_test.rs | 22 +-- .../witness_vector_generator/src/generator.rs | 27 ++-- prover/witness_vector_generator/src/main.rs | 10 +- .../witness_vector_generator/src/metrics.rs | 1 + .../tests/basic_test.rs | 4 +- sdk/zksync-rs/src/ethereum/mod.rs | 24 ++-- sdk/zksync-rs/src/lib.rs | 21 ++- sdk/zksync-rs/src/operations/mod.rs | 4 +- sdk/zksync-rs/src/operations/transfer.rs | 15 +- sdk/zksync-rs/src/operations/withdraw.rs | 6 +- sdk/zksync-rs/src/signer.rs | 11 +- sdk/zksync-rs/src/utils.rs | 1 - sdk/zksync-rs/src/wallet.rs | 5 +- 640 files changed, 4414 insertions(+), 4176 deletions(-) diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs index bc49b731d14..c1b02a1a120 100644 --- a/core/bin/block_reverter/src/main.rs +++ b/core/bin/block_reverter/src/main.rs @@ -1,15 +1,13 @@ use anyhow::Context as _; use clap::{Parser, Subcommand}; use tokio::io::{self, AsyncReadExt}; - use zksync_config::{ContractsConfig, DBConfig, ETHClientConfig, ETHSenderConfig, PostgresConfig}; -use zksync_dal::ConnectionPool; -use zksync_env_config::FromEnv; -use zksync_types::{L1BatchNumber, U256}; - use zksync_core::block_reverter::{ BlockReverter, BlockReverterEthConfig, BlockReverterFlags, 
L1ExecutedBatchesRevert, }; +use zksync_dal::ConnectionPool; +use zksync_env_config::FromEnv; +use zksync_types::{L1BatchNumber, U256}; #[derive(Debug, Parser)] #[command(author = "Matter Labs", version, about = "Block revert utility", long_about = None)] diff --git a/core/bin/contract-verifier/src/main.rs b/core/bin/contract-verifier/src/main.rs index 05ee51139dd..33090697c51 100644 --- a/core/bin/contract-verifier/src/main.rs +++ b/core/bin/contract-verifier/src/main.rs @@ -1,16 +1,15 @@ use std::cell::RefCell; use anyhow::Context as _; +use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt}; use prometheus_exporter::PrometheusExporterConfig; +use tokio::sync::watch; use zksync_config::{configs::PrometheusConfig, ApiConfig, ContractVerifierConfig, PostgresConfig}; use zksync_dal::ConnectionPool; use zksync_env_config::FromEnv; use zksync_queued_job_processor::JobProcessor; use zksync_utils::wait_for_tasks::wait_for_tasks; -use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt}; -use tokio::sync::watch; - use crate::verifier::ContractVerifier; pub mod error; diff --git a/core/bin/contract-verifier/src/verifier.rs b/core/bin/contract-verifier/src/verifier.rs index e34b4784c1c..63c46ed90f7 100644 --- a/core/bin/contract-verifier/src/verifier.rs +++ b/core/bin/contract-verifier/src/verifier.rs @@ -1,7 +1,9 @@ -use std::collections::HashMap; -use std::env; -use std::path::Path; -use std::time::{Duration, Instant}; +use std::{ + collections::HashMap, + env, + path::Path, + time::{Duration, Instant}, +}; use anyhow::Context as _; use chrono::Utc; @@ -9,7 +11,6 @@ use ethabi::{Contract, Token}; use lazy_static::lazy_static; use regex::Regex; use tokio::time; - use zksync_config::ContractVerifierConfig; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_env_config::FromEnv; @@ -22,11 +23,11 @@ use zksync_types::{ Address, }; -use crate::error::ContractVerifierError; -use crate::zksolc_utils::{ - Optimizer, Settings, Source, 
StandardJson, ZkSolc, ZkSolcInput, ZkSolcOutput, +use crate::{ + error::ContractVerifierError, + zksolc_utils::{Optimizer, Settings, Source, StandardJson, ZkSolc, ZkSolcInput, ZkSolcOutput}, + zkvyper_utils::{ZkVyper, ZkVyperInput}, }; -use crate::zkvyper_utils::{ZkVyper, ZkVyperInput}; lazy_static! { static ref DEPLOYER_CONTRACT: Contract = zksync_contracts::deployer_contract(); diff --git a/core/bin/contract-verifier/src/zksolc_utils.rs b/core/bin/contract-verifier/src/zksolc_utils.rs index 4fba999453c..560bacb809f 100644 --- a/core/bin/contract-verifier/src/zksolc_utils.rs +++ b/core/bin/contract-verifier/src/zksolc_utils.rs @@ -1,8 +1,6 @@ +use std::{collections::HashMap, io::Write, path::PathBuf, process::Stdio}; + use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::io::Write; -use std::path::PathBuf; -use std::process::Stdio; use crate::error::ContractVerifierError; diff --git a/core/bin/contract-verifier/src/zkvyper_utils.rs b/core/bin/contract-verifier/src/zkvyper_utils.rs index 33a99f256f9..c597f78d458 100644 --- a/core/bin/contract-verifier/src/zkvyper_utils.rs +++ b/core/bin/contract-verifier/src/zkvyper_utils.rs @@ -1,8 +1,4 @@ -use std::collections::HashMap; -use std::fs::File; -use std::io::Write; -use std::path::PathBuf; -use std::process::Stdio; +use std::{collections::HashMap, fs::File, io::Write, path::PathBuf, process::Stdio}; use crate::error::ContractVerifierError; diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index c116201b91d..aea48bc0aeb 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -1,14 +1,14 @@ +use std::{env, time::Duration}; + use anyhow::Context; use serde::Deserialize; -use std::{env, time::Duration}; use url::Url; - use zksync_basic_types::{Address, L1ChainId, L2ChainId, MiniblockNumber}; use zksync_core::api_server::{ - tx_sender::TxSenderConfig, web3::state::InternalApiConfig, web3::Namespace, + 
tx_sender::TxSenderConfig, + web3::{state::InternalApiConfig, Namespace}, }; use zksync_types::api::BridgeAddresses; - use zksync_web3_decl::{ jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, namespaces::{EnNamespaceClient, EthNamespaceClient, ZksNamespaceClient}, diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 6324b0599a6..da28329c18f 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -1,11 +1,10 @@ -use anyhow::Context; -use clap::Parser; -use tokio::{sync::watch, task, time::sleep}; - use std::{sync::Arc, time::Duration}; +use anyhow::Context; +use clap::Parser; use futures::{future::FusedFuture, FutureExt}; use prometheus_exporter::PrometheusExporterConfig; +use tokio::{sync::watch, task, time::sleep}; use zksync_basic_types::{Address, L2ChainId}; use zksync_core::{ api_server::{ diff --git a/core/bin/merkle_tree_consistency_checker/src/main.rs b/core/bin/merkle_tree_consistency_checker/src/main.rs index b132bda87fa..8cac3d99724 100644 --- a/core/bin/merkle_tree_consistency_checker/src/main.rs +++ b/core/bin/merkle_tree_consistency_checker/src/main.rs @@ -1,8 +1,7 @@ -use anyhow::Context as _; -use clap::Parser; - use std::{path::Path, time::Instant}; +use anyhow::Context as _; +use clap::Parser; use zksync_config::DBConfig; use zksync_env_config::FromEnv; use zksync_merkle_tree::domain::ZkSyncTree; diff --git a/core/bin/rocksdb_util/src/main.rs b/core/bin/rocksdb_util/src/main.rs index 30d3d42e771..1fd60ca67c7 100644 --- a/core/bin/rocksdb_util/src/main.rs +++ b/core/bin/rocksdb_util/src/main.rs @@ -1,6 +1,5 @@ use anyhow::Context as _; use clap::{Parser, Subcommand}; - use zksync_config::DBConfig; use zksync_env_config::FromEnv; use zksync_storage::rocksdb::{ @@ -57,9 +56,10 @@ fn main() -> anyhow::Result<()> { #[cfg(test)] mod tests { - use super::*; use tempfile::TempDir; + use super::*; + #[test] fn backup_restore_workflow() { let backup_dir = 
TempDir::new().expect("failed to get temporary directory for RocksDB"); diff --git a/core/bin/storage_logs_dedup_migration/src/consistency.rs b/core/bin/storage_logs_dedup_migration/src/consistency.rs index 3c63c8c81a7..dc0b3da389c 100644 --- a/core/bin/storage_logs_dedup_migration/src/consistency.rs +++ b/core/bin/storage_logs_dedup_migration/src/consistency.rs @@ -1,5 +1,4 @@ use clap::Parser; - use zksync_config::PostgresConfig; use zksync_dal::ConnectionPool; use zksync_env_config::FromEnv; diff --git a/core/bin/storage_logs_dedup_migration/src/main.rs b/core/bin/storage_logs_dedup_migration/src/main.rs index 7277c231e43..733976b44e1 100644 --- a/core/bin/storage_logs_dedup_migration/src/main.rs +++ b/core/bin/storage_logs_dedup_migration/src/main.rs @@ -1,7 +1,6 @@ use std::collections::hash_map::{Entry, HashMap}; use clap::Parser; - use zksync_config::PostgresConfig; use zksync_dal::ConnectionPool; use zksync_env_config::FromEnv; diff --git a/core/bin/system-constants-generator/src/intrinsic_costs.rs b/core/bin/system-constants-generator/src/intrinsic_costs.rs index e15abf7d134..94cec591e00 100644 --- a/core/bin/system-constants-generator/src/intrinsic_costs.rs +++ b/core/bin/system-constants-generator/src/intrinsic_costs.rs @@ -4,13 +4,13 @@ //! as well as contracts/SystemConfig.json //! 
+use multivm::vm_latest::constants::BOOTLOADER_TX_ENCODING_SPACE; +use zksync_types::{ethabi::Address, IntrinsicSystemGasConstants, U256}; + use crate::utils::{ execute_internal_transfer_test, execute_user_txs_in_test_gas_vm, get_l1_tx, get_l1_txs, - get_l2_txs, + get_l2_txs, metrics_from_txs, TransactionGenerator, }; -use crate::utils::{metrics_from_txs, TransactionGenerator}; -use multivm::vm_latest::constants::BOOTLOADER_TX_ENCODING_SPACE; -use zksync_types::{ethabi::Address, IntrinsicSystemGasConstants, U256}; #[derive(Debug, Clone, Copy, PartialEq)] pub(crate) struct VmSpentResourcesResult { diff --git a/core/bin/system-constants-generator/src/main.rs b/core/bin/system-constants-generator/src/main.rs index ed906e1c9bb..44659d21781 100644 --- a/core/bin/system-constants-generator/src/main.rs +++ b/core/bin/system-constants-generator/src/main.rs @@ -1,7 +1,18 @@ use std::fs; +use codegen::{Block, Scope}; +use multivm::vm_latest::constants::{ + BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_L1_GAS, BOOTLOADER_TX_ENCODING_SPACE, MAX_PUBDATA_PER_BLOCK, +}; use serde::{Deserialize, Serialize}; use zksync_types::{ + zkevm_test_harness::zk_evm::zkevm_opcode_defs::{ + circuit_prices::{ + ECRECOVER_CIRCUIT_COST_IN_ERGS, KECCAK256_CIRCUIT_COST_IN_ERGS, + SHA256_CIRCUIT_COST_IN_ERGS, + }, + system_params::MAX_TX_ERGS_LIMIT, + }, IntrinsicSystemGasConstants, GUARANTEED_PUBDATA_IN_TX, L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, MAX_TXS_IN_BLOCK, }; @@ -9,16 +20,6 @@ use zksync_types::{ mod intrinsic_costs; mod utils; -use codegen::Block; -use codegen::Scope; -use multivm::vm_latest::constants::{ - BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_L1_GAS, BOOTLOADER_TX_ENCODING_SPACE, MAX_PUBDATA_PER_BLOCK, -}; -use zksync_types::zkevm_test_harness::zk_evm::zkevm_opcode_defs::circuit_prices::{ - ECRECOVER_CIRCUIT_COST_IN_ERGS, KECCAK256_CIRCUIT_COST_IN_ERGS, SHA256_CIRCUIT_COST_IN_ERGS, -}; -use 
zksync_types::zkevm_test_harness::zk_evm::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; - // Params needed for L1 contracts #[derive(Copy, Clone, Debug, Serialize, Deserialize)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index fc576ff44ee..b138c5261a8 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -1,16 +1,17 @@ -use once_cell::sync::Lazy; -use std::cell::RefCell; -use std::rc::Rc; - -use multivm::interface::{ - dyn_tracers::vm_1_4_0::DynTracer, tracer::VmExecutionStopReason, L1BatchEnv, L2BlockEnv, - SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, -}; -use multivm::vm_latest::{ - constants::{BLOCK_GAS_LIMIT, BOOTLOADER_HEAP_PAGE}, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, Vm, VmTracer, - ZkSyncVmState, +use std::{cell::RefCell, rc::Rc}; + +use multivm::{ + interface::{ + dyn_tracers::vm_1_4_0::DynTracer, tracer::VmExecutionStopReason, L1BatchEnv, L2BlockEnv, + SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, + }, + vm_latest::{ + constants::{BLOCK_GAS_LIMIT, BOOTLOADER_HEAP_PAGE}, + BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, Vm, VmTracer, + ZkSyncVmState, + }, }; +use once_cell::sync::Lazy; use zksync_contracts::{ load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, read_zbin_bytecode, BaseSystemContracts, ContractLanguage, SystemContractCode, diff --git a/core/bin/verification_key_generator_and_server/src/json_to_binary_vk_converter.rs b/core/bin/verification_key_generator_and_server/src/json_to_binary_vk_converter.rs index 65a2e3361bf..c04a6712833 100644 --- a/core/bin/verification_key_generator_and_server/src/json_to_binary_vk_converter.rs +++ b/core/bin/verification_key_generator_and_server/src/json_to_binary_vk_converter.rs @@ -1,6 +1,6 @@ +use std::{fs::File, 
io::BufWriter}; + use bincode::serialize_into; -use std::fs::File; -use std::io::BufWriter; use structopt::StructOpt; use zksync_verification_key_server::get_vk_for_circuit_type; diff --git a/core/bin/verification_key_generator_and_server/src/lib.rs b/core/bin/verification_key_generator_and_server/src/lib.rs index 2b05363595b..20260a30b20 100644 --- a/core/bin/verification_key_generator_and_server/src/lib.rs +++ b/core/bin/verification_key_generator_and_server/src/lib.rs @@ -1,29 +1,29 @@ -use ff::to_hex; -use once_cell::sync::Lazy; -use std::collections::HashMap; -use std::path::Path; -use std::str::FromStr; -use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; -use zksync_types::zkevm_test_harness::bellman::plonk::better_better_cs::setup::VerificationKey; -use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; +use std::{collections::HashMap, path::Path, str::FromStr}; +use ff::to_hex; use itertools::Itertools; +use once_cell::sync::Lazy; use structopt::lazy_static::lazy_static; -use zksync_types::circuit::SCHEDULER_CIRCUIT_INDEX; -use zksync_types::circuit::{ - GEOMETRY_CONFIG, LEAF_CIRCUIT_INDEX, LEAF_SPLITTING_FACTOR, NODE_CIRCUIT_INDEX, - NODE_SPLITTING_FACTOR, SCHEDULER_UPPER_BOUND, -}; -use zksync_types::protocol_version::{L1VerifierConfig, VerifierParams}; -use zksync_types::vk_transform::generate_vk_commitment; -use zksync_types::zkevm_test_harness::witness; -use zksync_types::zkevm_test_harness::witness::full_block_artifact::BlockBasicCircuits; -use zksync_types::zkevm_test_harness::witness::recursive_aggregation::{ - erase_vk_type, padding_aggregations, +use zksync_types::{ + circuit::{ + GEOMETRY_CONFIG, LEAF_CIRCUIT_INDEX, LEAF_SPLITTING_FACTOR, NODE_CIRCUIT_INDEX, + NODE_SPLITTING_FACTOR, SCHEDULER_CIRCUIT_INDEX, SCHEDULER_UPPER_BOUND, + }, + protocol_version::{L1VerifierConfig, VerifierParams}, + 
vk_transform::generate_vk_commitment, + zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::{bn256::Bn256, plonk::better_better_cs::setup::VerificationKey}, + witness, + witness::{ + full_block_artifact::BlockBasicCircuits, + oracle::VmWitnessOracle, + recursive_aggregation::{erase_vk_type, padding_aggregations}, + vk_set_generator::circuits_for_vk_generation, + }, + }, + H256, }; -use zksync_types::zkevm_test_harness::witness::vk_set_generator::circuits_for_vk_generation; -use zksync_types::H256; #[cfg(test)] mod tests; diff --git a/core/bin/verification_key_generator_and_server/src/main.rs b/core/bin/verification_key_generator_and_server/src/main.rs index 30ffb0574d4..b64e5757fce 100644 --- a/core/bin/verification_key_generator_and_server/src/main.rs +++ b/core/bin/verification_key_generator_and_server/src/main.rs @@ -1,9 +1,12 @@ -use std::collections::HashSet; -use std::env; -use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; -use zksync_types::zkevm_test_harness::bellman::plonk::better_better_cs::cs::PlonkCsWidth4WithNextStepAndCustomGatesParams; -use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; +use std::{collections::HashSet, env}; + +use zksync_types::zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::{ + bn256::Bn256, plonk::better_better_cs::cs::PlonkCsWidth4WithNextStepAndCustomGatesParams, + }, + witness::oracle::VmWitnessOracle, +}; use zksync_verification_key_server::{get_circuits_for_vk, save_vk_for_circuit_type}; /// Creates verification keys for the given circuit. 
diff --git a/core/bin/verification_key_generator_and_server/src/tests.rs b/core/bin/verification_key_generator_and_server/src/tests.rs index 8f013bad200..f0fea866de6 100644 --- a/core/bin/verification_key_generator_and_server/src/tests.rs +++ b/core/bin/verification_key_generator_and_server/src/tests.rs @@ -1,12 +1,14 @@ -use crate::{get_vk_for_circuit_type, get_vks_for_basic_circuits, get_vks_for_commitment}; +use std::collections::HashMap; + use itertools::Itertools; use serde_json::Value; -use std::collections::HashMap; -use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; -use zksync_types::zkevm_test_harness::bellman::plonk::better_better_cs::setup::VerificationKey; +use zksync_types::zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::{bn256::Bn256, plonk::better_better_cs::setup::VerificationKey}, + witness::oracle::VmWitnessOracle, +}; -use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; +use crate::{get_vk_for_circuit_type, get_vks_for_basic_circuits, get_vks_for_commitment}; #[test] fn test_get_vk_for_circuit_type() { diff --git a/core/bin/verified_sources_fetcher/src/main.rs b/core/bin/verified_sources_fetcher/src/main.rs index 6bb6ee66cee..cc53229329f 100644 --- a/core/bin/verified_sources_fetcher/src/main.rs +++ b/core/bin/verified_sources_fetcher/src/main.rs @@ -1,4 +1,5 @@ use std::io::Write; + use zksync_config::PostgresConfig; use zksync_dal::ConnectionPool; use zksync_env_config::FromEnv; diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index f2aed9c75c2..9a5ccf8be60 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -1,8 +1,7 @@ -use anyhow::Context as _; -use clap::Parser; - use std::{str::FromStr, time::Duration}; +use anyhow::Context as _; +use clap::Parser; use zksync_config::{ configs::{ 
api::{HealthCheckConfig, MerkleTreeApiConfig, Web3JsonRpcConfig}, @@ -18,11 +17,9 @@ use zksync_config::{ ApiConfig, ContractsConfig, DBConfig, ETHClientConfig, ETHSenderConfig, ETHWatchConfig, FetcherConfig, GasAdjusterConfig, ObjectStoreConfig, PostgresConfig, ProverConfigs, }; - -use zksync_core::temp_config_store::TempConfigStore; use zksync_core::{ - genesis_init, initialize_components, is_genesis_needed, setup_sigint_handler, Component, - Components, + genesis_init, initialize_components, is_genesis_needed, setup_sigint_handler, + temp_config_store::TempConfigStore, Component, Components, }; use zksync_env_config::FromEnv; use zksync_storage::RocksDB; diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 6c6223fbb17..aa9bf615c91 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -2,25 +2,25 @@ //! //! Most of them are just re-exported from the `web3` crate. +use std::{ + convert::{Infallible, TryFrom, TryInto}, + fmt, + num::ParseIntError, + ops::{Add, Deref, DerefMut, Sub}, + str::FromStr, +}; + +use serde::{de, Deserialize, Deserializer, Serialize}; +pub use web3::{ + self, ethabi, + types::{Address, Bytes, Log, TransactionRequest, H128, H160, H2048, H256, U128, U256, U64}, +}; + #[macro_use] mod macros; - pub mod basic_fri_types; pub mod network; -use serde::{de, Deserialize, Deserializer, Serialize}; -use std::convert::{Infallible, TryFrom, TryInto}; -use std::fmt; -use std::num::ParseIntError; -use std::ops::{Add, Deref, DerefMut, Sub}; -use std::str::FromStr; - -pub use web3; -pub use web3::ethabi; -pub use web3::types::{ - Address, Bytes, Log, TransactionRequest, H128, H160, H2048, H256, U128, U256, U64, -}; - /// Account place in the global state tree is uniquely identified by its address. /// Binary this type is represented by 160 bit big-endian representation of account address. 
#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash, Ord, PartialOrd)] @@ -222,9 +222,10 @@ impl Default for PriorityOpId { #[cfg(test)] mod tests { - use super::*; use serde_json::from_str; + use super::*; + #[test] fn test_from_str_valid_decimal() { let input = "42"; diff --git a/core/lib/circuit_breaker/src/l1_txs.rs b/core/lib/circuit_breaker/src/l1_txs.rs index 5279106637e..5d3c4dc9ccf 100644 --- a/core/lib/circuit_breaker/src/l1_txs.rs +++ b/core/lib/circuit_breaker/src/l1_txs.rs @@ -1,6 +1,7 @@ -use crate::{CircuitBreaker, CircuitBreakerError}; use zksync_dal::ConnectionPool; +use crate::{CircuitBreaker, CircuitBreakerError}; + #[derive(Debug)] pub struct FailedL1TransactionChecker { pub pool: ConnectionPool, diff --git a/core/lib/circuit_breaker/src/lib.rs b/core/lib/circuit_breaker/src/lib.rs index 878114f0d04..4c84f857a29 100644 --- a/core/lib/circuit_breaker/src/lib.rs +++ b/core/lib/circuit_breaker/src/lib.rs @@ -4,7 +4,6 @@ use anyhow::Context as _; use futures::channel::oneshot; use thiserror::Error; use tokio::sync::watch; - use zksync_config::configs::chain::CircuitBreakerConfig; pub mod l1_txs; diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index 14b3d81520c..348c1c95e2d 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -1,10 +1,10 @@ -use serde::Deserialize; - use std::{net::SocketAddr, time::Duration}; -pub use crate::configs::PrometheusConfig; +use serde::Deserialize; use zksync_basic_types::H256; +pub use crate::configs::PrometheusConfig; + /// API configuration. 
#[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ApiConfig { diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index f09b5bb292c..eb77467183f 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -1,11 +1,7 @@ -/// External uses +use std::{str::FromStr, time::Duration}; + use serde::Deserialize; -use std::str::FromStr; -/// Built-in uses -use std::time::Duration; -// Local uses -use zksync_basic_types::network::Network; -use zksync_basic_types::{Address, L2ChainId}; +use zksync_basic_types::{network::Network, Address, L2ChainId}; #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ChainConfig { diff --git a/core/lib/config/src/configs/contract_verifier.rs b/core/lib/config/src/configs/contract_verifier.rs index 5c2a1608c8f..db3c8fa1b52 100644 --- a/core/lib/config/src/configs/contract_verifier.rs +++ b/core/lib/config/src/configs/contract_verifier.rs @@ -1,6 +1,5 @@ -// Built-in uses use std::time::Duration; -// External uses + use serde::Deserialize; #[derive(Debug, Deserialize, Clone, PartialEq)] diff --git a/core/lib/config/src/configs/database.rs b/core/lib/config/src/configs/database.rs index d257e661eb3..dcff7d486a8 100644 --- a/core/lib/config/src/configs/database.rs +++ b/core/lib/config/src/configs/database.rs @@ -1,8 +1,8 @@ +use std::time::Duration; + use anyhow::Context as _; use serde::{Deserialize, Serialize}; -use std::time::Duration; - /// Mode of operation for the Merkle tree. /// /// The mode does not influence how tree data is stored; i.e., a mode can be switched on the fly. 
diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 3d036483347..cd44daed17f 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -1,8 +1,6 @@ -// Built-in uses use std::time::Duration; -// External uses + use serde::Deserialize; -// Workspace uses use zksync_basic_types::H256; /// Configuration for the Ethereum sender crate. diff --git a/core/lib/config/src/configs/eth_watch.rs b/core/lib/config/src/configs/eth_watch.rs index 93d73ddf6bf..05afebf81c3 100644 --- a/core/lib/config/src/configs/eth_watch.rs +++ b/core/lib/config/src/configs/eth_watch.rs @@ -1,6 +1,5 @@ -// Built-in uses use std::time::Duration; -// External uses + use serde::Deserialize; /// Configuration for the Ethereum sender crate. diff --git a/core/lib/config/src/configs/fetcher.rs b/core/lib/config/src/configs/fetcher.rs index b1a5fca4b24..a1e63742e22 100644 --- a/core/lib/config/src/configs/fetcher.rs +++ b/core/lib/config/src/configs/fetcher.rs @@ -1,6 +1,7 @@ -use serde::Deserialize; use std::time::Duration; +use serde::Deserialize; + #[derive(Debug, Deserialize, Clone, Copy, PartialEq)] pub enum TokenListSource { OneInch, diff --git a/core/lib/config/src/configs/fri_proof_compressor.rs b/core/lib/config/src/configs/fri_proof_compressor.rs index bbf58f2d1c6..4b4e062dee2 100644 --- a/core/lib/config/src/configs/fri_proof_compressor.rs +++ b/core/lib/config/src/configs/fri_proof_compressor.rs @@ -1,6 +1,7 @@ -use serde::Deserialize; use std::time::Duration; +use serde::Deserialize; + /// Configuration for the fri proof compressor #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct FriProofCompressorConfig { diff --git a/core/lib/config/src/configs/fri_prover.rs b/core/lib/config/src/configs/fri_prover.rs index aab358a4ada..44521ee3657 100644 --- a/core/lib/config/src/configs/fri_prover.rs +++ b/core/lib/config/src/configs/fri_prover.rs @@ -1,6 +1,7 @@ -use serde::Deserialize; use 
std::time::Duration; +use serde::Deserialize; + #[derive(Debug, Deserialize, Clone, PartialEq)] pub enum SetupLoadMode { FromDisk, diff --git a/core/lib/config/src/configs/fri_prover_gateway.rs b/core/lib/config/src/configs/fri_prover_gateway.rs index 652c7d1bc0f..86723ff3043 100644 --- a/core/lib/config/src/configs/fri_prover_gateway.rs +++ b/core/lib/config/src/configs/fri_prover_gateway.rs @@ -1,6 +1,7 @@ -use serde::Deserialize; use std::time::Duration; +use serde::Deserialize; + #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct FriProverGatewayConfig { pub api_url: String, diff --git a/core/lib/config/src/configs/fri_prover_group.rs b/core/lib/config/src/configs/fri_prover_group.rs index 71ed5d1f7d9..856ff59809f 100644 --- a/core/lib/config/src/configs/fri_prover_group.rs +++ b/core/lib/config/src/configs/fri_prover_group.rs @@ -1,6 +1,6 @@ -use serde::Deserialize; use std::collections::HashSet; +use serde::Deserialize; use zksync_basic_types::basic_fri_types::CircuitIdRoundTuple; /// Configuration for the grouping of specialized provers. 
diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 0c2ecc46103..710c128c951 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -1,15 +1,26 @@ // Public re-exports pub use self::{ - alerts::AlertsConfig, api::ApiConfig, chain::ChainConfig, - circuit_synthesizer::CircuitSynthesizerConfig, contract_verifier::ContractVerifierConfig, - contracts::ContractsConfig, database::DBConfig, database::PostgresConfig, - eth_client::ETHClientConfig, eth_sender::ETHSenderConfig, eth_sender::GasAdjusterConfig, - eth_watch::ETHWatchConfig, fetcher::FetcherConfig, - fri_proof_compressor::FriProofCompressorConfig, fri_prover::FriProverConfig, - fri_prover_gateway::FriProverGatewayConfig, fri_witness_generator::FriWitnessGeneratorConfig, - fri_witness_vector_generator::FriWitnessVectorGeneratorConfig, object_store::ObjectStoreConfig, - proof_data_handler::ProofDataHandlerConfig, prover::ProverConfig, prover::ProverConfigs, - prover_group::ProverGroupConfig, utils::PrometheusConfig, + alerts::AlertsConfig, + api::ApiConfig, + chain::ChainConfig, + circuit_synthesizer::CircuitSynthesizerConfig, + contract_verifier::ContractVerifierConfig, + contracts::ContractsConfig, + database::{DBConfig, PostgresConfig}, + eth_client::ETHClientConfig, + eth_sender::{ETHSenderConfig, GasAdjusterConfig}, + eth_watch::ETHWatchConfig, + fetcher::FetcherConfig, + fri_proof_compressor::FriProofCompressorConfig, + fri_prover::FriProverConfig, + fri_prover_gateway::FriProverGatewayConfig, + fri_witness_generator::FriWitnessGeneratorConfig, + fri_witness_vector_generator::FriWitnessVectorGeneratorConfig, + object_store::ObjectStoreConfig, + proof_data_handler::ProofDataHandlerConfig, + prover::{ProverConfig, ProverConfigs}, + prover_group::ProverGroupConfig, + utils::PrometheusConfig, witness_generator::WitnessGeneratorConfig, }; diff --git a/core/lib/config/src/configs/proof_data_handler.rs 
b/core/lib/config/src/configs/proof_data_handler.rs index e3efd6b7a4d..b773efbd7df 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -1,6 +1,7 @@ -use serde::Deserialize; use std::time::Duration; +use serde::Deserialize; + #[derive(Debug, Deserialize, Clone, Copy, PartialEq)] pub enum ProtocolVersionLoadingMode { FromDb, diff --git a/core/lib/config/src/configs/utils.rs b/core/lib/config/src/configs/utils.rs index bfa9e7e7f3e..977a48e82d2 100644 --- a/core/lib/config/src/configs/utils.rs +++ b/core/lib/config/src/configs/utils.rs @@ -1,7 +1,7 @@ -use serde::Deserialize; - use std::{env, time::Duration}; +use serde::Deserialize; + #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct PrometheusConfig { /// Port to which the Prometheus exporter server is listening. diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index 766d2464d34..917bf7a6ffe 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -3,17 +3,18 @@ //! Careful: some of the methods are reading the contracts based on the ZKSYNC_HOME environment variable. 
#![allow(clippy::derive_partial_eq_without_eq)] + +use std::{ + fs::{self, File}, + path::{Path, PathBuf}, +}; + use ethabi::{ ethereum_types::{H256, U256}, Contract, Function, }; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; -use std::{ - fs::{self, File}, - path::{Path, PathBuf}, -}; - use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words}; pub mod test_contracts; diff --git a/core/lib/contracts/src/test_contracts.rs b/core/lib/contracts/src/test_contracts.rs index 9db4051cfdb..eab1587f833 100644 --- a/core/lib/contracts/src/test_contracts.rs +++ b/core/lib/contracts/src/test_contracts.rs @@ -1,8 +1,8 @@ -use crate::get_loadnext_contract; -use ethabi::ethereum_types::U256; -use ethabi::{Bytes, Token}; +use ethabi::{ethereum_types::U256, Bytes, Token}; use serde::Deserialize; +use crate::get_loadnext_contract; + #[derive(Debug, Clone, Deserialize)] pub struct LoadnextContractExecutionParams { pub reads: usize, diff --git a/core/lib/crypto/src/hasher/blake2.rs b/core/lib/crypto/src/hasher/blake2.rs index 70d8c9797e8..97d3fbb8a1e 100644 --- a/core/lib/crypto/src/hasher/blake2.rs +++ b/core/lib/crypto/src/hasher/blake2.rs @@ -1,7 +1,7 @@ use blake2::{Blake2s256, Digest}; +use zksync_basic_types::H256; use crate::hasher::Hasher; -use zksync_basic_types::H256; #[derive(Default, Clone, Debug)] pub struct Blake2Hasher; diff --git a/core/lib/crypto/src/hasher/keccak.rs b/core/lib/crypto/src/hasher/keccak.rs index e4c441328de..d3baab873f9 100644 --- a/core/lib/crypto/src/hasher/keccak.rs +++ b/core/lib/crypto/src/hasher/keccak.rs @@ -1,6 +1,7 @@ -use crate::hasher::Hasher; use zksync_basic_types::{web3::signing::keccak256, H256}; +use crate::hasher::Hasher; + #[derive(Default, Clone, Debug)] pub struct KeccakHasher; diff --git a/core/lib/crypto/src/hasher/sha256.rs b/core/lib/crypto/src/hasher/sha256.rs index 73e593ead72..b976c79d210 100644 --- a/core/lib/crypto/src/hasher/sha256.rs +++ b/core/lib/crypto/src/hasher/sha256.rs @@ -1,7 +1,7 @@ use 
sha2::{Digest, Sha256}; +use zksync_basic_types::H256; use crate::hasher::Hasher; -use zksync_basic_types::H256; #[derive(Debug, Default, Clone, Copy)] pub struct Sha256Hasher; diff --git a/core/lib/dal/src/basic_witness_input_producer_dal.rs b/core/lib/dal/src/basic_witness_input_producer_dal.rs index ac0627a96a0..cae640e94b8 100644 --- a/core/lib/dal/src/basic_witness_input_producer_dal.rs +++ b/core/lib/dal/src/basic_witness_input_producer_dal.rs @@ -1,10 +1,14 @@ -use crate::instrument::InstrumentExt; -use crate::time_utils::{duration_to_naive_time, pg_interval_from_duration}; -use crate::StorageProcessor; -use sqlx::postgres::types::PgInterval; use std::time::{Duration, Instant}; + +use sqlx::postgres::types::PgInterval; use zksync_types::L1BatchNumber; +use crate::{ + instrument::InstrumentExt, + time_utils::{duration_to_naive_time, pg_interval_from_duration}, + StorageProcessor, +}; + #[derive(Debug)] pub struct BasicWitnessInputProducerDal<'a, 'c> { pub(crate) storage: &'a mut StorageProcessor<'c>, diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index c60d52e197b..16e926393fb 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -7,7 +7,6 @@ use std::{ use anyhow::Context as _; use bigdecimal::{BigDecimal, FromPrimitive, ToPrimitive}; use sqlx::Row; - use zksync_types::{ aggregated_operations::AggregatedActionType, block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 0c2a8b4e188..87f6fca1eb2 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -1,6 +1,5 @@ use bigdecimal::BigDecimal; use sqlx::Row; - use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ api, @@ -13,14 +12,17 @@ use zksync_types::{ }; use zksync_utils::bigdecimal_to_u256; -use crate::models::{ - storage_block::{ - bind_block_where_sql_params, web3_block_number_to_sql, 
web3_block_where_sql, - StorageBlockDetails, StorageL1BatchDetails, +use crate::{ + instrument::InstrumentExt, + models::{ + storage_block::{ + bind_block_where_sql_params, web3_block_number_to_sql, web3_block_where_sql, + StorageBlockDetails, StorageL1BatchDetails, + }, + storage_transaction::{extract_web3_transaction, web3_transaction_select_sql, CallTrace}, }, - storage_transaction::{extract_web3_transaction, web3_transaction_select_sql, CallTrace}, + StorageProcessor, }; -use crate::{instrument::InstrumentExt, StorageProcessor}; const BLOCK_GAS_LIMIT: u32 = system_params::VM_INITIAL_FRAME_ERGS; diff --git a/core/lib/dal/src/connection/holder.rs b/core/lib/dal/src/connection/holder.rs index 265b892c089..1174f834ae8 100644 --- a/core/lib/dal/src/connection/holder.rs +++ b/core/lib/dal/src/connection/holder.rs @@ -1,8 +1,7 @@ -// Built-in deps -use sqlx::pool::PoolConnection; -use sqlx::{postgres::Postgres, Transaction}; use std::fmt; +use sqlx::{pool::PoolConnection, postgres::Postgres, Transaction}; + /// Connection holder unifies the type of underlying connection, which /// can be either pooled or direct. pub(crate) enum ConnectionHolder<'a> { diff --git a/core/lib/dal/src/connection/mod.rs b/core/lib/dal/src/connection/mod.rs index 845dbc64dc4..b7f82d619ff 100644 --- a/core/lib/dal/src/connection/mod.rs +++ b/core/lib/dal/src/connection/mod.rs @@ -1,17 +1,15 @@ +use std::{env, fmt, time::Duration}; + +use anyhow::Context as _; use sqlx::{ pool::PoolConnection, postgres::{PgConnectOptions, PgPool, PgPoolOptions, Postgres}, }; -use anyhow::Context as _; -use std::env; -use std::fmt; -use std::time::Duration; +use crate::{metrics::CONNECTION_METRICS, StorageProcessor}; pub mod holder; -use crate::{metrics::CONNECTION_METRICS, StorageProcessor}; - /// Obtains the test database URL from the environment variable. 
fn get_test_database_url() -> anyhow::Result { env::var("TEST_DATABASE_URL").context( diff --git a/core/lib/dal/src/contract_verification_dal.rs b/core/lib/dal/src/contract_verification_dal.rs index a6c549f482b..5466b0c11b2 100644 --- a/core/lib/dal/src/contract_verification_dal.rs +++ b/core/lib/dal/src/contract_verification_dal.rs @@ -1,7 +1,10 @@ -use anyhow::Context as _; -use std::fmt::{Display, Formatter}; -use std::time::Duration; +use std::{ + fmt::{Display, Formatter}, + time::Duration, +}; +use anyhow::Context as _; +use sqlx::postgres::types::PgInterval; use zksync_types::{ contract_verification_api::{ DeployContractCalldata, VerificationIncomingRequest, VerificationInfo, VerificationRequest, @@ -10,10 +13,7 @@ use zksync_types::{ get_code_key, Address, CONTRACT_DEPLOYER_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, }; -use sqlx::postgres::types::PgInterval; - -use crate::models::storage_verification_request::StorageVerificationRequest; -use crate::StorageProcessor; +use crate::{models::storage_verification_request::StorageVerificationRequest, StorageProcessor}; #[derive(Debug)] pub struct ContractVerificationDal<'a, 'c> { diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index 0d9d1da0dab..94d7adfe284 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -1,17 +1,22 @@ -use crate::models::storage_eth_tx::{ - L1BatchEthSenderStats, StorageEthTx, StorageTxHistory, StorageTxHistoryToSend, -}; -use crate::StorageProcessor; +use std::{convert::TryFrom, str::FromStr}; + use anyhow::Context as _; use sqlx::{ types::chrono::{DateTime, Utc}, Row, }; -use std::convert::TryFrom; -use std::str::FromStr; -use zksync_types::aggregated_operations::AggregatedActionType; -use zksync_types::eth_sender::{EthTx, TxHistory, TxHistoryToSend}; -use zksync_types::{Address, L1BatchNumber, H256, U256}; +use zksync_types::{ + aggregated_operations::AggregatedActionType, + eth_sender::{EthTx, TxHistory, 
TxHistoryToSend}, + Address, L1BatchNumber, H256, U256, +}; + +use crate::{ + models::storage_eth_tx::{ + L1BatchEthSenderStats, StorageEthTx, StorageTxHistory, StorageTxHistoryToSend, + }, + StorageProcessor, +}; #[derive(Debug)] pub struct EthSenderDal<'a, 'c> { diff --git a/core/lib/dal/src/events_dal.rs b/core/lib/dal/src/events_dal.rs index 6355deaf29a..22967982b3b 100644 --- a/core/lib/dal/src/events_dal.rs +++ b/core/lib/dal/src/events_dal.rs @@ -1,14 +1,14 @@ -use sqlx::types::chrono::Utc; - use std::fmt; -use crate::{models::storage_event::StorageL2ToL1Log, SqlxError, StorageProcessor}; +use sqlx::types::chrono::Utc; use zksync_types::{ l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, tx::IncludedTxLocation, MiniblockNumber, VmEvent, H256, }; +use crate::{models::storage_event::StorageL2ToL1Log, SqlxError, StorageProcessor}; + /// Wrapper around an optional event topic allowing to hex-format it for `COPY` instructions. #[derive(Debug)] struct EventTopic<'a>(Option<&'a H256>); @@ -196,9 +196,10 @@ impl EventsDal<'_, '_> { #[cfg(test)] mod tests { + use zksync_types::{Address, L1BatchNumber, ProtocolVersion}; + use super::*; use crate::{tests::create_miniblock_header, ConnectionPool}; - use zksync_types::{Address, L1BatchNumber, ProtocolVersion}; fn create_vm_event(index: u8, topic_count: u8) -> VmEvent { assert!(topic_count <= 4); diff --git a/core/lib/dal/src/events_web3_dal.rs b/core/lib/dal/src/events_web3_dal.rs index 7cdf2dba646..e8b1c802446 100644 --- a/core/lib/dal/src/events_web3_dal.rs +++ b/core/lib/dal/src/events_web3_dal.rs @@ -1,5 +1,4 @@ use sqlx::Row; - use zksync_types::{ api::{GetLogsFilter, Log}, Address, MiniblockNumber, H256, diff --git a/core/lib/dal/src/fri_gpu_prover_queue_dal.rs b/core/lib/dal/src/fri_gpu_prover_queue_dal.rs index 46c46a15b73..141b2e0378e 100644 --- a/core/lib/dal/src/fri_gpu_prover_queue_dal.rs +++ b/core/lib/dal/src/fri_gpu_prover_queue_dal.rs @@ -1,8 +1,8 @@ use std::time::Duration; + use 
zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; -use crate::time_utils::pg_interval_from_duration; -use crate::StorageProcessor; +use crate::{time_utils::pg_interval_from_duration, StorageProcessor}; #[derive(Debug)] pub struct FriGpuProverQueueDal<'a, 'c> { diff --git a/core/lib/dal/src/fri_proof_compressor_dal.rs b/core/lib/dal/src/fri_proof_compressor_dal.rs index b7f1d1921e9..6e6db0bb6d8 100644 --- a/core/lib/dal/src/fri_proof_compressor_dal.rs +++ b/core/lib/dal/src/fri_proof_compressor_dal.rs @@ -1,14 +1,16 @@ +use std::{collections::HashMap, str::FromStr, time::Duration}; + use sqlx::Row; -use std::collections::HashMap; -use std::str::FromStr; -use std::time::Duration; use strum::{Display, EnumString}; - -use zksync_types::proofs::{JobCountStatistics, StuckJobs}; -use zksync_types::L1BatchNumber; - -use crate::time_utils::{duration_to_naive_time, pg_interval_from_duration}; -use crate::StorageProcessor; +use zksync_types::{ + proofs::{JobCountStatistics, StuckJobs}, + L1BatchNumber, +}; + +use crate::{ + time_utils::{duration_to_naive_time, pg_interval_from_duration}, + StorageProcessor, +}; #[derive(Debug)] pub struct FriProofCompressorDal<'a, 'c> { diff --git a/core/lib/dal/src/fri_protocol_versions_dal.rs b/core/lib/dal/src/fri_protocol_versions_dal.rs index 8fbcf922d8b..7eac1190bb9 100644 --- a/core/lib/dal/src/fri_protocol_versions_dal.rs +++ b/core/lib/dal/src/fri_protocol_versions_dal.rs @@ -1,7 +1,6 @@ use std::convert::TryFrom; -use zksync_types::protocol_version::FriProtocolVersionId; -use zksync_types::protocol_version::L1VerifierConfig; +use zksync_types::protocol_version::{FriProtocolVersionId, L1VerifierConfig}; use crate::StorageProcessor; diff --git a/core/lib/dal/src/fri_scheduler_dependency_tracker_dal.rs b/core/lib/dal/src/fri_scheduler_dependency_tracker_dal.rs index 3844f5777ce..a9639dfe951 100644 --- a/core/lib/dal/src/fri_scheduler_dependency_tracker_dal.rs +++ b/core/lib/dal/src/fri_scheduler_dependency_tracker_dal.rs 
@@ -1,6 +1,7 @@ -use crate::StorageProcessor; use zksync_types::L1BatchNumber; +use crate::StorageProcessor; + #[derive(Debug)] pub struct FriSchedulerDependencyTrackerDal<'a, 'c> { pub storage: &'a mut StorageProcessor<'c>, diff --git a/core/lib/dal/src/fri_witness_generator_dal.rs b/core/lib/dal/src/fri_witness_generator_dal.rs index c05dd3b3d1a..c11f20adec5 100644 --- a/core/lib/dal/src/fri_witness_generator_dal.rs +++ b/core/lib/dal/src/fri_witness_generator_dal.rs @@ -1,14 +1,12 @@ -use sqlx::Row; - -use std::convert::TryFrom; -use std::{collections::HashMap, time::Duration}; +use std::{collections::HashMap, convert::TryFrom, time::Duration}; -use zksync_types::protocol_version::FriProtocolVersionId; +use sqlx::Row; use zksync_types::{ proofs::{ AggregationRound, JobCountStatistics, LeafAggregationJobMetadata, NodeAggregationJobMetadata, StuckJobs, }, + protocol_version::FriProtocolVersionId, L1BatchNumber, }; diff --git a/core/lib/dal/src/gpu_prover_queue_dal.rs b/core/lib/dal/src/gpu_prover_queue_dal.rs index cc769ff3008..b4c348ab3e7 100644 --- a/core/lib/dal/src/gpu_prover_queue_dal.rs +++ b/core/lib/dal/src/gpu_prover_queue_dal.rs @@ -1,10 +1,9 @@ -use std::time::Duration; +use std::{collections::HashMap, time::Duration}; -use crate::time_utils::pg_interval_from_duration; -use crate::StorageProcessor; -use std::collections::HashMap; use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; +use crate::{time_utils::pg_interval_from_duration, StorageProcessor}; + #[derive(Debug)] pub struct GpuProverQueueDal<'a, 'c> { pub(crate) storage: &'a mut StorageProcessor<'c>, diff --git a/core/lib/dal/src/healthcheck.rs b/core/lib/dal/src/healthcheck.rs index 902a235ce54..ec56f8ea931 100644 --- a/core/lib/dal/src/healthcheck.rs +++ b/core/lib/dal/src/healthcheck.rs @@ -1,6 +1,5 @@ use serde::Serialize; use sqlx::PgPool; - use zksync_health_check::{async_trait, CheckHealth, Health, HealthStatus}; use crate::ConnectionPool; diff --git 
a/core/lib/dal/src/instrument.rs b/core/lib/dal/src/instrument.rs index cd761fb3500..5d99b0729de 100644 --- a/core/lib/dal/src/instrument.rs +++ b/core/lib/dal/src/instrument.rs @@ -1,5 +1,7 @@ //! DAL query instrumentation. +use std::{fmt, future::Future, panic::Location}; + use sqlx::{ postgres::{PgConnection, PgQueryResult, PgRow}, query::{Map, Query, QueryAs}, @@ -7,8 +9,6 @@ use sqlx::{ }; use tokio::time::{Duration, Instant}; -use std::{fmt, future::Future, panic::Location}; - use crate::metrics::REQUEST_METRICS; type ThreadSafeDebug<'a> = dyn fmt::Debug + Send + Sync + 'a; diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 9dfc9458202..68d19fe84b9 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -1,45 +1,27 @@ #![allow(clippy::derive_partial_eq_without_eq, clippy::format_push_string)] -// Built-in deps -pub use sqlx::Error as SqlxError; -use sqlx::{postgres::Postgres, Connection, PgConnection, Transaction}; -// External imports -use sqlx::pool::PoolConnection; -pub use sqlx::types::BigDecimal; - -// Local imports -use crate::accounts_dal::AccountsDal; -use crate::basic_witness_input_producer_dal::BasicWitnessInputProducerDal; -use crate::blocks_dal::BlocksDal; -use crate::blocks_web3_dal::BlocksWeb3Dal; -use crate::connection::holder::ConnectionHolder; +use sqlx::{pool::PoolConnection, postgres::Postgres, Connection, PgConnection, Transaction}; +pub use sqlx::{types::BigDecimal, Error as SqlxError}; + pub use crate::connection::ConnectionPool; -use crate::contract_verification_dal::ContractVerificationDal; -use crate::eth_sender_dal::EthSenderDal; -use crate::events_dal::EventsDal; -use crate::events_web3_dal::EventsWeb3Dal; -use crate::fri_gpu_prover_queue_dal::FriGpuProverQueueDal; -use crate::fri_proof_compressor_dal::FriProofCompressorDal; -use crate::fri_protocol_versions_dal::FriProtocolVersionsDal; -use crate::fri_prover_dal::FriProverDal; -use 
crate::fri_scheduler_dependency_tracker_dal::FriSchedulerDependencyTrackerDal; -use crate::fri_witness_generator_dal::FriWitnessGeneratorDal; -use crate::gpu_prover_queue_dal::GpuProverQueueDal; -use crate::proof_generation_dal::ProofGenerationDal; -use crate::protocol_versions_dal::ProtocolVersionsDal; -use crate::protocol_versions_web3_dal::ProtocolVersionsWeb3Dal; -use crate::prover_dal::ProverDal; -use crate::storage_dal::StorageDal; -use crate::storage_logs_dal::StorageLogsDal; -use crate::storage_logs_dedup_dal::StorageLogsDedupDal; -use crate::storage_web3_dal::StorageWeb3Dal; -use crate::sync_dal::SyncDal; -use crate::system_dal::SystemDal; -use crate::tokens_dal::TokensDal; -use crate::tokens_web3_dal::TokensWeb3Dal; -use crate::transactions_dal::TransactionsDal; -use crate::transactions_web3_dal::TransactionsWeb3Dal; -use crate::witness_generator_dal::WitnessGeneratorDal; +use crate::{ + accounts_dal::AccountsDal, basic_witness_input_producer_dal::BasicWitnessInputProducerDal, + blocks_dal::BlocksDal, blocks_web3_dal::BlocksWeb3Dal, connection::holder::ConnectionHolder, + contract_verification_dal::ContractVerificationDal, eth_sender_dal::EthSenderDal, + events_dal::EventsDal, events_web3_dal::EventsWeb3Dal, + fri_gpu_prover_queue_dal::FriGpuProverQueueDal, + fri_proof_compressor_dal::FriProofCompressorDal, + fri_protocol_versions_dal::FriProtocolVersionsDal, fri_prover_dal::FriProverDal, + fri_scheduler_dependency_tracker_dal::FriSchedulerDependencyTrackerDal, + fri_witness_generator_dal::FriWitnessGeneratorDal, gpu_prover_queue_dal::GpuProverQueueDal, + proof_generation_dal::ProofGenerationDal, protocol_versions_dal::ProtocolVersionsDal, + protocol_versions_web3_dal::ProtocolVersionsWeb3Dal, prover_dal::ProverDal, + storage_dal::StorageDal, storage_logs_dal::StorageLogsDal, + storage_logs_dedup_dal::StorageLogsDedupDal, storage_web3_dal::StorageWeb3Dal, + sync_dal::SyncDal, system_dal::SystemDal, tokens_dal::TokensDal, + tokens_web3_dal::TokensWeb3Dal, 
transactions_dal::TransactionsDal, + transactions_web3_dal::TransactionsWeb3Dal, witness_generator_dal::WitnessGeneratorDal, +}; #[macro_use] mod macro_utils; diff --git a/core/lib/dal/src/metrics.rs b/core/lib/dal/src/metrics.rs index 58e733acc90..4840d073f57 100644 --- a/core/lib/dal/src/metrics.rs +++ b/core/lib/dal/src/metrics.rs @@ -1,12 +1,12 @@ //! Metrics for the data access layer. +use std::{thread, time::Duration}; + use vise::{ Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram, LabeledFamily, LatencyObserver, Metrics, }; -use std::{thread, time::Duration}; - /// Request-related DB metrics. #[derive(Debug, Metrics)] #[metrics(prefix = "sql")] diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 390bd3b2fd8..5d3eeba2a68 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -7,7 +7,6 @@ use sqlx::{ types::chrono::{DateTime, NaiveDateTime, Utc}, }; use thiserror::Error; - use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ api, diff --git a/core/lib/dal/src/models/storage_eth_tx.rs b/core/lib/dal/src/models/storage_eth_tx.rs index ed5a732ff79..9026be8326d 100644 --- a/core/lib/dal/src/models/storage_eth_tx.rs +++ b/core/lib/dal/src/models/storage_eth_tx.rs @@ -1,8 +1,11 @@ -use sqlx::types::chrono::NaiveDateTime; use std::str::FromStr; -use zksync_types::aggregated_operations::AggregatedActionType; -use zksync_types::eth_sender::{EthTx, TxHistory, TxHistoryToSend}; -use zksync_types::{Address, L1BatchNumber, Nonce, H256}; + +use sqlx::types::chrono::NaiveDateTime; +use zksync_types::{ + aggregated_operations::AggregatedActionType, + eth_sender::{EthTx, TxHistory, TxHistoryToSend}, + Address, L1BatchNumber, Nonce, H256, +}; #[derive(Debug, Clone)] pub struct StorageEthTx { diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs index 93010f1b814..6eb6e94b003 
100644 --- a/core/lib/dal/src/models/storage_protocol_version.rs +++ b/core/lib/dal/src/models/storage_protocol_version.rs @@ -1,4 +1,6 @@ use std::convert::TryInto; + +use sqlx::types::chrono::NaiveDateTime; use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ api, @@ -6,8 +8,6 @@ use zksync_types::{ Address, H256, }; -use sqlx::types::chrono::NaiveDateTime; - #[derive(sqlx::FromRow)] pub struct StorageProtocolVersion { pub id: i32, diff --git a/core/lib/dal/src/models/storage_prover_job_info.rs b/core/lib/dal/src/models/storage_prover_job_info.rs index facec83e0c2..3242953b39d 100644 --- a/core/lib/dal/src/models/storage_prover_job_info.rs +++ b/core/lib/dal/src/models/storage_prover_job_info.rs @@ -1,14 +1,11 @@ -use core::panic; -use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc}; -use std::convert::TryFrom; -use std::str::FromStr; +use std::{convert::TryFrom, panic, str::FromStr}; -use zksync_types::proofs::{ - JobPosition, ProverJobStatus, ProverJobStatusFailed, ProverJobStatusInProgress, - ProverJobStatusSuccessful, -}; +use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc}; use zksync_types::{ - proofs::{AggregationRound, ProverJobInfo}, + proofs::{ + AggregationRound, JobPosition, ProverJobInfo, ProverJobStatus, ProverJobStatusFailed, + ProverJobStatusInProgress, ProverJobStatusSuccessful, + }, L1BatchNumber, }; diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 3415cb9b264..dc15250671b 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -2,8 +2,7 @@ use anyhow::Context as _; use zksync_consensus_roles::validator; use zksync_contracts::BaseSystemContractsHashes; use zksync_protobuf::{read_required, ProtoFmt}; -use zksync_types::api::en; -use zksync_types::{Address, L1BatchNumber, MiniblockNumber, Transaction, H160, H256}; +use zksync_types::{api::en, Address, L1BatchNumber, MiniblockNumber, Transaction, H160, 
H256}; #[derive(Debug, Clone, sqlx::FromRow)] pub struct StorageSyncBlock { @@ -133,10 +132,11 @@ impl ProtoFmt for ConsensusBlockFields { #[cfg(test)] mod tests { - use super::ConsensusBlockFields; use rand::Rng; use zksync_consensus_roles::validator; + use super::ConsensusBlockFields; + #[tokio::test] async fn encode_decode() { let rng = &mut rand::thread_rng(); diff --git a/core/lib/dal/src/models/storage_token.rs b/core/lib/dal/src/models/storage_token.rs index 1cc42405fe2..3acd7e03bc9 100644 --- a/core/lib/dal/src/models/storage_token.rs +++ b/core/lib/dal/src/models/storage_token.rs @@ -2,7 +2,6 @@ use sqlx::types::{ chrono::{DateTime, NaiveDateTime, Utc}, BigDecimal, }; - use zksync_types::tokens::TokenPrice; use zksync_utils::big_decimal_to_ratio; diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 40fd5aa692c..8e03590dcc5 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -1,29 +1,30 @@ use std::{convert::TryInto, str::FromStr}; -use crate::BigDecimal; use bigdecimal::Zero; - use serde::{Deserialize, Serialize}; -use sqlx::postgres::PgRow; -use sqlx::types::chrono::{DateTime, NaiveDateTime, Utc}; -use sqlx::{Error, FromRow, Row}; - -use zksync_types::l2::TransactionType; -use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; -use zksync_types::transaction_request::PaymasterParams; -use zksync_types::vm_trace::Call; -use zksync_types::web3::types::U64; -use zksync_types::{api, Bytes, ExecuteTransactionCommon}; +use sqlx::{ + postgres::PgRow, + types::chrono::{DateTime, NaiveDateTime, Utc}, + Error, FromRow, Row, +}; use zksync_types::{ + api, api::{TransactionDetails, TransactionStatus}, fee::Fee, l1::{OpProcessingType, PriorityQueueType}, - Address, Execute, L1TxCommonData, L2ChainId, L2TxCommonData, Nonce, PackedEthSignature, - PriorityOpId, Transaction, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, EIP_712_TX_TYPE, H160, H256, 
- PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, + l2::TransactionType, + protocol_version::ProtocolUpgradeTxCommonData, + transaction_request::PaymasterParams, + vm_trace::Call, + web3::types::U64, + Address, Bytes, Execute, ExecuteTransactionCommon, L1TxCommonData, L2ChainId, L2TxCommonData, + Nonce, PackedEthSignature, PriorityOpId, Transaction, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, + EIP_712_TX_TYPE, H160, H256, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::bigdecimal_to_u256; +use crate::BigDecimal; + #[derive(Debug, Clone, sqlx::FromRow)] pub struct StorageTransaction { pub priority_op_id: Option, diff --git a/core/lib/dal/src/models/storage_verification_request.rs b/core/lib/dal/src/models/storage_verification_request.rs index 47e9abd11db..e6c68ca16fd 100644 --- a/core/lib/dal/src/models/storage_verification_request.rs +++ b/core/lib/dal/src/models/storage_verification_request.rs @@ -1,8 +1,10 @@ -use zksync_types::contract_verification_api::{ - CompilerType, CompilerVersions, SourceCodeData, VerificationIncomingRequest, - VerificationRequest, +use zksync_types::{ + contract_verification_api::{ + CompilerType, CompilerVersions, SourceCodeData, VerificationIncomingRequest, + VerificationRequest, + }, + Address, }; -use zksync_types::Address; #[derive(Debug, Clone, sqlx::FromRow)] pub struct StorageVerificationRequest { diff --git a/core/lib/dal/src/models/storage_witness_job_info.rs b/core/lib/dal/src/models/storage_witness_job_info.rs index 1aa41032cfa..486b9f89681 100644 --- a/core/lib/dal/src/models/storage_witness_job_info.rs +++ b/core/lib/dal/src/models/storage_witness_job_info.rs @@ -1,11 +1,13 @@ +use std::{convert::TryFrom, str::FromStr}; + use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc}; -use std::convert::TryFrom; -use std::str::FromStr; -use zksync_types::proofs::{ - AggregationRound, JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed, - 
WitnessJobStatusSuccessful, +use zksync_types::{ + proofs::{ + AggregationRound, JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed, + WitnessJobStatusSuccessful, + }, + L1BatchNumber, }; -use zksync_types::L1BatchNumber; #[derive(sqlx::FromRow)] pub struct StorageWitnessJobInfo { diff --git a/core/lib/dal/src/proof_generation_dal.rs b/core/lib/dal/src/proof_generation_dal.rs index 22db4463469..684a3b08acb 100644 --- a/core/lib/dal/src/proof_generation_dal.rs +++ b/core/lib/dal/src/proof_generation_dal.rs @@ -1,10 +1,9 @@ use std::time::Duration; +use strum::{Display, EnumString}; use zksync_types::L1BatchNumber; -use crate::time_utils::pg_interval_from_duration; -use crate::{SqlxError, StorageProcessor}; -use strum::{Display, EnumString}; +use crate::{time_utils::pg_interval_from_duration, SqlxError, StorageProcessor}; #[derive(Debug)] pub struct ProofGenerationDal<'a, 'c> { diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index dde7574d390..6f62f3fb01b 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -1,14 +1,15 @@ use std::convert::{TryFrom, TryInto}; + use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_types::{ protocol_version::{L1VerifierConfig, ProtocolUpgradeTx, ProtocolVersion, VerifierParams}, Address, ProtocolVersionId, H256, }; -use crate::models::storage_protocol_version::{ - protocol_version_from_storage, StorageProtocolVersion, +use crate::{ + models::storage_protocol_version::{protocol_version_from_storage, StorageProtocolVersion}, + StorageProcessor, }; -use crate::StorageProcessor; #[derive(Debug)] pub struct ProtocolVersionsDal<'a, 'c> { diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs index dc43dadbd22..2819d94f8d8 100644 --- a/core/lib/dal/src/protocol_versions_web3_dal.rs +++ b/core/lib/dal/src/protocol_versions_web3_dal.rs @@ 
-1,7 +1,6 @@ use zksync_types::api::ProtocolVersion; -use crate::models::storage_protocol_version::StorageProtocolVersion; -use crate::StorageProcessor; +use crate::{models::storage_protocol_version::StorageProtocolVersion, StorageProcessor}; #[derive(Debug)] pub struct ProtocolVersionsWeb3Dal<'a, 'c> { diff --git a/core/lib/dal/src/prover_dal.rs b/core/lib/dal/src/prover_dal.rs index d84d0628372..ea6eba5eda0 100644 --- a/core/lib/dal/src/prover_dal.rs +++ b/core/lib/dal/src/prover_dal.rs @@ -1,5 +1,3 @@ -use sqlx::Error; - use std::{ collections::HashMap, convert::{TryFrom, TryInto}, @@ -7,6 +5,7 @@ use std::{ time::Duration, }; +use sqlx::Error; use zksync_types::{ aggregated_operations::L1BatchProofForL1, proofs::{ diff --git a/core/lib/dal/src/storage_dal.rs b/core/lib/dal/src/storage_dal.rs index 8ec6d916493..4512d028488 100644 --- a/core/lib/dal/src/storage_dal.rs +++ b/core/lib/dal/src/storage_dal.rs @@ -1,7 +1,6 @@ -use itertools::Itertools; - use std::collections::{HashMap, HashSet}; +use itertools::Itertools; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_types::{MiniblockNumber, StorageKey, StorageLog, StorageValue, H256, U256}; use zksync_utils::{bytes_to_be_words, bytes_to_chunks}; @@ -210,9 +209,10 @@ impl StorageDal<'_, '_> { #[cfg(test)] mod tests { + use zksync_types::{AccountTreeId, Address}; + use super::*; use crate::ConnectionPool; - use zksync_types::{AccountTreeId, Address}; #[tokio::test] async fn applying_storage_logs() { diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index c368e5adc8d..dc23d29af5c 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -1,14 +1,13 @@ -use sqlx::types::chrono::Utc; -use sqlx::Row; - use std::{collections::HashMap, time::Instant}; -use crate::{instrument::InstrumentExt, StorageProcessor}; +use sqlx::{types::chrono::Utc, Row}; use zksync_types::{ get_code_key, AccountTreeId, Address, L1BatchNumber, 
MiniblockNumber, StorageKey, StorageLog, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H256, }; +use crate::{instrument::InstrumentExt, StorageProcessor}; + #[derive(Debug)] pub struct StorageLogsDal<'a, 'c> { pub(crate) storage: &'a mut StorageProcessor<'c>, @@ -529,14 +528,15 @@ impl StorageLogsDal<'_, '_> { #[cfg(test)] mod tests { - use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool}; use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ block::{BlockGasCount, L1BatchHeader}, ProtocolVersion, ProtocolVersionId, }; + use super::*; + use crate::{tests::create_miniblock_header, ConnectionPool}; + async fn insert_miniblock(conn: &mut StorageProcessor<'_>, number: u32, logs: Vec) { let mut header = L1BatchHeader::new( L1BatchNumber(number), diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index 8a70ceb50fe..25e0a8f6eef 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -1,9 +1,11 @@ -use crate::StorageProcessor; -use sqlx::types::chrono::Utc; use std::collections::HashSet; + +use sqlx::types::chrono::Utc; use zksync_types::{AccountTreeId, Address, L1BatchNumber, LogQuery, StorageKey, H256}; use zksync_utils::u256_to_h256; +use crate::StorageProcessor; + #[derive(Debug)] pub struct StorageLogsDedupDal<'a, 'c> { pub(crate) storage: &'a mut StorageProcessor<'c>, diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index c383ea7f944..94fb6e9ebf6 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -1,5 +1,4 @@ -use std::fs; -use std::time::Duration; +use std::{fs, time::Duration}; use zksync_contracts::BaseSystemContractsHashes; use zksync_types::{ @@ -14,14 +13,15 @@ use zksync_types::{ PriorityOpId, ProtocolVersion, ProtocolVersionId, H160, H256, MAX_GAS_PER_PUBDATA_BYTE, U256, }; -use crate::blocks_dal::BlocksDal; -use crate::connection::ConnectionPool; -use 
crate::protocol_versions_dal::ProtocolVersionsDal; -use crate::prover_dal::{GetProverJobsParams, ProverDal}; -use crate::transactions_dal::L2TxSubmissionResult; -use crate::transactions_dal::TransactionsDal; -use crate::transactions_web3_dal::TransactionsWeb3Dal; -use crate::witness_generator_dal::WitnessGeneratorDal; +use crate::{ + blocks_dal::BlocksDal, + connection::ConnectionPool, + protocol_versions_dal::ProtocolVersionsDal, + prover_dal::{GetProverJobsParams, ProverDal}, + transactions_dal::{L2TxSubmissionResult, TransactionsDal}, + transactions_web3_dal::TransactionsWeb3Dal, + witness_generator_dal::WitnessGeneratorDal, +}; const DEFAULT_GAS_PER_PUBDATA: u32 = 100; diff --git a/core/lib/dal/src/time_utils.rs b/core/lib/dal/src/time_utils.rs index 45ff661a319..0ede5e6fc57 100644 --- a/core/lib/dal/src/time_utils.rs +++ b/core/lib/dal/src/time_utils.rs @@ -1,7 +1,7 @@ -use sqlx::postgres::types::PgInterval; -use sqlx::types::chrono::NaiveTime; use std::time::Duration; +use sqlx::{postgres::types::PgInterval, types::chrono::NaiveTime}; + pub fn duration_to_naive_time(duration: Duration) -> NaiveTime { let total_seconds = duration.as_secs() as u32; NaiveTime::from_hms_opt( diff --git a/core/lib/dal/src/tokens_dal.rs b/core/lib/dal/src/tokens_dal.rs index f7b64aed69e..5c0f306cc05 100644 --- a/core/lib/dal/src/tokens_dal.rs +++ b/core/lib/dal/src/tokens_dal.rs @@ -1,4 +1,3 @@ -use crate::StorageProcessor; use num::{rational::Ratio, BigUint}; use sqlx::types::chrono::Utc; use zksync_types::{ @@ -8,6 +7,8 @@ use zksync_types::{ }; use zksync_utils::ratio_to_big_decimal; +use crate::StorageProcessor; + // Precision of the USD price per token pub(crate) const STORED_USD_PRICE_PRECISION: usize = 6; diff --git a/core/lib/dal/src/tokens_web3_dal.rs b/core/lib/dal/src/tokens_web3_dal.rs index aa3674b6c3d..753f57c85c6 100644 --- a/core/lib/dal/src/tokens_web3_dal.rs +++ b/core/lib/dal/src/tokens_web3_dal.rs @@ -1,11 +1,10 @@ -use 
crate::models::storage_token::StorageTokenPrice; -use crate::SqlxError; -use crate::StorageProcessor; use zksync_types::{ tokens::{TokenInfo, TokenMetadata, TokenPrice}, Address, }; +use crate::{models::storage_token::StorageTokenPrice, SqlxError, StorageProcessor}; + #[derive(Debug)] pub struct TokensWeb3Dal<'a, 'c> { pub(crate) storage: &'a mut StorageProcessor<'c>, diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index cbca986b16c..78da3e0fc04 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -1,10 +1,9 @@ +use std::{collections::HashMap, fmt, time::Duration}; + +use anyhow::Context; use bigdecimal::BigDecimal; use itertools::Itertools; use sqlx::{error, types::chrono::NaiveDateTime}; - -use anyhow::Context; -use std::{collections::HashMap, fmt, time::Duration}; - use zksync_types::{ block::MiniblockExecutionData, fee::TransactionExecutionMetrics, diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index 5e2342d05b7..d87ddc9a517 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -1,20 +1,22 @@ use sqlx::types::chrono::NaiveDateTime; - use zksync_types::{ api, Address, L2ChainId, MiniblockNumber, Transaction, ACCOUNT_CODE_STORAGE_ADDRESS, FAILED_CONTRACT_DEPLOYMENT_BYTECODE_HASH, H160, H256, U256, U64, }; use zksync_utils::{bigdecimal_to_u256, h256_to_account_address}; -use crate::models::{ - storage_block::{bind_block_where_sql_params, web3_block_where_sql}, - storage_event::StorageWeb3Log, - storage_transaction::{ - extract_web3_transaction, web3_transaction_select_sql, StorageTransaction, - StorageTransactionDetails, +use crate::{ + instrument::InstrumentExt, + models::{ + storage_block::{bind_block_where_sql_params, web3_block_where_sql}, + storage_event::StorageWeb3Log, + storage_transaction::{ + extract_web3_transaction, web3_transaction_select_sql, StorageTransaction, + 
StorageTransactionDetails, + }, }, + SqlxError, StorageProcessor, }; -use crate::{instrument::InstrumentExt, SqlxError, StorageProcessor}; #[derive(Debug)] pub struct TransactionsWeb3Dal<'a, 'c> { diff --git a/core/lib/dal/src/witness_generator_dal.rs b/core/lib/dal/src/witness_generator_dal.rs index a8079a9dcce..b437c2ad34f 100644 --- a/core/lib/dal/src/witness_generator_dal.rs +++ b/core/lib/dal/src/witness_generator_dal.rs @@ -1,17 +1,16 @@ -use itertools::Itertools; -use sqlx::Row; - use std::{collections::HashMap, ops::Range, time::Duration}; -use zksync_types::proofs::{ - AggregationRound, JobCountStatistics, WitnessGeneratorJobMetadata, WitnessJobInfo, +use itertools::Itertools; +use sqlx::Row; +use zksync_types::{ + proofs::{AggregationRound, JobCountStatistics, WitnessGeneratorJobMetadata, WitnessJobInfo}, + zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::{ZkSyncCircuit, ZkSyncProof}, + bellman::{bn256::Bn256, plonk::better_better_cs::proof::Proof}, + witness::oracle::VmWitnessOracle, + }, + L1BatchNumber, ProtocolVersionId, }; -use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncProof; -use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; -use zksync_types::zkevm_test_harness::bellman::plonk::better_better_cs::proof::Proof; -use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; -use zksync_types::{L1BatchNumber, ProtocolVersionId}; use crate::{ instrument::InstrumentExt, diff --git a/core/lib/env_config/src/alerts.rs b/core/lib/env_config/src/alerts.rs index c72b23bbd9f..63cbde48bdf 100644 --- a/core/lib/env_config/src/alerts.rs +++ b/core/lib/env_config/src/alerts.rs @@ -1,6 +1,7 @@ -use crate::{envy_load, FromEnv}; use zksync_config::configs::AlertsConfig; +use crate::{envy_load, FromEnv}; + impl FromEnv for AlertsConfig { fn from_env() -> anyhow::Result { 
envy_load("sporadic_crypto_errors_substrs", "ALERTS_") diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs index 20ecfe41e21..d256e27ceca 100644 --- a/core/lib/env_config/src/api.rs +++ b/core/lib/env_config/src/api.rs @@ -1,6 +1,4 @@ use anyhow::Context as _; - -use crate::{envy_load, FromEnv}; use zksync_config::configs::{ api::{ ContractVerificationApiConfig, HealthCheckConfig, MerkleTreeApiConfig, Web3JsonRpcConfig, @@ -8,6 +6,8 @@ use zksync_config::configs::{ ApiConfig, PrometheusConfig, }; +use crate::{envy_load, FromEnv}; + impl FromEnv for ApiConfig { fn from_env() -> anyhow::Result { Ok(Self { diff --git a/core/lib/env_config/src/chain.rs b/core/lib/env_config/src/chain.rs index e64ba3c36b8..7c2aa7e5941 100644 --- a/core/lib/env_config/src/chain.rs +++ b/core/lib/env_config/src/chain.rs @@ -1,10 +1,11 @@ -use crate::{envy_load, FromEnv}; use anyhow::Context as _; use zksync_config::configs::chain::{ ChainConfig, CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig, StateKeeperConfig, }; +use crate::{envy_load, FromEnv}; + impl FromEnv for ChainConfig { fn from_env() -> anyhow::Result { Ok(Self { diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 8c58483db06..537b68414c6 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -10,9 +10,10 @@ impl FromEnv for ContractsConfig { #[cfg(test)] mod tests { + use zksync_config::configs::contracts::ProverAtGenesis; + use super::*; use crate::test_utils::{addr, hash, EnvMutex}; - use zksync_config::configs::contracts::ProverAtGenesis; static MUTEX: EnvMutex = EnvMutex::new(); diff --git a/core/lib/env_config/src/database.rs b/core/lib/env_config/src/database.rs index 939725d6773..e350c487662 100644 --- a/core/lib/env_config/src/database.rs +++ b/core/lib/env_config/src/database.rs @@ -1,5 +1,6 @@ -use anyhow::Context as _; use std::env; + +use anyhow::Context as _; use 
zksync_config::{DBConfig, PostgresConfig}; use crate::{envy_load, FromEnv}; diff --git a/core/lib/env_config/src/fri_proof_compressor.rs b/core/lib/env_config/src/fri_proof_compressor.rs index 2594433025e..777bdb03c58 100644 --- a/core/lib/env_config/src/fri_proof_compressor.rs +++ b/core/lib/env_config/src/fri_proof_compressor.rs @@ -10,9 +10,8 @@ impl FromEnv for FriProofCompressorConfig { #[cfg(test)] mod tests { - use crate::test_utils::EnvMutex; - use super::*; + use crate::test_utils::EnvMutex; static MUTEX: EnvMutex = EnvMutex::new(); diff --git a/core/lib/env_config/src/test_utils.rs b/core/lib/env_config/src/test_utils.rs index 013d12493ae..2909071df39 100644 --- a/core/lib/env_config/src/test_utils.rs +++ b/core/lib/env_config/src/test_utils.rs @@ -1,4 +1,3 @@ -// Built-in uses. use std::{ collections::HashMap, env, @@ -6,7 +5,7 @@ use std::{ mem, sync::{Mutex, MutexGuard, PoisonError}, }; -// Workspace uses + use zksync_basic_types::{Address, H256}; /// Mutex that allows to modify certain env variables and roll them back to initial values when diff --git a/core/lib/env_config/src/utils.rs b/core/lib/env_config/src/utils.rs index 655d3b2e6d5..211e73ae2b1 100644 --- a/core/lib/env_config/src/utils.rs +++ b/core/lib/env_config/src/utils.rs @@ -1,6 +1,7 @@ -use crate::{envy_load, FromEnv}; use zksync_config::configs::PrometheusConfig; +use crate::{envy_load, FromEnv}; + impl FromEnv for PrometheusConfig { fn from_env() -> anyhow::Result { envy_load("prometheus", "API_PROMETHEUS_") diff --git a/core/lib/eth_client/src/clients/http/mod.rs b/core/lib/eth_client/src/clients/http/mod.rs index 5d94a383171..e3295ee4b76 100644 --- a/core/lib/eth_client/src/clients/http/mod.rs +++ b/core/lib/eth_client/src/clients/http/mod.rs @@ -1,17 +1,17 @@ +use std::time::Duration; + use vise::{ Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram, LabeledFamily, Metrics, }; -use std::time::Duration; - -mod query; -mod signing; - pub use self::{ 
query::QueryClient, signing::{PKSigningClient, SigningClient}, }; +mod query; +mod signing; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "method", rename_all = "snake_case")] enum Method { diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 0094c76f88a..3e88944ca0e 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -1,12 +1,6 @@ -use async_trait::async_trait; - use std::sync::Arc; -use crate::{ - clients::http::{Method, COUNTERS, LATENCIES}, - types::{Error, ExecutedTxStatus, FailureInfo}, - EthInterface, -}; +use async_trait::async_trait; use zksync_types::web3::{ self, contract::{ @@ -22,6 +16,12 @@ use zksync_types::web3::{ Web3, }; +use crate::{ + clients::http::{Method, COUNTERS, LATENCIES}, + types::{Error, ExecutedTxStatus, FailureInfo}, + EthInterface, +}; + /// An "anonymous" Ethereum client that can invoke read-only methods that aren't /// tied to a particular account. 
#[derive(Debug, Clone)] diff --git a/core/lib/eth_client/src/clients/http/signing.rs b/core/lib/eth_client/src/clients/http/signing.rs index a0a6647db5f..8b56dc1cfbd 100644 --- a/core/lib/eth_client/src/clients/http/signing.rs +++ b/core/lib/eth_client/src/clients/http/signing.rs @@ -1,24 +1,25 @@ -use async_trait::async_trait; - use std::{fmt, sync::Arc}; +use async_trait::async_trait; use zksync_config::{ContractsConfig, ETHClientConfig, ETHSenderConfig}; use zksync_contracts::zksync_contract; use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner, PrivateKeySigner}; -use zksync_types::web3::{ - self, - contract::{ - tokens::{Detokenize, Tokenize}, - Options, - }, - ethabi, - transports::Http, - types::{ - Address, Block, BlockId, BlockNumber, Filter, Log, Transaction, TransactionReceipt, H160, - H256, U256, U64, +use zksync_types::{ + web3::{ + self, + contract::{ + tokens::{Detokenize, Tokenize}, + Options, + }, + ethabi, + transports::Http, + types::{ + Address, Block, BlockId, BlockNumber, Filter, Log, Transaction, TransactionReceipt, + H160, H256, U256, U64, + }, }, + L1ChainId, PackedEthSignature, EIP_1559_TX_TYPE, }; -use zksync_types::{L1ChainId, PackedEthSignature, EIP_1559_TX_TYPE}; use super::{query::QueryClient, Method, LATENCIES}; use crate::{ diff --git a/core/lib/eth_client/src/clients/mock.rs b/core/lib/eth_client/src/clients/mock.rs index 576fbac21a7..a8eceac75af 100644 --- a/core/lib/eth_client/src/clients/mock.rs +++ b/core/lib/eth_client/src/clients/mock.rs @@ -1,9 +1,13 @@ -use std::sync::atomic::{AtomicU64, Ordering}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::{ + atomic::{AtomicU64, Ordering}, + RwLock, + }, +}; use async_trait::async_trait; use jsonrpc_core::types::error::Error as RpcError; -use std::collections::{BTreeMap, HashMap}; -use std::sync::RwLock; use zksync_types::{ web3::{ contract::{ @@ -92,8 +96,7 @@ impl MockEthereum { /// A fake `sha256` hasher, which calculates an `std::hash` instead. 
/// This is done for simplicity and it's also much faster. pub fn fake_sha256(data: &[u8]) -> H256 { - use std::collections::hash_map::DefaultHasher; - use std::hash::Hasher; + use std::{collections::hash_map::DefaultHasher, hash::Hasher}; let mut hasher = DefaultHasher::new(); hasher.write(data); diff --git a/core/lib/eth_client/src/lib.rs b/core/lib/eth_client/src/lib.rs index f61814893bb..5bb40f60a08 100644 --- a/core/lib/eth_client/src/lib.rs +++ b/core/lib/eth_client/src/lib.rs @@ -1,9 +1,5 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] -pub mod clients; -pub mod types; - -use crate::types::{Error, ExecutedTxStatus, FailureInfo, SignedCallResult}; use async_trait::async_trait; use zksync_types::{ web3::{ @@ -20,6 +16,11 @@ use zksync_types::{ L1ChainId, }; +use crate::types::{Error, ExecutedTxStatus, FailureInfo, SignedCallResult}; + +pub mod clients; +pub mod types; + /// Common Web3 interface, as seen by the core applications. /// Encapsulates the raw Web3 interaction, providing a high-level interface. 
/// diff --git a/core/lib/eth_signer/src/json_rpc_signer.rs b/core/lib/eth_signer/src/json_rpc_signer.rs index b6619f5e831..66a7b33e989 100644 --- a/core/lib/eth_signer/src/json_rpc_signer.rs +++ b/core/lib/eth_signer/src/json_rpc_signer.rs @@ -1,13 +1,15 @@ -use crate::error::{RpcSignerError, SignerError}; -use crate::json_rpc_signer::messages::JsonRpcRequest; -use crate::raw_ethereum_tx::TransactionParameters; -use crate::EthereumSigner; - use jsonrpc_core::types::response::Output; -use zksync_types::tx::primitives::PackedEthSignature; -use zksync_types::{Address, EIP712TypedStructure, Eip712Domain, H256}; - use serde_json::Value; +use zksync_types::{ + tx::primitives::PackedEthSignature, Address, EIP712TypedStructure, Eip712Domain, H256, +}; + +use crate::{ + error::{RpcSignerError, SignerError}, + json_rpc_signer::messages::JsonRpcRequest, + raw_ethereum_tx::TransactionParameters, + EthereumSigner, +}; pub fn is_signature_from_address( signature: &PackedEthSignature, @@ -325,13 +327,14 @@ impl JsonRpcSigner { } mod messages { - use crate::raw_ethereum_tx::TransactionParameters; use hex::encode; use serde::{Deserialize, Serialize}; use zksync_types::{ eip712_signature::utils::get_eip712_json, Address, EIP712TypedStructure, Eip712Domain, }; + use crate::raw_ethereum_tx::TransactionParameters; + #[derive(Debug, Serialize, Deserialize)] pub struct JsonRpcRequest { pub id: String, @@ -429,7 +432,6 @@ mod messages { #[cfg(test)] mod tests { - use crate::raw_ethereum_tx::TransactionParameters; use actix_web::{ post, web::{self, Data}, @@ -439,11 +441,10 @@ mod tests { use jsonrpc_core::{Failure, Id, Output, Success, Version}; use parity_crypto::publickey::{Generator, KeyPair, Random}; use serde_json::json; - use zksync_types::{tx::primitives::PackedEthSignature, Address}; use super::{is_signature_from_address, messages::JsonRpcRequest}; - use crate::{EthereumSigner, JsonRpcSigner}; + use crate::{raw_ethereum_tx::TransactionParameters, EthereumSigner, JsonRpcSigner}; 
#[post("/")] async fn index(req: web::Json, state: web::Data) -> impl Responder { diff --git a/core/lib/eth_signer/src/lib.rs b/core/lib/eth_signer/src/lib.rs index ce4540c151b..164a124dbc9 100644 --- a/core/lib/eth_signer/src/lib.rs +++ b/core/lib/eth_signer/src/lib.rs @@ -1,11 +1,12 @@ use async_trait::async_trait; use error::SignerError; -use zksync_types::tx::primitives::PackedEthSignature; -use zksync_types::{Address, EIP712TypedStructure, Eip712Domain}; - -pub use crate::raw_ethereum_tx::TransactionParameters; pub use json_rpc_signer::JsonRpcSigner; pub use pk_signer::PrivateKeySigner; +use zksync_types::{ + tx::primitives::PackedEthSignature, Address, EIP712TypedStructure, Eip712Domain, +}; + +pub use crate::raw_ethereum_tx::TransactionParameters; pub mod error; pub mod json_rpc_signer; diff --git a/core/lib/eth_signer/src/pk_signer.rs b/core/lib/eth_signer/src/pk_signer.rs index 680d87d62d0..4f9795dca86 100644 --- a/core/lib/eth_signer/src/pk_signer.rs +++ b/core/lib/eth_signer/src/pk_signer.rs @@ -1,11 +1,11 @@ use secp256k1::SecretKey; - -use zksync_types::tx::primitives::PackedEthSignature; -use zksync_types::{Address, EIP712TypedStructure, Eip712Domain, H256}; +use zksync_types::{ + tx::primitives::PackedEthSignature, Address, EIP712TypedStructure, Eip712Domain, H256, +}; use crate::{ raw_ethereum_tx::{Transaction, TransactionParameters}, - {EthereumSigner, SignerError}, + EthereumSigner, SignerError, }; #[derive(Clone)] @@ -86,11 +86,11 @@ impl EthereumSigner for PrivateKeySigner { #[cfg(test)] mod test { - use super::PrivateKeySigner; - use crate::raw_ethereum_tx::TransactionParameters; - use crate::EthereumSigner; use zksync_types::{H160, H256, U256, U64}; + use super::PrivateKeySigner; + use crate::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; + #[tokio::test] async fn test_generating_signed_raw_transaction() { let private_key = H256::from([5; 32]); diff --git a/core/lib/eth_signer/src/raw_ethereum_tx.rs 
b/core/lib/eth_signer/src/raw_ethereum_tx.rs index fcee1349445..124c09965de 100644 --- a/core/lib/eth_signer/src/raw_ethereum_tx.rs +++ b/core/lib/eth_signer/src/raw_ethereum_tx.rs @@ -8,12 +8,16 @@ //! We can refactor this code and adapt it for our needs better, but I prefer to reuse as much code as we can. //! In the case where it will be possible to use only the web3 library without copy-paste, the changes will be small and simple //! Link to @Deniallugo's PR to web3: https://github.com/tomusdrw/rust-web3/pull/630 + use rlp::RlpStream; -use zksync_types::web3::{ - signing::{self, Signature}, - types::{AccessList, SignedTransaction}, +use zksync_types::{ + ethabi::Address, + web3::{ + signing::{self, Signature}, + types::{AccessList, SignedTransaction}, + }, + U256, U64, }; -use zksync_types::{ethabi::Address, U256, U64}; const LEGACY_TX_ID: u64 = 0; const ACCESSLISTS_TX_ID: u64 = 1; diff --git a/core/lib/health_check/src/lib.rs b/core/lib/health_check/src/lib.rs index fac8ec46dbb..12bb292bc85 100644 --- a/core/lib/health_check/src/lib.rs +++ b/core/lib/health_check/src/lib.rs @@ -1,11 +1,10 @@ -use futures::{future, FutureExt}; -use serde::Serialize; -use tokio::sync::watch; - use std::{collections::HashMap, thread}; -/// Public re-export for other crates to be able to implement the interface. +// Public re-export for other crates to be able to implement the interface. pub use async_trait::async_trait; +use futures::{future, FutureExt}; +use serde::Serialize; +use tokio::sync::watch; /// Health status returned as a part of `Health`. 
#[derive(Debug, Clone, Copy, PartialEq, Serialize)] diff --git a/core/lib/mempool/src/mempool_store.rs b/core/lib/mempool/src/mempool_store.rs index a8b02bee0cb..51a8d708a74 100644 --- a/core/lib/mempool/src/mempool_store.rs +++ b/core/lib/mempool/src/mempool_store.rs @@ -1,10 +1,11 @@ use std::collections::{hash_map, BTreeSet, HashMap, HashSet}; -use crate::types::{AccountTransactions, L2TxFilter, MempoolScore}; use zksync_types::{ l1::L1Tx, l2::L2Tx, Address, ExecuteTransactionCommon, Nonce, PriorityOpId, Transaction, }; +use crate::types::{AccountTransactions, L2TxFilter, MempoolScore}; + #[derive(Debug)] pub struct MempoolInfo { pub stashed_accounts: Vec
, diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs index cb149752e2d..cd595509ec5 100644 --- a/core/lib/mempool/src/tests.rs +++ b/core/lib/mempool/src/tests.rs @@ -1,12 +1,18 @@ +use std::{ + collections::{HashMap, HashSet}, + iter::FromIterator, +}; + +use zksync_types::{ + fee::Fee, + helpers::unix_timestamp_ms, + l1::{OpProcessingType, PriorityQueueType}, + l2::L2Tx, + Address, Execute, ExecuteTransactionCommon, L1TxCommonData, Nonce, PriorityOpId, Transaction, + H256, U256, +}; + use crate::{mempool_store::MempoolStore, types::L2TxFilter}; -use std::collections::{HashMap, HashSet}; -use std::iter::FromIterator; -use zksync_types::fee::Fee; -use zksync_types::helpers::unix_timestamp_ms; -use zksync_types::l1::{OpProcessingType, PriorityQueueType}; -use zksync_types::l2::L2Tx; -use zksync_types::{Address, ExecuteTransactionCommon, L1TxCommonData, PriorityOpId, H256, U256}; -use zksync_types::{Execute, Nonce, Transaction}; #[test] fn basic_flow() { diff --git a/core/lib/mempool/src/types.rs b/core/lib/mempool/src/types.rs index 130f8ad0016..9bc58a4e2ce 100644 --- a/core/lib/mempool/src/types.rs +++ b/core/lib/mempool/src/types.rs @@ -1,8 +1,6 @@ -use std::cmp::Ordering; -use std::collections::HashMap; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::{Address, Nonce, Transaction, U256}; +use std::{cmp::Ordering, collections::HashMap}; + +use zksync_types::{fee::Fee, l2::L2Tx, Address, Nonce, Transaction, U256}; /// Pending mempool transactions of account #[derive(Debug)] diff --git a/core/lib/merkle_tree/examples/loadtest/main.rs b/core/lib/merkle_tree/examples/loadtest/main.rs index 527daa87b37..185ae0543f9 100644 --- a/core/lib/merkle_tree/examples/loadtest/main.rs +++ b/core/lib/merkle_tree/examples/loadtest/main.rs @@ -3,16 +3,15 @@ //! Should be compiled with the release profile, otherwise hashing and other ops would be //! prohibitively slow. 
-use clap::Parser; -use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; -use tempfile::TempDir; -use tracing_subscriber::EnvFilter; - use std::{ thread, time::{Duration, Instant}, }; +use clap::Parser; +use rand::{rngs::StdRng, seq::IteratorRandom, SeedableRng}; +use tempfile::TempDir; +use tracing_subscriber::EnvFilter; use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ Database, HashTree, MerkleTree, MerkleTreePruner, PatchSet, RocksDBWrapper, TreeEntry, @@ -21,10 +20,10 @@ use zksync_merkle_tree::{ use zksync_storage::{RocksDB, RocksDBOptions}; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -mod batch; - use crate::batch::WithBatching; +mod batch; + /// CLI for load-testing for the Merkle tree implementation. #[derive(Debug, Parser)] #[command(author, version, about, long_about = None)] diff --git a/core/lib/merkle_tree/examples/recovery.rs b/core/lib/merkle_tree/examples/recovery.rs index 1a2aae236ea..603eb2ec4d7 100644 --- a/core/lib/merkle_tree/examples/recovery.rs +++ b/core/lib/merkle_tree/examples/recovery.rs @@ -1,12 +1,11 @@ //! Tree recovery load test. +use std::time::Instant; + use clap::Parser; use rand::{rngs::StdRng, Rng, SeedableRng}; use tempfile::TempDir; use tracing_subscriber::EnvFilter; - -use std::time::Instant; - use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ recovery::MerkleTreeRecovery, HashTree, Key, PatchSet, PruneDatabase, RocksDBWrapper, diff --git a/core/lib/merkle_tree/src/consistency.rs b/core/lib/merkle_tree/src/consistency.rs index 2cc8996e64e..659befbe048 100644 --- a/core/lib/merkle_tree/src/consistency.rs +++ b/core/lib/merkle_tree/src/consistency.rs @@ -1,9 +1,9 @@ //! Consistency verification for the Merkle tree. 
-use rayon::prelude::*; - use std::sync::atomic::{AtomicU64, Ordering}; +use rayon::prelude::*; + use crate::{ errors::DeserializeError, hasher::{HashTree, HasherWithStats}, @@ -267,17 +267,17 @@ impl AtomicBitSet { #[cfg(test)] mod tests { + use std::num::NonZeroU64; + use assert_matches::assert_matches; use rayon::ThreadPoolBuilder; - - use std::num::NonZeroU64; + use zksync_types::{H256, U256}; use super::*; use crate::{ types::{InternalNode, TreeEntry}, PatchSet, }; - use zksync_types::{H256, U256}; const FIRST_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0000_0000]); const SECOND_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0100_0000]); diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index 0cd9a56a486..f21a5e09133 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -1,6 +1,13 @@ //! Tying the Merkle tree implementation to the problem domain. use rayon::{ThreadPool, ThreadPoolBuilder}; +use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_storage::RocksDB; +use zksync_types::{ + proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}, + writes::{InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord}, + L1BatchNumber, StorageKey, U256, +}; use zksync_utils::h256_to_u256; use crate::{ @@ -11,13 +18,6 @@ use crate::{ }, BlockOutput, HashTree, MerkleTree, NoVersionError, }; -use zksync_crypto::hasher::blake2::Blake2Hasher; -use zksync_storage::RocksDB; -use zksync_types::{ - proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}, - writes::{InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord}, - L1BatchNumber, StorageKey, U256, -}; /// Metadata for the current tree state. 
#[derive(Debug, Clone)] diff --git a/core/lib/merkle_tree/src/errors.rs b/core/lib/merkle_tree/src/errors.rs index a30b0b98f5b..4afe8a2367c 100644 --- a/core/lib/merkle_tree/src/errors.rs +++ b/core/lib/merkle_tree/src/errors.rs @@ -166,9 +166,10 @@ impl error::Error for NoVersionError {} #[cfg(test)] mod tests { + use zksync_types::U256; + use super::*; use crate::{types::Nibbles, Key}; - use zksync_types::U256; const TEST_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0000_0000]); diff --git a/core/lib/merkle_tree/src/hasher/mod.rs b/core/lib/merkle_tree/src/hasher/mod.rs index 9425a5836f0..fa700a68244 100644 --- a/core/lib/merkle_tree/src/hasher/mod.rs +++ b/core/lib/merkle_tree/src/hasher/mod.rs @@ -1,11 +1,9 @@ //! Hashing operations on the Merkle tree. -use once_cell::sync::Lazy; - use std::{fmt, iter}; -mod nodes; -mod proofs; +use once_cell::sync::Lazy; +use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; pub(crate) use self::nodes::{InternalNodeCache, MerklePath}; pub use self::proofs::TreeRangeDigest; @@ -13,7 +11,9 @@ use crate::{ metrics::HashingStats, types::{TreeEntry, ValueHash, TREE_DEPTH}, }; -use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; + +mod nodes; +mod proofs; /// Tree hashing functionality. 
pub trait HashTree: Send + Sync { @@ -222,9 +222,10 @@ impl HasherWithStats<'_> { #[cfg(test)] mod tests { + use zksync_types::{AccountTreeId, Address, StorageKey, H256}; + use super::*; use crate::types::LeafNode; - use zksync_types::{AccountTreeId, Address, StorageKey, H256}; #[test] fn empty_tree_hash_is_as_expected() { diff --git a/core/lib/merkle_tree/src/hasher/nodes.rs b/core/lib/merkle_tree/src/hasher/nodes.rs index d36c58c0ae1..6e1c007bc42 100644 --- a/core/lib/merkle_tree/src/hasher/nodes.rs +++ b/core/lib/merkle_tree/src/hasher/nodes.rs @@ -258,10 +258,11 @@ impl Node { #[cfg(test)] mod tests { - use super::*; use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; use zksync_types::H256; + use super::*; + fn test_internal_node_hashing(child_indexes: &[u8]) { println!("Testing indices: {child_indexes:?}"); diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 85ace50aada..687e957f8ef 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -46,6 +46,23 @@ clippy::doc_markdown // frequent false positive: RocksDB )] +use zksync_crypto::hasher::blake2::Blake2Hasher; + +pub use crate::{ + errors::NoVersionError, + hasher::{HashTree, TreeRangeDigest}, + pruning::{MerkleTreePruner, MerkleTreePrunerHandle}, + storage::{ + Database, MerkleTreeColumnFamily, PatchSet, Patched, PruneDatabase, PrunePatchSet, + RocksDBWrapper, + }, + types::{ + BlockOutput, BlockOutputWithProofs, Key, TreeEntry, TreeEntryWithProof, TreeInstruction, + TreeLogEntry, TreeLogEntryWithProof, ValueHash, + }, +}; +use crate::{hasher::HasherWithStats, storage::Storage, types::Root}; + mod consistency; pub mod domain; mod errors; @@ -69,23 +86,6 @@ pub mod unstable { }; } -pub use crate::{ - errors::NoVersionError, - hasher::{HashTree, TreeRangeDigest}, - pruning::{MerkleTreePruner, MerkleTreePrunerHandle}, - storage::{ - Database, MerkleTreeColumnFamily, PatchSet, Patched, PruneDatabase, PrunePatchSet, - RocksDBWrapper, - }, - 
types::{ - BlockOutput, BlockOutputWithProofs, Key, TreeEntry, TreeEntryWithProof, TreeInstruction, - TreeLogEntry, TreeLogEntryWithProof, ValueHash, - }, -}; - -use crate::{hasher::HasherWithStats, storage::Storage, types::Root}; -use zksync_crypto::hasher::blake2::Blake2Hasher; - /// Binary Merkle tree implemented using AR16MT from Diem [Jellyfish Merkle tree] white paper. /// /// A tree is persistent and is backed by a key-value store (the `DB` type param). It is versioned, diff --git a/core/lib/merkle_tree/src/metrics.rs b/core/lib/merkle_tree/src/metrics.rs index 29bd58e599e..ef1e94f9b05 100644 --- a/core/lib/merkle_tree/src/metrics.rs +++ b/core/lib/merkle_tree/src/metrics.rs @@ -6,11 +6,12 @@ use std::{ time::Duration, }; -use crate::types::Nibbles; use vise::{ Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Global, Histogram, Metrics, Unit, }; +use crate::types::Nibbles; + #[derive(Debug, Metrics)] #[metrics(prefix = "merkle_tree")] pub(crate) struct GeneralMetrics { diff --git a/core/lib/merkle_tree/src/recovery.rs b/core/lib/merkle_tree/src/recovery.rs index d1f2618a5cd..fd1790d2b7c 100644 --- a/core/lib/merkle_tree/src/recovery.rs +++ b/core/lib/merkle_tree/src/recovery.rs @@ -37,13 +37,14 @@ use std::time::Instant; +use zksync_crypto::hasher::blake2::Blake2Hasher; + use crate::{ hasher::{HashTree, HasherWithStats}, storage::{PatchSet, PruneDatabase, PrunePatchSet, Storage}, types::{Key, Manifest, Root, TreeEntry, TreeTags, ValueHash}, MerkleTree, }; -use zksync_crypto::hasher::blake2::Blake2Hasher; /// Handle to a Merkle tree during its recovery. #[derive(Debug)] diff --git a/core/lib/merkle_tree/src/storage/mod.rs b/core/lib/merkle_tree/src/storage/mod.rs index ae273d22f32..d2b89da48cd 100644 --- a/core/lib/merkle_tree/src/storage/mod.rs +++ b/core/lib/merkle_tree/src/storage/mod.rs @@ -1,20 +1,11 @@ //! Storage-related logic. 
-mod database; -mod patch; -mod proofs; -mod rocksdb; -mod serialization; -#[cfg(test)] -mod tests; - pub(crate) use self::patch::{LoadAncestorsResult, WorkingPatchSet}; pub use self::{ database::{Database, NodeKeys, Patched, PruneDatabase, PrunePatchSet}, patch::PatchSet, rocksdb::{MerkleTreeColumnFamily, RocksDBWrapper}, }; - use crate::{ hasher::HashTree, metrics::{TreeUpdaterStats, BLOCK_TIMINGS, GENERAL_METRICS}, @@ -24,6 +15,14 @@ use crate::{ }, }; +mod database; +mod patch; +mod proofs; +mod rocksdb; +mod serialization; +#[cfg(test)] +mod tests; + /// Tree operation: either inserting a new version or updating an existing one (the latter is only /// used during tree recovery). #[derive(Debug, Clone, Copy)] diff --git a/core/lib/merkle_tree/src/storage/patch.rs b/core/lib/merkle_tree/src/storage/patch.rs index ff41fb2f6bf..f0b06c83bf2 100644 --- a/core/lib/merkle_tree/src/storage/patch.rs +++ b/core/lib/merkle_tree/src/storage/patch.rs @@ -1,13 +1,13 @@ //! Types related to DB patches: `PatchSet` and `WorkingPatchSet`. -use rayon::prelude::*; - use std::{ collections::{hash_map::Entry, HashMap}, iter, time::Instant, }; +use rayon::prelude::*; + use crate::{ hasher::{HashTree, HasherWithStats, MerklePath}, metrics::HashingStats, diff --git a/core/lib/merkle_tree/src/storage/rocksdb.rs b/core/lib/merkle_tree/src/storage/rocksdb.rs index 6c6a3a18105..7dd4d6083d7 100644 --- a/core/lib/merkle_tree/src/storage/rocksdb.rs +++ b/core/lib/merkle_tree/src/storage/rocksdb.rs @@ -1,9 +1,10 @@ //! RocksDB implementation of [`Database`]. 
-use rayon::prelude::*; - use std::path::Path; +use rayon::prelude::*; +use zksync_storage::{db::NamedColumnFamily, rocksdb::DBPinnableSlice, RocksDB}; + use crate::{ errors::{DeserializeError, ErrorContext}, metrics::ApplyPatchStats, @@ -13,7 +14,6 @@ use crate::{ }, types::{InternalNode, LeafNode, Manifest, Nibbles, Node, NodeKey, Root, StaleNodeKey}, }; -use zksync_storage::{db::NamedColumnFamily, rocksdb::DBPinnableSlice, RocksDB}; /// RocksDB column families used by the tree. #[derive(Debug, Clone, Copy)] @@ -285,10 +285,10 @@ impl PruneDatabase for RocksDBWrapper { #[cfg(test)] mod tests { - use tempfile::TempDir; - use std::collections::{HashMap, HashSet}; + use tempfile::TempDir; + use super::*; use crate::storage::tests::{create_patch, generate_nodes}; diff --git a/core/lib/merkle_tree/src/storage/serialization.rs b/core/lib/merkle_tree/src/storage/serialization.rs index 6a9216fa104..09a06a3630c 100644 --- a/core/lib/merkle_tree/src/storage/serialization.rs +++ b/core/lib/merkle_tree/src/storage/serialization.rs @@ -300,9 +300,10 @@ impl Manifest { #[cfg(test)] mod tests { + use zksync_types::H256; + use super::*; use crate::types::TreeEntry; - use zksync_types::H256; #[test] fn serializing_manifest() { diff --git a/core/lib/merkle_tree/src/storage/tests.rs b/core/lib/merkle_tree/src/storage/tests.rs index e70cb057280..a0c1ae4c949 100644 --- a/core/lib/merkle_tree/src/storage/tests.rs +++ b/core/lib/merkle_tree/src/storage/tests.rs @@ -1,3 +1,5 @@ +use std::collections::{HashMap, HashSet}; + use assert_matches::assert_matches; use rand::{ rngs::StdRng, @@ -5,16 +7,14 @@ use rand::{ Rng, SeedableRng, }; use test_casing::test_casing; - -use std::collections::{HashMap, HashSet}; +use zksync_crypto::hasher::blake2::Blake2Hasher; +use zksync_types::{H256, U256}; use super::*; use crate::{ hasher::{HasherWithStats, MerklePath}, types::{NodeKey, TreeInstruction, KEY_SIZE}, }; -use zksync_crypto::hasher::blake2::Blake2Hasher; -use zksync_types::{H256, U256}; 
pub(super) const FIRST_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0000_0000]); const SECOND_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0100_0000]); diff --git a/core/lib/merkle_tree/src/types/internal.rs b/core/lib/merkle_tree/src/types/internal.rs index cb35b0281c2..e983928c554 100644 --- a/core/lib/merkle_tree/src/types/internal.rs +++ b/core/lib/merkle_tree/src/types/internal.rs @@ -554,9 +554,10 @@ impl StaleNodeKey { #[cfg(test)] mod tests { - use super::*; use zksync_types::U256; + use super::*; + // `U256` uses little-endian `u64` ordering; i.e., this is // 0x_dead_beef_0000_0000_.._0000. const TEST_KEY: Key = U256([0, 0, 0, 0x_dead_beef_0000_0000]); diff --git a/core/lib/merkle_tree/src/types/mod.rs b/core/lib/merkle_tree/src/types/mod.rs index 15ab72b6911..43a3922da86 100644 --- a/core/lib/merkle_tree/src/types/mod.rs +++ b/core/lib/merkle_tree/src/types/mod.rs @@ -1,13 +1,13 @@ //! Basic storage types. -mod internal; +use zksync_types::{H256, U256}; pub(crate) use self::internal::{ ChildRef, Nibbles, NibblesBytes, StaleNodeKey, TreeTags, HASH_SIZE, KEY_SIZE, TREE_DEPTH, }; pub use self::internal::{InternalNode, LeafNode, Manifest, Node, NodeKey, Root}; -use zksync_types::{H256, U256}; +mod internal; /// Key stored in the tree. pub type Key = U256; diff --git a/core/lib/merkle_tree/tests/integration/common.rs b/core/lib/merkle_tree/tests/integration/common.rs index 096a54ce711..28c3827827a 100644 --- a/core/lib/merkle_tree/tests/integration/common.rs +++ b/core/lib/merkle_tree/tests/integration/common.rs @@ -1,9 +1,8 @@ //! Shared functionality. 
-use once_cell::sync::Lazy; - use std::collections::HashMap; +use once_cell::sync::Lazy; use zksync_crypto::hasher::{blake2::Blake2Hasher, Hasher}; use zksync_merkle_tree::{HashTree, TreeEntry, TreeInstruction}; use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; diff --git a/core/lib/merkle_tree/tests/integration/consistency.rs b/core/lib/merkle_tree/tests/integration/consistency.rs index da3312d2002..b6b424e431a 100644 --- a/core/lib/merkle_tree/tests/integration/consistency.rs +++ b/core/lib/merkle_tree/tests/integration/consistency.rs @@ -3,9 +3,9 @@ use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; use tempfile::TempDir; +use zksync_merkle_tree::{MerkleTree, MerkleTreeColumnFamily, RocksDBWrapper}; use crate::common::generate_key_value_pairs; -use zksync_merkle_tree::{MerkleTree, MerkleTreeColumnFamily, RocksDBWrapper}; // Something (maybe RocksDB) makes the test below work very slowly in the debug mode; // thus, the number of test cases is conditionally reduced. diff --git a/core/lib/merkle_tree/tests/integration/domain.rs b/core/lib/merkle_tree/tests/integration/domain.rs index f3febda5f06..45165067221 100644 --- a/core/lib/merkle_tree/tests/integration/domain.rs +++ b/core/lib/merkle_tree/tests/integration/domain.rs @@ -1,11 +1,10 @@ //! Domain-specific tests. Taken almost verbatim from the previous tree implementation. +use std::slice; + use serde::{Deserialize, Serialize}; use serde_with::{hex::Hex, serde_as}; use tempfile::TempDir; - -use std::slice; - use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{domain::ZkSyncTree, HashTree, TreeEntry, TreeInstruction}; use zksync_storage::RocksDB; diff --git a/core/lib/merkle_tree/tests/integration/merkle_tree.rs b/core/lib/merkle_tree/tests/integration/merkle_tree.rs index e4f052bb03c..117ea0db4d9 100644 --- a/core/lib/merkle_tree/tests/integration/merkle_tree.rs +++ b/core/lib/merkle_tree/tests/integration/merkle_tree.rs @@ -1,10 +1,9 @@ //! 
Tests not tied to the zksync domain. -use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; -use test_casing::test_casing; - use std::{cmp, mem}; +use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; +use test_casing::test_casing; use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ Database, HashTree, MerkleTree, PatchSet, Patched, TreeEntry, TreeInstruction, TreeLogEntry, @@ -532,16 +531,16 @@ fn range_proofs_with_random_ranges() { /// RocksDB-specific tests. mod rocksdb { + use std::collections::BTreeMap; + use serde::{Deserialize, Serialize}; use serde_with::{hex::Hex, serde_as}; use tempfile::TempDir; - - use std::collections::BTreeMap; - - use super::*; use zksync_merkle_tree::{MerkleTreeColumnFamily, MerkleTreePruner, RocksDBWrapper}; use zksync_storage::RocksDB; + use super::*; + #[derive(Debug)] struct Harness { db: RocksDBWrapper, diff --git a/core/lib/merkle_tree/tests/integration/recovery.rs b/core/lib/merkle_tree/tests/integration/recovery.rs index 6739e4ffe02..399b214e3fe 100644 --- a/core/lib/merkle_tree/tests/integration/recovery.rs +++ b/core/lib/merkle_tree/tests/integration/recovery.rs @@ -2,7 +2,6 @@ use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng}; use test_casing::test_casing; - use zksync_crypto::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ recovery::MerkleTreeRecovery, Database, MerkleTree, PatchSet, PruneDatabase, ValueHash, @@ -125,9 +124,9 @@ fn recovery_in_chunks(kind: RecoveryKind, chunk_size: usize) { mod rocksdb { use tempfile::TempDir; + use zksync_merkle_tree::RocksDBWrapper; use super::*; - use zksync_merkle_tree::RocksDBWrapper; #[test_casing(8, test_casing::Product((RecoveryKind::ALL, [6, 10, 17, 42])))] fn recovery_in_chunks(kind: RecoveryKind, chunk_size: usize) { diff --git a/core/lib/mini_merkle_tree/benches/tree.rs b/core/lib/mini_merkle_tree/benches/tree.rs index a964456bfb4..8ea4128ac34 100644 --- a/core/lib/mini_merkle_tree/benches/tree.rs +++ 
b/core/lib/mini_merkle_tree/benches/tree.rs @@ -3,7 +3,6 @@ use criterion::{ criterion_group, criterion_main, BatchSize, Bencher, BenchmarkId, Criterion, Throughput, }; - use zksync_mini_merkle_tree::MiniMerkleTree; const TREE_SIZES: &[usize] = &[32, 64, 128, 256, 512, 1_024]; diff --git a/core/lib/mini_merkle_tree/src/lib.rs b/core/lib/mini_merkle_tree/src/lib.rs index a6cbf37213c..168e5d8dd09 100644 --- a/core/lib/mini_merkle_tree/src/lib.rs +++ b/core/lib/mini_merkle_tree/src/lib.rs @@ -5,10 +5,10 @@ #![warn(clippy::all, clippy::pedantic)] #![allow(clippy::must_use_candidate, clippy::similar_names)] -use once_cell::sync::Lazy; - use std::{iter, str::FromStr}; +use once_cell::sync::Lazy; + #[cfg(test)] mod tests; diff --git a/core/lib/multivm/src/glue/tracers/mod.rs b/core/lib/multivm/src/glue/tracers/mod.rs index a504d5d2c8f..c58e717a646 100644 --- a/core/lib/multivm/src/glue/tracers/mod.rs +++ b/core/lib/multivm/src/glue/tracers/mod.rs @@ -29,10 +29,11 @@ //! - Add this trait as a trait bound to the `MultiVMTracer`. //! - Add this trait as a trait bound for `T` in `MultiVMTracer` implementation. //! - Implement the trait for `T` with a bound to `VmTracer` for a specific version. -//! 
-use crate::HistoryMode; + use zksync_state::WriteStorage; +use crate::HistoryMode; + pub type MultiVmTracerPointer = Box>; pub trait MultiVMTracer: diff --git a/core/lib/multivm/src/glue/types/vm/block_context_mode.rs b/core/lib/multivm/src/glue/types/vm/block_context_mode.rs index eba3c503e06..0cbbcbf33e3 100644 --- a/core/lib/multivm/src/glue/types/vm/block_context_mode.rs +++ b/core/lib/multivm/src/glue/types/vm/block_context_mode.rs @@ -1,6 +1,7 @@ -use crate::glue::GlueFrom; use zksync_utils::h256_to_u256; +use crate::glue::GlueFrom; + impl GlueFrom for crate::vm_m5::vm_with_bootloader::BlockContextMode { fn glue_from(value: crate::interface::L1BatchEnv) -> Self { let derived = crate::vm_m5::vm_with_bootloader::DerivedBlockContext { diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index 827ac7fe82a..e63ab376bad 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -1,9 +1,11 @@ use zksync_types::l2_to_l1_log::UserL2ToL1Log; -use crate::glue::{GlueFrom, GlueInto}; -use crate::interface::{ - types::outputs::VmExecutionLogs, CurrentExecutionState, ExecutionResult, Refunds, - VmExecutionResultAndLogs, VmExecutionStatistics, +use crate::{ + glue::{GlueFrom, GlueInto}, + interface::{ + types::outputs::VmExecutionLogs, CurrentExecutionState, ExecutionResult, Refunds, + VmExecutionResultAndLogs, VmExecutionStatistics, + }, }; // Note: In version after vm VmVirtualBlocks the bootloader memory knowledge is encapsulated into the VM. 
diff --git a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs index 10e422edbca..0c888cdda23 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs @@ -1,7 +1,10 @@ -use crate::glue::{GlueFrom, GlueInto}; -use crate::interface::{ExecutionResult, Refunds, TxRevertReason, VmExecutionResultAndLogs}; use zksync_types::tx::tx_execution_info::TxExecutionStatus; +use crate::{ + glue::{GlueFrom, GlueInto}, + interface::{ExecutionResult, Refunds, TxRevertReason, VmExecutionResultAndLogs}, +}; + impl GlueFrom for VmExecutionResultAndLogs { fn glue_from(value: crate::vm_m5::vm_instance::VmTxExecutionResult) -> Self { let mut result: VmExecutionResultAndLogs = value.result.glue_into(); diff --git a/core/lib/multivm/src/interface/traits/tracers/dyn_tracers/vm_1_3_3.rs b/core/lib/multivm/src/interface/traits/tracers/dyn_tracers/vm_1_3_3.rs index 2138dd086c0..c088889aa03 100644 --- a/core/lib/multivm/src/interface/traits/tracers/dyn_tracers/vm_1_3_3.rs +++ b/core/lib/multivm/src/interface/traits/tracers/dyn_tracers/vm_1_3_3.rs @@ -1,6 +1,6 @@ -use zk_evm_1_3_3::abstractions::Memory; -use zk_evm_1_3_3::tracing::{ - AfterDecodingData, AfterExecutionData, BeforeExecutionData, VmLocalStateData, +use zk_evm_1_3_3::{ + abstractions::Memory, + tracing::{AfterDecodingData, AfterExecutionData, BeforeExecutionData, VmLocalStateData}, }; use zksync_state::StoragePtr; diff --git a/core/lib/multivm/src/interface/traits/tracers/dyn_tracers/vm_1_4_0.rs b/core/lib/multivm/src/interface/traits/tracers/dyn_tracers/vm_1_4_0.rs index 61d7831393d..3ce69d02942 100644 --- a/core/lib/multivm/src/interface/traits/tracers/dyn_tracers/vm_1_4_0.rs +++ b/core/lib/multivm/src/interface/traits/tracers/dyn_tracers/vm_1_4_0.rs @@ -1,6 +1,6 @@ -use zk_evm_1_4_0::abstractions::Memory; -use zk_evm_1_4_0::tracing::{ - AfterDecodingData, AfterExecutionData, 
BeforeExecutionData, VmLocalStateData, +use zk_evm_1_4_0::{ + abstractions::Memory, + tracing::{AfterDecodingData, AfterExecutionData, BeforeExecutionData, VmLocalStateData}, }; use zksync_state::StoragePtr; diff --git a/core/lib/multivm/src/interface/traits/vm.rs b/core/lib/multivm/src/interface/traits/vm.rs index b4a9320bbc6..0dbacc9d1d9 100644 --- a/core/lib/multivm/src/interface/traits/vm.rs +++ b/core/lib/multivm/src/interface/traits/vm.rs @@ -47,20 +47,24 @@ //! let result = vm.execute(multivm::interface::VmExecutionMode::Batch); //! ``` -use crate::interface::types::errors::BytecodeCompressionError; -use crate::interface::types::inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode}; -use crate::interface::types::outputs::{ - BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs, -}; - -use crate::interface::{FinishedL1Batch, VmMemoryMetrics}; -use crate::tracers::TracerDispatcher; -use crate::vm_latest::HistoryEnabled; -use crate::HistoryMode; use zksync_state::StoragePtr; use zksync_types::Transaction; use zksync_utils::bytecode::CompressedBytecodeInfo; +use crate::{ + interface::{ + types::{ + errors::BytecodeCompressionError, + inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode}, + outputs::{BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs}, + }, + FinishedL1Batch, VmMemoryMetrics, + }, + tracers::TracerDispatcher, + vm_latest::HistoryEnabled, + HistoryMode, +}; + pub trait VmInterface { type TracerDispatcher: Default + From>; diff --git a/core/lib/multivm/src/interface/types/errors/halt.rs b/core/lib/multivm/src/interface/types/errors/halt.rs index 23bab7ee55e..3323a128c68 100644 --- a/core/lib/multivm/src/interface/types/errors/halt.rs +++ b/core/lib/multivm/src/interface/types/errors/halt.rs @@ -1,6 +1,7 @@ -use super::VmRevertReason; use std::fmt::{Display, Formatter}; +use super::VmRevertReason; + /// Structure for non-contract errors from the Virtual Machine (EVM). 
/// Differentiates VM-specific issues from contract-related errors. diff --git a/core/lib/multivm/src/interface/types/errors/tx_revert_reason.rs b/core/lib/multivm/src/interface/types/errors/tx_revert_reason.rs index f92a913fb8b..9578a06ea0a 100644 --- a/core/lib/multivm/src/interface/types/errors/tx_revert_reason.rs +++ b/core/lib/multivm/src/interface/types/errors/tx_revert_reason.rs @@ -1,8 +1,6 @@ -use super::halt::Halt; - use std::fmt::Display; -use super::{BootloaderErrorCode, VmRevertReason}; +use super::{halt::Halt, BootloaderErrorCode, VmRevertReason}; #[derive(Debug, Clone, PartialEq)] pub enum TxRevertReason { diff --git a/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs b/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs index ff239ec4266..b5cb0cbe5e8 100644 --- a/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs +++ b/core/lib/multivm/src/interface/types/inputs/l1_batch_env.rs @@ -1,6 +1,6 @@ -use super::L2BlockEnv; use zksync_types::{Address, L1BatchNumber, H256}; +use super::L2BlockEnv; use crate::vm_latest::utils::fee::derive_base_fee_and_gas_per_pubdata; /// Unique params for each block diff --git a/core/lib/multivm/src/interface/types/outputs/execution_result.rs b/core/lib/multivm/src/interface/types/outputs/execution_result.rs index 3181a94a9da..e177b630012 100644 --- a/core/lib/multivm/src/interface/types/outputs/execution_result.rs +++ b/core/lib/multivm/src/interface/types/outputs/execution_result.rs @@ -1,11 +1,14 @@ -use crate::interface::{Halt, VmExecutionStatistics, VmRevertReason}; use zksync_system_constants::PUBLISH_BYTECODE_OVERHEAD; -use zksync_types::event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}; -use zksync_types::l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}; -use zksync_types::tx::ExecutionMetrics; -use zksync_types::{StorageLogQuery, Transaction, VmEvent}; +use zksync_types::{ + event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, + 
l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, + tx::ExecutionMetrics, + StorageLogQuery, Transaction, VmEvent, +}; use zksync_utils::bytecode::bytecode_len_in_bytes; +use crate::interface::{Halt, VmExecutionStatistics, VmRevertReason}; + /// Refunds produced for the user. #[derive(Debug, Clone, Default)] pub struct Refunds { diff --git a/core/lib/multivm/src/interface/types/outputs/execution_state.rs b/core/lib/multivm/src/interface/types/outputs/execution_state.rs index 066de92ffbe..24034a96221 100644 --- a/core/lib/multivm/src/interface/types/outputs/execution_state.rs +++ b/core/lib/multivm/src/interface/types/outputs/execution_state.rs @@ -1,5 +1,7 @@ -use zksync_types::l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}; -use zksync_types::{LogQuery, StorageLogQuery, VmEvent, U256}; +use zksync_types::{ + l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, + LogQuery, StorageLogQuery, VmEvent, U256, +}; /// State of the VM since the start of the batch execution. #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/multivm/src/interface/types/outputs/mod.rs b/core/lib/multivm/src/interface/types/outputs/mod.rs index 39fed3ad9cb..eec19826e0b 100644 --- a/core/lib/multivm/src/interface/types/outputs/mod.rs +++ b/core/lib/multivm/src/interface/types/outputs/mod.rs @@ -1,12 +1,13 @@ +pub use self::{ + execution_result::{ExecutionResult, Refunds, VmExecutionLogs, VmExecutionResultAndLogs}, + execution_state::{BootloaderMemory, CurrentExecutionState}, + finished_l1batch::FinishedL1Batch, + l2_block::L2Block, + statistic::{VmExecutionStatistics, VmMemoryMetrics}, +}; + mod execution_result; mod execution_state; mod finished_l1batch; mod l2_block; mod statistic; - -pub use execution_result::VmExecutionLogs; -pub use execution_result::{ExecutionResult, Refunds, VmExecutionResultAndLogs}; -pub use execution_state::{BootloaderMemory, CurrentExecutionState}; -pub use finished_l1batch::FinishedL1Batch; -pub use l2_block::L2Block; -pub use statistic::{VmExecutionStatistics, 
VmMemoryMetrics}; diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index 06d7a429130..23ea80a6860 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -3,6 +3,14 @@ #![warn(unused_extern_crates)] #![warn(unused_imports)] +pub use zk_evm_1_3_1; +pub use zk_evm_1_3_3; +pub use zk_evm_1_4_0; +pub use zksync_types::vm_version::VmVersion; + +pub use self::versions::{ + vm_1_3_2, vm_latest, vm_m5, vm_m6, vm_refunds_enhancement, vm_virtual_blocks, +}; pub use crate::{ glue::{ history_mode::HistoryMode, @@ -10,22 +18,9 @@ pub use crate::{ }, vm_instance::VmInstance, }; -pub use zksync_types::vm_version::VmVersion; mod glue; - -mod vm_instance; - pub mod interface; pub mod tracers; pub mod versions; - -pub use versions::vm_1_3_2; -pub use versions::vm_latest; -pub use versions::vm_m5; -pub use versions::vm_m6; -pub use versions::vm_refunds_enhancement; -pub use versions::vm_virtual_blocks; -pub use zk_evm_1_3_1; -pub use zk_evm_1_3_3; -pub use zk_evm_1_4_0; +mod vm_instance; diff --git a/core/lib/multivm/src/tracers/call_tracer/mod.rs b/core/lib/multivm/src/tracers/call_tracer/mod.rs index 90f15fb68d4..0c7e4d3c280 100644 --- a/core/lib/multivm/src/tracers/call_tracer/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/mod.rs @@ -1,8 +1,10 @@ -use crate::tracers::call_tracer::metrics::CALL_METRICS; -use once_cell::sync::OnceCell; use std::sync::Arc; + +use once_cell::sync::OnceCell; use zksync_types::vm_trace::Call; +use crate::tracers::call_tracer::metrics::CALL_METRICS; + mod metrics; pub mod vm_latest; pub mod vm_refunds_enhancement; diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs index f5f5c1077d3..09b5b828fc0 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_latest/mod.rs @@ -7,16 +7,19 @@ use zk_evm_1_4_0::{ }; use zksync_state::{StoragePtr, WriteStorage}; use 
zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::vm_trace::{Call, CallType}; -use zksync_types::FarCallOpcode; -use zksync_types::U256; +use zksync_types::{ + vm_trace::{Call, CallType}, + FarCallOpcode, U256, +}; -use crate::interface::{ - tracer::VmExecutionStopReason, traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, - VmRevertReason, +use crate::{ + interface::{ + tracer::VmExecutionStopReason, traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, + VmRevertReason, + }, + tracers::call_tracer::CallTracer, + vm_latest::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}, }; -use crate::tracers::call_tracer::CallTracer; -use crate::vm_latest::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}; impl DynTracer> for CallTracer { fn after_execution( diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs index fab4ee0ff0f..3bc4426e8cc 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_refunds_enhancement/mod.rs @@ -7,17 +7,18 @@ use zk_evm_1_3_3::{ }; use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::vm_trace::{Call, CallType}; -use zksync_types::FarCallOpcode; -use zksync_types::U256; - -use crate::interface::{ - tracer::VmExecutionStopReason, traits::tracers::dyn_tracers::vm_1_3_3::DynTracer, - VmRevertReason, +use zksync_types::{ + vm_trace::{Call, CallType}, + FarCallOpcode, U256, }; -use crate::tracers::call_tracer::CallTracer; -use crate::vm_refunds_enhancement::{ - BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState, + +use crate::{ + interface::{ + tracer::VmExecutionStopReason, traits::tracers::dyn_tracers::vm_1_3_3::DynTracer, + VmRevertReason, + }, + tracers::call_tracer::CallTracer, + vm_refunds_enhancement::{BootloaderState, 
HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}, }; impl DynTracer> for CallTracer { diff --git a/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs index 631d4d2081c..f96b2cb0f58 100644 --- a/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs +++ b/core/lib/multivm/src/tracers/call_tracer/vm_virtual_blocks/mod.rs @@ -1,20 +1,23 @@ -use zk_evm_1_3_3::tracing::{AfterExecutionData, VmLocalStateData}; -use zk_evm_1_3_3::zkevm_opcode_defs::{ - FarCallABI, FatPointer, Opcode, RetOpcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, - RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, +use zk_evm_1_3_3::{ + tracing::{AfterExecutionData, VmLocalStateData}, + zkevm_opcode_defs::{ + FarCallABI, FatPointer, Opcode, RetOpcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, + RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, + }, }; use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::vm_trace::{Call, CallType}; -use zksync_types::FarCallOpcode; -use zksync_types::U256; - -use crate::interface::{ - dyn_tracers::vm_1_3_3::DynTracer, VmExecutionResultAndLogs, VmRevertReason, +use zksync_types::{ + vm_trace::{Call, CallType}, + FarCallOpcode, U256, }; -use crate::tracers::call_tracer::CallTracer; -use crate::vm_virtual_blocks::{ - ExecutionEndTracer, ExecutionProcessing, HistoryMode, SimpleMemory, VmTracer, + +use crate::{ + interface::{dyn_tracers::vm_1_3_3::DynTracer, VmExecutionResultAndLogs, VmRevertReason}, + tracers::call_tracer::CallTracer, + vm_virtual_blocks::{ + ExecutionEndTracer, ExecutionProcessing, HistoryMode, SimpleMemory, VmTracer, + }, }; impl DynTracer> for CallTracer { diff --git a/core/lib/multivm/src/tracers/multivm_dispatcher.rs b/core/lib/multivm/src/tracers/multivm_dispatcher.rs index d4c7337ce65..8ee858a6110 100644 --- a/core/lib/multivm/src/tracers/multivm_dispatcher.rs +++ 
b/core/lib/multivm/src/tracers/multivm_dispatcher.rs @@ -1,6 +1,7 @@ -use crate::{HistoryMode, MultiVmTracerPointer}; use zksync_state::WriteStorage; +use crate::{HistoryMode, MultiVmTracerPointer}; + /// Tracer dispatcher is a tracer that can dispatch calls to multiple tracers. pub struct TracerDispatcher { tracers: Vec>, diff --git a/core/lib/multivm/src/tracers/storage_invocation/vm_latest/mod.rs b/core/lib/multivm/src/tracers/storage_invocation/vm_latest/mod.rs index 0490ec34107..46213ff54fc 100644 --- a/core/lib/multivm/src/tracers/storage_invocation/vm_latest/mod.rs +++ b/core/lib/multivm/src/tracers/storage_invocation/vm_latest/mod.rs @@ -1,12 +1,15 @@ -use crate::interface::{ - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, - Halt, -}; -use crate::tracers::storage_invocation::StorageInvocations; -use crate::vm_latest::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}; use zksync_state::WriteStorage; +use crate::{ + interface::{ + tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, + Halt, + }, + tracers::storage_invocation::StorageInvocations, + vm_latest::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}, +}; + impl DynTracer> for StorageInvocations {} impl VmTracer for StorageInvocations { diff --git a/core/lib/multivm/src/tracers/storage_invocation/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/tracers/storage_invocation/vm_refunds_enhancement/mod.rs index fe4fc33418d..1e562374afd 100644 --- a/core/lib/multivm/src/tracers/storage_invocation/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/tracers/storage_invocation/vm_refunds_enhancement/mod.rs @@ -1,10 +1,15 @@ -use crate::interface::tracer::{TracerExecutionStatus, TracerExecutionStopReason}; -use crate::interface::{traits::tracers::dyn_tracers::vm_1_3_3::DynTracer, Halt}; -use 
crate::tracers::storage_invocation::StorageInvocations; -use crate::vm_refunds_enhancement::VmTracer; -use crate::vm_refunds_enhancement::{BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState}; use zksync_state::WriteStorage; +use crate::{ + interface::{ + tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + traits::tracers::dyn_tracers::vm_1_3_3::DynTracer, + Halt, + }, + tracers::storage_invocation::StorageInvocations, + vm_refunds_enhancement::{BootloaderState, HistoryMode, SimpleMemory, VmTracer, ZkSyncVmState}, +}; + impl DynTracer> for StorageInvocations {} impl VmTracer for StorageInvocations { diff --git a/core/lib/multivm/src/tracers/storage_invocation/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/tracers/storage_invocation/vm_virtual_blocks/mod.rs index 023b6f376cd..cd0ab9f4bb5 100644 --- a/core/lib/multivm/src/tracers/storage_invocation/vm_virtual_blocks/mod.rs +++ b/core/lib/multivm/src/tracers/storage_invocation/vm_virtual_blocks/mod.rs @@ -1,11 +1,14 @@ -use crate::interface::dyn_tracers::vm_1_3_3::DynTracer; -use crate::tracers::storage_invocation::StorageInvocations; -use crate::vm_virtual_blocks::{ - BootloaderState, ExecutionEndTracer, ExecutionProcessing, HistoryMode, SimpleMemory, VmTracer, - ZkSyncVmState, -}; use zksync_state::WriteStorage; +use crate::{ + interface::dyn_tracers::vm_1_3_3::DynTracer, + tracers::storage_invocation::StorageInvocations, + vm_virtual_blocks::{ + BootloaderState, ExecutionEndTracer, ExecutionProcessing, HistoryMode, SimpleMemory, + VmTracer, ZkSyncVmState, + }, +}; + impl ExecutionEndTracer for StorageInvocations { fn should_stop_execution(&self) -> bool { self.current >= self.limit diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index 718edf1a964..0f43f235ade 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -1,19 +1,11 @@ -mod types; -mod vm_latest; -mod 
vm_refunds_enhancement; -mod vm_virtual_blocks; - -use std::sync::Arc; -use std::{collections::HashSet, marker::PhantomData}; +use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use once_cell::sync::OnceCell; - use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::{ ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, L2_ETH_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, SYSTEM_CONTEXT_ADDRESS, }; - use zksync_types::{ vm_trace::ViolatedValidationRule, web3::signing::keccak256, AccountTreeId, Address, StorageKey, H256, U256, @@ -23,6 +15,11 @@ use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h2 use crate::tracers::validator::types::{NewTrustedValidationItems, ValidationTracerMode}; pub use crate::tracers::validator::types::{ValidationError, ValidationTracerParams}; +mod types; +mod vm_latest; +mod vm_refunds_enhancement; +mod vm_virtual_blocks; + /// Tracer that is used to ensure that the validation adheres to all the rules /// to prevent DDoS attacks on the server. 
#[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/tracers/validator/types.rs b/core/lib/multivm/src/tracers/validator/types.rs index eb80e6f1650..de6217c2988 100644 --- a/core/lib/multivm/src/tracers/validator/types.rs +++ b/core/lib/multivm/src/tracers/validator/types.rs @@ -1,8 +1,8 @@ +use std::{collections::HashSet, fmt::Display}; + +use zksync_types::{vm_trace::ViolatedValidationRule, Address, H256, U256}; + use crate::interface::Halt; -use std::collections::HashSet; -use std::fmt::Display; -use zksync_types::vm_trace::ViolatedValidationRule; -use zksync_types::{Address, H256, U256}; #[derive(Debug, Clone, Eq, PartialEq, Copy)] #[allow(clippy::enum_variant_names)] diff --git a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs index 4d5ff43ec47..dbdc7253f75 100644 --- a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs @@ -2,32 +2,32 @@ use zk_evm_1_4_0::{ tracing::{BeforeExecutionData, VmLocalStateData}, zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode}, }; - use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::KECCAK256_PRECOMPILE_ADDRESS; - -use crate::HistoryMode; use zksync_types::{ get_code_key, vm_trace::ViolatedValidationRule, AccountTreeId, StorageKey, H256, }; use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h256}; -use crate::vm_latest::tracers::utils::{ - computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, -}; - -use crate::interface::{ - traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, - types::tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - Halt, -}; -use crate::tracers::validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode}, - {ValidationRoundResult, ValidationTracer}, +use crate::{ + interface::{ + traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, + 
types::tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + Halt, + }, + tracers::validator::{ + types::{NewTrustedValidationItems, ValidationTracerMode}, + ValidationRoundResult, ValidationTracer, + }, + vm_latest::{ + tracers::utils::{ + computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, + }, + BootloaderState, SimpleMemory, VmTracer, ZkSyncVmState, + }, + HistoryMode, }; -use crate::vm_latest::{BootloaderState, SimpleMemory, VmTracer, ZkSyncVmState}; - impl ValidationTracer { fn check_user_restrictions_vm_latest( &mut self, diff --git a/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs index ec4e95e5630..ab3a16c4b90 100644 --- a/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs @@ -2,31 +2,30 @@ use zk_evm_1_3_3::{ tracing::{BeforeExecutionData, VmLocalStateData}, zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode}, }; - use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::KECCAK256_PRECOMPILE_ADDRESS; - -use crate::HistoryMode; use zksync_types::{ get_code_key, vm_trace::ViolatedValidationRule, AccountTreeId, StorageKey, H256, }; use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h256}; -use crate::interface::{ - traits::tracers::dyn_tracers::vm_1_3_3::DynTracer, - types::tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - Halt, -}; -use crate::tracers::validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode}, - {ValidationRoundResult, ValidationTracer}, -}; - -use crate::vm_refunds_enhancement::{ - tracers::utils::{ - computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, +use crate::{ + interface::{ + traits::tracers::dyn_tracers::vm_1_3_3::DynTracer, + types::tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + 
Halt, + }, + tracers::validator::{ + types::{NewTrustedValidationItems, ValidationTracerMode}, + ValidationRoundResult, ValidationTracer, + }, + vm_refunds_enhancement::{ + tracers::utils::{ + computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, + }, + BootloaderState, SimpleMemory, VmTracer, ZkSyncVmState, }, - BootloaderState, SimpleMemory, VmTracer, ZkSyncVmState, + HistoryMode, }; impl ValidationTracer { diff --git a/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs index d2155f4ecf8..6fd2955f60b 100644 --- a/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs @@ -2,25 +2,27 @@ use zk_evm_1_3_3::{ tracing::{BeforeExecutionData, VmLocalStateData}, zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode}, }; - use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::KECCAK256_PRECOMPILE_ADDRESS; - -use crate::HistoryMode; -use zksync_types::vm_trace::ViolatedValidationRule; -use zksync_types::{get_code_key, AccountTreeId, StorageKey, H256}; +use zksync_types::{ + get_code_key, vm_trace::ViolatedValidationRule, AccountTreeId, StorageKey, H256, +}; use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h256}; -use crate::vm_virtual_blocks::tracers::utils::{ - computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, +use crate::{ + interface::{dyn_tracers::vm_1_3_3::DynTracer, VmExecutionResultAndLogs}, + tracers::validator::{ + types::{NewTrustedValidationItems, ValidationTracerMode}, + ValidationRoundResult, ValidationTracer, + }, + vm_virtual_blocks::{ + tracers::utils::{ + computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, + }, + ExecutionEndTracer, ExecutionProcessing, SimpleMemory, VmTracer, + }, + HistoryMode, }; -use crate::vm_virtual_blocks::SimpleMemory; -use 
crate::vm_virtual_blocks::{ExecutionEndTracer, ExecutionProcessing, VmTracer}; - -use crate::interface::dyn_tracers::vm_1_3_3::DynTracer; -use crate::interface::VmExecutionResultAndLogs; -use crate::tracers::validator::types::{NewTrustedValidationItems, ValidationTracerMode}; -use crate::tracers::validator::{ValidationRoundResult, ValidationTracer}; impl ValidationTracer { fn check_user_restrictions_vm_virtual_blocks( diff --git a/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs b/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs index c127a9e6f2d..70c954425f4 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/errors/vm_revert_reason.rs @@ -1,5 +1,7 @@ -use std::convert::TryFrom; -use std::fmt::{Debug, Display}; +use std::{ + convert::TryFrom, + fmt::{Debug, Display}, +}; use zksync_types::U256; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs b/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs index db6c5d11aee..cbf7c183d3a 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/event_sink.rs @@ -1,8 +1,5 @@ -use crate::vm_1_3_2::{ - history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, - oracles::OracleWithHistory, -}; use std::collections::HashMap; + use zk_evm_1_3_3::{ abstractions::EventSink, aux_structures::{LogQuery, Timestamp}, @@ -12,6 +9,11 @@ use zk_evm_1_3_3::{ }, }; +use crate::vm_1_3_2::{ + history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, + oracles::OracleWithHistory, +}; + #[derive(Debug, Clone, PartialEq, Default)] pub struct InMemoryEventSink { frames_stack: AppDataFrameManagerWithHistory, H>, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/history_recorder.rs b/core/lib/multivm/src/versions/vm_1_3_2/history_recorder.rs index 3c83b68e0a3..263c1f023dd 100644 --- 
a/core/lib/multivm/src/versions/vm_1_3_2/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/history_recorder.rs @@ -5,7 +5,6 @@ use zk_evm_1_3_3::{ vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; - use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{StorageKey, U256}; use zksync_utils::{h256_to_u256, u256_to_h256}; @@ -765,12 +764,13 @@ impl HistoryRecorder, H> { #[cfg(test)] mod tests { + use zk_evm_1_3_3::{aux_structures::Timestamp, vm_state::PrimitiveValue}; + use zksync_types::U256; + use crate::vm_1_3_2::{ history_recorder::{HistoryRecorder, MemoryWrapper}, HistoryDisabled, }; - use zk_evm_1_3_3::{aux_structures::Timestamp, vm_state::PrimitiveValue}; - use zksync_types::U256; #[test] fn memory_equality() { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/memory.rs b/core/lib/multivm/src/versions/vm_1_3_2/memory.rs index b269ba89b3c..91fdbe223d8 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/memory.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/memory.rs @@ -1,15 +1,19 @@ -use zk_evm_1_3_3::abstractions::{Memory, MemoryType}; -use zk_evm_1_3_3::aux_structures::{MemoryPage, MemoryQuery, Timestamp}; -use zk_evm_1_3_3::vm_state::PrimitiveValue; -use zk_evm_1_3_3::zkevm_opcode_defs::FatPointer; +use zk_evm_1_3_3::{ + abstractions::{Memory, MemoryType}, + aux_structures::{MemoryPage, MemoryQuery, Timestamp}, + vm_state::PrimitiveValue, + zkevm_opcode_defs::FatPointer, +}; use zksync_types::U256; -use crate::vm_1_3_2::history_recorder::{ - FramedStack, HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, - MemoryWrapper, WithHistory, +use crate::vm_1_3_2::{ + history_recorder::{ + FramedStack, HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, + MemoryWrapper, WithHistory, + }, + oracles::OracleWithHistory, + utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}, }; -use crate::vm_1_3_2::oracles::OracleWithHistory; -use 
crate::vm_1_3_2::utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}; #[derive(Debug, Clone, PartialEq)] pub struct SimpleMemory { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/mod.rs b/core/lib/multivm/src/versions/vm_1_3_2/mod.rs index 24e433d9123..37c5f34ffd0 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/mod.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/mod.rs @@ -1,5 +1,18 @@ #![allow(clippy::derive_partial_eq_without_eq)] +pub use zk_evm_1_3_3::{self, block_properties::BlockProperties}; +pub use zksync_types::vm_trace::VmExecutionTrace; + +pub(crate) use self::vm_instance::VmInstance; +pub use self::{ + errors::TxRevertReason, + history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}, + oracle_tools::OracleTools, + oracles::storage::StorageOracle, + vm::Vm, + vm_instance::{VmBlockResult, VmExecutionResult}, +}; + mod bootloader_state; pub mod errors; pub mod event_sink; @@ -11,25 +24,13 @@ pub mod oracles; mod pubdata_utils; mod refunds; pub mod test_utils; -pub mod transaction_data; -pub mod utils; -pub mod vm_with_bootloader; - #[cfg(test)] mod tests; +pub mod transaction_data; +pub mod utils; mod vm; pub mod vm_instance; - -pub use errors::TxRevertReason; -pub use history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}; -pub use oracle_tools::OracleTools; -pub use oracles::storage::StorageOracle; -pub use vm::Vm; -pub(crate) use vm_instance::VmInstance; -pub use vm_instance::{VmBlockResult, VmExecutionResult}; -pub use zk_evm_1_3_3; -pub use zk_evm_1_3_3::block_properties::BlockProperties; -pub use zksync_types::vm_trace::VmExecutionTrace; +pub mod vm_with_bootloader; pub type Word = zksync_types::U256; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracle_tools.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracle_tools.rs index 9f0f2600c5b..f271d86474c 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracle_tools.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracle_tools.rs @@ -1,15 +1,18 @@ 
use std::fmt::Debug; -use crate::vm_1_3_2::event_sink::InMemoryEventSink; -use crate::vm_1_3_2::history_recorder::HistoryMode; -use crate::vm_1_3_2::memory::SimpleMemory; -use crate::vm_1_3_2::oracles::{ - decommitter::DecommitterOracle, precompile::PrecompilesProcessorWithHistory, - storage::StorageOracle, -}; use zk_evm_1_3_3::witness_trace::DummyTracer; use zksync_state::{StoragePtr, WriteStorage}; +use crate::vm_1_3_2::{ + event_sink::InMemoryEventSink, + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::{ + decommitter::DecommitterOracle, precompile::PrecompilesProcessorWithHistory, + storage::StorageOracle, + }, +}; + /// zkEVM requires a bunch of objects implementing given traits to work. /// For example: Storage, Memory, PrecompilerProcessor etc /// (you can find all these traits in zk_evm crate -> src/abstractions/mod.rs) diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs index 17583b70dc9..d58b501b244 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/decommitter.rs @@ -1,21 +1,19 @@ use std::collections::HashMap; -use crate::vm_1_3_2::history_recorder::{ - HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, -}; - -use zk_evm_1_3_3::abstractions::MemoryType; -use zk_evm_1_3_3::aux_structures::Timestamp; use zk_evm_1_3_3::{ - abstractions::{DecommittmentProcessor, Memory}, - aux_structures::{DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery}, + abstractions::{DecommittmentProcessor, Memory, MemoryType}, + aux_structures::{ + DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, + }, }; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::U256; -use zksync_utils::bytecode::bytecode_len_in_words; -use zksync_utils::{bytes_to_be_words, u256_to_h256}; +use zksync_utils::{bytecode::bytecode_len_in_words, 
bytes_to_be_words, u256_to_h256}; use super::OracleWithHistory; +use crate::vm_1_3_2::history_recorder::{ + HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, +}; /// The main job of the DecommiterOracle is to implement the DecommittmentProcessor trait - that is /// used by the VM to 'load' bytecodes into memory. diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/mod.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/mod.rs index 342fadb554a..08eb1c6e174 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/mod.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/mod.rs @@ -1,11 +1,10 @@ use zk_evm_1_3_3::aux_structures::Timestamp; -// We will discard RAM as soon as the execution of a tx ends, so -// it is ok for now to use SimpleMemory -pub use zk_evm_1_3_3::reference_impls::memory::SimpleMemory as RamOracle; // All the changes to the events in the DB will be applied after the tx is executed, // so fow now it is fine. pub use zk_evm_1_3_3::reference_impls::event_sink::InMemoryEventSink as EventSinkOracle; - +// We will discard RAM as soon as the execution of a tx ends, so +// it is ok for now to use SimpleMemory +pub use zk_evm_1_3_3::reference_impls::memory::SimpleMemory as RamOracle; pub use zk_evm_1_3_3::testing::simple_tracer::NoopTracer; pub mod decommitter; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/precompile.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/precompile.rs index 0693fac6d60..8089527183f 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/precompile.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/precompile.rs @@ -1,14 +1,11 @@ use zk_evm_1_3_3::{ - abstractions::Memory, - abstractions::PrecompileCyclesWitness, - abstractions::PrecompilesProcessor, + abstractions::{Memory, PrecompileCyclesWitness, PrecompilesProcessor}, aux_structures::{LogQuery, MemoryQuery, Timestamp}, precompiles::DefaultPrecompilesProcessor, }; -use crate::vm_1_3_2::history_recorder::{HistoryEnabled, 
HistoryMode, HistoryRecorder}; - use super::OracleWithHistory; +use crate::vm_1_3_2::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder}; /// Wrap of DefaultPrecompilesProcessor that store queue /// of timestamp when precompiles are called to be executed. diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs index 9a4873fe59a..ea2ecf83a3d 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/storage.rs @@ -1,25 +1,22 @@ use std::collections::HashMap; -use crate::vm_1_3_2::history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, WithHistory, -}; - -use zk_evm_1_3_3::abstractions::RefundedAmounts; -use zk_evm_1_3_3::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES; use zk_evm_1_3_3::{ - abstractions::{RefundType, Storage as VmStorageOracle}, + abstractions::{RefundType, RefundedAmounts, Storage as VmStorageOracle}, aux_structures::{LogQuery, Timestamp}, + zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_eth_balance; use zksync_types::{ - AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS, - U256, + utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQuery, + StorageLogQueryType, BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; use super::OracleWithHistory; +use crate::vm_1_3_2::history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, WithHistory, +}; // While the storage does not support different shards, it was decided to write the // code of the StorageOracle with the shard parameters in mind. 
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/bootloader.rs index 16b1efdff54..3bc80f31119 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/bootloader.rs @@ -1,12 +1,5 @@ use std::marker::PhantomData; -use crate::vm_1_3_2::history_recorder::HistoryMode; -use crate::vm_1_3_2::memory::SimpleMemory; -use crate::vm_1_3_2::oracles::tracer::{ - utils::gas_spent_on_bytecodes_and_long_messages_this_opcode, ExecutionEndTracer, - PendingRefundTracer, PubdataSpentTracer, StorageInvocationTracer, -}; - use zk_evm_1_3_3::{ tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, @@ -16,6 +9,15 @@ use zk_evm_1_3_3::{ zkevm_opcode_defs::{Opcode, RetOpcode}, }; +use crate::vm_1_3_2::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::tracer::{ + utils::gas_spent_on_bytecodes_and_long_messages_this_opcode, ExecutionEndTracer, + PendingRefundTracer, PubdataSpentTracer, StorageInvocationTracer, + }, +}; + /// Tells the VM to end the execution before `ret` from the bootloader if there is no panic or revert. /// Also, saves the information if this `ret` was caused by "out of gas" panic. 
#[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs index 72701f6e0f2..88b21818fc3 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/call.rs @@ -1,20 +1,23 @@ -use crate::vm_1_3_2::errors::VmRevertReason; -use crate::vm_1_3_2::history_recorder::HistoryMode; -use crate::vm_1_3_2::memory::SimpleMemory; -use std::convert::TryFrom; -use std::marker::PhantomData; -use std::mem; -use zk_evm_1_3_3::tracing::{ - AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, -}; -use zk_evm_1_3_3::zkevm_opcode_defs::FatPointer; -use zk_evm_1_3_3::zkevm_opcode_defs::{ - FarCallABI, FarCallOpcode, Opcode, RetOpcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, - RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, +use std::{convert::TryFrom, marker::PhantomData, mem}; + +use zk_evm_1_3_3::{ + tracing::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, + }, + zkevm_opcode_defs::{ + FarCallABI, FarCallOpcode, FatPointer, Opcode, RetOpcode, + CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, + }, }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::vm_trace::{Call, CallType}; -use zksync_types::U256; +use zksync_types::{ + vm_trace::{Call, CallType}, + U256, +}; + +use crate::vm_1_3_2::{ + errors::VmRevertReason, history_recorder::HistoryMode, memory::SimpleMemory, +}; /// NOTE Auto implementing clone for this tracer can cause stack overflow. /// This is because of the stack field which is a Vec with nested vecs inside. 
@@ -282,9 +285,10 @@ fn filter_near_call(mut call: Call) -> Vec { #[cfg(test)] mod tests { - use crate::vm_1_3_2::oracles::tracer::call::{filter_near_call, Call, CallType}; use zk_evm_1_3_3::zkevm_opcode_defs::FarCallOpcode; + use crate::vm_1_3_2::oracles::tracer::call::{filter_near_call, Call, CallType}; + #[test] fn test_filter_near_calls() { let mut call = Call::default(); diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/mod.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/mod.rs index 29121f35c5f..5395a0a9d7b 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/mod.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/mod.rs @@ -1,5 +1,15 @@ -use zk_evm_1_3_3::tracing::Tracer; -use zk_evm_1_3_3::vm_state::VmLocalState; +use zk_evm_1_3_3::{tracing::Tracer, vm_state::VmLocalState}; + +pub(crate) use self::transaction_result::TransactionResultTracer; +pub use self::{ + bootloader::BootloaderTracer, + call::CallTracer, + one_tx::OneTxTracer, + validation::{ + ValidationError, ValidationTracer, ValidationTracerParams, ViolatedValidationRule, + }, +}; +use crate::vm_1_3_2::{history_recorder::HistoryMode, memory::SimpleMemory}; mod bootloader; mod call; @@ -8,18 +18,6 @@ mod transaction_result; mod utils; mod validation; -pub use bootloader::BootloaderTracer; -pub use call::CallTracer; -pub use one_tx::OneTxTracer; -pub use validation::{ - ValidationError, ValidationTracer, ValidationTracerParams, ViolatedValidationRule, -}; - -pub(crate) use transaction_result::TransactionResultTracer; - -use crate::vm_1_3_2::history_recorder::HistoryMode; -use crate::vm_1_3_2::memory::SimpleMemory; - pub trait ExecutionEndTracer: Tracer> { // Returns whether the vm execution should stop. 
fn should_stop_execution(&self) -> bool; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/one_tx.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/one_tx.rs index a9349ea2035..896befb8abc 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/one_tx.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/one_tx.rs @@ -1,23 +1,23 @@ +use zk_evm_1_3_3::{ + tracing::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, + }, + vm_state::VmLocalState, +}; +use zksync_types::vm_trace::Call; + use super::utils::{computational_gas_price, print_debug_if_needed}; use crate::vm_1_3_2::{ history_recorder::HistoryMode, memory::SimpleMemory, oracles::tracer::{ utils::{gas_spent_on_bytecodes_and_long_messages_this_opcode, VmHook}, - BootloaderTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + BootloaderTracer, CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + StorageInvocationTracer, }, vm_instance::get_vm_hook_params, }; -use crate::vm_1_3_2::oracles::tracer::{CallTracer, StorageInvocationTracer}; -use zk_evm_1_3_3::{ - tracing::{ - AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, - }, - vm_state::VmLocalState, -}; -use zksync_types::vm_trace::Call; - /// Allows any opcodes, but tells the VM to end the execution once the tx is over. // Internally depeds on Bootloader's VMHooks to get the notification once the transaction is finished. 
#[derive(Debug)] diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/transaction_result.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/transaction_result.rs index 215c66bfa74..c74e9bb862d 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/transaction_result.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/transaction_result.rs @@ -7,18 +7,18 @@ use zk_evm_1_3_3::{ }; use zksync_types::{vm_trace, U256}; -use crate::vm_1_3_2::memory::SimpleMemory; -use crate::vm_1_3_2::oracles::tracer::{ - CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, - StorageInvocationTracer, -}; -use crate::vm_1_3_2::vm_instance::get_vm_hook_params; use crate::vm_1_3_2::{ history_recorder::HistoryMode, - oracles::tracer::utils::{ - gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, read_pointer, - VmHook, + memory::SimpleMemory, + oracles::tracer::{ + utils::{ + gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, + read_pointer, VmHook, + }, + CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + StorageInvocationTracer, }, + vm_instance::get_vm_hook_params, }; #[derive(Debug)] diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs index 3b3b99991ed..76890b042de 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/utils.rs @@ -1,14 +1,9 @@ -use crate::vm_1_3_2::history_recorder::HistoryMode; -use crate::vm_1_3_2::memory::SimpleMemory; -use crate::vm_1_3_2::utils::{aux_heap_page_from_base, heap_page_from_base}; -use crate::vm_1_3_2::vm_instance::{get_vm_hook_params, VM_HOOK_POSITION}; -use crate::vm_1_3_2::vm_with_bootloader::BOOTLOADER_HEAP_PAGE; - -use zk_evm_1_3_3::aux_structures::MemoryPage; -use zk_evm_1_3_3::zkevm_opcode_defs::{FarCallABI, FarCallForwardPageType}; use 
zk_evm_1_3_3::{ + aux_structures::MemoryPage, tracing::{BeforeExecutionData, VmLocalStateData}, - zkevm_opcode_defs::{FatPointer, LogOpcode, Opcode, UMAOpcode}, + zkevm_opcode_defs::{ + FarCallABI, FarCallForwardPageType, FatPointer, LogOpcode, Opcode, UMAOpcode, + }, }; use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, @@ -17,6 +12,14 @@ use zksync_system_constants::{ use zksync_types::U256; use zksync_utils::u256_to_h256; +use crate::vm_1_3_2::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + utils::{aux_heap_page_from_base, heap_page_from_base}, + vm_instance::{get_vm_hook_params, VM_HOOK_POSITION}, + vm_with_bootloader::BOOTLOADER_HEAP_PAGE, +}; + #[derive(Clone, Debug, Copy)] pub(crate) enum VmHook { AccountValidationEntered, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs index c9ee54f35ba..d3308c7ea2d 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/oracles/tracer/validation.rs @@ -1,15 +1,5 @@ use std::{collections::HashSet, fmt::Display, marker::PhantomData}; -use crate::vm_1_3_2::{ - errors::VmRevertReasonParsingResult, - history_recorder::HistoryMode, - memory::SimpleMemory, - oracles::tracer::{ - utils::{computational_gas_price, print_debug_if_needed, VmHook}, - ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, - }, -}; - use zk_evm_1_3_3::{ tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, @@ -17,8 +7,6 @@ use zk_evm_1_3_3::{ zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode}, }; use zksync_state::{StoragePtr, WriteStorage}; - -use crate::vm_1_3_2::oracles::tracer::{utils::get_calldata_page_via_abi, StorageInvocationTracer}; use zksync_system_constants::{ ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, 
CONTRACT_DEPLOYER_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, L2_ETH_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, @@ -31,6 +19,18 @@ use zksync_utils::{ be_bytes_to_safe_address, h256_to_account_address, u256_to_account_address, u256_to_h256, }; +use crate::vm_1_3_2::{ + errors::VmRevertReasonParsingResult, + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::tracer::{ + utils::{ + computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, + }, + ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, StorageInvocationTracer, + }, +}; + #[derive(Debug, Clone, Eq, PartialEq, Copy)] #[allow(clippy::enum_variant_names)] pub enum ValidationTracerMode { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs index 936b85bfc09..23d42fc2b5a 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/pubdata_utils.rs @@ -1,14 +1,18 @@ -use crate::vm_1_3_2::history_recorder::HistoryMode; -use crate::vm_1_3_2::oracles::storage::storage_key_of_log; -use crate::vm_1_3_2::VmInstance; use std::collections::HashMap; + use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; -use zksync_types::event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}; -use zksync_types::zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries; -use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; +use zksync_types::{ + event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, + zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, + StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS, +}; use zksync_utils::bytecode::bytecode_len_in_bytes; +use crate::vm_1_3_2::{ + history_recorder::HistoryMode, oracles::storage::storage_key_of_log, VmInstance, +}; + impl VmInstance { pub fn pubdata_published(&self, 
from_timestamp: Timestamp) -> u32 { let storage_writes_pubdata_published = self.pubdata_published_for_writes(from_timestamp); diff --git a/core/lib/multivm/src/versions/vm_1_3_2/refunds.rs b/core/lib/multivm/src/versions/vm_1_3_2/refunds.rs index 0277379143b..9de2ee9676b 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/refunds.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/refunds.rs @@ -1,13 +1,14 @@ -use crate::vm_1_3_2::history_recorder::HistoryMode; -use crate::vm_1_3_2::vm_with_bootloader::{ - eth_price_per_pubdata_byte, BOOTLOADER_HEAP_PAGE, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_1_3_2::VmInstance; use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; use zksync_types::U256; use zksync_utils::ceil_div_u256; +use crate::vm_1_3_2::{ + history_recorder::HistoryMode, + vm_with_bootloader::{eth_price_per_pubdata_byte, BOOTLOADER_HEAP_PAGE, TX_GAS_LIMIT_OFFSET}, + VmInstance, +}; + impl VmInstance { pub(crate) fn tx_body_refund( &self, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs index e697e3b310d..c3aa161543a 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs @@ -10,8 +10,10 @@ use std::collections::HashMap; use itertools::Itertools; use zk_evm_1_3_3::{aux_structures::Timestamp, vm_state::VmLocalState}; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; -use zksync_contracts::{deployer_contract, get_loadnext_contract, load_contract}; +use zksync_contracts::{ + deployer_contract, get_loadnext_contract, load_contract, + test_contracts::LoadnextContractExecutionParams, +}; use zksync_state::WriteStorage; use zksync_types::{ ethabi::{Address, Token}, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs index d3a96dc06a7..f2c8f278f56 100644 --- 
a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs @@ -1,12 +1,14 @@ use zk_evm_1_3_3::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; -use zksync_types::ethabi::{encode, Address, Token}; -use zksync_types::fee::encoding_len; use zksync_types::{ - l1::is_l1_tx_type, l2::TransactionType, ExecuteTransactionCommon, Transaction, U256, + ethabi::{encode, Address, Token}, + fee::encoding_len, + l1::is_l1_tx_type, + l2::TransactionType, + ExecuteTransactionCommon, Transaction, MAX_L2_TX_GAS_LIMIT, MAX_TXS_IN_BLOCK, U256, +}; +use zksync_utils::{ + address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, ceil_div_u256, h256_to_u256, }; -use zksync_types::{MAX_L2_TX_GAS_LIMIT, MAX_TXS_IN_BLOCK}; -use zksync_utils::{address_to_h256, ceil_div_u256}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; use crate::vm_1_3_2::vm_with_bootloader::{ BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs index 44be1b9c8b9..d0a51cde7fa 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs @@ -1,13 +1,7 @@ -use crate::vm_1_3_2::history_recorder::HistoryMode; -use crate::vm_1_3_2::{ - memory::SimpleMemory, oracles::tracer::PubdataSpentTracer, vm_with_bootloader::BlockContext, - VmInstance, -}; use once_cell::sync::Lazy; - -use zk_evm_1_3_3::block_properties::BlockProperties; use zk_evm_1_3_3::{ aux_structures::{MemoryPage, Timestamp}, + block_properties::BlockProperties, vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; @@ -17,6 +11,11 @@ use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; +use crate::vm_1_3_2::{ + history_recorder::HistoryMode, memory::SimpleMemory, 
oracles::tracer::PubdataSpentTracer, + vm_with_bootloader::BlockContext, VmInstance, +}; + pub const INITIAL_TIMESTAMP: u32 = 1024; pub const INITIAL_MEMORY_COUNTER: u32 = 2048; pub const INITIAL_CALLDATA_PAGE: u32 = 7; diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 84b84d3e31a..f0cf5d9c1aa 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -1,21 +1,24 @@ -use crate::interface::{ - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, - L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, -}; - use std::collections::HashSet; use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; -use zksync_types::Transaction; -use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::glue::history_mode::HistoryMode; -use crate::glue::GlueInto; -use crate::vm_1_3_2::events::merge_events; -use crate::vm_1_3_2::VmInstance; +use zksync_types::{ + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + Transaction, +}; +use zksync_utils::{ + bytecode::{hash_bytecode, CompressedBytecodeInfo}, + h256_to_u256, u256_to_h256, +}; + +use crate::{ + glue::{history_mode::HistoryMode, GlueInto}, + interface::{ + BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, + L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + }, + vm_1_3_2::{events::merge_events, VmInstance}, +}; #[derive(Debug)] pub struct Vm { diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs index 8b7c416522e..2217b4f50d6 100644 --- 
a/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_instance.rs @@ -1,43 +1,52 @@ -use std::convert::TryFrom; -use std::fmt::Debug; - -use zk_evm_1_3_3::aux_structures::Timestamp; -use zk_evm_1_3_3::vm_state::{PrimitiveValue, VmLocalState, VmState}; -use zk_evm_1_3_3::witness_trace::DummyTracer; -use zk_evm_1_3_3::zkevm_opcode_defs::decoding::{ - AllowedPcOrImm, EncodingModeProduction, VmEncodingMode, +use std::{convert::TryFrom, fmt::Debug}; + +use zk_evm_1_3_3::{ + aux_structures::Timestamp, + vm_state::{PrimitiveValue, VmLocalState, VmState}, + witness_trace::DummyTracer, + zkevm_opcode_defs::{ + decoding::{AllowedPcOrImm, EncodingModeProduction, VmEncodingMode}, + definitions::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, + }, }; -use zk_evm_1_3_3::zkevm_opcode_defs::definitions::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER; use zksync_state::WriteStorage; use zksync_system_constants::MAX_TXS_IN_BLOCK; -use zksync_types::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; -use zksync_types::tx::tx_execution_info::TxExecutionStatus; -use zksync_types::vm_trace::{Call, VmExecutionTrace, VmTrace}; -use zksync_types::{L1BatchNumber, StorageLogQuery, VmEvent, H256, U256}; - -use crate::interface::types::outputs::VmExecutionLogs; -use crate::vm_1_3_2::bootloader_state::BootloaderState; -use crate::vm_1_3_2::errors::{TxRevertReason, VmRevertReason, VmRevertReasonParsingResult}; -use crate::vm_1_3_2::event_sink::InMemoryEventSink; -use crate::vm_1_3_2::events::merge_events; -use crate::vm_1_3_2::history_recorder::{HistoryEnabled, HistoryMode}; -use crate::vm_1_3_2::memory::SimpleMemory; -use crate::vm_1_3_2::oracles::decommitter::DecommitterOracle; -use crate::vm_1_3_2::oracles::precompile::PrecompilesProcessorWithHistory; -use crate::vm_1_3_2::oracles::storage::StorageOracle; -use crate::vm_1_3_2::oracles::tracer::{ - BootloaderTracer, ExecutionEndTracer, OneTxTracer, PendingRefundTracer, PubdataSpentTracer, - StorageInvocationTracer, 
TransactionResultTracer, ValidationError, ValidationTracer, - ValidationTracerParams, +use zksync_types::{ + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + tx::tx_execution_info::TxExecutionStatus, + vm_trace::{Call, VmExecutionTrace, VmTrace}, + L1BatchNumber, StorageLogQuery, VmEvent, H256, U256, }; -use crate::vm_1_3_2::oracles::OracleWithHistory; -use crate::vm_1_3_2::utils::{ - calculate_computational_gas_used, dump_memory_page_using_primitive_value, - precompile_calls_count_after_timestamp, -}; -use crate::vm_1_3_2::vm_with_bootloader::{ - BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, - OPERATOR_REFUNDS_OFFSET, + +use crate::{ + interface::types::outputs::VmExecutionLogs, + vm_1_3_2::{ + bootloader_state::BootloaderState, + errors::{TxRevertReason, VmRevertReason, VmRevertReasonParsingResult}, + event_sink::InMemoryEventSink, + events::merge_events, + history_recorder::{HistoryEnabled, HistoryMode}, + memory::SimpleMemory, + oracles::{ + decommitter::DecommitterOracle, + precompile::PrecompilesProcessorWithHistory, + storage::StorageOracle, + tracer::{ + BootloaderTracer, ExecutionEndTracer, OneTxTracer, PendingRefundTracer, + PubdataSpentTracer, StorageInvocationTracer, TransactionResultTracer, + ValidationError, ValidationTracer, ValidationTracerParams, + }, + OracleWithHistory, + }, + utils::{ + calculate_computational_gas_used, dump_memory_page_using_primitive_value, + precompile_calls_count_after_timestamp, + }, + vm_with_bootloader::{ + BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, + OPERATOR_REFUNDS_OFFSET, + }, + }, }; pub type ZkSyncVmState = VmState< diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs index c2ff035c669..71c108cae32 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs @@ -1,5 +1,6 @@ use 
std::collections::HashMap; +use itertools::Itertools; use zk_evm_1_3_3::{ aux_structures::{MemoryPage, Timestamp}, block_properties::BlockProperties, @@ -11,8 +12,8 @@ use zk_evm_1_3_3::{ }, }; use zksync_contracts::BaseSystemContracts; +use zksync_state::WriteStorage; use zksync_system_constants::MAX_TXS_IN_BLOCK; - use zksync_types::{ l1::is_l1_tx_type, zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, Transaction, BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, @@ -25,9 +26,6 @@ use zksync_utils::{ misc::ceil_div, }; -use itertools::Itertools; -use zksync_state::WriteStorage; - use crate::vm_1_3_2::{ bootloader_state::BootloaderState, history_recorder::HistoryMode, diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs index 6da9b64673e..146e8713c69 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/l2_block.rs @@ -1,11 +1,15 @@ use std::cmp::Ordering; + use zksync_types::{MiniblockNumber, H256}; use zksync_utils::concat_and_hash; -use crate::interface::{L2Block, L2BlockEnv}; -use crate::vm_latest::bootloader_state::snapshot::L2BlockSnapshot; -use crate::vm_latest::bootloader_state::tx::BootloaderTx; -use crate::vm_latest::utils::l2_blocks::l2_block_hash; +use crate::{ + interface::{L2Block, L2BlockEnv}, + vm_latest::{ + bootloader_state::{snapshot::L2BlockSnapshot, tx::BootloaderTx}, + utils::l2_blocks::l2_block_hash, + }, +}; const EMPTY_TXS_ROLLING_HASH: H256 = H256::zero(); diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs index b4641d9bc64..14e693a1d2e 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs @@ -1,20 +1,24 @@ 
-use crate::vm_latest::bootloader_state::l2_block::BootloaderL2Block; -use crate::vm_latest::bootloader_state::snapshot::BootloaderStateSnapshot; -use crate::vm_latest::bootloader_state::utils::{apply_l2_block, apply_tx_to_memory}; -use once_cell::sync::OnceCell; use std::cmp::Ordering; + +use once_cell::sync::OnceCell; use zksync_types::{L2ChainId, U256}; use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}; -use crate::vm_latest::types::internals::pubdata::PubdataInput; -use crate::vm_latest::{ - constants::TX_DESCRIPTION_OFFSET, types::internals::TransactionData, - utils::l2_blocks::assert_next_block, +use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory}; +use crate::{ + interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + vm_latest::{ + bootloader_state::{ + l2_block::BootloaderL2Block, + snapshot::BootloaderStateSnapshot, + utils::{apply_l2_block, apply_tx_to_memory}, + }, + constants::TX_DESCRIPTION_OFFSET, + types::internals::{pubdata::PubdataInput, TransactionData}, + utils::l2_blocks::assert_next_block, + }, }; -use super::tx::BootloaderTx; -use super::utils::apply_pubdata_to_memory; /// Intermediate bootloader-related VM state. /// /// Required to process transactions one by one (since we intercept the VM execution to execute diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs index dce0ecce3fb..21aee75b38b 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/tx.rs @@ -1,7 +1,8 @@ -use crate::vm_latest::types::internals::TransactionData; use zksync_types::{L2ChainId, H256, U256}; use zksync_utils::bytecode::CompressedBytecodeInfo; +use crate::vm_latest::types::internals::TransactionData; + /// Information about tx necessary for execution in bootloader. 
#[derive(Debug, Clone)] pub(super) struct BootloaderTx { diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs index 7e76f3faeff..16776be444e 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs @@ -1,18 +1,21 @@ use zksync_types::U256; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; - -use crate::interface::{BootloaderMemory, TxExecutionMode}; -use crate::vm_latest::bootloader_state::l2_block::BootloaderL2Block; -use crate::vm_latest::constants::{ - BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, COMPRESSED_BYTECODES_OFFSET, - OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET, OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS, - OPERATOR_REFUNDS_OFFSET, TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, -}; -use crate::vm_latest::types::internals::pubdata::PubdataInput; +use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; +use crate::{ + interface::{BootloaderMemory, TxExecutionMode}, + vm_latest::{ + bootloader_state::l2_block::BootloaderL2Block, + constants::{ + BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, + COMPRESSED_BYTECODES_OFFSET, OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET, + OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS, OPERATOR_REFUNDS_OFFSET, + TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, + }, + types::internals::pubdata::PubdataInput, + }, +}; pub(super) fn get_memory_for_compressed_bytecodes( compressed_bytecodes: &[CompressedBytecodeInfo], diff --git a/core/lib/multivm/src/versions/vm_latest/constants.rs 
b/core/lib/multivm/src/versions/vm_latest/constants.rs index 4d1c7705423..44266344be6 100644 --- a/core/lib/multivm/src/versions/vm_latest/constants.rs +++ b/core/lib/multivm/src/versions/vm_latest/constants.rs @@ -1,14 +1,12 @@ use zk_evm_1_4_0::aux_structures::MemoryPage; - +pub use zk_evm_1_4_0::zkevm_opcode_defs::system_params::{ + ERGS_PER_CIRCUIT, INITIAL_STORAGE_WRITE_PUBDATA_BYTES, MAX_PUBDATA_PER_BLOCK, +}; use zksync_system_constants::{ L1_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, MAX_TXS_IN_BLOCK, USED_BOOTLOADER_MEMORY_WORDS, }; -pub use zk_evm_1_4_0::zkevm_opcode_defs::system_params::{ - ERGS_PER_CIRCUIT, INITIAL_STORAGE_WRITE_PUBDATA_BYTES, MAX_PUBDATA_PER_BLOCK, -}; - use crate::vm_latest::old_vm::utils::heap_page_from_base; /// Max cycles for a single transaction. diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs index 83a7be74897..bda1803067f 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/bytecode.rs @@ -1,13 +1,12 @@ use itertools::Itertools; - -use crate::interface::VmInterface; -use crate::HistoryMode; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::U256; -use zksync_utils::bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}; -use zksync_utils::bytes_to_be_words; +use zksync_utils::{ + bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, + bytes_to_be_words, +}; -use crate::vm_latest::Vm; +use crate::{interface::VmInterface, vm_latest::Vm, HistoryMode}; impl Vm { /// Checks the last transaction has successfully published compressed bytecodes and returns `true` if there is at least one is still unknown. 
diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index 1b3197f57b9..a913ea3ed46 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -1,15 +1,19 @@ -use crate::HistoryMode; use zk_evm_1_4_0::aux_structures::Timestamp; use zksync_state::WriteStorage; -use crate::interface::{ - types::tracer::{TracerExecutionStatus, VmExecutionStopReason}, - VmExecutionMode, VmExecutionResultAndLogs, -}; -use crate::vm_latest::{ - old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, - tracers::{dispatcher::TracerDispatcher, DefaultExecutionTracer, PubdataTracer, RefundsTracer}, - vm::Vm, +use crate::{ + interface::{ + types::tracer::{TracerExecutionStatus, VmExecutionStopReason}, + VmExecutionMode, VmExecutionResultAndLogs, + }, + vm_latest::{ + old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, + tracers::{ + dispatcher::TracerDispatcher, DefaultExecutionTracer, PubdataTracer, RefundsTracer, + }, + vm::Vm, + }, + HistoryMode, }; impl Vm { diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/gas.rs b/core/lib/multivm/src/versions/vm_latest/implementation/gas.rs index c970cd4e5d2..526eab76f07 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/gas.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/gas.rs @@ -1,8 +1,9 @@ -use crate::HistoryMode; use zksync_state::WriteStorage; -use crate::vm_latest::tracers::DefaultExecutionTracer; -use crate::vm_latest::vm::Vm; +use crate::{ + vm_latest::{tracers::DefaultExecutionTracer, vm::Vm}, + HistoryMode, +}; impl Vm { /// Returns the amount of gas remaining to the VM. 
diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs b/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs index c468cf87817..9e0817aa939 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/logs.rs @@ -1,16 +1,16 @@ use zk_evm_1_4_0::aux_structures::Timestamp; use zksync_state::WriteStorage; -use zksync_types::event::extract_l2tol1logs_from_l1_messenger; +use zksync_types::{ + event::extract_l2tol1logs_from_l1_messenger, + l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}, + VmEvent, +}; -use crate::HistoryMode; -use zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log}; -use zksync_types::VmEvent; - -use crate::interface::types::outputs::VmExecutionLogs; - -use crate::vm_latest::old_vm::utils::precompile_calls_count_after_timestamp; -use crate::vm_latest::utils::logs; -use crate::vm_latest::vm::Vm; +use crate::{ + interface::types::outputs::VmExecutionLogs, + vm_latest::{old_vm::utils::precompile_calls_count_after_timestamp, utils::logs, vm::Vm}, + HistoryMode, +}; impl Vm { pub(crate) fn collect_execution_logs_after_timestamp( diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/snapshots.rs b/core/lib/multivm/src/versions/vm_latest/implementation/snapshots.rs index 99d41a2aec6..b6b45283438 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/snapshots.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/snapshots.rs @@ -1,7 +1,6 @@ -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; - use std::time::Duration; +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; use zk_evm_1_4_0::aux_structures::Timestamp; use zksync_state::WriteStorage; diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs index 92604479a88..6af9ad041fe 
100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs @@ -1,12 +1,12 @@ use zk_evm_1_4_0::aux_structures::Timestamp; use zksync_state::WriteStorage; - -use crate::HistoryMode; use zksync_types::U256; -use crate::interface::{VmExecutionStatistics, VmMemoryMetrics}; -use crate::vm_latest::tracers::DefaultExecutionTracer; -use crate::vm_latest::vm::Vm; +use crate::{ + interface::{VmExecutionStatistics, VmMemoryMetrics}, + vm_latest::{tracers::DefaultExecutionTracer, vm::Vm}, + HistoryMode, +}; /// Module responsible for observing the VM behavior, i.e. calculating the statistics of the VM runs /// or reporting the VM memory usage. diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs index 6def1da0f5d..326be41c5ee 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs @@ -1,13 +1,16 @@ -use crate::vm_latest::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_latest::implementation::bytecode::{bytecode_to_factory_dep, compress_bytecodes}; -use crate::HistoryMode; use zk_evm_1_4_0::aux_structures::Timestamp; use zksync_state::WriteStorage; -use zksync_types::l1::is_l1_tx_type; -use zksync_types::Transaction; +use zksync_types::{l1::is_l1_tx_type, Transaction}; -use crate::vm_latest::types::internals::TransactionData; -use crate::vm_latest::vm::Vm; +use crate::{ + vm_latest::{ + constants::BOOTLOADER_HEAP_PAGE, + implementation::bytecode::{bytecode_to_factory_dep, compress_bytecodes}, + types::internals::TransactionData, + vm::Vm, + }, + HistoryMode, +}; impl Vm { pub(crate) fn push_raw_transaction( diff --git a/core/lib/multivm/src/versions/vm_latest/mod.rs b/core/lib/multivm/src/versions/vm_latest/mod.rs index 49cd7111f6f..0b4919f83d7 100644 --- a/core/lib/multivm/src/versions/vm_latest/mod.rs +++ 
b/core/lib/multivm/src/versions/vm_latest/mod.rs @@ -1,15 +1,18 @@ -pub use old_vm::{ - history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}, - memory::SimpleMemory, -}; - -pub use oracles::storage::StorageOracle; - -pub use tracers::{ - dispatcher::TracerDispatcher, - traits::{ToTracerPointer, TracerPointer, VmTracer}, +pub use self::{ + bootloader_state::BootloaderState, + old_vm::{ + history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}, + memory::SimpleMemory, + }, + oracles::storage::StorageOracle, + tracers::{ + dispatcher::TracerDispatcher, + traits::{ToTracerPointer, TracerPointer, VmTracer}, + }, + types::internals::ZkSyncVmState, + utils::transaction_encoding::TransactionVmExt, + vm::Vm, }; - pub use crate::interface::types::{ inputs::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode}, outputs::{ @@ -17,23 +20,15 @@ pub use crate::interface::types::{ Refunds, VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, }, }; -pub use types::internals::ZkSyncVmState; -pub use utils::transaction_encoding::TransactionVmExt; - -pub use bootloader_state::BootloaderState; - -pub use vm::Vm; mod bootloader_state; +pub mod constants; mod implementation; mod old_vm; mod oracles; +#[cfg(test)] +mod tests; pub(crate) mod tracers; mod types; -mod vm; - -pub mod constants; pub mod utils; - -#[cfg(test)] -mod tests; +mod vm; diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs index 4174d9f4f17..8e7f4d447b4 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/event_sink.rs @@ -1,9 +1,6 @@ -use crate::vm_latest::old_vm::{ - history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, - oracles::OracleWithHistory, -}; -use itertools::Itertools; use std::collections::HashMap; + +use itertools::Itertools; use zk_evm_1_4_0::{ 
abstractions::EventSink, aux_structures::{LogQuery, Timestamp}, @@ -14,6 +11,11 @@ use zk_evm_1_4_0::{ }; use zksync_types::U256; +use crate::vm_latest::old_vm::{ + history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, + oracles::OracleWithHistory, +}; + #[derive(Debug, Clone, PartialEq, Default)] pub struct InMemoryEventSink { frames_stack: AppDataFrameManagerWithHistory, H>, diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/history_recorder.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/history_recorder.rs index 7c0490044d6..2253831b745 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/history_recorder.rs @@ -5,7 +5,6 @@ use zk_evm_1_4_0::{ vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; - use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{StorageKey, U256}; use zksync_utils::{h256_to_u256, u256_to_h256}; @@ -771,11 +770,14 @@ impl HistoryRecorder, H> { #[cfg(test)] mod tests { - use crate::vm_latest::old_vm::history_recorder::{HistoryRecorder, MemoryWrapper}; - use crate::vm_latest::HistoryDisabled; use zk_evm_1_4_0::{aux_structures::Timestamp, vm_state::PrimitiveValue}; use zksync_types::U256; + use crate::vm_latest::{ + old_vm::history_recorder::{HistoryRecorder, MemoryWrapper}, + HistoryDisabled, + }; + #[test] fn memory_equality() { let mut a: HistoryRecorder = Default::default(); diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/memory.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/memory.rs index 5694a725d93..5a7592ce965 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/memory.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/memory.rs @@ -1,16 +1,18 @@ -use zk_evm_1_4_0::abstractions::{Memory, MemoryType}; -use zk_evm_1_4_0::aux_structures::{MemoryPage, MemoryQuery, Timestamp}; -use zk_evm_1_4_0::vm_state::PrimitiveValue; -use zk_evm_1_4_0::zkevm_opcode_defs::FatPointer; +use 
zk_evm_1_4_0::{ + abstractions::{Memory, MemoryType}, + aux_structures::{MemoryPage, MemoryQuery, Timestamp}, + vm_state::PrimitiveValue, + zkevm_opcode_defs::FatPointer, +}; use zksync_types::U256; -use crate::vm_latest::old_vm::history_recorder::{ - FramedStack, HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, - MemoryWrapper, WithHistory, -}; -use crate::vm_latest::old_vm::oracles::OracleWithHistory; -use crate::vm_latest::old_vm::utils::{ - aux_heap_page_from_base, heap_page_from_base, stack_page_from_base, +use crate::vm_latest::old_vm::{ + history_recorder::{ + FramedStack, HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, + MemoryWrapper, WithHistory, + }, + oracles::OracleWithHistory, + utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}, }; #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs index c679532fa76..4a718917a21 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs @@ -1,23 +1,19 @@ -use std::collections::HashMap; -use std::fmt::Debug; +use std::{collections::HashMap, fmt::Debug}; -use crate::vm_latest::old_vm::history_recorder::{ - HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, -}; - -use zk_evm_1_4_0::abstractions::MemoryType; -use zk_evm_1_4_0::aux_structures::Timestamp; use zk_evm_1_4_0::{ - abstractions::{DecommittmentProcessor, Memory}, - aux_structures::{DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery}, + abstractions::{DecommittmentProcessor, Memory, MemoryType}, + aux_structures::{ + DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, + }, }; - use zksync_state::{ReadStorage, StoragePtr}; use zksync_types::U256; -use 
zksync_utils::bytecode::bytecode_len_in_words; -use zksync_utils::{bytes_to_be_words, u256_to_h256}; +use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; use super::OracleWithHistory; +use crate::vm_latest::old_vm::history_recorder::{ + HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, +}; /// The main job of the DecommiterOracle is to implement the DecommittmentProcessor trait - that is /// used by the VM to 'load' bytecodes into memory. diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/precompile.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/precompile.rs index ed3621fc497..92b88e40fc9 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/precompile.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/precompile.rs @@ -1,14 +1,11 @@ use zk_evm_1_4_0::{ - abstractions::Memory, - abstractions::PrecompileCyclesWitness, - abstractions::PrecompilesProcessor, + abstractions::{Memory, PrecompileCyclesWitness, PrecompilesProcessor}, aux_structures::{LogQuery, MemoryQuery, Timestamp}, zk_evm_abstractions::precompiles::DefaultPrecompilesProcessor, }; -use crate::vm_latest::old_vm::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder}; - use super::OracleWithHistory; +use crate::vm_latest::old_vm::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder}; /// Wrap of DefaultPrecompilesProcessor that store queue /// of timestamp when precompiles are called to be executed. 
diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/utils.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/utils.rs index afaa19cac87..1dbe82a81d4 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/utils.rs @@ -1,22 +1,19 @@ -use crate::vm_latest::old_vm::memory::SimpleMemory; - -use crate::vm_latest::types::internals::ZkSyncVmState; -use crate::vm_latest::HistoryMode; - -use zk_evm_1_4_0::zkevm_opcode_defs::decoding::{ - AllowedPcOrImm, EncodingModeProduction, VmEncodingMode, -}; -use zk_evm_1_4_0::zkevm_opcode_defs::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER; use zk_evm_1_4_0::{ aux_structures::{MemoryPage, Timestamp}, vm_state::PrimitiveValue, - zkevm_opcode_defs::FatPointer, + zkevm_opcode_defs::{ + decoding::{AllowedPcOrImm, EncodingModeProduction, VmEncodingMode}, + FatPointer, RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, + }, }; use zksync_state::WriteStorage; use zksync_system_constants::L1_GAS_PER_PUBDATA_BYTE; - use zksync_types::{Address, U256}; +use crate::vm_latest::{ + old_vm::memory::SimpleMemory, types::internals::ZkSyncVmState, HistoryMode, +}; + #[derive(Debug, Clone)] pub(crate) enum VmExecutionResult { Ok(Vec), diff --git a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs index beec2fa086f..60516083d29 100644 --- a/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/oracles/storage.rs @@ -1,28 +1,30 @@ use std::collections::HashMap; -use crate::vm_latest::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, VectorHistoryEvent, WithHistory, -}; -use crate::vm_latest::old_vm::oracles::OracleWithHistory; - -use zk_evm_1_4_0::abstractions::RefundedAmounts; -use zk_evm_1_4_0::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES; use 
zk_evm_1_4_0::{ - abstractions::{RefundType, Storage as VmStorageOracle}, + abstractions::{RefundType, RefundedAmounts, Storage as VmStorageOracle}, aux_structures::{LogQuery, Timestamp}, + zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; - use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_eth_balance; -use zksync_types::writes::compression::compress_with_best_strategy; -use zksync_types::writes::{BYTES_PER_DERIVED_KEY, BYTES_PER_ENUMERATION_INDEX}; use zksync_types::{ + utils::storage_key_for_eth_balance, + writes::{ + compression::compress_with_best_strategy, BYTES_PER_DERIVED_KEY, + BYTES_PER_ENUMERATION_INDEX, + }, AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; +use crate::vm_latest::old_vm::{ + history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, VectorHistoryEvent, WithHistory, + }, + oracles::OracleWithHistory, +}; + // While the storage does not support different shards, it was decided to write the // code of the StorageOracle with the shard parameters in mind. 
pub(crate) fn triplet_to_storage_key(_shard_id: u8, address: Address, key: U256) -> StorageKey { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs index b2763f358be..78fb964f722 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs @@ -1,15 +1,17 @@ use zksync_types::U256; -use crate::interface::{Halt, TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_latest::tests::tester::VmTesterBuilder; -use crate::vm_latest::tests::utils::{ - get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS, +use crate::{ + interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + constants::BOOTLOADER_HEAP_PAGE, + tests::{ + tester::VmTesterBuilder, + utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, + }, + HistoryEnabled, + }, }; -use crate::interface::ExecutionResult; -use crate::vm_latest::HistoryEnabled; - #[test] fn test_dummy_bootloader() { let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs index e574a881d91..a0c10addff9 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs @@ -1,10 +1,16 @@ use zksync_types::event::extract_long_l2_to_l1_messages; use zksync_utils::bytecode::compress_bytecode; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_latest::tests::utils::read_test_contract; -use crate::vm_latest::HistoryEnabled; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, 
VmInterface}, + vm_latest::{ + tests::{ + tester::{DeployContractsTx, TxType, VmTesterBuilder}, + utils::read_test_contract, + }, + HistoryEnabled, + }, +}; #[test] fn test_bytecode_publishing() { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs index e5b1ce15fcd..2f8f37e081b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs @@ -1,13 +1,21 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::tracers::CallTracer; -use crate::vm_latest::constants::BLOCK_GAS_LIMIT; -use crate::vm_latest::tests::tester::VmTesterBuilder; -use crate::vm_latest::tests::utils::{read_max_depth_contract, read_test_contract}; -use crate::vm_latest::{HistoryEnabled, ToTracerPointer}; -use once_cell::sync::OnceCell; use std::sync::Arc; + +use once_cell::sync::OnceCell; use zksync_types::{Address, Execute}; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + tracers::CallTracer, + vm_latest::{ + constants::BLOCK_GAS_LIMIT, + tests::{ + tester::VmTesterBuilder, + utils::{read_max_depth_contract, read_test_contract}, + }, + HistoryEnabled, ToTracerPointer, + }, +}; + // This test is ultra slow, so it's ignored by default. 
#[test] #[ignore] diff --git a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs index b31e32270d9..7c951e31321 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs @@ -1,13 +1,21 @@ use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; - -use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; +use zksync_types::{ + get_code_key, get_known_code_key, get_nonce_key, + system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, + AccountTreeId, U256, +}; use zksync_utils::u256_to_h256; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_latest::tests::utils::{get_balance, read_test_contract, verify_required_storage}; -use crate::vm_latest::HistoryEnabled; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + tests::{ + tester::{DeployContractsTx, TxType, VmTesterBuilder}, + utils::{get_balance, read_test_contract, verify_required_storage}, + }, + HistoryEnabled, + }, +}; #[test] fn test_default_aa_interaction() { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs index 6bebffeacee..533d9ec660e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs @@ -1,13 +1,13 @@ -use zksync_types::fee::Fee; -use zksync_types::Execute; +use zksync_types::{fee::Fee, Execute}; -use crate::vm_latest::constants::{ - BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, +use crate::{ + interface::{TxExecutionMode, VmInterface}, + vm_latest::{ + constants::{BOOTLOADER_HEAP_PAGE, 
TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, + tests::tester::VmTesterBuilder, + HistoryDisabled, + }, }; -use crate::vm_latest::tests::tester::VmTesterBuilder; - -use crate::interface::{TxExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; /// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. #[test] diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index 688711d5a9c..b82057bef8b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -1,19 +1,23 @@ use std::collections::{HashMap, HashSet}; use itertools::Itertools; - -use crate::HistoryMode; use zksync_state::WriteStorage; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_test_account::Account; use zksync_types::{Execute, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::h256_to_u256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_latest::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; -use crate::vm_latest::{HistoryDisabled, Vm}; +use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; + +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + tests::{ + tester::{TxType, VmTesterBuilder}, + utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, + }, + HistoryDisabled, Vm, + }, + HistoryMode, +}; #[test] fn test_get_used_contracts() { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs index d40f9109dcb..d5a6679502b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs @@ -1,10 +1,16 @@ use zksync_state::ReadStorage; 
use zksync_types::get_nonce_key; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::tests::tester::{Account, TxType, VmTesterBuilder}; -use crate::vm_latest::tests::utils::read_test_contract; -use crate::vm_latest::HistoryDisabled; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + tests::{ + tester::{Account, TxType, VmTesterBuilder}, + utils::read_test_contract, + }, + HistoryDisabled, + }, +}; #[test] fn test_is_write_initial_behaviour() { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 5c1bdbad58a..4f61dd90fad 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -1,16 +1,23 @@ use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, U256}; +use zksync_types::{ + get_code_key, get_known_code_key, + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + storage_writes_deduplicator::StorageWritesDeduplicator, + U256, +}; use zksync_utils::u256_to_h256; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_latest::tests::utils::{ - read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS, +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + tests::{ + tester::{TxType, VmTesterBuilder}, + utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, + }, + types::internals::TransactionData, + HistoryEnabled, + }, }; -use crate::vm_latest::types::internals::TransactionData; -use crate::vm_latest::HistoryEnabled; #[test] fn test_l1_tx_execution() { diff 
--git a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs index 4fd4e0207d4..81939d402ff 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs @@ -3,23 +3,11 @@ //! The description for each of the tests can be found in the corresponding `.yul` file. //! -use crate::interface::{ - ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface, -}; -use crate::vm_latest::constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, -}; -use crate::vm_latest::tests::tester::default_l1_batch; -use crate::vm_latest::tests::tester::VmTesterBuilder; -use crate::vm_latest::utils::l2_blocks::get_l2_block_hash_key; -use crate::vm_latest::{HistoryEnabled, Vm}; -use crate::HistoryMode; use zk_evm_1_4_0::aux_structures::Timestamp; use zksync_state::WriteStorage; use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::block::pack_block_info; use zksync_types::{ - block::{legacy_miniblock_hash, miniblock_hash}, + block::{legacy_miniblock_hash, miniblock_hash, pack_block_info}, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, @@ -27,6 +15,20 @@ use zksync_types::{ }; use zksync_utils::{h256_to_u256, u256_to_h256}; +use crate::{ + interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + constants::{ + BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, + }, + tests::tester::{default_l1_batch, VmTesterBuilder}, + utils::l2_blocks::get_l2_block_hash_key, + HistoryEnabled, Vm, + }, + HistoryMode, +}; + fn get_l1_noop() -> Transaction { Transaction { 
common_data: ExecuteTransactionCommon::L1(L1TxCommonData { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index dedaae5c933..2de5e23bdd2 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -1,12 +1,19 @@ use zksync_types::{Execute, Nonce}; -use crate::interface::VmRevertReason; -use crate::interface::{ExecutionResult, Halt, TxRevertReason, VmExecutionMode}; -use crate::interface::{TxExecutionMode, VmInterface}; -use crate::vm_latest::tests::tester::{Account, VmTesterBuilder}; -use crate::vm_latest::tests::utils::read_nonce_holder_tester; -use crate::vm_latest::types::internals::TransactionData; -use crate::vm_latest::HistoryEnabled; +use crate::{ + interface::{ + ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, + VmRevertReason, + }, + vm_latest::{ + tests::{ + tester::{Account, VmTesterBuilder}, + utils::read_nonce_holder_tester, + }, + types::internals::TransactionData, + HistoryEnabled, + }, +}; pub enum NonceHolderTestMode { SetValueUnderNonce, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index 9d4afcdb317..dc1f4fe55bc 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -1,9 +1,14 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_latest::tests::utils::read_test_contract; - -use crate::vm_latest::types::internals::TransactionData; -use crate::vm_latest::HistoryEnabled; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + tests::{ + tester::{DeployContractsTx, TxType, VmTesterBuilder}, + utils::read_test_contract, + }, 
+ types::internals::TransactionData, + HistoryEnabled, + }, +}; #[test] fn test_predetermined_refunded_gas() { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index 1ad6f351206..c03e5fe6421 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -1,22 +1,24 @@ use std::convert::TryInto; use ethabi::Token; - -use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; -use zksync_eth_signer::EthereumSigner; +use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::TransactionRequest; -use zksync_types::utils::storage_key_for_standard_token_balance; use zksync_types::{ - AccountTreeId, Address, Eip712Domain, Execute, L2ChainId, Nonce, Transaction, U256, + fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, + utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, + L2ChainId, Nonce, Transaction, U256, }; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::tests::tester::{Account, VmTester, VmTesterBuilder}; -use crate::vm_latest::tests::utils::read_many_owners_custom_account_contract; -use crate::vm_latest::HistoryDisabled; +use crate::{ + interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + vm_latest::{ + tests::{ + tester::{Account, VmTester, VmTesterBuilder}, + utils::read_many_owners_custom_account_contract, + }, + HistoryDisabled, + }, +}; impl VmTester { pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 343d30dcd95..23c1ab49ad9 100644 --- 
a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -1,21 +1,22 @@ use ethabi::Token; - -use zksync_contracts::get_loadnext_contract; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; - +use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; use zksync_state::WriteStorage; use zksync_types::{get_nonce_key, Execute, U256}; -use crate::interface::dyn_tracers::vm_1_4_0::DynTracer; -use crate::interface::tracer::{TracerExecutionStatus, TracerExecutionStopReason}; -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled}; -use crate::vm_latest::tests::tester::{ - DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder, -}; -use crate::vm_latest::tests::utils::read_test_contract; -use crate::vm_latest::types::internals::ZkSyncVmState; -use crate::vm_latest::{ - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, +use crate::{ + interface::{ + dyn_tracers::vm_1_4_0::DynTracer, + tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, + }, + vm_latest::{ + tests::{ + tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, + utils::read_test_contract, + }, + types::internals::ZkSyncVmState, + BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, + }, }; #[test] diff --git a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs index 9f0c855b459..a864538524a 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs @@ -1,6 +1,10 @@ -use crate::interface::{ExecutionResult, VmExecutionMode, VmInterface}; -use 
crate::vm_latest::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_latest::HistoryDisabled; +use crate::{ + interface::{ExecutionResult, VmExecutionMode, VmInterface}, + vm_latest::{ + tests::tester::{TxType, VmTesterBuilder}, + HistoryDisabled, + }, +}; #[test] fn estimate_fee() { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs index 4767f934479..b82e995c2db 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs @@ -1,15 +1,19 @@ use std::collections::HashMap; -use zk_evm_1_4_0::aux_structures::Timestamp; -use zk_evm_1_4_0::vm_state::VmLocalState; +use zk_evm_1_4_0::{aux_structures::Timestamp, vm_state::VmLocalState}; use zksync_state::WriteStorage; - use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; -use crate::vm_latest::old_vm::event_sink::InMemoryEventSink; -use crate::vm_latest::old_vm::history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}; -use crate::vm_latest::{HistoryEnabled, HistoryMode, SimpleMemory, Vm}; -use crate::HistoryMode as CommonHistoryMode; +use crate::{ + vm_latest::{ + old_vm::{ + event_sink::InMemoryEventSink, + history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, + }, + HistoryEnabled, HistoryMode, SimpleMemory, Vm, + }, + HistoryMode as CommonHistoryMode, +}; #[derive(Clone, Debug)] pub(crate) struct ModifiedKeysMap(HashMap); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs index 6fdfa7955e0..114f80d1a21 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs @@ -1,12 +1,12 @@ use zksync_types::{ExecuteTransactionCommon, Transaction}; -use 
crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, +use crate::{ + interface::{ + CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, + VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, + }, + vm_latest::{tests::tester::vm_tester::VmTester, HistoryEnabled}, }; -use crate::interface::{VmInterface, VmInterfaceHistoryEnabled, VmRevertReason}; -use crate::vm_latest::tests::tester::vm_tester::VmTester; -use crate::vm_latest::HistoryEnabled; #[derive(Debug, Clone)] pub(crate) enum TxModifier { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs index cbf009d5b02..6218a391824 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs @@ -1,28 +1,31 @@ use std::marker::PhantomData; + use zksync_contracts::BaseSystemContracts; use zksync_state::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; - -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::helpers::unix_timestamp_ms; -use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance}; use zksync_types::{ - get_code_key, get_is_account_key, Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, - ProtocolVersionId, U256, + block::legacy_miniblock_hash, + get_code_key, get_is_account_key, + helpers::unix_timestamp_ms, + utils::{deployed_address_create, storage_key_for_eth_balance}, + Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, }; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::u256_to_h256; - -use crate::vm_latest::constants::BLOCK_GAS_LIMIT; - -use crate::interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, +use zksync_utils::{bytecode::hash_bytecode, 
u256_to_h256}; + +use crate::{ + interface::{ + L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, + }, + vm_latest::{ + constants::BLOCK_GAS_LIMIT, + tests::{ + tester::{Account, TxType}, + utils::read_test_contract, + }, + utils::l2_blocks::load_last_l2_block, + Vm, + }, + HistoryMode, }; -use crate::vm_latest::tests::tester::Account; -use crate::vm_latest::tests::tester::TxType; -use crate::vm_latest::tests::utils::read_test_contract; -use crate::vm_latest::utils::l2_blocks::load_last_l2_block; -use crate::vm_latest::Vm; -use crate::HistoryMode; pub(crate) type InMemoryStorageView = StorageView; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs index f55eadecde6..f02de899b03 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs @@ -1,12 +1,15 @@ use zksync_types::{Execute, H160}; -use crate::interface::TxExecutionMode; -use crate::interface::{TxRevertReason, VmRevertReason}; -use crate::vm_latest::tests::tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}; -use crate::vm_latest::tests::utils::{ - get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS, +use crate::{ + interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, + vm_latest::{ + tests::{ + tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, + utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, + }, + HistoryEnabled, + }, }; -use crate::vm_latest::HistoryEnabled; #[test] fn test_tracing_of_execution_errors() { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs index 65780114e9a..b5c493ca707 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs +++ 
b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs @@ -1,28 +1,28 @@ use zk_evm_1_4_0::aux_structures::Timestamp; - -use zksync_types::{ - ethabi::Contract, - Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, - {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, - {get_code_key, get_known_code_key, H160}, -}; - -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; use zksync_state::WriteStorage; use zksync_test_account::TxType; - -use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, +use zksync_types::{ + ethabi::{Contract, Token}, + get_code_key, get_known_code_key, + protocol_version::ProtocolUpgradeTxCommonData, + Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, + CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, + REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, }; -use crate::vm_latest::tests::tester::VmTesterBuilder; -use crate::vm_latest::tests::utils::verify_required_storage; -use crate::vm_latest::HistoryEnabled; -use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; use super::utils::read_test_contract; +use crate::{ + interface::{ + ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, + VmInterfaceHistoryEnabled, + }, + vm_latest::{ + tests::{tester::VmTesterBuilder, utils::verify_required_storage}, + HistoryEnabled, + }, +}; /// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: /// - This transaction must be the only one in block diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs 
b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs index e30f0b9f39a..90bc8c2aef2 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs @@ -1,18 +1,17 @@ use ethabi::Contract; use once_cell::sync::Lazy; - -use crate::vm_latest::tests::tester::InMemoryStorageView; use zksync_contracts::{ load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, }; use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; +use zksync_types::{ + utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; -use crate::vm_latest::types::internals::ZkSyncVmState; -use crate::vm_latest::HistoryMode; +use crate::vm_latest::{ + tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, +}; pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs index 9582e6e1053..0e18d989af6 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs @@ -1,8 +1,8 @@ -use std::fmt::{Debug, Formatter}; -use std::marker::PhantomData; +use std::{ + fmt::{Debug, Formatter}, + marker::PhantomData, +}; -use crate::interface::tracer::{TracerExecutionStopReason, VmExecutionStopReason}; -use crate::interface::{Halt, VmExecutionMode}; use zk_evm_1_4_0::{ tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, 
VmLocalStateData, @@ -14,23 +14,30 @@ use zk_evm_1_4_0::{ use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::Timestamp; -use crate::interface::traits::tracers::dyn_tracers::vm_1_4_0::DynTracer; -use crate::interface::types::tracer::TracerExecutionStatus; -use crate::vm_latest::bootloader_state::utils::apply_l2_block; -use crate::vm_latest::bootloader_state::BootloaderState; -use crate::vm_latest::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_latest::old_vm::history_recorder::HistoryMode; -use crate::vm_latest::old_vm::memory::SimpleMemory; -use crate::vm_latest::tracers::dispatcher::TracerDispatcher; -use crate::vm_latest::tracers::utils::{ - computational_gas_price, gas_spent_on_bytecodes_and_long_messages_this_opcode, - print_debug_if_needed, VmHook, -}; -use crate::vm_latest::tracers::{RefundsTracer, ResultTracer}; -use crate::vm_latest::types::internals::ZkSyncVmState; -use crate::vm_latest::VmTracer; - use super::PubdataTracer; +use crate::{ + interface::{ + tracer::{TracerExecutionStopReason, VmExecutionStopReason}, + traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, + types::tracer::TracerExecutionStatus, + Halt, VmExecutionMode, + }, + vm_latest::{ + bootloader_state::{utils::apply_l2_block, BootloaderState}, + constants::BOOTLOADER_HEAP_PAGE, + old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, + tracers::{ + dispatcher::TracerDispatcher, + utils::{ + computational_gas_price, gas_spent_on_bytecodes_and_long_messages_this_opcode, + print_debug_if_needed, VmHook, + }, + RefundsTracer, ResultTracer, + }, + types::internals::ZkSyncVmState, + VmTracer, + }, +}; /// Default tracer for the VM. It manages the other tracers execution and stop the vm when needed. 
pub(crate) struct DefaultExecutionTracer { diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/dispatcher.rs b/core/lib/multivm/src/versions/vm_latest/tracers/dispatcher.rs index b75277670dc..5ee5c8ab0c1 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/dispatcher.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/dispatcher.rs @@ -1,13 +1,18 @@ -use crate::interface::dyn_tracers::vm_1_4_0::DynTracer; -use crate::interface::tracer::{TracerExecutionStatus, VmExecutionStopReason}; -use crate::vm_latest::{ - BootloaderState, HistoryMode, SimpleMemory, TracerPointer, VmTracer, ZkSyncVmState, -}; use zk_evm_1_4_0::tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, VmLocalStateData, }; use zksync_state::{StoragePtr, WriteStorage}; +use crate::{ + interface::{ + dyn_tracers::vm_1_4_0::DynTracer, + tracer::{TracerExecutionStatus, VmExecutionStopReason}, + }, + vm_latest::{ + BootloaderState, HistoryMode, SimpleMemory, TracerPointer, VmTracer, ZkSyncVmState, + }, +}; + /// Tracer dispatcher is a tracer that can dispatch calls to multiple tracers. 
pub struct TracerDispatcher { tracers: Vec>, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs index 59a9d8eb452..388b5ef209e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs @@ -1,9 +1,9 @@ use std::marker::PhantomData; + use zk_evm_1_4_0::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; - use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{ event::{ @@ -14,24 +14,24 @@ use zksync_types::{ zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS, }; -use zksync_utils::u256_to_h256; -use zksync_utils::{h256_to_u256, u256_to_bytes_be}; +use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; -use crate::vm_latest::{ - old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, - types::internals::pubdata::PubdataInput, -}; -use crate::{vm_latest::constants::BOOTLOADER_HEAP_PAGE, vm_latest::StorageOracle}; - -use crate::interface::dyn_tracers::vm_1_4_0::DynTracer; -use crate::interface::tracer::{TracerExecutionStatus, TracerExecutionStopReason}; -use crate::interface::types::inputs::L1BatchEnv; -use crate::vm_latest::tracers::{traits::VmTracer, utils::VmHook}; -use crate::vm_latest::types::internals::ZkSyncVmState; -use crate::vm_latest::utils::logs::collect_events_and_l1_system_logs_after_timestamp; use crate::{ - interface::VmExecutionMode, - vm_latest::bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, + interface::{ + dyn_tracers::vm_1_4_0::DynTracer, + tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + types::inputs::L1BatchEnv, + VmExecutionMode, + }, + vm_latest::{ + bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, + constants::BOOTLOADER_HEAP_PAGE, + old_vm::{history_recorder::HistoryMode, 
memory::SimpleMemory}, + tracers::{traits::VmTracer, utils::VmHook}, + types::internals::{pubdata::PubdataInput, ZkSyncVmState}, + utils::logs::collect_events_and_l1_system_logs_after_timestamp, + StorageOracle, + }, }; /// Tracer responsible for collecting information about refunds. diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs index f3e6c336684..e852fba1dac 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/refunds.rs @@ -1,9 +1,6 @@ use std::marker::PhantomData; -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; -use crate::interface::traits::tracers::dyn_tracers::vm_1_4_0::DynTracer; -use crate::interface::types::tracer::TracerExecutionStatus; -use crate::interface::{L1BatchEnv, Refunds}; +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; use zk_evm_1_4_0::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, @@ -16,24 +13,29 @@ use zksync_types::{ l2_to_l1_log::L2ToL1Log, L1BatchNumber, U256, }; -use zksync_utils::bytecode::bytecode_len_in_bytes; -use zksync_utils::{ceil_div_u256, u256_to_h256}; - -use crate::vm_latest::constants::{ - BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_latest::old_vm::{ - events::merge_events, history_recorder::HistoryMode, memory::SimpleMemory, - utils::eth_price_per_pubdata_byte, -}; - -use crate::vm_latest::bootloader_state::BootloaderState; -use crate::vm_latest::tracers::utils::gas_spent_on_bytecodes_and_long_messages_this_opcode; -use crate::vm_latest::tracers::{ - traits::VmTracer, - utils::{get_vm_hook_params, VmHook}, +use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, u256_to_h256}; + +use crate::{ + interface::{ + traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, types::tracer::TracerExecutionStatus, + L1BatchEnv, 
Refunds, + }, + vm_latest::{ + bootloader_state::BootloaderState, + constants::{BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET}, + old_vm::{ + events::merge_events, history_recorder::HistoryMode, memory::SimpleMemory, + utils::eth_price_per_pubdata_byte, + }, + tracers::{ + traits::VmTracer, + utils::{ + gas_spent_on_bytecodes_and_long_messages_this_opcode, get_vm_hook_params, VmHook, + }, + }, + types::internals::ZkSyncVmState, + }, }; -use crate::vm_latest::types::internals::ZkSyncVmState; /// Tracer responsible for collecting information about refunds. #[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/result_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/result_tracer.rs index 7e6e08a0a49..b3412587725 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/result_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/result_tracer.rs @@ -1,27 +1,29 @@ use std::marker::PhantomData; + use zk_evm_1_4_0::{ tracing::{AfterDecodingData, BeforeExecutionData, VmLocalStateData}, vm_state::{ErrorFlags, VmLocalState}, zkevm_opcode_defs::FatPointer, }; use zksync_state::{StoragePtr, WriteStorage}; - -use crate::interface::{ - tracer::VmExecutionStopReason, traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, - types::tracer::TracerExecutionStopReason, ExecutionResult, Halt, TxRevertReason, - VmExecutionMode, VmRevertReason, -}; use zksync_types::U256; -use crate::vm_latest::{ - constants::{BOOTLOADER_HEAP_PAGE, RESULT_SUCCESS_FIRST_SLOT}, - old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, - tracers::{ - traits::VmTracer, - utils::{get_vm_hook_params, read_pointer, VmHook}, +use crate::{ + interface::{ + tracer::VmExecutionStopReason, traits::tracers::dyn_tracers::vm_1_4_0::DynTracer, + types::tracer::TracerExecutionStopReason, ExecutionResult, Halt, TxRevertReason, + VmExecutionMode, VmRevertReason, + }, + vm_latest::{ + constants::{BOOTLOADER_HEAP_PAGE, RESULT_SUCCESS_FIRST_SLOT}, + 
old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, + tracers::{ + traits::VmTracer, + utils::{get_vm_hook_params, read_pointer, VmHook}, + }, + types::internals::ZkSyncVmState, + BootloaderState, HistoryMode, SimpleMemory, }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryMode, SimpleMemory, }; #[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/traits.rs b/core/lib/multivm/src/versions/vm_latest/tracers/traits.rs index a3970541bac..68307b3f286 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/traits.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/traits.rs @@ -1,11 +1,16 @@ -use crate::interface::dyn_tracers::vm_1_4_0::DynTracer; -use crate::interface::tracer::{TracerExecutionStatus, VmExecutionStopReason}; use zksync_state::WriteStorage; -use crate::vm_latest::bootloader_state::BootloaderState; -use crate::vm_latest::old_vm::history_recorder::HistoryMode; -use crate::vm_latest::old_vm::memory::SimpleMemory; -use crate::vm_latest::types::internals::ZkSyncVmState; +use crate::{ + interface::{ + dyn_tracers::vm_1_4_0::DynTracer, + tracer::{TracerExecutionStatus, VmExecutionStopReason}, + }, + vm_latest::{ + bootloader_state::BootloaderState, + old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, + types::internals::ZkSyncVmState, + }, +}; pub type TracerPointer = Box>; diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs index 52ff84f8c3c..93710586fda 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs @@ -1,10 +1,10 @@ -use zk_evm_1_4_0::aux_structures::MemoryPage; -use zk_evm_1_4_0::zkevm_opcode_defs::{FarCallABI, FarCallForwardPageType}; use zk_evm_1_4_0::{ + aux_structures::MemoryPage, tracing::{BeforeExecutionData, VmLocalStateData}, - zkevm_opcode_defs::{FatPointer, LogOpcode, Opcode, UMAOpcode}, + zkevm_opcode_defs::{ + 
FarCallABI, FarCallForwardPageType, FatPointer, LogOpcode, Opcode, UMAOpcode, + }, }; - use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, @@ -12,12 +12,16 @@ use zksync_system_constants::{ use zksync_types::U256; use zksync_utils::u256_to_h256; -use crate::vm_latest::constants::{ - BOOTLOADER_HEAP_PAGE, VM_HOOK_PARAMS_COUNT, VM_HOOK_PARAMS_START_POSITION, VM_HOOK_POSITION, +use crate::vm_latest::{ + constants::{ + BOOTLOADER_HEAP_PAGE, VM_HOOK_PARAMS_COUNT, VM_HOOK_PARAMS_START_POSITION, VM_HOOK_POSITION, + }, + old_vm::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + utils::{aux_heap_page_from_base, heap_page_from_base}, + }, }; -use crate::vm_latest::old_vm::history_recorder::HistoryMode; -use crate::vm_latest::old_vm::memory::SimpleMemory; -use crate::vm_latest::old_vm::utils::{aux_heap_page_from_base, heap_page_from_base}; #[derive(Clone, Debug, Copy)] pub(crate) enum VmHook { diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs index e246bceeac5..fd00b333c72 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs @@ -1,5 +1,5 @@ -use zksync_types::ethabi; use zksync_types::{ + ethabi, event::L1MessengerL2ToL1Log, writes::{compress_state_diffs, StateDiffRecord}, }; diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index 3c7b9bcac03..f5b97ca9793 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -1,15 +1,15 @@ use std::convert::TryInto; -use zksync_types::ethabi::{encode, Address, Token}; -use 
zksync_types::fee::{encoding_len, Fee}; -use zksync_types::l1::is_l1_tx_type; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::{PaymasterParams, TransactionRequest}; + use zksync_types::{ - l2::TransactionType, Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, - Nonce, Transaction, H256, U256, + ethabi::{encode, Address, Token}, + fee::{encoding_len, Fee}, + l1::is_l1_tx_type, + l2::{L2Tx, TransactionType}, + transaction_request::{PaymasterParams, TransactionRequest}, + Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, + U256, }; -use zksync_utils::address_to_h256; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; use crate::vm_latest::utils::overhead::{get_amortized_overhead, OverheadCoefficients}; @@ -303,9 +303,10 @@ impl TryInto for TransactionData { #[cfg(test)] mod tests { - use super::*; use zksync_types::fee::encoding_len; + use super::*; + #[test] fn test_consistency_with_encoding_length() { let transaction = TransactionData { diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs index 0c519a324c0..f65785dcfe5 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs @@ -1,34 +1,40 @@ use zk_evm_1_4_0::{ - aux_structures::MemoryPage, - aux_structures::Timestamp, + aux_structures::{MemoryPage, Timestamp}, block_properties::BlockProperties, vm_state::{CallStackEntry, PrimitiveValue, VmState}, witness_trace::DummyTracer, zkevm_opcode_defs::{ system_params::{BOOTLOADER_MAX_MEMORY, INITIAL_FRAME_FORMAL_EH_LOCATION}, - FatPointer, BOOTLOADER_CALLDATA_PAGE, + FatPointer, BOOTLOADER_BASE_PAGE, BOOTLOADER_CALLDATA_PAGE, BOOTLOADER_CODE_PAGE, + STARTING_BASE_PAGE, 
STARTING_TIMESTAMP, }, }; - -use crate::interface::{L1BatchEnv, L2Block, SystemEnv}; -use zk_evm_1_4_0::zkevm_opcode_defs::{ - BOOTLOADER_BASE_PAGE, BOOTLOADER_CODE_PAGE, STARTING_BASE_PAGE, STARTING_TIMESTAMP, -}; use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::{zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, MiniblockNumber}; +use zksync_types::{ + block::legacy_miniblock_hash, zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, + MiniblockNumber, +}; use zksync_utils::h256_to_u256; -use crate::vm_latest::bootloader_state::BootloaderState; -use crate::vm_latest::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_latest::old_vm::{ - event_sink::InMemoryEventSink, history_recorder::HistoryMode, memory::SimpleMemory, - oracles::decommitter::DecommitterOracle, oracles::precompile::PrecompilesProcessorWithHistory, +use crate::{ + interface::{L1BatchEnv, L2Block, SystemEnv}, + vm_latest::{ + bootloader_state::BootloaderState, + constants::BOOTLOADER_HEAP_PAGE, + old_vm::{ + event_sink::InMemoryEventSink, + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::{ + decommitter::DecommitterOracle, precompile::PrecompilesProcessorWithHistory, + }, + }, + oracles::storage::StorageOracle, + types::l1_batch::bootloader_initial_memory, + utils::l2_blocks::{assert_next_block, load_last_l2_block}, + }, }; -use crate::vm_latest::oracles::storage::StorageOracle; -use crate::vm_latest::types::l1_batch::bootloader_initial_memory; -use crate::vm_latest::utils::l2_blocks::{assert_next_block, load_last_l2_block}; pub type ZkSyncVmState = VmState< StorageOracle, diff --git a/core/lib/multivm/src/versions/vm_latest/types/l1_batch.rs b/core/lib/multivm/src/versions/vm_latest/types/l1_batch.rs index 631f1436cc3..6f16e95f8d7 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/l1_batch.rs +++ 
b/core/lib/multivm/src/versions/vm_latest/types/l1_batch.rs @@ -1,7 +1,8 @@ -use crate::interface::L1BatchEnv; use zksync_types::U256; use zksync_utils::{address_to_u256, h256_to_u256}; +use crate::interface::L1BatchEnv; + const OPERATOR_ADDRESS_SLOT: usize = 0; const PREV_BLOCK_HASH_SLOT: usize = 1; const NEW_BLOCK_TIMESTAMP_SLOT: usize = 2; diff --git a/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs b/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs index 3d5f58094e0..5dd26c4c027 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/l2_blocks.rs @@ -1,15 +1,17 @@ -use crate::interface::{L2Block, L2BlockEnv}; use zksync_state::{ReadStorage, StoragePtr}; use zksync_system_constants::{ SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES, }; -use zksync_types::block::unpack_block_info; -use zksync_types::web3::signing::keccak256; -use zksync_types::{AccountTreeId, MiniblockNumber, StorageKey, H256, U256}; +use zksync_types::{ + block::unpack_block_info, web3::signing::keccak256, AccountTreeId, MiniblockNumber, StorageKey, + H256, U256, +}; use zksync_utils::{h256_to_u256, u256_to_h256}; +use crate::interface::{L2Block, L2BlockEnv}; + pub(crate) fn get_l2_block_hash_key(block_number: u32) -> StorageKey { let position = h256_to_u256(SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION) + U256::from(block_number % SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES); diff --git a/core/lib/multivm/src/versions/vm_latest/utils/logs.rs b/core/lib/multivm/src/versions/vm_latest/utils/logs.rs index a2b7f548684..b7fa07956a9 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/logs.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/logs.rs @@ -2,8 +2,11 @@ use zksync_state::WriteStorage; use zksync_types::{l2_to_l1_log::L2ToL1Log, Timestamp, 
VmEvent}; use crate::{ - interface::L1BatchEnv, vm_latest::old_vm::events::merge_events, - vm_latest::old_vm::history_recorder::HistoryMode, vm_latest::types::internals::ZkSyncVmState, + interface::L1BatchEnv, + vm_latest::{ + old_vm::{events::merge_events, history_recorder::HistoryMode}, + types::internals::ZkSyncVmState, + }, }; pub(crate) fn collect_events_and_l1_system_logs_after_timestamp( diff --git a/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs b/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs index c977267db8f..38aaede8d4b 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/overhead.rs @@ -1,12 +1,12 @@ -use crate::vm_latest::constants::{ - BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, -}; use zk_evm_1_4_0::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; use zksync_system_constants::{MAX_L2_TX_GAS_LIMIT, MAX_TXS_IN_BLOCK}; -use zksync_types::l1::is_l1_tx_type; -use zksync_types::U256; +use zksync_types::{l1::is_l1_tx_type, U256}; use zksync_utils::ceil_div_u256; +use crate::vm_latest::constants::{ + BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, +}; + /// Derives the overhead for processing transactions in a block. pub fn derive_overhead( gas_limit: u32, diff --git a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs index 9aecef6367e..86c49a3eb15 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs @@ -1,6 +1,7 @@ -use crate::vm_latest::types::internals::TransactionData; use zksync_types::Transaction; +use crate::vm_latest::types::internals::TransactionData; + /// Extension for transactions, specific for VM. 
Required for bypassing the orphan rule pub trait TransactionVmExt { /// Get the size of the transaction in tokens. diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index 20d74e39093..159f50a89ee 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -1,21 +1,25 @@ -use crate::HistoryMode; use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}; -use zksync_types::{event::extract_l2tol1logs_from_l1_messenger, Transaction}; +use zksync_types::{ + event::extract_l2tol1logs_from_l1_messenger, + l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, + Transaction, +}; use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_latest::old_vm::events::merge_events; -use crate::vm_latest::old_vm::history_recorder::HistoryEnabled; - -use crate::interface::{ - BootloaderMemory, CurrentExecutionState, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, - VmExecutionResultAndLogs, VmInterfaceHistoryEnabled, VmMemoryMetrics, +use crate::{ + interface::{ + BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, + }, + vm_latest::{ + bootloader_state::BootloaderState, + old_vm::{events::merge_events, history_recorder::HistoryEnabled}, + tracers::dispatcher::TracerDispatcher, + types::internals::{new_vm_state, VmSnapshot, ZkSyncVmState}, + }, + HistoryMode, }; -use crate::interface::{BytecodeCompressionError, VmInterface}; -use crate::vm_latest::bootloader_state::BootloaderState; -use crate::vm_latest::tracers::dispatcher::TracerDispatcher; - -use crate::vm_latest::types::internals::{new_vm_state, VmSnapshot, ZkSyncVmState}; /// Main entry point for Virtual Machine integration. 
/// The instance should process only one l1 batch diff --git a/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs b/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs index 5d1a075f6a5..7cfa8708fc3 100644 --- a/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs +++ b/core/lib/multivm/src/versions/vm_m5/errors/vm_revert_reason.rs @@ -1,5 +1,7 @@ -use std::convert::TryFrom; -use std::fmt::{Debug, Display}; +use std::{ + convert::TryFrom, + fmt::{Debug, Display}, +}; use zksync_types::U256; diff --git a/core/lib/multivm/src/versions/vm_m5/event_sink.rs b/core/lib/multivm/src/versions/vm_m5/event_sink.rs index 80ceb8baeaa..0bb1ee498f6 100644 --- a/core/lib/multivm/src/versions/vm_m5/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_m5/event_sink.rs @@ -1,5 +1,5 @@ -use crate::vm_m5::{oracles::OracleWithHistory, utils::collect_log_queries_after_timestamp}; use std::collections::HashMap; + use zk_evm_1_3_1::{ abstractions::EventSink, aux_structures::{LogQuery, Timestamp}, @@ -9,7 +9,10 @@ use zk_evm_1_3_1::{ }, }; -use crate::vm_m5::history_recorder::AppDataFrameManagerWithHistory; +use crate::vm_m5::{ + history_recorder::AppDataFrameManagerWithHistory, oracles::OracleWithHistory, + utils::collect_log_queries_after_timestamp, +}; #[derive(Debug, Default, Clone, PartialEq)] pub struct InMemoryEventSink { diff --git a/core/lib/multivm/src/versions/vm_m5/history_recorder.rs b/core/lib/multivm/src/versions/vm_m5/history_recorder.rs index 896b2261e9c..7a158b4dea7 100644 --- a/core/lib/multivm/src/versions/vm_m5/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_m5/history_recorder.rs @@ -3,18 +3,17 @@ use std::{ hash::{BuildHasherDefault, Hash, Hasher}, }; -use crate::vm_m5::storage::{Storage, StoragePtr}; - use zk_evm_1_3_1::{ aux_structures::Timestamp, reference_impls::event_sink::ApplicationData, vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; - use zksync_types::{StorageKey, U256}; use zksync_utils::{h256_to_u256, 
u256_to_h256}; +use crate::vm_m5::storage::{Storage, StoragePtr}; + pub type AppDataFrameManagerWithHistory = FrameManagerWithHistory>; pub type MemoryWithHistory = HistoryRecorder; pub type FrameManagerWithHistory = HistoryRecorder>; diff --git a/core/lib/multivm/src/versions/vm_m5/memory.rs b/core/lib/multivm/src/versions/vm_m5/memory.rs index 2c0b317a798..dc58450263e 100644 --- a/core/lib/multivm/src/versions/vm_m5/memory.rs +++ b/core/lib/multivm/src/versions/vm_m5/memory.rs @@ -1,12 +1,16 @@ -use zk_evm_1_3_1::abstractions::{Memory, MemoryType, MEMORY_CELLS_OTHER_PAGES}; -use zk_evm_1_3_1::aux_structures::{MemoryPage, MemoryQuery, Timestamp}; -use zk_evm_1_3_1::vm_state::PrimitiveValue; -use zk_evm_1_3_1::zkevm_opcode_defs::FatPointer; +use zk_evm_1_3_1::{ + abstractions::{Memory, MemoryType, MEMORY_CELLS_OTHER_PAGES}, + aux_structures::{MemoryPage, MemoryQuery, Timestamp}, + vm_state::PrimitiveValue, + zkevm_opcode_defs::FatPointer, +}; use zksync_types::U256; -use crate::vm_m5::history_recorder::{IntFrameManagerWithHistory, MemoryWithHistory}; -use crate::vm_m5::oracles::OracleWithHistory; -use crate::vm_m5::utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}; +use crate::vm_m5::{ + history_recorder::{IntFrameManagerWithHistory, MemoryWithHistory}, + oracles::OracleWithHistory, + utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}, +}; #[derive(Debug, Default, Clone, PartialEq)] pub struct SimpleMemory { diff --git a/core/lib/multivm/src/versions/vm_m5/mod.rs b/core/lib/multivm/src/versions/vm_m5/mod.rs index d8231ea502d..fc549761e03 100644 --- a/core/lib/multivm/src/versions/vm_m5/mod.rs +++ b/core/lib/multivm/src/versions/vm_m5/mod.rs @@ -1,5 +1,16 @@ #![allow(clippy::derive_partial_eq_without_eq)] +pub use zk_evm_1_3_1; +pub use zksync_types::vm_trace::VmExecutionTrace; + +pub use self::{ + errors::TxRevertReason, + oracle_tools::OracleTools, + oracles::storage::StorageOracle, + vm::Vm, + 
vm_instance::{VmBlockResult, VmExecutionResult}, +}; + mod bootloader_state; pub mod errors; pub mod event_sink; @@ -12,24 +23,14 @@ mod pubdata_utils; mod refunds; pub mod storage; pub mod test_utils; +#[cfg(test)] +mod tests; pub mod transaction_data; pub mod utils; +mod vm; pub mod vm_instance; pub mod vm_with_bootloader; -#[cfg(test)] -mod tests; -mod vm; - -pub use errors::TxRevertReason; -pub use oracle_tools::OracleTools; -pub use oracles::storage::StorageOracle; -pub use vm::Vm; -pub use vm_instance::VmBlockResult; -pub use vm_instance::VmExecutionResult; -pub use zk_evm_1_3_1; -pub use zksync_types::vm_trace::VmExecutionTrace; - pub type Word = zksync_types::U256; pub const MEMORY_SIZE: usize = 1 << 16; diff --git a/core/lib/multivm/src/versions/vm_m5/oracle_tools.rs b/core/lib/multivm/src/versions/vm_m5/oracle_tools.rs index 4858a23adb6..32930f31cd7 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracle_tools.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracle_tools.rs @@ -1,15 +1,18 @@ -use crate::vm_m5::memory::SimpleMemory; -use crate::vm_m5::vm_instance::MultiVMSubversion; - use std::fmt::Debug; -use crate::vm_m5::event_sink::InMemoryEventSink; -use crate::vm_m5::oracles::decommitter::DecommitterOracle; -use crate::vm_m5::oracles::precompile::PrecompilesProcessorWithHistory; -use crate::vm_m5::oracles::storage::StorageOracle; -use crate::vm_m5::storage::{Storage, StoragePtr}; use zk_evm_1_3_1::witness_trace::DummyTracer; +use crate::vm_m5::{ + event_sink::InMemoryEventSink, + memory::SimpleMemory, + oracles::{ + decommitter::DecommitterOracle, precompile::PrecompilesProcessorWithHistory, + storage::StorageOracle, + }, + storage::{Storage, StoragePtr}, + vm_instance::MultiVMSubversion, +}; + #[derive(Debug)] pub struct OracleTools { pub storage: StorageOracle, diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_m5/oracles/decommitter.rs index 24a18f998df..bc43c72966e 100644 --- 
a/core/lib/multivm/src/versions/vm_m5/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/decommitter.rs @@ -1,20 +1,19 @@ use std::collections::HashMap; -use crate::vm_m5::history_recorder::HistoryRecorder; -use crate::vm_m5::storage::{Storage, StoragePtr}; - -use zk_evm_1_3_1::abstractions::MemoryType; -use zk_evm_1_3_1::aux_structures::Timestamp; use zk_evm_1_3_1::{ - abstractions::{DecommittmentProcessor, Memory}, - aux_structures::{DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery}, + abstractions::{DecommittmentProcessor, Memory, MemoryType}, + aux_structures::{ + DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, + }, }; - use zksync_types::U256; -use zksync_utils::bytecode::bytecode_len_in_words; -use zksync_utils::{bytes_to_be_words, u256_to_h256}; +use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; use super::OracleWithHistory; +use crate::vm_m5::{ + history_recorder::HistoryRecorder, + storage::{Storage, StoragePtr}, +}; #[derive(Debug)] pub struct DecommitterOracle { diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/mod.rs b/core/lib/multivm/src/versions/vm_m5/oracles/mod.rs index 31686fa70f6..6b821c68e9d 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracles/mod.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/mod.rs @@ -1,11 +1,10 @@ use zk_evm_1_3_1::aux_structures::Timestamp; -// We will discard RAM as soon as the execution of a tx ends, so -// it is ok for now to use SimpleMemory -pub use zk_evm_1_3_1::reference_impls::memory::SimpleMemory as RamOracle; // All the changes to the events in the DB will be applied after the tx is executed, // so fow now it is fine. 
pub use zk_evm_1_3_1::reference_impls::event_sink::InMemoryEventSink as EventSinkOracle; - +// We will discard RAM as soon as the execution of a tx ends, so +// it is ok for now to use SimpleMemory +pub use zk_evm_1_3_1::reference_impls::memory::SimpleMemory as RamOracle; pub use zk_evm_1_3_1::testing::simple_tracer::NoopTracer; pub mod decommitter; diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/precompile.rs b/core/lib/multivm/src/versions/vm_m5/oracles/precompile.rs index 137a1046d48..41a00b2e8a5 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracles/precompile.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/precompile.rs @@ -1,14 +1,11 @@ use zk_evm_1_3_1::{ - abstractions::Memory, - abstractions::PrecompileCyclesWitness, - abstractions::PrecompilesProcessor, + abstractions::{Memory, PrecompileCyclesWitness, PrecompilesProcessor}, aux_structures::{LogQuery, MemoryQuery, Timestamp}, precompiles::DefaultPrecompilesProcessor, }; -use crate::vm_m5::history_recorder::HistoryRecorder; - use super::OracleWithHistory; +use crate::vm_m5::history_recorder::HistoryRecorder; /// Wrap of DefaultPrecompilesProcessor that store queue /// of timestamp when precompiles are called to be executed. 
diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs b/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs index ca2c3ab7514..c81b90f9c9c 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/storage.rs @@ -1,29 +1,28 @@ use std::collections::HashMap; -use crate::vm_m5::storage::{Storage, StoragePtr}; - -use crate::vm_m5::history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryRecorder, StorageWrapper, -}; -use crate::vm_m5::vm_instance::MultiVMSubversion; - -use zk_evm_1_3_1::abstractions::RefundedAmounts; -use zk_evm_1_3_1::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES; use zk_evm_1_3_1::{ - abstractions::{RefundType, Storage as VmStorageOracle}, + abstractions::{RefundType, RefundedAmounts, Storage as VmStorageOracle}, aux_structures::{LogQuery, Timestamp}, reference_impls::event_sink::ApplicationData, + zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; - -use crate::glue::GlueInto; -use zksync_types::utils::storage_key_for_eth_balance; use zksync_types::{ - AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS, - U256, + utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQuery, + StorageLogQueryType, BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; use super::OracleWithHistory; +use crate::{ + glue::GlueInto, + vm_m5::{ + history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryRecorder, StorageWrapper, + }, + storage::{Storage, StoragePtr}, + vm_instance::MultiVMSubversion, + }, +}; // While the storage does not support different shards, it was decided to write the // code of the StorageOracle with the shard parameters in mind. 
diff --git a/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs b/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs index a9e3c32786a..ac370f832e4 100644 --- a/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs +++ b/core/lib/multivm/src/versions/vm_m5/oracles/tracer.rs @@ -1,19 +1,8 @@ -use std::fmt::Debug; use std::{ collections::HashSet, - fmt::{self, Display}, + fmt::{self, Debug, Display}, }; -use crate::vm_m5::{ - errors::VmRevertReasonParsingResult, - memory::SimpleMemory, - storage::StoragePtr, - utils::{aux_heap_page_from_base, heap_page_from_base}, - vm_instance::{get_vm_hook_params, VM_HOOK_POSITION}, - vm_with_bootloader::BOOTLOADER_HEAP_PAGE, -}; -// use zk_evm_1_3_1::testing::memory::SimpleMemory; -use crate::vm_m5::storage::Storage; use zk_evm_1_3_1::{ abstractions::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, @@ -26,7 +15,6 @@ use zk_evm_1_3_1::{ LogOpcode, Opcode, RetOpcode, UMAOpcode, }, }; - use zksync_types::{ get_code_key, web3::signing::keccak256, AccountTreeId, Address, StorageKey, ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, H256, @@ -37,6 +25,15 @@ use zksync_utils::{ be_bytes_to_safe_address, h256_to_account_address, u256_to_account_address, u256_to_h256, }; +use crate::vm_m5::{ + errors::VmRevertReasonParsingResult, + memory::SimpleMemory, + storage::{Storage, StoragePtr}, + utils::{aux_heap_page_from_base, heap_page_from_base}, + vm_instance::{get_vm_hook_params, VM_HOOK_POSITION}, + vm_with_bootloader::BOOTLOADER_HEAP_PAGE, +}; + pub trait ExecutionEndTracer: Tracer { // Returns whether the vm execution should stop. 
fn should_stop_execution(&self) -> bool; diff --git a/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs b/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs index 80c1cd2c0e4..63e45edcbb8 100644 --- a/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/pubdata_utils.rs @@ -1,16 +1,21 @@ -use crate::vm_m5::oracles::storage::storage_key_of_log; -use crate::vm_m5::storage::Storage; -use crate::vm_m5::utils::collect_storage_log_queries_after_timestamp; -use crate::vm_m5::vm_instance::VmInstance; use std::collections::HashMap; -use zk_evm_1_3_1::aux_structures::Timestamp; -use crate::glue::GlueInto; -use zksync_types::event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}; -use zksync_types::zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries; -use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; +use zk_evm_1_3_1::aux_structures::Timestamp; +use zksync_types::{ + event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, + zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, + StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS, +}; use zksync_utils::bytecode::bytecode_len_in_bytes; +use crate::{ + glue::GlueInto, + vm_m5::{ + oracles::storage::storage_key_of_log, storage::Storage, + utils::collect_storage_log_queries_after_timestamp, vm_instance::VmInstance, + }, +}; + impl VmInstance { pub fn pubdata_published(&self, from_timestamp: Timestamp) -> u32 { let storage_writes_pubdata_published = self.pubdata_published_for_writes(from_timestamp); diff --git a/core/lib/multivm/src/versions/vm_m5/refunds.rs b/core/lib/multivm/src/versions/vm_m5/refunds.rs index 8f1b2b44f4d..8e084fd9ee3 100644 --- a/core/lib/multivm/src/versions/vm_m5/refunds.rs +++ b/core/lib/multivm/src/versions/vm_m5/refunds.rs @@ -1,13 +1,13 @@ -use crate::vm_m5::storage::Storage; -use crate::vm_m5::vm_instance::VmInstance; -use 
crate::vm_m5::vm_with_bootloader::{ - eth_price_per_pubdata_byte, BOOTLOADER_HEAP_PAGE, TX_GAS_LIMIT_OFFSET, -}; use zk_evm_1_3_1::aux_structures::Timestamp; - use zksync_types::U256; use zksync_utils::ceil_div_u256; +use crate::vm_m5::{ + storage::Storage, + vm_instance::VmInstance, + vm_with_bootloader::{eth_price_per_pubdata_byte, BOOTLOADER_HEAP_PAGE, TX_GAS_LIMIT_OFFSET}, +}; + impl VmInstance { pub(crate) fn tx_body_refund( &self, diff --git a/core/lib/multivm/src/versions/vm_m5/storage.rs b/core/lib/multivm/src/versions/vm_m5/storage.rs index d5f448812ca..deb3501b416 100644 --- a/core/lib/multivm/src/versions/vm_m5/storage.rs +++ b/core/lib/multivm/src/versions/vm_m5/storage.rs @@ -1,7 +1,4 @@ -use std::cell::RefCell; -use std::collections::HashMap; -use std::fmt::Debug; -use std::rc::Rc; +use std::{cell::RefCell, collections::HashMap, fmt::Debug, rc::Rc}; use zksync_state::{ReadStorage, WriteStorage}; use zksync_types::{StorageKey, StorageValue, H256}; diff --git a/core/lib/multivm/src/versions/vm_m5/test_utils.rs b/core/lib/multivm/src/versions/vm_m5/test_utils.rs index 36c1d60dfda..6920e77b8a8 100644 --- a/core/lib/multivm/src/versions/vm_m5/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/test_utils.rs @@ -12,8 +12,10 @@ use itertools::Itertools; use zk_evm_1_3_1::{ aux_structures::Timestamp, reference_impls::event_sink::ApplicationData, vm_state::VmLocalState, }; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; -use zksync_contracts::{deployer_contract, get_loadnext_contract, load_contract}; +use zksync_contracts::{ + deployer_contract, get_loadnext_contract, load_contract, + test_contracts::LoadnextContractExecutionParams, +}; use zksync_types::{ ethabi::{Address, Token}, fee::Fee, @@ -26,13 +28,12 @@ use zksync_utils::{ address_to_h256, bytecode::hash_bytecode, h256_to_account_address, u256_to_h256, }; -use crate::vm_m5::storage::Storage; -use crate::vm_m5::vm_instance::VmInstance; -/// The tests here help us with the 
testing the VM use crate::vm_m5::{ event_sink::InMemoryEventSink, history_recorder::{FrameManager, HistoryRecorder}, memory::SimpleMemory, + storage::Storage, + vm_instance::VmInstance, }; #[derive(Clone, Debug)] diff --git a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs index 819f22a5324..f150db2ebaa 100644 --- a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs @@ -1,10 +1,13 @@ use zk_evm_1_3_1::zkevm_opcode_defs::system_params::{MAX_PUBDATA_PER_BLOCK, MAX_TX_ERGS_LIMIT}; -use zksync_types::ethabi::{encode, Address, Token}; -use zksync_types::fee::encoding_len; -use zksync_types::MAX_TXS_IN_BLOCK; -use zksync_types::{l2::TransactionType, ExecuteTransactionCommon, Transaction, U256}; -use zksync_utils::{address_to_h256, ceil_div_u256}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; +use zksync_types::{ + ethabi::{encode, Address, Token}, + fee::encoding_len, + l2::TransactionType, + ExecuteTransactionCommon, Transaction, MAX_TXS_IN_BLOCK, U256, +}; +use zksync_utils::{ + address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, ceil_div_u256, h256_to_u256, +}; use crate::vm_m5::vm_with_bootloader::{ BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, diff --git a/core/lib/multivm/src/versions/vm_m5/utils.rs b/core/lib/multivm/src/versions/vm_m5/utils.rs index b8fef994428..09170a8f502 100644 --- a/core/lib/multivm/src/versions/vm_m5/utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/utils.rs @@ -1,10 +1,7 @@ -use crate::vm_m5::{memory::SimpleMemory, vm_with_bootloader::BlockContext}; use once_cell::sync::Lazy; - -use crate::glue::GlueInto; -use zk_evm_1_3_1::block_properties::BlockProperties; use zk_evm_1_3_1::{ aux_structures::{LogQuery, MemoryPage, Timestamp}, + block_properties::BlockProperties, vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; @@ -13,6 +10,11 
@@ use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogQuery, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; +use crate::{ + glue::GlueInto, + vm_m5::{memory::SimpleMemory, vm_with_bootloader::BlockContext}, +}; + pub const INITIAL_TIMESTAMP: u32 = 1024; pub const INITIAL_MEMORY_COUNTER: u32 = 2048; pub const INITIAL_CALLDATA_PAGE: u32 = 7; diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 87186bb7f15..67c4f126309 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -1,21 +1,23 @@ -use crate::interface::{ - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, - L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, -}; - use zksync_state::StoragePtr; -use zksync_types::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; -use zksync_types::{Transaction, VmVersion}; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::glue::history_mode::HistoryMode; -use crate::glue::GlueInto; -use crate::vm_m5::events::merge_events; -use crate::vm_m5::storage::Storage; -use crate::vm_m5::vm_instance::MultiVMSubversion; -use crate::vm_m5::vm_instance::VmInstance; +use zksync_types::{ + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + Transaction, VmVersion, +}; +use zksync_utils::{bytecode::CompressedBytecodeInfo, h256_to_u256, u256_to_h256}; + +use crate::{ + glue::{history_mode::HistoryMode, GlueInto}, + interface::{ + BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, + L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + }, + vm_m5::{ + events::merge_events, + storage::Storage, + 
vm_instance::{MultiVMSubversion, VmInstance}, + }, +}; #[derive(Debug)] pub struct Vm { diff --git a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs index 5638ed1c023..99a96ded4d4 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_instance.rs @@ -1,43 +1,52 @@ -use std::convert::TryFrom; -use std::fmt::Debug; - -use zk_evm_1_3_1::aux_structures::Timestamp; -use zk_evm_1_3_1::vm_state::{PrimitiveValue, VmLocalState, VmState}; -use zk_evm_1_3_1::witness_trace::DummyTracer; -use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{ - AllowedPcOrImm, EncodingModeProduction, VmEncodingMode, +use std::{convert::TryFrom, fmt::Debug}; + +use zk_evm_1_3_1::{ + aux_structures::Timestamp, + vm_state::{PrimitiveValue, VmLocalState, VmState}, + witness_trace::DummyTracer, + zkevm_opcode_defs::{ + decoding::{AllowedPcOrImm, EncodingModeProduction, VmEncodingMode}, + definitions::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, + }, }; -use zk_evm_1_3_1::zkevm_opcode_defs::definitions::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER; use zksync_system_constants::MAX_TXS_IN_BLOCK; - -use crate::glue::GlueInto; -use zksync_types::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; -use zksync_types::tx::tx_execution_info::TxExecutionStatus; -use zksync_types::vm_trace::VmExecutionTrace; -use zksync_types::{L1BatchNumber, StorageLogQuery, VmEvent, U256}; - -use crate::interface::types::outputs::VmExecutionLogs; -use crate::vm_m5::bootloader_state::BootloaderState; -use crate::vm_m5::errors::{TxRevertReason, VmRevertReason, VmRevertReasonParsingResult}; -use crate::vm_m5::event_sink::InMemoryEventSink; -use crate::vm_m5::events::merge_events; -use crate::vm_m5::memory::SimpleMemory; -use crate::vm_m5::oracles::decommitter::DecommitterOracle; -use crate::vm_m5::oracles::precompile::PrecompilesProcessorWithHistory; -use crate::vm_m5::oracles::storage::StorageOracle; -use crate::vm_m5::oracles::tracer::{ - 
BootloaderTracer, ExecutionEndTracer, OneTxTracer, PendingRefundTracer, PubdataSpentTracer, - TransactionResultTracer, ValidationError, ValidationTracer, ValidationTracerParams, -}; -use crate::vm_m5::oracles::OracleWithHistory; -use crate::vm_m5::storage::Storage; -use crate::vm_m5::utils::{ - collect_log_queries_after_timestamp, collect_storage_log_queries_after_timestamp, - dump_memory_page_using_primitive_value, precompile_calls_count_after_timestamp, +use zksync_types::{ + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + tx::tx_execution_info::TxExecutionStatus, + vm_trace::VmExecutionTrace, + L1BatchNumber, StorageLogQuery, VmEvent, U256, }; -use crate::vm_m5::vm_with_bootloader::{ - BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, - OPERATOR_REFUNDS_OFFSET, + +use crate::{ + glue::GlueInto, + interface::types::outputs::VmExecutionLogs, + vm_m5::{ + bootloader_state::BootloaderState, + errors::{TxRevertReason, VmRevertReason, VmRevertReasonParsingResult}, + event_sink::InMemoryEventSink, + events::merge_events, + memory::SimpleMemory, + oracles::{ + decommitter::DecommitterOracle, + precompile::PrecompilesProcessorWithHistory, + storage::StorageOracle, + tracer::{ + BootloaderTracer, ExecutionEndTracer, OneTxTracer, PendingRefundTracer, + PubdataSpentTracer, TransactionResultTracer, ValidationError, ValidationTracer, + ValidationTracerParams, + }, + OracleWithHistory, + }, + storage::Storage, + utils::{ + collect_log_queries_after_timestamp, collect_storage_log_queries_after_timestamp, + dump_memory_page_using_primitive_value, precompile_calls_count_after_timestamp, + }, + vm_with_bootloader::{ + BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, + OPERATOR_REFUNDS_OFFSET, + }, + }, }; pub type ZkSyncVmState = VmState< diff --git a/core/lib/multivm/src/versions/vm_m5/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_m5/vm_with_bootloader.rs index 0116660594e..f9ba88fea14 100644 --- 
a/core/lib/multivm/src/versions/vm_m5/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm_with_bootloader.rs @@ -12,7 +12,6 @@ use zk_evm_1_3_1::{ }; use zksync_contracts::BaseSystemContracts; use zksync_system_constants::MAX_TXS_IN_BLOCK; - use zksync_types::{ zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, Transaction, BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, @@ -21,16 +20,15 @@ use zksync_utils::{ address_to_u256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, misc::ceil_div, }; -use crate::vm_m5::storage::Storage; use crate::vm_m5::{ bootloader_state::BootloaderState, oracles::OracleWithHistory, + storage::Storage, transaction_data::TransactionData, utils::{ code_page_candidate_from_base, heap_page_from_base, BLOCK_GAS_LIMIT, INITIAL_BASE_PAGE, }, - vm_instance::VmInstance, - vm_instance::{MultiVMSubversion, ZkSyncVmState}, + vm_instance::{MultiVMSubversion, VmInstance, ZkSyncVmState}, OracleTools, }; diff --git a/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs b/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs index 9025ee9f378..fb2341c0b2e 100644 --- a/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs +++ b/core/lib/multivm/src/versions/vm_m6/errors/vm_revert_reason.rs @@ -1,5 +1,7 @@ -use std::convert::TryFrom; -use std::fmt::{Debug, Display}; +use std::{ + convert::TryFrom, + fmt::{Debug, Display}, +}; use zksync_types::U256; diff --git a/core/lib/multivm/src/versions/vm_m6/event_sink.rs b/core/lib/multivm/src/versions/vm_m6/event_sink.rs index 41fd22e9eed..2fb5d934e96 100644 --- a/core/lib/multivm/src/versions/vm_m6/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_m6/event_sink.rs @@ -1,9 +1,5 @@ -use crate::vm_m6::{ - history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, - oracles::OracleWithHistory, - utils::collect_log_queries_after_timestamp, -}; use std::collections::HashMap; + use 
zk_evm_1_3_1::{ abstractions::EventSink, aux_structures::{LogQuery, Timestamp}, @@ -13,6 +9,12 @@ use zk_evm_1_3_1::{ }, }; +use crate::vm_m6::{ + history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, + oracles::OracleWithHistory, + utils::collect_log_queries_after_timestamp, +}; + #[derive(Debug, Clone, PartialEq, Default)] pub struct InMemoryEventSink { pub frames_stack: AppDataFrameManagerWithHistory, diff --git a/core/lib/multivm/src/versions/vm_m6/history_recorder.rs b/core/lib/multivm/src/versions/vm_m6/history_recorder.rs index a85279e56c1..7ec8b2fde3b 100644 --- a/core/lib/multivm/src/versions/vm_m6/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_m6/history_recorder.rs @@ -4,17 +4,16 @@ use std::{ hash::{BuildHasherDefault, Hash, Hasher}, }; -use crate::vm_m6::storage::{Storage, StoragePtr}; - use zk_evm_1_3_1::{ aux_structures::Timestamp, vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; - use zksync_types::{StorageKey, U256}; use zksync_utils::{h256_to_u256, u256_to_h256}; +use crate::vm_m6::storage::{Storage, StoragePtr}; + pub type MemoryWithHistory = HistoryRecorder; pub type IntFrameManagerWithHistory = HistoryRecorder, H>; diff --git a/core/lib/multivm/src/versions/vm_m6/memory.rs b/core/lib/multivm/src/versions/vm_m6/memory.rs index 52a3d7f606f..5a5042e5657 100644 --- a/core/lib/multivm/src/versions/vm_m6/memory.rs +++ b/core/lib/multivm/src/versions/vm_m6/memory.rs @@ -1,15 +1,19 @@ -use zk_evm_1_3_1::abstractions::{Memory, MemoryType, MEMORY_CELLS_OTHER_PAGES}; -use zk_evm_1_3_1::aux_structures::{MemoryPage, MemoryQuery, Timestamp}; -use zk_evm_1_3_1::vm_state::PrimitiveValue; -use zk_evm_1_3_1::zkevm_opcode_defs::FatPointer; +use zk_evm_1_3_1::{ + abstractions::{Memory, MemoryType, MEMORY_CELLS_OTHER_PAGES}, + aux_structures::{MemoryPage, MemoryQuery, Timestamp}, + vm_state::PrimitiveValue, + zkevm_opcode_defs::FatPointer, +}; use zksync_types::U256; -use crate::vm_m6::history_recorder::{ - FramedStack, 
HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, - MemoryWrapper, WithHistory, +use crate::vm_m6::{ + history_recorder::{ + FramedStack, HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, + MemoryWrapper, WithHistory, + }, + oracles::OracleWithHistory, + utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}, }; -use crate::vm_m6::oracles::OracleWithHistory; -use crate::vm_m6::utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}; #[derive(Debug, Clone, PartialEq, Default)] pub struct SimpleMemory { diff --git a/core/lib/multivm/src/versions/vm_m6/oracle_tools.rs b/core/lib/multivm/src/versions/vm_m6/oracle_tools.rs index 4acc2fe68e5..7ae5e874806 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracle_tools.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracle_tools.rs @@ -1,16 +1,18 @@ -use crate::vm_m6::memory::SimpleMemory; - use std::fmt::Debug; -use crate::vm_m6::event_sink::InMemoryEventSink; -use crate::vm_m6::history_recorder::HistoryMode; -use crate::vm_m6::oracles::{ - decommitter::DecommitterOracle, precompile::PrecompilesProcessorWithHistory, - storage::StorageOracle, -}; -use crate::vm_m6::storage::{Storage, StoragePtr}; use zk_evm_1_3_1::witness_trace::DummyTracer; +use crate::vm_m6::{ + event_sink::InMemoryEventSink, + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::{ + decommitter::DecommitterOracle, precompile::PrecompilesProcessorWithHistory, + storage::StorageOracle, + }, + storage::{Storage, StoragePtr}, +}; + /// zkEVM requires a bunch of objects implementing given traits to work. 
/// For example: Storage, Memory, PrecompilerProcessor etc /// (you can find all these traits in zk_evm crate -> src/abstractions/mod.rs) diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs index 48948827c3d..a1c2a97edf9 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/decommitter.rs @@ -1,19 +1,19 @@ use std::collections::HashMap; -use crate::vm_m6::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory}; -use crate::vm_m6::storage::{Storage, StoragePtr}; - -use zk_evm_1_3_1::abstractions::MemoryType; -use zk_evm_1_3_1::aux_structures::Timestamp; use zk_evm_1_3_1::{ - abstractions::{DecommittmentProcessor, Memory}, - aux_structures::{DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery}, + abstractions::{DecommittmentProcessor, Memory, MemoryType}, + aux_structures::{ + DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, + }, }; use zksync_types::U256; -use zksync_utils::bytecode::bytecode_len_in_words; -use zksync_utils::{bytes_to_be_words, u256_to_h256}; +use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; use super::OracleWithHistory; +use crate::vm_m6::{ + history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory}, + storage::{Storage, StoragePtr}, +}; /// The main job of the DecommiterOracle is to implement the DecommittmentProcessor trait - that is /// used by the VM to 'load' bytecodes into memory. 
diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/mod.rs b/core/lib/multivm/src/versions/vm_m6/oracles/mod.rs index d6b00c8500d..2b7aa3a49f7 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/mod.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/mod.rs @@ -1,11 +1,10 @@ use zk_evm_1_3_1::aux_structures::Timestamp; -// We will discard RAM as soon as the execution of a tx ends, so -// it is ok for now to use SimpleMemory -pub use zk_evm_1_3_1::reference_impls::memory::SimpleMemory as RamOracle; // All the changes to the events in the DB will be applied after the tx is executed, // so fow now it is fine. pub use zk_evm_1_3_1::reference_impls::event_sink::InMemoryEventSink as EventSinkOracle; - +// We will discard RAM as soon as the execution of a tx ends, so +// it is ok for now to use SimpleMemory +pub use zk_evm_1_3_1::reference_impls::memory::SimpleMemory as RamOracle; pub use zk_evm_1_3_1::testing::simple_tracer::NoopTracer; pub mod decommitter; diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/precompile.rs b/core/lib/multivm/src/versions/vm_m6/oracles/precompile.rs index aff382614af..2e236b70267 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/precompile.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/precompile.rs @@ -1,14 +1,11 @@ use zk_evm_1_3_1::{ - abstractions::Memory, - abstractions::PrecompileCyclesWitness, - abstractions::PrecompilesProcessor, + abstractions::{Memory, PrecompileCyclesWitness, PrecompilesProcessor}, aux_structures::{LogQuery, MemoryQuery, Timestamp}, precompiles::DefaultPrecompilesProcessor, }; -use crate::vm_m6::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder}; - use super::OracleWithHistory; +use crate::vm_m6::history_recorder::{HistoryEnabled, HistoryMode, HistoryRecorder}; /// Wrap of DefaultPrecompilesProcessor that store queue /// of timestamp when precompiles are called to be executed. 
diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs b/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs index 45c3bdf50f8..7ceab94bd47 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/storage.rs @@ -1,27 +1,27 @@ use std::collections::HashMap; -use crate::vm_m6::storage::{Storage, StoragePtr}; - -use crate::glue::GlueInto; -use crate::vm_m6::history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, WithHistory, -}; - -use zk_evm_1_3_1::abstractions::RefundedAmounts; -use zk_evm_1_3_1::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES; use zk_evm_1_3_1::{ - abstractions::{RefundType, Storage as VmStorageOracle}, + abstractions::{RefundType, RefundedAmounts, Storage as VmStorageOracle}, aux_structures::{LogQuery, Timestamp}, + zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; -use zksync_types::utils::storage_key_for_eth_balance; use zksync_types::{ - AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS, - U256, + utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQuery, + StorageLogQueryType, BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; use super::OracleWithHistory; +use crate::{ + glue::GlueInto, + vm_m6::{ + history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, WithHistory, + }, + storage::{Storage, StoragePtr}, + }, +}; // While the storage does not support different shards, it was decided to write the // code of the StorageOracle with the shard parameters in mind. 
diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/bootloader.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/bootloader.rs index 81902f330a5..5509cef9083 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/bootloader.rs @@ -1,12 +1,5 @@ use std::marker::PhantomData; -use crate::vm_m6::history_recorder::HistoryMode; -use crate::vm_m6::memory::SimpleMemory; -use crate::vm_m6::oracles::tracer::{ - utils::gas_spent_on_bytecodes_and_long_messages_this_opcode, ExecutionEndTracer, - PendingRefundTracer, PubdataSpentTracer, StorageInvocationTracer, -}; - use zk_evm_1_3_1::{ abstractions::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, @@ -16,6 +9,15 @@ use zk_evm_1_3_1::{ zkevm_opcode_defs::{Opcode, RetOpcode}, }; +use crate::vm_m6::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::tracer::{ + utils::gas_spent_on_bytecodes_and_long_messages_this_opcode, ExecutionEndTracer, + PendingRefundTracer, PubdataSpentTracer, StorageInvocationTracer, + }, +}; + /// Tells the VM to end the execution before `ret` from the bootloader if there is no panic or revert. /// Also, saves the information if this `ret` was caused by "out of gas" panic. 
#[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs index f2ddd2762ad..1166e7a8cdb 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/call.rs @@ -1,21 +1,24 @@ -use crate::glue::GlueInto; -use crate::vm_m6::errors::VmRevertReason; -use crate::vm_m6::history_recorder::HistoryMode; -use crate::vm_m6::memory::SimpleMemory; -use std::convert::TryFrom; -use std::marker::PhantomData; -use std::mem; -use zk_evm_1_3_1::abstractions::{ - AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, -}; -use zk_evm_1_3_1::zkevm_opcode_defs::FatPointer; -use zk_evm_1_3_1::zkevm_opcode_defs::{ - FarCallABI, FarCallOpcode, Opcode, RetOpcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, - RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, +use std::{convert::TryFrom, marker::PhantomData, mem}; + +use zk_evm_1_3_1::{ + abstractions::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, + }, + zkevm_opcode_defs::{ + FarCallABI, FarCallOpcode, FatPointer, Opcode, RetOpcode, + CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, + }, }; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_types::vm_trace::{Call, CallType}; -use zksync_types::U256; +use zksync_types::{ + vm_trace::{Call, CallType}, + U256, +}; + +use crate::{ + glue::GlueInto, + vm_m6::{errors::VmRevertReason, history_recorder::HistoryMode, memory::SimpleMemory}, +}; /// NOTE Auto implementing clone for this tracer can cause stack overflow. /// This is because of the stack field which is a Vec with nested vecs inside. 
@@ -283,10 +286,13 @@ fn filter_near_call(mut call: Call) -> Vec { #[cfg(test)] mod tests { - use crate::glue::GlueInto; - use crate::vm_m6::oracles::tracer::call::{filter_near_call, Call, CallType}; use zk_evm_1_3_1::zkevm_opcode_defs::FarCallOpcode; + use crate::{ + glue::GlueInto, + vm_m6::oracles::tracer::call::{filter_near_call, Call, CallType}, + }; + #[test] fn test_filter_near_calls() { let mut call = Call::default(); diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/mod.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/mod.rs index 93486f039fa..cdf83345d2f 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/mod.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/mod.rs @@ -1,5 +1,15 @@ -use zk_evm_1_3_1::abstractions::Tracer; -use zk_evm_1_3_1::vm_state::VmLocalState; +use zk_evm_1_3_1::{abstractions::Tracer, vm_state::VmLocalState}; + +pub(crate) use self::transaction_result::TransactionResultTracer; +pub use self::{ + bootloader::BootloaderTracer, + call::CallTracer, + one_tx::OneTxTracer, + validation::{ + ValidationError, ValidationTracer, ValidationTracerParams, ViolatedValidationRule, + }, +}; +use crate::vm_m6::{history_recorder::HistoryMode, memory::SimpleMemory}; mod bootloader; mod call; @@ -8,18 +18,6 @@ mod transaction_result; mod utils; mod validation; -pub use bootloader::BootloaderTracer; -pub use call::CallTracer; -pub use one_tx::OneTxTracer; -pub use validation::{ - ValidationError, ValidationTracer, ValidationTracerParams, ViolatedValidationRule, -}; - -pub(crate) use transaction_result::TransactionResultTracer; - -use crate::vm_m6::history_recorder::HistoryMode; -use crate::vm_m6::memory::SimpleMemory; - pub trait ExecutionEndTracer: Tracer> { // Returns whether the vm execution should stop. 
fn should_stop_execution(&self) -> bool; diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/one_tx.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/one_tx.rs index d5fbb78c909..346daba2131 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/one_tx.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/one_tx.rs @@ -1,23 +1,23 @@ +use zk_evm_1_3_1::{ + abstractions::{ + AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, + }, + vm_state::VmLocalState, +}; +use zksync_types::vm_trace::Call; + use super::utils::{computational_gas_price, print_debug_if_needed}; use crate::vm_m6::{ history_recorder::HistoryMode, memory::SimpleMemory, oracles::tracer::{ utils::{gas_spent_on_bytecodes_and_long_messages_this_opcode, VmHook}, - BootloaderTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + BootloaderTracer, CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + StorageInvocationTracer, }, vm_instance::get_vm_hook_params, }; -use crate::vm_m6::oracles::tracer::{CallTracer, StorageInvocationTracer}; -use zk_evm_1_3_1::{ - abstractions::{ - AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, - }, - vm_state::VmLocalState, -}; -use zksync_types::vm_trace::Call; - /// Allows any opcodes, but tells the VM to end the execution once the tx is over. // Internally depeds on Bootloader's VMHooks to get the notification once the transaction is finished. 
#[derive(Debug)] diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/transaction_result.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/transaction_result.rs index a3e4391af24..2ecf484b60a 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/transaction_result.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/transaction_result.rs @@ -7,18 +7,18 @@ use zk_evm_1_3_1::{ }; use zksync_types::{vm_trace, U256}; -use crate::vm_m6::memory::SimpleMemory; -use crate::vm_m6::oracles::tracer::{ - CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, - StorageInvocationTracer, -}; -use crate::vm_m6::vm_instance::get_vm_hook_params; use crate::vm_m6::{ history_recorder::HistoryMode, - oracles::tracer::utils::{ - gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, read_pointer, - VmHook, + memory::SimpleMemory, + oracles::tracer::{ + utils::{ + gas_spent_on_bytecodes_and_long_messages_this_opcode, print_debug_if_needed, + read_pointer, VmHook, + }, + CallTracer, ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, + StorageInvocationTracer, }, + vm_instance::get_vm_hook_params, }; #[derive(Debug)] diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs index b256575726a..d29476ea4cc 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/utils.rs @@ -1,14 +1,9 @@ -use crate::vm_m6::history_recorder::HistoryMode; -use crate::vm_m6::memory::SimpleMemory; -use crate::vm_m6::utils::{aux_heap_page_from_base, heap_page_from_base}; -use crate::vm_m6::vm_instance::{get_vm_hook_params, VM_HOOK_POSITION}; -use crate::vm_m6::vm_with_bootloader::BOOTLOADER_HEAP_PAGE; - -use zk_evm_1_3_1::aux_structures::MemoryPage; -use zk_evm_1_3_1::zkevm_opcode_defs::{FarCallABI, FarCallForwardPageType}; use zk_evm_1_3_1::{ abstractions::{BeforeExecutionData, 
VmLocalStateData}, - zkevm_opcode_defs::{FatPointer, LogOpcode, Opcode, UMAOpcode}, + aux_structures::MemoryPage, + zkevm_opcode_defs::{ + FarCallABI, FarCallForwardPageType, FatPointer, LogOpcode, Opcode, UMAOpcode, + }, }; use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, @@ -17,6 +12,14 @@ use zksync_system_constants::{ use zksync_types::U256; use zksync_utils::u256_to_h256; +use crate::vm_m6::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + utils::{aux_heap_page_from_base, heap_page_from_base}, + vm_instance::{get_vm_hook_params, VM_HOOK_POSITION}, + vm_with_bootloader::BOOTLOADER_HEAP_PAGE, +}; + #[derive(Clone, Debug, Copy)] pub(crate) enum VmHook { AccountValidationEntered, diff --git a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs index 13a0badd442..e75a9f34a4b 100644 --- a/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs +++ b/core/lib/multivm/src/versions/vm_m6/oracles/tracer/validation.rs @@ -1,16 +1,4 @@ -use std::fmt; -use std::fmt::Display; -use std::{collections::HashSet, marker::PhantomData}; - -use crate::vm_m6::{ - errors::VmRevertReasonParsingResult, - history_recorder::HistoryMode, - memory::SimpleMemory, - oracles::tracer::{ - utils::{computational_gas_price, print_debug_if_needed, VmHook}, - ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, - }, -}; +use std::{collections::HashSet, fmt, fmt::Display, marker::PhantomData}; use zk_evm_1_3_1::{ abstractions::{ @@ -18,15 +6,11 @@ use zk_evm_1_3_1::{ }, zkevm_opcode_defs::{ContextOpcode, FarCallABI, LogOpcode, Opcode}, }; - -use crate::vm_m6::oracles::tracer::{utils::get_calldata_page_via_abi, StorageInvocationTracer}; -use crate::vm_m6::storage::{Storage, StoragePtr}; use zksync_system_constants::{ ACCOUNT_CODE_STORAGE_ADDRESS, BOOTLOADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, 
L2_ETH_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, SYSTEM_CONTEXT_ADDRESS, }; - use zksync_types::{ get_code_key, web3::signing::keccak256, AccountTreeId, Address, StorageKey, H256, U256, }; @@ -34,6 +18,19 @@ use zksync_utils::{ be_bytes_to_safe_address, h256_to_account_address, u256_to_account_address, u256_to_h256, }; +use crate::vm_m6::{ + errors::VmRevertReasonParsingResult, + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::tracer::{ + utils::{ + computational_gas_price, get_calldata_page_via_abi, print_debug_if_needed, VmHook, + }, + ExecutionEndTracer, PendingRefundTracer, PubdataSpentTracer, StorageInvocationTracer, + }, + storage::{Storage, StoragePtr}, +}; + #[derive(Debug, Clone, Eq, PartialEq, Copy)] #[allow(clippy::enum_variant_names)] pub enum ValidationTracerMode { diff --git a/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs b/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs index a823e5f5ae6..33307507f7e 100644 --- a/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/pubdata_utils.rs @@ -1,16 +1,21 @@ -use crate::glue::GlueInto; -use crate::vm_m6::history_recorder::HistoryMode; -use crate::vm_m6::oracles::storage::storage_key_of_log; -use crate::vm_m6::storage::Storage; -use crate::vm_m6::utils::collect_storage_log_queries_after_timestamp; -use crate::vm_m6::VmInstance; use std::collections::HashMap; + use zk_evm_1_3_1::aux_structures::Timestamp; -use zksync_types::event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}; -use zksync_types::zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries; -use zksync_types::{StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS}; +use zksync_types::{ + event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, + zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, + StorageKey, PUBLISH_BYTECODE_OVERHEAD, SYSTEM_CONTEXT_ADDRESS, +}; use 
zksync_utils::bytecode::bytecode_len_in_bytes; +use crate::{ + glue::GlueInto, + vm_m6::{ + history_recorder::HistoryMode, oracles::storage::storage_key_of_log, storage::Storage, + utils::collect_storage_log_queries_after_timestamp, VmInstance, + }, +}; + impl VmInstance { pub fn pubdata_published(&self, from_timestamp: Timestamp) -> u32 { let storage_writes_pubdata_published = self.pubdata_published_for_writes(from_timestamp); diff --git a/core/lib/multivm/src/versions/vm_m6/refunds.rs b/core/lib/multivm/src/versions/vm_m6/refunds.rs index da16d621911..4b4229b306b 100644 --- a/core/lib/multivm/src/versions/vm_m6/refunds.rs +++ b/core/lib/multivm/src/versions/vm_m6/refunds.rs @@ -1,13 +1,14 @@ -use crate::vm_m6::history_recorder::HistoryMode; -use crate::vm_m6::storage::Storage; -use crate::vm_m6::vm_with_bootloader::{ - eth_price_per_pubdata_byte, BOOTLOADER_HEAP_PAGE, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_m6::VmInstance; use zk_evm_1_3_1::aux_structures::Timestamp; use zksync_types::U256; use zksync_utils::ceil_div_u256; +use crate::vm_m6::{ + history_recorder::HistoryMode, + storage::Storage, + vm_with_bootloader::{eth_price_per_pubdata_byte, BOOTLOADER_HEAP_PAGE, TX_GAS_LIMIT_OFFSET}, + VmInstance, +}; + impl VmInstance { pub(crate) fn tx_body_refund( &self, diff --git a/core/lib/multivm/src/versions/vm_m6/storage.rs b/core/lib/multivm/src/versions/vm_m6/storage.rs index 5441fc8a296..80f7e016010 100644 --- a/core/lib/multivm/src/versions/vm_m6/storage.rs +++ b/core/lib/multivm/src/versions/vm_m6/storage.rs @@ -1,7 +1,4 @@ -use std::cell::RefCell; -use std::collections::HashMap; -use std::fmt::Debug; -use std::rc::Rc; +use std::{cell::RefCell, collections::HashMap, fmt::Debug, rc::Rc}; use zksync_state::{ReadStorage, WriteStorage}; use zksync_types::{get_known_code_key, StorageKey, StorageValue, H256}; diff --git a/core/lib/multivm/src/versions/vm_m6/test_utils.rs b/core/lib/multivm/src/versions/vm_m6/test_utils.rs index 6cce779362d..55e5add1164 100644 --- 
a/core/lib/multivm/src/versions/vm_m6/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/test_utils.rs @@ -10,8 +10,10 @@ use std::collections::HashMap; use itertools::Itertools; use zk_evm_1_3_1::{aux_structures::Timestamp, vm_state::VmLocalState}; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; -use zksync_contracts::{deployer_contract, get_loadnext_contract, load_contract}; +use zksync_contracts::{ + deployer_contract, get_loadnext_contract, load_contract, + test_contracts::LoadnextContractExecutionParams, +}; use zksync_types::{ ethabi::{Address, Token}, fee::Fee, @@ -24,14 +26,13 @@ use zksync_utils::{ address_to_h256, bytecode::hash_bytecode, h256_to_account_address, u256_to_h256, }; -use crate::vm_m6::storage::Storage; -/// The tests here help us with the testing the VM use crate::vm_m6::{ event_sink::InMemoryEventSink, history_recorder::{ AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode, HistoryRecorder, }, memory::SimpleMemory, + storage::Storage, VmInstance, }; diff --git a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs index 6779ce95fc3..136a6d7647a 100644 --- a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs @@ -1,11 +1,14 @@ use zk_evm_1_3_1::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; -use zksync_types::ethabi::{encode, Address, Token}; -use zksync_types::fee::encoding_len; -use zksync_types::l1::is_l1_tx_type; -use zksync_types::{l2::TransactionType, ExecuteTransactionCommon, Transaction, U256}; -use zksync_types::{MAX_L2_TX_GAS_LIMIT, MAX_TXS_IN_BLOCK}; -use zksync_utils::{address_to_h256, ceil_div_u256}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; +use zksync_types::{ + ethabi::{encode, Address, Token}, + fee::encoding_len, + l1::is_l1_tx_type, + l2::TransactionType, + ExecuteTransactionCommon, Transaction, MAX_L2_TX_GAS_LIMIT, 
MAX_TXS_IN_BLOCK, U256, +}; +use zksync_utils::{ + address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, ceil_div_u256, h256_to_u256, +}; use crate::vm_m6::vm_with_bootloader::{ BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, diff --git a/core/lib/multivm/src/versions/vm_m6/utils.rs b/core/lib/multivm/src/versions/vm_m6/utils.rs index a8ed8b02a52..070d51a6b25 100644 --- a/core/lib/multivm/src/versions/vm_m6/utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/utils.rs @@ -1,15 +1,7 @@ -use crate::vm_m6::history_recorder::HistoryMode; -use crate::vm_m6::{ - memory::SimpleMemory, oracles::tracer::PubdataSpentTracer, vm_with_bootloader::BlockContext, - VmInstance, -}; use once_cell::sync::Lazy; - -use crate::glue::GlueInto; -use crate::vm_m6::storage::Storage; -use zk_evm_1_3_1::block_properties::BlockProperties; use zk_evm_1_3_1::{ aux_structures::{LogQuery, MemoryPage, Timestamp}, + block_properties::BlockProperties, vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; @@ -18,6 +10,14 @@ use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogQuery, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; +use crate::{ + glue::GlueInto, + vm_m6::{ + history_recorder::HistoryMode, memory::SimpleMemory, oracles::tracer::PubdataSpentTracer, + storage::Storage, vm_with_bootloader::BlockContext, VmInstance, + }, +}; + pub const INITIAL_TIMESTAMP: u32 = 1024; pub const INITIAL_MEMORY_COUNTER: u32 = 2048; pub const INITIAL_CALLDATA_PAGE: u32 = 7; diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 2937b621a9a..e82d51d2bf3 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -1,23 +1,24 @@ -use crate::interface::{ - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, - L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, 
VmExecutionResultAndLogs, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, -}; - use std::collections::HashSet; use zksync_state::StoragePtr; -use zksync_types::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; -use zksync_types::{Transaction, VmVersion}; -use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo}; -use zksync_utils::{h256_to_u256, u256_to_h256}; +use zksync_types::{ + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + Transaction, VmVersion, +}; +use zksync_utils::{ + bytecode::{hash_bytecode, CompressedBytecodeInfo}, + h256_to_u256, u256_to_h256, +}; -use crate::glue::history_mode::HistoryMode; -use crate::glue::GlueInto; -use crate::vm_m6::events::merge_events; -use crate::vm_m6::storage::Storage; -use crate::vm_m6::vm_instance::MultiVMSubversion; -use crate::vm_m6::VmInstance; +use crate::{ + glue::{history_mode::HistoryMode, GlueInto}, + interface::{ + BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, FinishedL1Batch, + L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + }, + vm_m6::{events::merge_events, storage::Storage, vm_instance::MultiVMSubversion, VmInstance}, +}; #[derive(Debug)] pub struct Vm { diff --git a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs index f15adde2584..379476d7664 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_instance.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_instance.rs @@ -1,45 +1,54 @@ -use std::convert::TryFrom; -use std::fmt::Debug; - -use crate::glue::GlueInto; -use zk_evm_1_3_1::aux_structures::Timestamp; -use zk_evm_1_3_1::vm_state::{PrimitiveValue, VmLocalState, VmState}; -use zk_evm_1_3_1::witness_trace::DummyTracer; -use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{ - AllowedPcOrImm, EncodingModeProduction, VmEncodingMode, +use std::{convert::TryFrom, fmt::Debug}; + +use zk_evm_1_3_1::{ + aux_structures::Timestamp, 
+ vm_state::{PrimitiveValue, VmLocalState, VmState}, + witness_trace::DummyTracer, + zkevm_opcode_defs::{ + decoding::{AllowedPcOrImm, EncodingModeProduction, VmEncodingMode}, + definitions::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, + }, }; -use zk_evm_1_3_1::zkevm_opcode_defs::definitions::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER; use zksync_system_constants::MAX_TXS_IN_BLOCK; -use zksync_types::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; -use zksync_types::tx::tx_execution_info::TxExecutionStatus; -use zksync_types::vm_trace::{Call, VmExecutionTrace, VmTrace}; -use zksync_types::{L1BatchNumber, StorageLogQuery, VmEvent, H256, U256}; - -use crate::interface::types::outputs::VmExecutionLogs; -use crate::vm_m6::bootloader_state::BootloaderState; -use crate::vm_m6::errors::{TxRevertReason, VmRevertReason, VmRevertReasonParsingResult}; -use crate::vm_m6::event_sink::InMemoryEventSink; -use crate::vm_m6::events::merge_events; -use crate::vm_m6::history_recorder::{HistoryEnabled, HistoryMode}; -use crate::vm_m6::memory::SimpleMemory; -use crate::vm_m6::oracles::decommitter::DecommitterOracle; -use crate::vm_m6::oracles::precompile::PrecompilesProcessorWithHistory; -use crate::vm_m6::oracles::storage::StorageOracle; -use crate::vm_m6::oracles::tracer::{ - BootloaderTracer, ExecutionEndTracer, OneTxTracer, PendingRefundTracer, PubdataSpentTracer, - StorageInvocationTracer, TransactionResultTracer, ValidationError, ValidationTracer, - ValidationTracerParams, +use zksync_types::{ + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + tx::tx_execution_info::TxExecutionStatus, + vm_trace::{Call, VmExecutionTrace, VmTrace}, + L1BatchNumber, StorageLogQuery, VmEvent, H256, U256, }; -use crate::vm_m6::oracles::OracleWithHistory; -use crate::vm_m6::storage::Storage; -use crate::vm_m6::utils::{ - calculate_computational_gas_used, collect_log_queries_after_timestamp, - collect_storage_log_queries_after_timestamp, dump_memory_page_using_primitive_value, - precompile_calls_count_after_timestamp, 
-}; -use crate::vm_m6::vm_with_bootloader::{ - BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, - OPERATOR_REFUNDS_OFFSET, + +use crate::{ + glue::GlueInto, + interface::types::outputs::VmExecutionLogs, + vm_m6::{ + bootloader_state::BootloaderState, + errors::{TxRevertReason, VmRevertReason, VmRevertReasonParsingResult}, + event_sink::InMemoryEventSink, + events::merge_events, + history_recorder::{HistoryEnabled, HistoryMode}, + memory::SimpleMemory, + oracles::{ + decommitter::DecommitterOracle, + precompile::PrecompilesProcessorWithHistory, + storage::StorageOracle, + tracer::{ + BootloaderTracer, ExecutionEndTracer, OneTxTracer, PendingRefundTracer, + PubdataSpentTracer, StorageInvocationTracer, TransactionResultTracer, + ValidationError, ValidationTracer, ValidationTracerParams, + }, + OracleWithHistory, + }, + storage::Storage, + utils::{ + calculate_computational_gas_used, collect_log_queries_after_timestamp, + collect_storage_log_queries_after_timestamp, dump_memory_page_using_primitive_value, + precompile_calls_count_after_timestamp, + }, + vm_with_bootloader::{ + BootloaderJobType, DerivedBlockContext, TxExecutionMode, BOOTLOADER_HEAP_PAGE, + OPERATOR_REFUNDS_OFFSET, + }, + }, }; pub type ZkSyncVmState = VmState< diff --git a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs index 998f41275b4..c7d4ee3d45e 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs @@ -12,7 +12,6 @@ use zk_evm_1_3_1::{ }; use zksync_contracts::BaseSystemContracts; use zksync_system_constants::MAX_TXS_IN_BLOCK; - use zksync_types::{ zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, Transaction, BOOTLOADER_ADDRESS, L1_GAS_PER_PUBDATA_BYTE, MAX_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, U256, @@ -24,10 +23,10 @@ use zksync_utils::{ misc::ceil_div, }; -use crate::vm_m6::storage::Storage; use 
crate::vm_m6::{ bootloader_state::BootloaderState, history_recorder::HistoryMode, + storage::Storage, transaction_data::{TransactionData, L1_TX_TYPE}, utils::{ code_page_candidate_from_base, heap_page_from_base, BLOCK_GAS_LIMIT, INITIAL_BASE_PAGE, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/l2_block.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/l2_block.rs index 56b5b1b6b39..6cd1096b3bd 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/l2_block.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/l2_block.rs @@ -1,11 +1,15 @@ use std::cmp::Ordering; + use zksync_types::{MiniblockNumber, H256}; use zksync_utils::concat_and_hash; -use crate::interface::{L2Block, L2BlockEnv}; -use crate::vm_refunds_enhancement::bootloader_state::snapshot::L2BlockSnapshot; -use crate::vm_refunds_enhancement::bootloader_state::tx::BootloaderTx; -use crate::vm_refunds_enhancement::utils::l2_blocks::l2_block_hash; +use crate::{ + interface::{L2Block, L2BlockEnv}, + vm_refunds_enhancement::{ + bootloader_state::{snapshot::L2BlockSnapshot, tx::BootloaderTx}, + utils::l2_blocks::l2_block_hash, + }, +}; const EMPTY_TXS_ROLLING_HASH: H256 = H256::zero(); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs index 4c8d48bc1a7..d436a2adb0a 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs @@ -1,17 +1,22 @@ -use crate::vm_refunds_enhancement::bootloader_state::l2_block::BootloaderL2Block; -use crate::vm_refunds_enhancement::bootloader_state::snapshot::BootloaderStateSnapshot; -use crate::vm_refunds_enhancement::bootloader_state::utils::{apply_l2_block, apply_tx_to_memory}; use std::cmp::Ordering; + use zksync_types::{L2ChainId, 
U256}; use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}; -use crate::vm_refunds_enhancement::{ - constants::TX_DESCRIPTION_OFFSET, types::internals::TransactionData, - utils::l2_blocks::assert_next_block, -}; - use super::tx::BootloaderTx; +use crate::{ + interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + vm_refunds_enhancement::{ + bootloader_state::{ + l2_block::BootloaderL2Block, + snapshot::BootloaderStateSnapshot, + utils::{apply_l2_block, apply_tx_to_memory}, + }, + constants::TX_DESCRIPTION_OFFSET, + types::internals::TransactionData, + utils::l2_blocks::assert_next_block, + }, +}; /// Intermediate bootloader-related VM state. /// /// Required to process transactions one by one (since we intercept the VM execution to execute diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs index 3bd10e9374b..e7f833e5bad 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/tx.rs @@ -1,7 +1,8 @@ -use crate::vm_refunds_enhancement::types::internals::TransactionData; use zksync_types::{L2ChainId, H256, U256}; use zksync_utils::bytecode::CompressedBytecodeInfo; +use crate::vm_refunds_enhancement::types::internals::TransactionData; + /// Information about tx necessary for execution in bootloader. 
#[derive(Debug, Clone)] pub(super) struct BootloaderTx { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs index fed5108d7f3..8adeb3e0b42 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs @@ -1,16 +1,19 @@ use zksync_types::U256; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; - -use crate::interface::{BootloaderMemory, TxExecutionMode}; -use crate::vm_refunds_enhancement::bootloader_state::l2_block::BootloaderL2Block; -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, COMPRESSED_BYTECODES_OFFSET, - OPERATOR_REFUNDS_OFFSET, TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, -}; +use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; +use crate::{ + interface::{BootloaderMemory, TxExecutionMode}, + vm_refunds_enhancement::{ + bootloader_state::l2_block::BootloaderL2Block, + constants::{ + BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, + COMPRESSED_BYTECODES_OFFSET, OPERATOR_REFUNDS_OFFSET, TX_DESCRIPTION_OFFSET, + TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, + TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, + }, + }, +}; pub(super) fn get_memory_for_compressed_bytecodes( compressed_bytecodes: &[CompressedBytecodeInfo], diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/constants.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/constants.rs index 0dca7a6ce26..82ab754e403 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/constants.rs +++ 
b/core/lib/multivm/src/versions/vm_refunds_enhancement/constants.rs @@ -1,14 +1,12 @@ use zk_evm_1_3_3::aux_structures::MemoryPage; - +pub use zk_evm_1_3_3::zkevm_opcode_defs::system_params::{ + ERGS_PER_CIRCUIT, INITIAL_STORAGE_WRITE_PUBDATA_BYTES, MAX_PUBDATA_PER_BLOCK, +}; use zksync_system_constants::{ L1_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, MAX_TXS_IN_BLOCK, USED_BOOTLOADER_MEMORY_WORDS, }; -pub use zk_evm_1_3_3::zkevm_opcode_defs::system_params::{ - ERGS_PER_CIRCUIT, INITIAL_STORAGE_WRITE_PUBDATA_BYTES, MAX_PUBDATA_PER_BLOCK, -}; - use crate::vm_refunds_enhancement::old_vm::utils::heap_page_from_base; /// Max cycles for a single transaction. diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs index 4b7e529fc5b..69670f9682b 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/bytecode.rs @@ -1,13 +1,12 @@ use itertools::Itertools; - -use crate::interface::VmInterface; -use crate::HistoryMode; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::U256; -use zksync_utils::bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}; -use zksync_utils::bytes_to_be_words; +use zksync_utils::{ + bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, + bytes_to_be_words, +}; -use crate::vm_refunds_enhancement::Vm; +use crate::{interface::VmInterface, vm_refunds_enhancement::Vm, HistoryMode}; impl Vm { /// Checks the last transaction has successfully published compressed bytecodes and returns `true` if there is at least one is still unknown. 
diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs index 9e55180d66f..a1d81bdce5e 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs @@ -1,15 +1,20 @@ -use crate::HistoryMode; use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; -use crate::interface::tracer::{TracerExecutionStatus, VmExecutionStopReason}; -use crate::interface::{VmExecutionMode, VmExecutionResultAndLogs}; -use crate::vm_refunds_enhancement::old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}; -use crate::vm_refunds_enhancement::tracers::dispatcher::TracerDispatcher; -use crate::vm_refunds_enhancement::tracers::{ - traits::VmTracer, DefaultExecutionTracer, RefundsTracer, +use crate::{ + interface::{ + tracer::{TracerExecutionStatus, VmExecutionStopReason}, + VmExecutionMode, VmExecutionResultAndLogs, + }, + vm_refunds_enhancement::{ + old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, + tracers::{ + dispatcher::TracerDispatcher, traits::VmTracer, DefaultExecutionTracer, RefundsTracer, + }, + vm::Vm, + }, + HistoryMode, }; -use crate::vm_refunds_enhancement::vm::Vm; impl Vm { pub(crate) fn inspect_inner( diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/gas.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/gas.rs index cce9bfad699..4083e27b0b3 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/gas.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/gas.rs @@ -1,8 +1,9 @@ -use crate::HistoryMode; use zksync_state::WriteStorage; -use crate::vm_refunds_enhancement::tracers::DefaultExecutionTracer; -use crate::vm_refunds_enhancement::vm::Vm; +use crate::{ + 
vm_refunds_enhancement::{tracers::DefaultExecutionTracer, vm::Vm}, + HistoryMode, +}; impl Vm { /// Returns the amount of gas remaining to the VM. diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs index b8e8652f301..bded1c19041 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/logs.rs @@ -1,14 +1,18 @@ use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; +use zksync_types::{ + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + VmEvent, +}; -use crate::HistoryMode; -use zksync_types::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; -use zksync_types::VmEvent; - -use crate::interface::types::outputs::VmExecutionLogs; -use crate::vm_refunds_enhancement::old_vm::events::merge_events; -use crate::vm_refunds_enhancement::old_vm::utils::precompile_calls_count_after_timestamp; -use crate::vm_refunds_enhancement::vm::Vm; +use crate::{ + interface::types::outputs::VmExecutionLogs, + vm_refunds_enhancement::{ + old_vm::{events::merge_events, utils::precompile_calls_count_after_timestamp}, + vm::Vm, + }, + HistoryMode, +}; impl Vm { pub(crate) fn collect_execution_logs_after_timestamp( diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/snapshots.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/snapshots.rs index 972d50e5d76..c34535726c0 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/snapshots.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/snapshots.rs @@ -1,13 +1,14 @@ -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; - use std::time::Duration; -use crate::vm_latest::HistoryEnabled; +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; use 
zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; -use crate::vm_refunds_enhancement::{ - old_vm::oracles::OracleWithHistory, types::internals::VmSnapshot, vm::Vm, +use crate::{ + vm_latest::HistoryEnabled, + vm_refunds_enhancement::{ + old_vm::oracles::OracleWithHistory, types::internals::VmSnapshot, vm::Vm, + }, }; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs index a49ce2a6746..3e9de5de4ec 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs @@ -1,12 +1,12 @@ use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; - -use crate::HistoryMode; use zksync_types::U256; -use crate::interface::{VmExecutionStatistics, VmMemoryMetrics}; -use crate::vm_refunds_enhancement::tracers::DefaultExecutionTracer; -use crate::vm_refunds_enhancement::vm::Vm; +use crate::{ + interface::{VmExecutionStatistics, VmMemoryMetrics}, + vm_refunds_enhancement::{tracers::DefaultExecutionTracer, vm::Vm}, + HistoryMode, +}; /// Module responsible for observing the VM behavior, i.e. calculating the statistics of the VM runs /// or reporting the VM memory usage. 
diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/tx.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/tx.rs index d6fd4858870..a786e9b0ad7 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/tx.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/tx.rs @@ -1,15 +1,16 @@ -use crate::vm_refunds_enhancement::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_refunds_enhancement::implementation::bytecode::{ - bytecode_to_factory_dep, compress_bytecodes, -}; use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; -use zksync_types::l1::is_l1_tx_type; -use zksync_types::Transaction; +use zksync_types::{l1::is_l1_tx_type, Transaction}; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::vm::Vm; -use crate::HistoryMode; +use crate::{ + vm_refunds_enhancement::{ + constants::BOOTLOADER_HEAP_PAGE, + implementation::bytecode::{bytecode_to_factory_dep, compress_bytecodes}, + types::internals::TransactionData, + vm::Vm, + }, + HistoryMode, +}; impl Vm { pub(crate) fn push_raw_transaction( diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/mod.rs index 28a681e5e60..691d453c4b0 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/mod.rs @@ -1,30 +1,25 @@ -pub use old_vm::{ - history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}, - memory::SimpleMemory, +pub use self::{ + bootloader_state::BootloaderState, + old_vm::{ + history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}, + memory::SimpleMemory, + }, + oracles::storage::StorageOracle, + tracers::{ + dispatcher::TracerDispatcher, + traits::{ToTracerPointer, TracerPointer, VmTracer}, + }, + types::internals::ZkSyncVmState, + utils::transaction_encoding::TransactionVmExt, + 
vm::Vm, }; -pub use oracles::storage::StorageOracle; - -pub use tracers::dispatcher::TracerDispatcher; -pub use tracers::traits::{ToTracerPointer, TracerPointer, VmTracer}; - -pub use utils::transaction_encoding::TransactionVmExt; - -pub use bootloader_state::BootloaderState; -pub use types::internals::ZkSyncVmState; - -pub use vm::Vm; - mod bootloader_state; +pub mod constants; mod implementation; mod old_vm; mod oracles; pub(crate) mod tracers; mod types; -mod vm; - -pub mod constants; pub mod utils; - -// #[cfg(test)] -// mod tests; +mod vm; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs index adbee280a3d..43019cce1ce 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/event_sink.rs @@ -1,8 +1,5 @@ -use crate::vm_refunds_enhancement::old_vm::{ - history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, - oracles::OracleWithHistory, -}; use std::collections::HashMap; + use zk_evm_1_3_3::{ abstractions::EventSink, aux_structures::{LogQuery, Timestamp}, @@ -12,6 +9,11 @@ use zk_evm_1_3_3::{ }, }; +use crate::vm_refunds_enhancement::old_vm::{ + history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, + oracles::OracleWithHistory, +}; + #[derive(Debug, Clone, PartialEq, Default)] pub struct InMemoryEventSink { frames_stack: AppDataFrameManagerWithHistory, H>, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/history_recorder.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/history_recorder.rs index 44d510b0075..fdab00a199e 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/history_recorder.rs @@ -5,7 +5,6 @@ use zk_evm_1_3_3::{ vm_state::PrimitiveValue, 
zkevm_opcode_defs::{self}, }; - use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{StorageKey, U256}; use zksync_utils::{h256_to_u256, u256_to_h256}; @@ -771,11 +770,14 @@ impl HistoryRecorder, H> { #[cfg(test)] mod tests { - use crate::vm_refunds_enhancement::old_vm::history_recorder::{HistoryRecorder, MemoryWrapper}; - use crate::vm_refunds_enhancement::HistoryDisabled; use zk_evm_1_3_3::{aux_structures::Timestamp, vm_state::PrimitiveValue}; use zksync_types::U256; + use crate::vm_refunds_enhancement::{ + old_vm::history_recorder::{HistoryRecorder, MemoryWrapper}, + HistoryDisabled, + }; + #[test] fn memory_equality() { let mut a: HistoryRecorder = Default::default(); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/memory.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/memory.rs index 1ef04da58cb..8568d6c7215 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/memory.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/memory.rs @@ -1,16 +1,18 @@ -use zk_evm_1_3_3::abstractions::{Memory, MemoryType}; -use zk_evm_1_3_3::aux_structures::{MemoryPage, MemoryQuery, Timestamp}; -use zk_evm_1_3_3::vm_state::PrimitiveValue; -use zk_evm_1_3_3::zkevm_opcode_defs::FatPointer; +use zk_evm_1_3_3::{ + abstractions::{Memory, MemoryType}, + aux_structures::{MemoryPage, MemoryQuery, Timestamp}, + vm_state::PrimitiveValue, + zkevm_opcode_defs::FatPointer, +}; use zksync_types::U256; -use crate::vm_refunds_enhancement::old_vm::history_recorder::{ - FramedStack, HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, - MemoryWrapper, WithHistory, -}; -use crate::vm_refunds_enhancement::old_vm::oracles::OracleWithHistory; -use crate::vm_refunds_enhancement::old_vm::utils::{ - aux_heap_page_from_base, heap_page_from_base, stack_page_from_base, +use crate::vm_refunds_enhancement::old_vm::{ + history_recorder::{ + FramedStack, HistoryEnabled, HistoryMode, 
IntFrameManagerWithHistory, MemoryWithHistory, + MemoryWrapper, WithHistory, + }, + oracles::OracleWithHistory, + utils::{aux_heap_page_from_base, heap_page_from_base, stack_page_from_base}, }; #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs index a39be0ba93b..6705831dbad 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/decommitter.rs @@ -1,23 +1,19 @@ -use std::collections::HashMap; -use std::fmt::Debug; +use std::{collections::HashMap, fmt::Debug}; -use crate::vm_refunds_enhancement::old_vm::history_recorder::{ - HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, -}; - -use zk_evm_1_3_3::abstractions::MemoryType; -use zk_evm_1_3_3::aux_structures::Timestamp; use zk_evm_1_3_3::{ - abstractions::{DecommittmentProcessor, Memory}, - aux_structures::{DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery}, + abstractions::{DecommittmentProcessor, Memory, MemoryType}, + aux_structures::{ + DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, + }, }; - use zksync_state::{ReadStorage, StoragePtr}; use zksync_types::U256; -use zksync_utils::bytecode::bytecode_len_in_words; -use zksync_utils::{bytes_to_be_words, u256_to_h256}; +use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; use super::OracleWithHistory; +use crate::vm_refunds_enhancement::old_vm::history_recorder::{ + HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, +}; /// The main job of the DecommiterOracle is to implement the DecommittmentProcessor trait - that is /// used by the VM to 'load' bytecodes into memory. 
diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/precompile.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/precompile.rs index eb3f7b866b1..c59fb188e59 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/precompile.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/oracles/precompile.rs @@ -1,17 +1,14 @@ use zk_evm_1_3_3::{ - abstractions::Memory, - abstractions::PrecompileCyclesWitness, - abstractions::PrecompilesProcessor, + abstractions::{Memory, PrecompileCyclesWitness, PrecompilesProcessor}, aux_structures::{LogQuery, MemoryQuery, Timestamp}, precompiles::DefaultPrecompilesProcessor, }; +use super::OracleWithHistory; use crate::vm_refunds_enhancement::old_vm::history_recorder::{ HistoryEnabled, HistoryMode, HistoryRecorder, }; -use super::OracleWithHistory; - /// Wrap of DefaultPrecompilesProcessor that store queue /// of timestamp when precompiles are called to be executed. /// Number of precompiles per block is strictly limited, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/utils.rs index 9b4aae851d2..bc4b2c3eff1 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/old_vm/utils.rs @@ -1,22 +1,19 @@ -use crate::vm_refunds_enhancement::old_vm::memory::SimpleMemory; - -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use crate::vm_refunds_enhancement::HistoryMode; - -use zk_evm_1_3_3::zkevm_opcode_defs::decoding::{ - AllowedPcOrImm, EncodingModeProduction, VmEncodingMode, -}; -use zk_evm_1_3_3::zkevm_opcode_defs::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER; use zk_evm_1_3_3::{ aux_structures::{MemoryPage, Timestamp}, vm_state::PrimitiveValue, - zkevm_opcode_defs::FatPointer, + zkevm_opcode_defs::{ + decoding::{AllowedPcOrImm, EncodingModeProduction, 
VmEncodingMode}, + FatPointer, RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, + }, }; use zksync_state::WriteStorage; use zksync_system_constants::L1_GAS_PER_PUBDATA_BYTE; - use zksync_types::{Address, U256}; +use crate::vm_refunds_enhancement::{ + old_vm::memory::SimpleMemory, types::internals::ZkSyncVmState, HistoryMode, +}; + #[derive(Debug, Clone)] pub(crate) enum VmExecutionResult { Ok(Vec), diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs index e054cdbe2a6..b970a8a95f7 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/oracles/storage.rs @@ -1,26 +1,25 @@ use std::collections::HashMap; -use crate::vm_refunds_enhancement::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, VectorHistoryEvent, WithHistory, -}; -use crate::vm_refunds_enhancement::old_vm::oracles::OracleWithHistory; - -use zk_evm_1_3_3::abstractions::RefundedAmounts; -use zk_evm_1_3_3::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES; use zk_evm_1_3_3::{ - abstractions::{RefundType, Storage as VmStorageOracle}, + abstractions::{RefundType, RefundedAmounts, Storage as VmStorageOracle}, aux_structures::{LogQuery, Timestamp}, + zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; - use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_eth_balance; use zksync_types::{ - AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS, - U256, + utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQuery, + StorageLogQueryType, BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; +use crate::vm_refunds_enhancement::old_vm::{ + history_recorder::{ + 
AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, VectorHistoryEvent, WithHistory, + }, + oracles::OracleWithHistory, +}; + // While the storage does not support different shards, it was decided to write the // code of the StorageOracle with the shard parameters in mind. pub(crate) fn triplet_to_storage_key(_shard_id: u8, address: Address, key: U256) -> StorageKey { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/default_tracers.rs index 51fbf06d855..8e9c0f11aba 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/default_tracers.rs @@ -1,11 +1,5 @@ use std::fmt::{Debug, Formatter}; -use crate::interface::dyn_tracers::vm_1_3_3::DynTracer; -use crate::interface::tracer::{ - TracerExecutionStatus, TracerExecutionStopReason, VmExecutionStopReason, -}; -use crate::interface::{Halt, VmExecutionMode}; -use crate::vm_refunds_enhancement::VmTracer; use zk_evm_1_3_3::{ tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, @@ -17,18 +11,28 @@ use zk_evm_1_3_3::{ use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::Timestamp; -use crate::vm_refunds_enhancement::bootloader_state::utils::apply_l2_block; -use crate::vm_refunds_enhancement::bootloader_state::BootloaderState; -use crate::vm_refunds_enhancement::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_refunds_enhancement::old_vm::history_recorder::HistoryMode; -use crate::vm_refunds_enhancement::old_vm::memory::SimpleMemory; -use crate::vm_refunds_enhancement::tracers::dispatcher::TracerDispatcher; -use crate::vm_refunds_enhancement::tracers::utils::{ - computational_gas_price, gas_spent_on_bytecodes_and_long_messages_this_opcode, - print_debug_if_needed, VmHook, +use crate::{ + interface::{ + 
dyn_tracers::vm_1_3_3::DynTracer, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, VmExecutionStopReason}, + Halt, VmExecutionMode, + }, + vm_refunds_enhancement::{ + bootloader_state::{utils::apply_l2_block, BootloaderState}, + constants::BOOTLOADER_HEAP_PAGE, + old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, + tracers::{ + dispatcher::TracerDispatcher, + utils::{ + computational_gas_price, gas_spent_on_bytecodes_and_long_messages_this_opcode, + print_debug_if_needed, VmHook, + }, + RefundsTracer, ResultTracer, + }, + types::internals::ZkSyncVmState, + VmTracer, + }, }; -use crate::vm_refunds_enhancement::tracers::{RefundsTracer, ResultTracer}; -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; /// Default tracer for the VM. It manages the other tracers execution and stop the vm when needed. pub(crate) struct DefaultExecutionTracer { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/dispatcher.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/dispatcher.rs index f2296d205a9..2392c3e51af 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/dispatcher.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/dispatcher.rs @@ -1,13 +1,18 @@ -use crate::interface::dyn_tracers::vm_1_3_3::DynTracer; -use crate::interface::tracer::{TracerExecutionStatus, VmExecutionStopReason}; -use crate::vm_refunds_enhancement::{ - BootloaderState, HistoryMode, SimpleMemory, TracerPointer, VmTracer, ZkSyncVmState, -}; use zk_evm_1_3_3::tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, VmLocalStateData, }; use zksync_state::{StoragePtr, WriteStorage}; +use crate::{ + interface::{ + dyn_tracers::vm_1_3_3::DynTracer, + tracer::{TracerExecutionStatus, VmExecutionStopReason}, + }, + vm_refunds_enhancement::{ + BootloaderState, HistoryMode, SimpleMemory, TracerPointer, VmTracer, ZkSyncVmState, + }, +}; + /// Tracer dispatcher is a tracer that can dispatch 
calls to multiple tracers. pub struct TracerDispatcher { tracers: Vec>, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs index 5256561b5eb..f906cef6230 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/refunds.rs @@ -1,5 +1,4 @@ use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; - use zk_evm_1_3_3::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, @@ -12,27 +11,27 @@ use zksync_types::{ l2_to_l1_log::L2ToL1Log, L1BatchNumber, U256, }; -use zksync_utils::bytecode::bytecode_len_in_bytes; -use zksync_utils::{ceil_div_u256, u256_to_h256}; - -use crate::interface::{ - dyn_tracers::vm_1_3_3::DynTracer, tracer::TracerExecutionStatus, L1BatchEnv, Refunds, -}; -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET, -}; +use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, u256_to_h256}; -use crate::vm_refunds_enhancement::{ - bootloader_state::BootloaderState, - old_vm::{ - events::merge_events, history_recorder::HistoryMode, memory::SimpleMemory, - utils::eth_price_per_pubdata_byte, +use crate::{ + interface::{ + dyn_tracers::vm_1_3_3::DynTracer, tracer::TracerExecutionStatus, L1BatchEnv, Refunds, }, - tracers::{ - traits::VmTracer, - utils::{gas_spent_on_bytecodes_and_long_messages_this_opcode, get_vm_hook_params, VmHook}, + vm_refunds_enhancement::{ + bootloader_state::BootloaderState, + constants::{BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET}, + old_vm::{ + events::merge_events, history_recorder::HistoryMode, memory::SimpleMemory, + utils::eth_price_per_pubdata_byte, + }, + tracers::{ + traits::VmTracer, + utils::{ + gas_spent_on_bytecodes_and_long_messages_this_opcode, get_vm_hook_params, VmHook, + }, + 
}, + types::internals::ZkSyncVmState, }, - types::internals::ZkSyncVmState, }; /// Tracer responsible for collecting information about refunds. diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/result_tracer.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/result_tracer.rs index c0a8e5d6cc0..1281b416bb4 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/result_tracer.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/result_tracer.rs @@ -4,23 +4,27 @@ use zk_evm_1_3_3::{ zkevm_opcode_defs::FatPointer, }; use zksync_state::{StoragePtr, WriteStorage}; - -use crate::interface::dyn_tracers::vm_1_3_3::DynTracer; -use crate::interface::tracer::{TracerExecutionStopReason, VmExecutionStopReason}; -use crate::interface::{ExecutionResult, Halt, TxRevertReason, VmExecutionMode, VmRevertReason}; use zksync_types::U256; -use crate::vm_refunds_enhancement::bootloader_state::BootloaderState; -use crate::vm_refunds_enhancement::old_vm::{ - history_recorder::HistoryMode, - memory::SimpleMemory, - utils::{vm_may_have_ended_inner, VmExecutionResult}, +use crate::{ + interface::{ + dyn_tracers::vm_1_3_3::DynTracer, + tracer::{TracerExecutionStopReason, VmExecutionStopReason}, + ExecutionResult, Halt, TxRevertReason, VmExecutionMode, VmRevertReason, + }, + vm_refunds_enhancement::{ + bootloader_state::BootloaderState, + constants::{BOOTLOADER_HEAP_PAGE, RESULT_SUCCESS_FIRST_SLOT}, + old_vm::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + utils::{vm_may_have_ended_inner, VmExecutionResult}, + }, + tracers::utils::{get_vm_hook_params, read_pointer, VmHook}, + types::internals::ZkSyncVmState, + VmTracer, + }, }; -use crate::vm_refunds_enhancement::tracers::utils::{get_vm_hook_params, read_pointer, VmHook}; - -use crate::vm_refunds_enhancement::constants::{BOOTLOADER_HEAP_PAGE, RESULT_SUCCESS_FIRST_SLOT}; -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use 
crate::vm_refunds_enhancement::VmTracer; #[derive(Debug, Clone)] enum Result { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/traits.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/traits.rs index 13b295b9fe9..b54819148fa 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/traits.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/traits.rs @@ -1,11 +1,16 @@ -use crate::interface::dyn_tracers::vm_1_3_3::DynTracer; -use crate::interface::tracer::{TracerExecutionStatus, VmExecutionStopReason}; use zksync_state::WriteStorage; -use crate::vm_refunds_enhancement::bootloader_state::BootloaderState; -use crate::vm_refunds_enhancement::old_vm::history_recorder::HistoryMode; -use crate::vm_refunds_enhancement::old_vm::memory::SimpleMemory; -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; +use crate::{ + interface::{ + dyn_tracers::vm_1_3_3::DynTracer, + tracer::{TracerExecutionStatus, VmExecutionStopReason}, + }, + vm_refunds_enhancement::{ + bootloader_state::BootloaderState, + old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, + types::internals::ZkSyncVmState, + }, +}; pub type TracerPointer = Box>; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs index 3026afea007..8de2ad181f4 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tracers/utils.rs @@ -5,7 +5,6 @@ use zk_evm_1_3_3::{ FarCallABI, FarCallForwardPageType, FatPointer, LogOpcode, Opcode, UMAOpcode, }, }; - use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, @@ -13,13 +12,15 @@ use zksync_system_constants::{ use zksync_types::U256; use zksync_utils::u256_to_h256; -use 
crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, VM_HOOK_PARAMS_COUNT, VM_HOOK_PARAMS_START_POSITION, VM_HOOK_POSITION, -}; -use crate::vm_refunds_enhancement::old_vm::{ - history_recorder::HistoryMode, - memory::SimpleMemory, - utils::{aux_heap_page_from_base, heap_page_from_base}, +use crate::vm_refunds_enhancement::{ + constants::{ + BOOTLOADER_HEAP_PAGE, VM_HOOK_PARAMS_COUNT, VM_HOOK_PARAMS_START_POSITION, VM_HOOK_POSITION, + }, + old_vm::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + utils::{aux_heap_page_from_base, heap_page_from_base}, + }, }; #[derive(Clone, Debug, Copy)] diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs index 1ad2ce0f977..4b70a79fdd4 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs @@ -1,15 +1,15 @@ use std::convert::TryInto; -use zksync_types::ethabi::{encode, Address, Token}; -use zksync_types::fee::{encoding_len, Fee}; -use zksync_types::l1::is_l1_tx_type; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::{PaymasterParams, TransactionRequest}; + use zksync_types::{ - l2::TransactionType, Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, - Nonce, Transaction, H256, U256, + ethabi::{encode, Address, Token}, + fee::{encoding_len, Fee}, + l1::is_l1_tx_type, + l2::{L2Tx, TransactionType}, + transaction_request::{PaymasterParams, TransactionRequest}, + Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, + U256, }; -use zksync_utils::address_to_h256; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; use 
crate::vm_refunds_enhancement::utils::overhead::{ get_amortized_overhead, OverheadCoefficients, @@ -305,9 +305,10 @@ impl TryInto for TransactionData { #[cfg(test)] mod tests { - use super::*; use zksync_types::fee::encoding_len; + use super::*; + #[test] fn test_consistency_with_encoding_length() { let transaction = TransactionData { diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/vm_state.rs index b656cd09f9b..adeef89466f 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/vm_state.rs @@ -1,34 +1,40 @@ use zk_evm_1_3_3::{ - aux_structures::MemoryPage, - aux_structures::Timestamp, + aux_structures::{MemoryPage, Timestamp}, block_properties::BlockProperties, vm_state::{CallStackEntry, PrimitiveValue, VmState}, witness_trace::DummyTracer, zkevm_opcode_defs::{ system_params::{BOOTLOADER_MAX_MEMORY, INITIAL_FRAME_FORMAL_EH_LOCATION}, - FatPointer, BOOTLOADER_CALLDATA_PAGE, + FatPointer, BOOTLOADER_BASE_PAGE, BOOTLOADER_CALLDATA_PAGE, BOOTLOADER_CODE_PAGE, + STARTING_BASE_PAGE, STARTING_TIMESTAMP, }, }; - -use crate::interface::{L1BatchEnv, L2Block, SystemEnv}; -use zk_evm_1_3_3::zkevm_opcode_defs::{ - BOOTLOADER_BASE_PAGE, BOOTLOADER_CODE_PAGE, STARTING_BASE_PAGE, STARTING_TIMESTAMP, -}; use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::{zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, MiniblockNumber}; +use zksync_types::{ + block::legacy_miniblock_hash, zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, + MiniblockNumber, +}; use zksync_utils::h256_to_u256; -use crate::vm_refunds_enhancement::bootloader_state::BootloaderState; -use crate::vm_refunds_enhancement::constants::BOOTLOADER_HEAP_PAGE; -use 
crate::vm_refunds_enhancement::old_vm::{ - event_sink::InMemoryEventSink, history_recorder::HistoryMode, memory::SimpleMemory, - oracles::decommitter::DecommitterOracle, oracles::precompile::PrecompilesProcessorWithHistory, +use crate::{ + interface::{L1BatchEnv, L2Block, SystemEnv}, + vm_refunds_enhancement::{ + bootloader_state::BootloaderState, + constants::BOOTLOADER_HEAP_PAGE, + old_vm::{ + event_sink::InMemoryEventSink, + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::{ + decommitter::DecommitterOracle, precompile::PrecompilesProcessorWithHistory, + }, + }, + oracles::storage::StorageOracle, + types::l1_batch::bootloader_initial_memory, + utils::l2_blocks::{assert_next_block, load_last_l2_block}, + }, }; -use crate::vm_refunds_enhancement::oracles::storage::StorageOracle; -use crate::vm_refunds_enhancement::types::l1_batch::bootloader_initial_memory; -use crate::vm_refunds_enhancement::utils::l2_blocks::{assert_next_block, load_last_l2_block}; pub type ZkSyncVmState = VmState< StorageOracle, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/l1_batch.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/l1_batch.rs index 631f1436cc3..6f16e95f8d7 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/l1_batch.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/l1_batch.rs @@ -1,7 +1,8 @@ -use crate::interface::L1BatchEnv; use zksync_types::U256; use zksync_utils::{address_to_u256, h256_to_u256}; +use crate::interface::L1BatchEnv; + const OPERATOR_ADDRESS_SLOT: usize = 0; const PREV_BLOCK_HASH_SLOT: usize = 1; const NEW_BLOCK_TIMESTAMP_SLOT: usize = 2; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/l2_blocks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/l2_blocks.rs index 3d5f58094e0..5dd26c4c027 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/l2_blocks.rs +++ 
b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/l2_blocks.rs @@ -1,15 +1,17 @@ -use crate::interface::{L2Block, L2BlockEnv}; use zksync_state::{ReadStorage, StoragePtr}; use zksync_system_constants::{ SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES, }; -use zksync_types::block::unpack_block_info; -use zksync_types::web3::signing::keccak256; -use zksync_types::{AccountTreeId, MiniblockNumber, StorageKey, H256, U256}; +use zksync_types::{ + block::unpack_block_info, web3::signing::keccak256, AccountTreeId, MiniblockNumber, StorageKey, + H256, U256, +}; use zksync_utils::{h256_to_u256, u256_to_h256}; +use crate::interface::{L2Block, L2BlockEnv}; + pub(crate) fn get_l2_block_hash_key(block_number: u32) -> StorageKey { let position = h256_to_u256(SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION) + U256::from(block_number % SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs index 6c56515cfd7..ab5149a050f 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/overhead.rs @@ -1,12 +1,12 @@ -use crate::vm_refunds_enhancement::constants::{ - BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, -}; use zk_evm_1_3_3::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; use zksync_system_constants::{MAX_L2_TX_GAS_LIMIT, MAX_TXS_IN_BLOCK}; -use zksync_types::l1::is_l1_tx_type; -use zksync_types::U256; +use zksync_types::{l1::is_l1_tx_type, U256}; use zksync_utils::ceil_div_u256; +use crate::vm_refunds_enhancement::constants::{ + BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, +}; + /// Derives the overhead for processing 
transactions in a block. pub fn derive_overhead( gas_limit: u32, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/transaction_encoding.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/transaction_encoding.rs index ab1352c2c75..56052eca813 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/transaction_encoding.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/utils/transaction_encoding.rs @@ -1,6 +1,7 @@ -use crate::vm_refunds_enhancement::types::internals::TransactionData; use zksync_types::Transaction; +use crate::vm_refunds_enhancement::types::internals::TransactionData; + /// Extension for transactions, specific for VM. Required for bypassing the orphan rule pub trait TransactionVmExt { /// Get the size of the transaction in tokens. diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 11eea1206a8..678a467d447 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -1,20 +1,22 @@ -use crate::HistoryMode; use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::l2_to_l1_log::UserL2ToL1Log; -use zksync_types::Transaction; +use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_refunds_enhancement::old_vm::events::merge_events; - -use crate::interface::{ - BootloaderMemory, CurrentExecutionState, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, +use crate::{ + interface::{ + BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, + }, + vm_latest::HistoryEnabled, + vm_refunds_enhancement::{ + bootloader_state::BootloaderState, + 
old_vm::events::merge_events, + tracers::dispatcher::TracerDispatcher, + types::internals::{new_vm_state, VmSnapshot, ZkSyncVmState}, + }, + HistoryMode, }; -use crate::interface::{BytecodeCompressionError, VmMemoryMetrics}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_refunds_enhancement::bootloader_state::BootloaderState; -use crate::vm_refunds_enhancement::tracers::dispatcher::TracerDispatcher; -use crate::vm_refunds_enhancement::types::internals::{new_vm_state, VmSnapshot, ZkSyncVmState}; /// Main entry point for Virtual Machine integration. /// The instance should process only one l1 batch diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/l2_block.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/l2_block.rs index fac7cb33d21..8ce851d6699 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/l2_block.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/l2_block.rs @@ -1,11 +1,15 @@ use std::cmp::Ordering; + use zksync_types::{MiniblockNumber, H256}; use zksync_utils::concat_and_hash; -use crate::interface::{L2Block, L2BlockEnv}; -use crate::vm_virtual_blocks::bootloader_state::snapshot::L2BlockSnapshot; -use crate::vm_virtual_blocks::bootloader_state::tx::BootloaderTx; -use crate::vm_virtual_blocks::utils::l2_blocks::l2_block_hash; +use crate::{ + interface::{L2Block, L2BlockEnv}, + vm_virtual_blocks::{ + bootloader_state::{snapshot::L2BlockSnapshot, tx::BootloaderTx}, + utils::l2_blocks::l2_block_hash, + }, +}; const EMPTY_TXS_ROLLING_HASH: H256 = H256::zero(); diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs index 2d67121e89b..685b1821fd5 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs @@ -1,19 +1,22 @@ -use 
crate::vm_virtual_blocks::bootloader_state::{ - l2_block::BootloaderL2Block, - snapshot::BootloaderStateSnapshot, - utils::{apply_l2_block, apply_tx_to_memory}, -}; use std::cmp::Ordering; + use zksync_types::{L2ChainId, U256}; use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}; -use crate::vm_virtual_blocks::{ - constants::TX_DESCRIPTION_OFFSET, types::internals::TransactionData, - utils::l2_blocks::assert_next_block, -}; - use super::tx::BootloaderTx; +use crate::{ + interface::{BootloaderMemory, L2BlockEnv, TxExecutionMode}, + vm_virtual_blocks::{ + bootloader_state::{ + l2_block::BootloaderL2Block, + snapshot::BootloaderStateSnapshot, + utils::{apply_l2_block, apply_tx_to_memory}, + }, + constants::TX_DESCRIPTION_OFFSET, + types::internals::TransactionData, + utils::l2_blocks::assert_next_block, + }, +}; /// Intermediate bootloader-related VM state. /// /// Required to process transactions one by one (since we intercept the VM execution to execute diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs index 3b53c918fda..067d62a9fdd 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/tx.rs @@ -1,7 +1,8 @@ -use crate::vm_virtual_blocks::types::internals::TransactionData; use zksync_types::{L2ChainId, H256, U256}; use zksync_utils::bytecode::CompressedBytecodeInfo; +use crate::vm_virtual_blocks::types::internals::TransactionData; + /// Information about tx necessary for execution in bootloader. 
#[derive(Debug, Clone)] pub(super) struct BootloaderTx { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs index 6e836ad201d..a3986d6fe46 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs @@ -1,16 +1,19 @@ use zksync_types::U256; -use zksync_utils::bytecode::CompressedBytecodeInfo; -use zksync_utils::{bytes_to_be_words, h256_to_u256}; - -use crate::interface::{BootloaderMemory, TxExecutionMode}; -use crate::vm_virtual_blocks::bootloader_state::l2_block::BootloaderL2Block; -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, COMPRESSED_BYTECODES_OFFSET, - OPERATOR_REFUNDS_OFFSET, TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, -}; +use zksync_utils::{bytecode::CompressedBytecodeInfo, bytes_to_be_words, h256_to_u256}; use super::tx::BootloaderTx; +use crate::{ + interface::{BootloaderMemory, TxExecutionMode}, + vm_virtual_blocks::{ + bootloader_state::l2_block::BootloaderL2Block, + constants::{ + BOOTLOADER_TX_DESCRIPTION_OFFSET, BOOTLOADER_TX_DESCRIPTION_SIZE, + COMPRESSED_BYTECODES_OFFSET, OPERATOR_REFUNDS_OFFSET, TX_DESCRIPTION_OFFSET, + TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, + TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET, + }, + }, +}; pub(super) fn get_memory_for_compressed_bytecodes( compressed_bytecodes: &[CompressedBytecodeInfo], diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/constants.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/constants.rs index 5535be90381..c03260f1b6d 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/constants.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/constants.rs @@ -1,14 
+1,12 @@ use zk_evm_1_3_3::aux_structures::MemoryPage; - +pub use zk_evm_1_3_3::zkevm_opcode_defs::system_params::{ + ERGS_PER_CIRCUIT, INITIAL_STORAGE_WRITE_PUBDATA_BYTES, MAX_PUBDATA_PER_BLOCK, +}; use zksync_system_constants::{ L1_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, MAX_TXS_IN_BLOCK, USED_BOOTLOADER_MEMORY_WORDS, }; -pub use zk_evm_1_3_3::zkevm_opcode_defs::system_params::{ - ERGS_PER_CIRCUIT, INITIAL_STORAGE_WRITE_PUBDATA_BYTES, MAX_PUBDATA_PER_BLOCK, -}; - use crate::vm_virtual_blocks::old_vm::utils::heap_page_from_base; /// Max cycles for a single transaction. diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs index 2ae53a48ef3..570581740ef 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/bytecode.rs @@ -1,13 +1,12 @@ use itertools::Itertools; - -use crate::interface::VmInterface; -use crate::HistoryMode; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::U256; -use zksync_utils::bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}; -use zksync_utils::bytes_to_be_words; +use zksync_utils::{ + bytecode::{compress_bytecode, hash_bytecode, CompressedBytecodeInfo}, + bytes_to_be_words, +}; -use crate::vm_virtual_blocks::Vm; +use crate::{interface::VmInterface, vm_virtual_blocks::Vm, HistoryMode}; impl Vm { /// Checks the last transaction has successfully published compressed bytecodes and returns `true` if there is at least one is still unknown. 
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs index ac95312019d..2938280d266 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs @@ -1,16 +1,22 @@ -use crate::interface::tracer::{TracerExecutionStopReason, VmExecutionStopReason}; -use crate::interface::{VmExecutionMode, VmExecutionResultAndLogs}; -use crate::HistoryMode; use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; -use crate::vm_virtual_blocks::old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}; -use crate::vm_virtual_blocks::tracers::dispatcher::TracerDispatcher; -use crate::vm_virtual_blocks::tracers::{ - traits::{ExecutionEndTracer, VmTracer}, - DefaultExecutionTracer, RefundsTracer, +use crate::{ + interface::{ + tracer::{TracerExecutionStopReason, VmExecutionStopReason}, + VmExecutionMode, VmExecutionResultAndLogs, + }, + vm_virtual_blocks::{ + old_vm::utils::{vm_may_have_ended_inner, VmExecutionResult}, + tracers::{ + dispatcher::TracerDispatcher, + traits::{ExecutionEndTracer, VmTracer}, + DefaultExecutionTracer, RefundsTracer, + }, + vm::Vm, + }, + HistoryMode, }; -use crate::vm_virtual_blocks::vm::Vm; impl Vm { pub(crate) fn inspect_inner( diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/gas.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/gas.rs index 1f06ecb0827..0ca52d2b687 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/gas.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/gas.rs @@ -1,8 +1,9 @@ -use crate::HistoryMode; use zksync_state::WriteStorage; -use crate::vm_virtual_blocks::tracers::DefaultExecutionTracer; -use crate::vm_virtual_blocks::vm::Vm; +use crate::{ + vm_virtual_blocks::{tracers::DefaultExecutionTracer, vm::Vm}, + 
HistoryMode, +}; impl Vm { /// Returns the amount of gas remaining to the VM. diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs index a32f3a16572..0d407efd041 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/logs.rs @@ -1,14 +1,18 @@ use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; +use zksync_types::{ + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + VmEvent, +}; -use crate::interface::types::outputs::VmExecutionLogs; -use crate::HistoryMode; -use zksync_types::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; -use zksync_types::VmEvent; - -use crate::vm_virtual_blocks::old_vm::events::merge_events; -use crate::vm_virtual_blocks::old_vm::utils::precompile_calls_count_after_timestamp; -use crate::vm_virtual_blocks::vm::Vm; +use crate::{ + interface::types::outputs::VmExecutionLogs, + vm_virtual_blocks::{ + old_vm::{events::merge_events, utils::precompile_calls_count_after_timestamp}, + vm::Vm, + }, + HistoryMode, +}; impl Vm { pub(crate) fn collect_execution_logs_after_timestamp( diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/snapshots.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/snapshots.rs index 1a8ad6fefd2..569e1115039 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/snapshots.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/snapshots.rs @@ -1,13 +1,12 @@ -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; - use std::time::Duration; -use crate::vm_latest::HistoryEnabled; +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; -use crate::vm_virtual_blocks::{ - old_vm::oracles::OracleWithHistory, 
types::internals::VmSnapshot, vm::Vm, +use crate::{ + vm_latest::HistoryEnabled, + vm_virtual_blocks::{old_vm::oracles::OracleWithHistory, types::internals::VmSnapshot, vm::Vm}, }; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs index dd4a5ad55b2..074e8dae56e 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs @@ -1,12 +1,12 @@ use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; - -use crate::interface::{VmExecutionStatistics, VmMemoryMetrics}; -use crate::HistoryMode; use zksync_types::U256; -use crate::vm_virtual_blocks::tracers::DefaultExecutionTracer; -use crate::vm_virtual_blocks::vm::Vm; +use crate::{ + interface::{VmExecutionStatistics, VmMemoryMetrics}, + vm_virtual_blocks::{tracers::DefaultExecutionTracer, vm::Vm}, + HistoryMode, +}; /// Module responsible for observing the VM behavior, i.e. calculating the statistics of the VM runs /// or reporting the VM memory usage. 
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/tx.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/tx.rs index bfeeb56e022..72a7dbc65de 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/tx.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/tx.rs @@ -1,15 +1,16 @@ -use crate::vm_virtual_blocks::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_virtual_blocks::implementation::bytecode::{ - bytecode_to_factory_dep, compress_bytecodes, -}; -use crate::HistoryMode; use zk_evm_1_3_3::aux_structures::Timestamp; use zksync_state::WriteStorage; -use zksync_types::l1::is_l1_tx_type; -use zksync_types::Transaction; +use zksync_types::{l1::is_l1_tx_type, Transaction}; -use crate::vm_virtual_blocks::types::internals::TransactionData; -use crate::vm_virtual_blocks::vm::Vm; +use crate::{ + vm_virtual_blocks::{ + constants::BOOTLOADER_HEAP_PAGE, + implementation::bytecode::{bytecode_to_factory_dep, compress_bytecodes}, + types::internals::TransactionData, + vm::Vm, + }, + HistoryMode, +}; impl Vm { pub(crate) fn push_raw_transaction( diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/mod.rs index 3a7a96e729d..1500e7027b7 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/mod.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/mod.rs @@ -1,30 +1,24 @@ -pub use old_vm::{ - history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}, - memory::SimpleMemory, - oracles::storage::StorageOracle, +pub use self::{ + bootloader_state::BootloaderState, + old_vm::{ + history_recorder::{HistoryDisabled, HistoryEnabled, HistoryMode}, + memory::SimpleMemory, + oracles::storage::StorageOracle, + }, + tracers::{ + dispatcher::TracerDispatcher, + traits::{ExecutionEndTracer, ExecutionProcessing, TracerPointer, VmTracer}, + }, + types::internals::ZkSyncVmState, + utils::transaction_encoding::TransactionVmExt, + vm::Vm, }; 
-pub use tracers::{ - dispatcher::TracerDispatcher, - traits::{ExecutionEndTracer, ExecutionProcessing, TracerPointer, VmTracer}, -}; - -pub use types::internals::ZkSyncVmState; -pub use utils::transaction_encoding::TransactionVmExt; - -pub use bootloader_state::BootloaderState; - -pub use vm::Vm; - mod bootloader_state; +pub mod constants; mod implementation; mod old_vm; pub(crate) mod tracers; mod types; -mod vm; - -pub mod constants; pub mod utils; - -// #[cfg(test)] -// mod tests; +mod vm; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs index 49ec162fd5e..02938594b5c 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/event_sink.rs @@ -1,8 +1,5 @@ -use crate::vm_virtual_blocks::old_vm::{ - history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, - oracles::OracleWithHistory, -}; use std::collections::HashMap; + use zk_evm_1_3_3::{ abstractions::EventSink, aux_structures::{LogQuery, Timestamp}, @@ -12,6 +9,11 @@ use zk_evm_1_3_3::{ }, }; +use crate::vm_virtual_blocks::old_vm::{ + history_recorder::{AppDataFrameManagerWithHistory, HistoryEnabled, HistoryMode}, + oracles::OracleWithHistory, +}; + #[derive(Debug, Clone, PartialEq, Default)] pub struct InMemoryEventSink { frames_stack: AppDataFrameManagerWithHistory, H>, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/history_recorder.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/history_recorder.rs index a38ee177245..ca02739032c 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/history_recorder.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/history_recorder.rs @@ -5,7 +5,6 @@ use zk_evm_1_3_3::{ vm_state::PrimitiveValue, zkevm_opcode_defs::{self}, }; - use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::{StorageKey, 
U256}; use zksync_utils::{h256_to_u256, u256_to_h256}; @@ -767,11 +766,14 @@ impl HistoryRecorder, H> { #[cfg(test)] mod tests { - use crate::vm_virtual_blocks::old_vm::history_recorder::{HistoryRecorder, MemoryWrapper}; - use crate::vm_virtual_blocks::HistoryDisabled; use zk_evm_1_3_3::{aux_structures::Timestamp, vm_state::PrimitiveValue}; use zksync_types::U256; + use crate::vm_virtual_blocks::{ + old_vm::history_recorder::{HistoryRecorder, MemoryWrapper}, + HistoryDisabled, + }; + #[test] fn memory_equality() { let mut a: HistoryRecorder = Default::default(); diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/memory.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/memory.rs index f1a424c36ae..c78f8a9e779 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/memory.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/memory.rs @@ -1,16 +1,18 @@ -use zk_evm_1_3_3::abstractions::{Memory, MemoryType}; -use zk_evm_1_3_3::aux_structures::{MemoryPage, MemoryQuery, Timestamp}; -use zk_evm_1_3_3::vm_state::PrimitiveValue; -use zk_evm_1_3_3::zkevm_opcode_defs::FatPointer; +use zk_evm_1_3_3::{ + abstractions::{Memory, MemoryType}, + aux_structures::{MemoryPage, MemoryQuery, Timestamp}, + vm_state::PrimitiveValue, + zkevm_opcode_defs::FatPointer, +}; use zksync_types::U256; -use crate::vm_virtual_blocks::old_vm::history_recorder::{ - FramedStack, HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, - MemoryWrapper, WithHistory, -}; -use crate::vm_virtual_blocks::old_vm::oracles::OracleWithHistory; -use crate::vm_virtual_blocks::old_vm::utils::{ - aux_heap_page_from_base, heap_page_from_base, stack_page_from_base, +use crate::vm_virtual_blocks::old_vm::{ + history_recorder::{ + FramedStack, HistoryEnabled, HistoryMode, IntFrameManagerWithHistory, MemoryWithHistory, + MemoryWrapper, WithHistory, + }, + oracles::OracleWithHistory, + utils::{aux_heap_page_from_base, heap_page_from_base, 
stack_page_from_base}, }; #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs index 12c3ffd403d..061912f83c9 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/decommitter.rs @@ -1,23 +1,19 @@ -use std::collections::HashMap; -use std::fmt::Debug; +use std::{collections::HashMap, fmt::Debug}; -use crate::vm_virtual_blocks::old_vm::history_recorder::{ - HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, -}; - -use zk_evm_1_3_3::abstractions::MemoryType; -use zk_evm_1_3_3::aux_structures::Timestamp; use zk_evm_1_3_3::{ - abstractions::{DecommittmentProcessor, Memory}, - aux_structures::{DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery}, + abstractions::{DecommittmentProcessor, Memory, MemoryType}, + aux_structures::{ + DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, + }, }; - use zksync_state::{ReadStorage, StoragePtr}; use zksync_types::U256; -use zksync_utils::bytecode::bytecode_len_in_words; -use zksync_utils::{bytes_to_be_words, u256_to_h256}; +use zksync_utils::{bytecode::bytecode_len_in_words, bytes_to_be_words, u256_to_h256}; use super::OracleWithHistory; +use crate::vm_virtual_blocks::old_vm::history_recorder::{ + HistoryEnabled, HistoryMode, HistoryRecorder, WithHistory, +}; /// The main job of the DecommiterOracle is to implement the DecommittmentProcessor trait - that is /// used by the VM to 'load' bytecodes into memory. 
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/precompile.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/precompile.rs index 11ddb26d03a..8fd77ef7f87 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/precompile.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/precompile.rs @@ -1,17 +1,14 @@ use zk_evm_1_3_3::{ - abstractions::Memory, - abstractions::PrecompileCyclesWitness, - abstractions::PrecompilesProcessor, + abstractions::{Memory, PrecompileCyclesWitness, PrecompilesProcessor}, aux_structures::{LogQuery, MemoryQuery, Timestamp}, precompiles::DefaultPrecompilesProcessor, }; +use super::OracleWithHistory; use crate::vm_virtual_blocks::old_vm::history_recorder::{ HistoryEnabled, HistoryMode, HistoryRecorder, }; -use super::OracleWithHistory; - /// Wrap of DefaultPrecompilesProcessor that store queue /// of timestamp when precompiles are called to be executed. /// Number of precompiles per block is strictly limited, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs index 70186b78b32..91c293f4ac8 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/oracles/storage.rs @@ -1,26 +1,22 @@ use std::collections::HashMap; -use crate::vm_virtual_blocks::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, - HistoryRecorder, StorageWrapper, WithHistory, -}; - -use zk_evm_1_3_3::abstractions::RefundedAmounts; -use zk_evm_1_3_3::zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES; use zk_evm_1_3_3::{ - abstractions::{RefundType, Storage as VmStorageOracle}, + abstractions::{RefundType, RefundedAmounts, Storage as VmStorageOracle}, aux_structures::{LogQuery, Timestamp}, + 
zkevm_opcode_defs::system_params::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; - use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_eth_balance; use zksync_types::{ - AccountTreeId, Address, StorageKey, StorageLogQuery, StorageLogQueryType, BOOTLOADER_ADDRESS, - U256, + utils::storage_key_for_eth_balance, AccountTreeId, Address, StorageKey, StorageLogQuery, + StorageLogQueryType, BOOTLOADER_ADDRESS, U256, }; use zksync_utils::u256_to_h256; use super::OracleWithHistory; +use crate::vm_virtual_blocks::old_vm::history_recorder::{ + AppDataFrameManagerWithHistory, HashMapHistoryEvent, HistoryEnabled, HistoryMode, + HistoryRecorder, StorageWrapper, WithHistory, +}; // While the storage does not support different shards, it was decided to write the // code of the StorageOracle with the shard parameters in mind. diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/utils.rs index 65497778495..7d38ba1058d 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/old_vm/utils.rs @@ -1,22 +1,19 @@ -use crate::vm_virtual_blocks::old_vm::memory::SimpleMemory; - -use crate::vm_virtual_blocks::types::internals::ZkSyncVmState; -use crate::vm_virtual_blocks::HistoryMode; - -use zk_evm_1_3_3::zkevm_opcode_defs::decoding::{ - AllowedPcOrImm, EncodingModeProduction, VmEncodingMode, -}; -use zk_evm_1_3_3::zkevm_opcode_defs::RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER; use zk_evm_1_3_3::{ aux_structures::{MemoryPage, Timestamp}, vm_state::PrimitiveValue, - zkevm_opcode_defs::FatPointer, + zkevm_opcode_defs::{ + decoding::{AllowedPcOrImm, EncodingModeProduction, VmEncodingMode}, + FatPointer, RET_IMPLICIT_RETURNDATA_PARAMS_REGISTER, + }, }; use zksync_state::WriteStorage; use zksync_system_constants::L1_GAS_PER_PUBDATA_BYTE; - use zksync_types::{Address, U256}; +use crate::vm_virtual_blocks::{ + 
old_vm::memory::SimpleMemory, types::internals::ZkSyncVmState, HistoryMode, +}; + #[derive(Debug, Clone)] pub(crate) enum VmExecutionResult { Ok(Vec), diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/default_tracers.rs index f394ab5f752..463bdaa4f35 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/default_tracers.rs @@ -1,33 +1,37 @@ -use std::fmt::{Debug, Formatter}; -use std::marker::PhantomData; +use std::{ + fmt::{Debug, Formatter}, + marker::PhantomData, +}; -use crate::interface::dyn_tracers::vm_1_3_3::DynTracer; -use crate::interface::tracer::VmExecutionStopReason; -use crate::interface::VmExecutionMode; -use zk_evm_1_3_3::witness_trace::DummyTracer; -use zk_evm_1_3_3::zkevm_opcode_defs::{Opcode, RetOpcode}; use zk_evm_1_3_3::{ tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, Tracer, VmLocalStateData, }, vm_state::VmLocalState, + witness_trace::DummyTracer, + zkevm_opcode_defs::{Opcode, RetOpcode}, }; use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::Timestamp; -use crate::vm_virtual_blocks::bootloader_state::utils::apply_l2_block; -use crate::vm_virtual_blocks::bootloader_state::BootloaderState; -use crate::vm_virtual_blocks::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_virtual_blocks::old_vm::history_recorder::HistoryMode; -use crate::vm_virtual_blocks::old_vm::memory::SimpleMemory; -use crate::vm_virtual_blocks::tracers::dispatcher::TracerDispatcher; -use crate::vm_virtual_blocks::tracers::traits::{ExecutionEndTracer, ExecutionProcessing}; -use crate::vm_virtual_blocks::tracers::utils::{ - computational_gas_price, gas_spent_on_bytecodes_and_long_messages_this_opcode, - print_debug_if_needed, VmHook, +use crate::{ + interface::{dyn_tracers::vm_1_3_3::DynTracer, tracer::VmExecutionStopReason, VmExecutionMode}, + 
vm_virtual_blocks::{ + bootloader_state::{utils::apply_l2_block, BootloaderState}, + constants::BOOTLOADER_HEAP_PAGE, + old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, + tracers::{ + dispatcher::TracerDispatcher, + traits::{ExecutionEndTracer, ExecutionProcessing}, + utils::{ + computational_gas_price, gas_spent_on_bytecodes_and_long_messages_this_opcode, + print_debug_if_needed, VmHook, + }, + RefundsTracer, ResultTracer, + }, + types::internals::ZkSyncVmState, + }, }; -use crate::vm_virtual_blocks::tracers::{RefundsTracer, ResultTracer}; -use crate::vm_virtual_blocks::types::internals::ZkSyncVmState; /// Default tracer for the VM. It manages the other tracers execution and stop the vm when needed. pub(crate) struct DefaultExecutionTracer { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/dispatcher.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/dispatcher.rs index 7eb89461eab..b1b5ef418ee 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/dispatcher.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/dispatcher.rs @@ -1,17 +1,18 @@ -use crate::interface::dyn_tracers::vm_1_3_3::DynTracer; -use crate::interface::tracer::VmExecutionStopReason; -use crate::interface::VmExecutionResultAndLogs; -use crate::vm_virtual_blocks::TracerPointer; -use crate::vm_virtual_blocks::{ - BootloaderState, ExecutionEndTracer, ExecutionProcessing, HistoryMode, SimpleMemory, VmTracer, - ZkSyncVmState, -}; - use zk_evm_1_3_3::tracing::{ AfterDecodingData, AfterExecutionData, BeforeExecutionData, VmLocalStateData, }; use zksync_state::{StoragePtr, WriteStorage}; +use crate::{ + interface::{ + dyn_tracers::vm_1_3_3::DynTracer, tracer::VmExecutionStopReason, VmExecutionResultAndLogs, + }, + vm_virtual_blocks::{ + BootloaderState, ExecutionEndTracer, ExecutionProcessing, HistoryMode, SimpleMemory, + TracerPointer, VmTracer, ZkSyncVmState, + }, +}; + impl From> for TracerDispatcher { fn from(value: TracerPointer) 
-> Self { Self { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs index 6496e13172a..6051cd7bb7d 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/refunds.rs @@ -1,9 +1,6 @@ -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; - use std::collections::HashMap; -use crate::interface::dyn_tracers::vm_1_3_3::DynTracer; -use crate::interface::{L1BatchEnv, Refunds, VmExecutionResultAndLogs}; +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; use zk_evm_1_3_3::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, @@ -17,23 +14,26 @@ use zksync_types::{ zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, L1BatchNumber, StorageKey, U256, }; -use zksync_utils::bytecode::bytecode_len_in_bytes; -use zksync_utils::{ceil_div_u256, u256_to_h256}; - -use crate::vm_virtual_blocks::bootloader_state::BootloaderState; -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_virtual_blocks::old_vm::{ - events::merge_events, history_recorder::HistoryMode, memory::SimpleMemory, - oracles::storage::storage_key_of_log, utils::eth_price_per_pubdata_byte, -}; -use crate::vm_virtual_blocks::tracers::utils::gas_spent_on_bytecodes_and_long_messages_this_opcode; -use crate::vm_virtual_blocks::tracers::{ - traits::{ExecutionEndTracer, ExecutionProcessing, VmTracer}, - utils::{get_vm_hook_params, VmHook}, +use zksync_utils::{bytecode::bytecode_len_in_bytes, ceil_div_u256, u256_to_h256}; + +use crate::{ + interface::{dyn_tracers::vm_1_3_3::DynTracer, L1BatchEnv, Refunds, VmExecutionResultAndLogs}, + vm_virtual_blocks::{ + bootloader_state::BootloaderState, + constants::{BOOTLOADER_HEAP_PAGE, OPERATOR_REFUNDS_OFFSET, 
TX_GAS_LIMIT_OFFSET}, + old_vm::{ + events::merge_events, history_recorder::HistoryMode, memory::SimpleMemory, + oracles::storage::storage_key_of_log, utils::eth_price_per_pubdata_byte, + }, + tracers::{ + traits::{ExecutionEndTracer, ExecutionProcessing, VmTracer}, + utils::{ + gas_spent_on_bytecodes_and_long_messages_this_opcode, get_vm_hook_params, VmHook, + }, + }, + types::internals::ZkSyncVmState, + }, }; -use crate::vm_virtual_blocks::types::internals::ZkSyncVmState; /// Tracer responsible for collecting information about refunds. #[derive(Debug, Clone)] diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/result_tracer.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/result_tracer.rs index 1f566fea567..8c6a5d1793f 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/result_tracer.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/result_tracer.rs @@ -4,28 +4,28 @@ use zk_evm_1_3_3::{ zkevm_opcode_defs::FatPointer, }; use zksync_state::{StoragePtr, WriteStorage}; - -use crate::interface::dyn_tracers::vm_1_3_3::DynTracer; -use crate::interface::tracer::VmExecutionStopReason; -use crate::interface::{ - ExecutionResult, Halt, TxRevertReason, VmExecutionMode, VmExecutionResultAndLogs, - VmRevertReason, -}; use zksync_types::U256; -use crate::vm_virtual_blocks::bootloader_state::BootloaderState; -use crate::vm_virtual_blocks::old_vm::{ - history_recorder::HistoryMode, - memory::SimpleMemory, - utils::{vm_may_have_ended_inner, VmExecutionResult}, +use crate::{ + interface::{ + dyn_tracers::vm_1_3_3::DynTracer, tracer::VmExecutionStopReason, ExecutionResult, Halt, + TxRevertReason, VmExecutionMode, VmExecutionResultAndLogs, VmRevertReason, + }, + vm_virtual_blocks::{ + bootloader_state::BootloaderState, + constants::{BOOTLOADER_HEAP_PAGE, RESULT_SUCCESS_FIRST_SLOT}, + old_vm::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + utils::{vm_may_have_ended_inner, VmExecutionResult}, + }, + tracers::{ + 
traits::{ExecutionEndTracer, ExecutionProcessing, VmTracer}, + utils::{get_vm_hook_params, read_pointer, VmHook}, + }, + types::internals::ZkSyncVmState, + }, }; -use crate::vm_virtual_blocks::tracers::{ - traits::{ExecutionEndTracer, ExecutionProcessing, VmTracer}, - utils::{get_vm_hook_params, read_pointer, VmHook}, -}; -use crate::vm_virtual_blocks::types::internals::ZkSyncVmState; - -use crate::vm_virtual_blocks::constants::{BOOTLOADER_HEAP_PAGE, RESULT_SUCCESS_FIRST_SLOT}; #[derive(Debug, Clone)] enum Result { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/traits.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/traits.rs index 3045e6f8319..6d8fdab4e66 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/traits.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/traits.rs @@ -1,12 +1,15 @@ -use crate::interface::dyn_tracers::vm_1_3_3::DynTracer; -use crate::interface::tracer::VmExecutionStopReason; -use crate::interface::VmExecutionResultAndLogs; use zksync_state::WriteStorage; -use crate::vm_virtual_blocks::bootloader_state::BootloaderState; -use crate::vm_virtual_blocks::old_vm::history_recorder::HistoryMode; -use crate::vm_virtual_blocks::old_vm::memory::SimpleMemory; -use crate::vm_virtual_blocks::types::internals::ZkSyncVmState; +use crate::{ + interface::{ + dyn_tracers::vm_1_3_3::DynTracer, tracer::VmExecutionStopReason, VmExecutionResultAndLogs, + }, + vm_virtual_blocks::{ + bootloader_state::BootloaderState, + old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, + types::internals::ZkSyncVmState, + }, +}; pub type TracerPointer = Box>; /// Run tracer for collecting data during the vm execution cycles diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs index 0ab697f626f..b2358602fe0 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs +++ 
b/core/lib/multivm/src/versions/vm_virtual_blocks/tracers/utils.rs @@ -1,10 +1,10 @@ -use zk_evm_1_3_3::aux_structures::MemoryPage; -use zk_evm_1_3_3::zkevm_opcode_defs::{FarCallABI, FarCallForwardPageType}; use zk_evm_1_3_3::{ + aux_structures::MemoryPage, tracing::{BeforeExecutionData, VmLocalStateData}, - zkevm_opcode_defs::{FatPointer, LogOpcode, Opcode, UMAOpcode}, + zkevm_opcode_defs::{ + FarCallABI, FarCallForwardPageType, FatPointer, LogOpcode, Opcode, UMAOpcode, + }, }; - use zksync_system_constants::{ ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, @@ -12,12 +12,16 @@ use zksync_system_constants::{ use zksync_types::U256; use zksync_utils::u256_to_h256; -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, VM_HOOK_PARAMS_COUNT, VM_HOOK_PARAMS_START_POSITION, VM_HOOK_POSITION, +use crate::vm_virtual_blocks::{ + constants::{ + BOOTLOADER_HEAP_PAGE, VM_HOOK_PARAMS_COUNT, VM_HOOK_PARAMS_START_POSITION, VM_HOOK_POSITION, + }, + old_vm::{ + history_recorder::HistoryMode, + memory::SimpleMemory, + utils::{aux_heap_page_from_base, heap_page_from_base}, + }, }; -use crate::vm_virtual_blocks::old_vm::history_recorder::HistoryMode; -use crate::vm_virtual_blocks::old_vm::memory::SimpleMemory; -use crate::vm_virtual_blocks::old_vm::utils::{aux_heap_page_from_base, heap_page_from_base}; #[derive(Clone, Debug, Copy)] pub(crate) enum VmHook { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs index add3d829d80..6d2fe36868b 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs @@ -1,15 +1,15 @@ use std::convert::TryInto; -use zksync_types::ethabi::{encode, Address, Token}; -use 
zksync_types::fee::{encoding_len, Fee}; -use zksync_types::l1::is_l1_tx_type; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::{PaymasterParams, TransactionRequest}; + use zksync_types::{ - l2::TransactionType, Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, - Nonce, Transaction, H256, U256, + ethabi::{encode, Address, Token}, + fee::{encoding_len, Fee}, + l1::is_l1_tx_type, + l2::{L2Tx, TransactionType}, + transaction_request::{PaymasterParams, TransactionRequest}, + Bytes, Execute, ExecuteTransactionCommon, L2ChainId, L2TxCommonData, Nonce, Transaction, H256, + U256, }; -use zksync_utils::address_to_h256; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; +use zksync_utils::{address_to_h256, bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256}; use crate::vm_virtual_blocks::utils::overhead::{get_amortized_overhead, OverheadCoefficients}; @@ -303,9 +303,10 @@ impl TryInto for TransactionData { #[cfg(test)] mod tests { - use super::*; use zksync_types::fee::encoding_len; + use super::*; + #[test] fn test_consistency_with_encoding_length() { let transaction = TransactionData { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/vm_state.rs index 8784c754fad..5d67982e7b4 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/vm_state.rs @@ -1,34 +1,40 @@ use zk_evm_1_3_3::{ - aux_structures::MemoryPage, - aux_structures::Timestamp, + aux_structures::{MemoryPage, Timestamp}, block_properties::BlockProperties, vm_state::{CallStackEntry, PrimitiveValue, VmState}, witness_trace::DummyTracer, zkevm_opcode_defs::{ system_params::{BOOTLOADER_MAX_MEMORY, INITIAL_FRAME_FORMAL_EH_LOCATION}, - FatPointer, BOOTLOADER_CALLDATA_PAGE, + FatPointer, BOOTLOADER_BASE_PAGE, BOOTLOADER_CALLDATA_PAGE, 
BOOTLOADER_CODE_PAGE, + STARTING_BASE_PAGE, STARTING_TIMESTAMP, }, }; - -use crate::interface::{L1BatchEnv, L2Block, SystemEnv}; -use zk_evm_1_3_3::zkevm_opcode_defs::{ - BOOTLOADER_BASE_PAGE, BOOTLOADER_CODE_PAGE, STARTING_BASE_PAGE, STARTING_TIMESTAMP, -}; use zksync_state::{StoragePtr, WriteStorage}; use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::{zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, MiniblockNumber}; +use zksync_types::{ + block::legacy_miniblock_hash, zkevm_test_harness::INITIAL_MONOTONIC_CYCLE_COUNTER, Address, + MiniblockNumber, +}; use zksync_utils::h256_to_u256; -use crate::vm_virtual_blocks::bootloader_state::BootloaderState; -use crate::vm_virtual_blocks::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_virtual_blocks::old_vm::{ - event_sink::InMemoryEventSink, history_recorder::HistoryMode, memory::SimpleMemory, - oracles::decommitter::DecommitterOracle, oracles::precompile::PrecompilesProcessorWithHistory, - oracles::storage::StorageOracle, +use crate::{ + interface::{L1BatchEnv, L2Block, SystemEnv}, + vm_virtual_blocks::{ + bootloader_state::BootloaderState, + constants::BOOTLOADER_HEAP_PAGE, + old_vm::{ + event_sink::InMemoryEventSink, + history_recorder::HistoryMode, + memory::SimpleMemory, + oracles::{ + decommitter::DecommitterOracle, precompile::PrecompilesProcessorWithHistory, + storage::StorageOracle, + }, + }, + types::l1_batch_env::bootloader_initial_memory, + utils::l2_blocks::{assert_next_block, load_last_l2_block}, + }, }; -use crate::vm_virtual_blocks::types::l1_batch_env::bootloader_initial_memory; -use crate::vm_virtual_blocks::utils::l2_blocks::{assert_next_block, load_last_l2_block}; pub type ZkSyncVmState = VmState< StorageOracle, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/types/l1_batch_env.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/types/l1_batch_env.rs index 8af706954ed..0e43863b196 100644 --- 
a/core/lib/multivm/src/versions/vm_virtual_blocks/types/l1_batch_env.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/types/l1_batch_env.rs @@ -1,7 +1,8 @@ -use crate::interface::L1BatchEnv; use zksync_types::U256; use zksync_utils::{address_to_u256, h256_to_u256}; +use crate::interface::L1BatchEnv; + const OPERATOR_ADDRESS_SLOT: usize = 0; const PREV_BLOCK_HASH_SLOT: usize = 1; const NEW_BLOCK_TIMESTAMP_SLOT: usize = 2; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/l2_blocks.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/l2_blocks.rs index 3d5f58094e0..5dd26c4c027 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/l2_blocks.rs @@ -1,15 +1,17 @@ -use crate::interface::{L2Block, L2BlockEnv}; use zksync_state::{ReadStorage, StoragePtr}; use zksync_system_constants::{ SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES, }; -use zksync_types::block::unpack_block_info; -use zksync_types::web3::signing::keccak256; -use zksync_types::{AccountTreeId, MiniblockNumber, StorageKey, H256, U256}; +use zksync_types::{ + block::unpack_block_info, web3::signing::keccak256, AccountTreeId, MiniblockNumber, StorageKey, + H256, U256, +}; use zksync_utils::{h256_to_u256, u256_to_h256}; +use crate::interface::{L2Block, L2BlockEnv}; + pub(crate) fn get_l2_block_hash_key(block_number: u32) -> StorageKey { let position = h256_to_u256(SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION) + U256::from(block_number % SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES); diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs index c17d619b464..79c52ac373b 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs +++ 
b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/overhead.rs @@ -1,12 +1,12 @@ -use crate::vm_virtual_blocks::constants::{ - BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, -}; use zk_evm_1_3_3::zkevm_opcode_defs::system_params::MAX_TX_ERGS_LIMIT; use zksync_system_constants::{MAX_L2_TX_GAS_LIMIT, MAX_TXS_IN_BLOCK}; -use zksync_types::l1::is_l1_tx_type; -use zksync_types::U256; +use zksync_types::{l1::is_l1_tx_type, U256}; use zksync_utils::ceil_div_u256; +use crate::vm_virtual_blocks::constants::{ + BLOCK_OVERHEAD_GAS, BLOCK_OVERHEAD_PUBDATA, BOOTLOADER_TX_ENCODING_SPACE, +}; + /// Derives the overhead for processing transactions in a block. pub fn derive_overhead( gas_limit: u32, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/transaction_encoding.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/transaction_encoding.rs index b45ec4d1411..5f9c37cbb73 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/utils/transaction_encoding.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/utils/transaction_encoding.rs @@ -1,6 +1,7 @@ -use crate::vm_virtual_blocks::types::internals::TransactionData; use zksync_types::Transaction; +use crate::vm_virtual_blocks::types::internals::TransactionData; + /// Extension for transactions, specific for VM. Required for bypassing the orphan rule pub trait TransactionVmExt { /// Get the size of the transaction in tokens. 
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 4110825a260..ed05e951475 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -1,21 +1,22 @@ -use crate::interface::{ - BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, -}; -use crate::vm_latest::HistoryEnabled; -use crate::HistoryMode; use zksync_state::{StoragePtr, WriteStorage}; -use zksync_types::l2_to_l1_log::UserL2ToL1Log; -use zksync_types::Transaction; +use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::vm_virtual_blocks::old_vm::events::merge_events; - -use crate::vm_virtual_blocks::bootloader_state::BootloaderState; -use crate::vm_virtual_blocks::tracers::dispatcher::TracerDispatcher; - -use crate::vm_virtual_blocks::types::internals::{new_vm_state, VmSnapshot, ZkSyncVmState}; +use crate::{ + interface::{ + BootloaderMemory, BytecodeCompressionError, CurrentExecutionState, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, + }, + vm_latest::HistoryEnabled, + vm_virtual_blocks::{ + bootloader_state::BootloaderState, + old_vm::events::merge_events, + tracers::dispatcher::TracerDispatcher, + types::internals::{new_vm_state, VmSnapshot, ZkSyncVmState}, + }, + HistoryMode, +}; /// Main entry point for Virtual Machine integration. 
/// The instance should process only one l1 batch diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 6b90da4bd3b..6716aeaf146 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,15 +1,16 @@ -use crate::interface::{ - BootloaderMemory, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, -}; - use zksync_state::{StoragePtr, WriteStorage}; use zksync_types::VmVersion; use zksync_utils::bytecode::CompressedBytecodeInfo; -use crate::glue::history_mode::HistoryMode; -use crate::tracers::TracerDispatcher; +use crate::{ + glue::history_mode::HistoryMode, + interface::{ + BootloaderMemory, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, + }, + tracers::TracerDispatcher, +}; #[derive(Debug)] pub enum VmInstance { diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index c248fb76595..6f589e83630 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -1,8 +1,8 @@ +use std::fmt::Debug; + use async_trait::async_trait; use tokio::{fs, io}; -use std::fmt::Debug; - use crate::raw::{Bucket, ObjectStore, ObjectStoreError}; impl From for ObjectStoreError { diff --git a/core/lib/object_store/src/gcs.rs b/core/lib/object_store/src/gcs.rs index d01fb833b12..1d88aa5237a 100644 --- a/core/lib/object_store/src/gcs.rs +++ b/core/lib/object_store/src/gcs.rs @@ -1,21 +1,23 @@ //! GCS-based [`ObjectStore`] implementation. 
+use std::{fmt, future::Future, time::Duration}; + use async_trait::async_trait; use google_cloud_auth::{credentials::CredentialsFile, error::Error}; use google_cloud_storage::{ client::{Client, ClientConfig}, - http::objects::{ - delete::DeleteObjectRequest, - download::Range, - get::GetObjectRequest, - upload::{Media, UploadObjectRequest, UploadType}, + http::{ + objects::{ + delete::DeleteObjectRequest, + download::Range, + get::GetObjectRequest, + upload::{Media, UploadObjectRequest, UploadType}, + }, + Error as HttpError, }, - http::Error as HttpError, }; use http::StatusCode; -use std::{fmt, future::Future, time::Duration}; - use crate::{ metrics::GCS_METRICS, raw::{Bucket, ObjectStore, ObjectStoreError}, diff --git a/core/lib/object_store/src/metrics.rs b/core/lib/object_store/src/metrics.rs index 9cd51ba3ed7..f372b5bac1c 100644 --- a/core/lib/object_store/src/metrics.rs +++ b/core/lib/object_store/src/metrics.rs @@ -1,9 +1,9 @@ //! Metrics for the object storage. -use vise::{Buckets, Histogram, LabeledFamily, LatencyObserver, Metrics}; - use std::time::Duration; +use vise::{Buckets, Histogram, LabeledFamily, LatencyObserver, Metrics}; + use crate::Bucket; #[derive(Debug, Metrics)] diff --git a/core/lib/object_store/src/mock.rs b/core/lib/object_store/src/mock.rs index 727ef1e8d53..ac1a2fd7a44 100644 --- a/core/lib/object_store/src/mock.rs +++ b/core/lib/object_store/src/mock.rs @@ -1,10 +1,10 @@ //! Mock implementation of [`ObjectStore`]. +use std::collections::HashMap; + use async_trait::async_trait; use tokio::sync::Mutex; -use std::collections::HashMap; - use crate::raw::{Bucket, ObjectStore, ObjectStoreError}; type BucketMap = HashMap>; diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index e5ee186676e..35808bb4686 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -1,15 +1,17 @@ //! Stored objects. 
-use zksync_types::aggregated_operations::L1BatchProofForL1; use zksync_types::{ + aggregated_operations::L1BatchProofForL1, proofs::{AggregationRound, PrepareBasicCircuitsJob}, storage::witness_block_state::WitnessBlockState, zkevm_test_harness::{ abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, encodings::{recursion_request::RecursionRequest, QueueSimulator}, - witness::full_block_artifact::{BlockBasicCircuits, BlockBasicCircuitsPublicInputs}, - witness::oracle::VmWitnessOracle, + witness::{ + full_block_artifact::{BlockBasicCircuits, BlockBasicCircuitsPublicInputs}, + oracle::VmWitnessOracle, + }, LeafAggregationOutputDataWitness, NodeAggregationOutputDataWitness, SchedulerCircuitInstanceWitness, }, diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index bf318a61610..c68b4cb978f 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -1,10 +1,10 @@ -use async_trait::async_trait; - use std::{error, fmt, sync::Arc}; -use crate::{file::FileBackedObjectStore, gcs::GoogleCloudStorage, mock::MockStore}; +use async_trait::async_trait; use zksync_config::configs::object_store::{ObjectStoreConfig, ObjectStoreMode}; +use crate::{file::FileBackedObjectStore, gcs::GoogleCloudStorage, mock::MockStore}; + /// Bucket for [`ObjectStore`] in which objects can be placed. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[non_exhaustive] diff --git a/core/lib/object_store/tests/integration.rs b/core/lib/object_store/tests/integration.rs index dfa659dcf8b..9db2061f17f 100644 --- a/core/lib/object_store/tests/integration.rs +++ b/core/lib/object_store/tests/integration.rs @@ -1,7 +1,6 @@ //! Integration tests for object store. 
use tokio::fs; - use zksync_object_store::{Bucket, ObjectStoreFactory}; use zksync_types::{ proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}, diff --git a/core/lib/prometheus_exporter/src/lib.rs b/core/lib/prometheus_exporter/src/lib.rs index 25f5915e205..4eda0bebe0e 100644 --- a/core/lib/prometheus_exporter/src/lib.rs +++ b/core/lib/prometheus_exporter/src/lib.rs @@ -1,11 +1,11 @@ +use std::{net::Ipv4Addr, time::Duration}; + use anyhow::Context as _; use metrics_exporter_prometheus::{Matcher, PrometheusBuilder}; use tokio::sync::watch; use vise::MetricsCollection; use vise_exporter::MetricsExporter; -use std::{net::Ipv4Addr, time::Duration}; - fn configure_legacy_exporter(builder: PrometheusBuilder) -> PrometheusBuilder { // in seconds let default_latency_buckets = [0.001, 0.005, 0.025, 0.1, 0.25, 1.0, 5.0, 30.0, 120.0]; diff --git a/core/lib/prover_utils/src/gcs_proof_fetcher.rs b/core/lib/prover_utils/src/gcs_proof_fetcher.rs index 8b59fe67a61..26872701a1f 100644 --- a/core/lib/prover_utils/src/gcs_proof_fetcher.rs +++ b/core/lib/prover_utils/src/gcs_proof_fetcher.rs @@ -1,6 +1,5 @@ use zksync_object_store::{ObjectStore, ObjectStoreError}; -use zksync_types::aggregated_operations::L1BatchProofForL1; -use zksync_types::L1BatchNumber; +use zksync_types::{aggregated_operations::L1BatchProofForL1, L1BatchNumber}; pub async fn load_wrapped_fri_proofs_for_range( from: L1BatchNumber, diff --git a/core/lib/prover_utils/src/region_fetcher.rs b/core/lib/prover_utils/src/region_fetcher.rs index 22a0cedce49..d2c49dd068d 100644 --- a/core/lib/prover_utils/src/region_fetcher.rs +++ b/core/lib/prover_utils/src/region_fetcher.rs @@ -1,8 +1,9 @@ use anyhow::Context as _; use regex::Regex; -use reqwest::header::{HeaderMap, HeaderValue}; -use reqwest::Method; - +use reqwest::{ + header::{HeaderMap, HeaderValue}, + Method, +}; use zksync_config::configs::ProverGroupConfig; use zksync_utils::http_with_retries::send_request_with_retries; diff --git 
a/core/lib/prover_utils/src/vk_commitment_helper.rs b/core/lib/prover_utils/src/vk_commitment_helper.rs index 05e35a4f7ee..9a6c074b1d2 100644 --- a/core/lib/prover_utils/src/vk_commitment_helper.rs +++ b/core/lib/prover_utils/src/vk_commitment_helper.rs @@ -1,5 +1,6 @@ -use anyhow::Context as _; use std::fs; + +use anyhow::Context as _; use toml_edit::{Document, Item, Value}; pub fn get_toml_formatted_value(string_value: String) -> Item { diff --git a/core/lib/queued_job_processor/src/lib.rs b/core/lib/queued_job_processor/src/lib.rs index d5ed185b256..2966fba49ca 100644 --- a/core/lib/queued_job_processor/src/lib.rs +++ b/core/lib/queued_job_processor/src/lib.rs @@ -1,15 +1,13 @@ -use std::fmt::Debug; -use std::time::{Duration, Instant}; +use std::{ + fmt::Debug, + time::{Duration, Instant}, +}; use anyhow::Context as _; pub use async_trait::async_trait; -use tokio::sync::watch; -use tokio::task::JoinHandle; -use tokio::time::sleep; - -use zksync_utils::panic_extractor::try_extract_panic_message; - +use tokio::{sync::watch, task::JoinHandle, time::sleep}; use vise::{Buckets, Counter, Histogram, LabeledFamily, Metrics}; +use zksync_utils::panic_extractor::try_extract_panic_message; const ATTEMPT_BUCKETS: Buckets = Buckets::exponential(1.0..=64.0, 2.0); diff --git a/core/lib/state/src/cache/metrics.rs b/core/lib/state/src/cache/metrics.rs index 13bc8c94aa9..0e8c8cd8685 100644 --- a/core/lib/state/src/cache/metrics.rs +++ b/core/lib/state/src/cache/metrics.rs @@ -1,9 +1,9 @@ //! General-purpose cache metrics. 
-use vise::{Buckets, Counter, EncodeLabelValue, Gauge, Histogram, LabeledFamily, Metrics}; - use std::time::Duration; +use vise::{Buckets, Counter, EncodeLabelValue, Gauge, Histogram, LabeledFamily, Metrics}; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue)] #[metrics(rename_all = "snake_case")] pub(super) enum Method { diff --git a/core/lib/state/src/in_memory.rs b/core/lib/state/src/in_memory.rs index fcb69affea8..6f9fb868045 100644 --- a/core/lib/state/src/in_memory.rs +++ b/core/lib/state/src/in_memory.rs @@ -1,6 +1,5 @@ use std::collections::{hash_map::Entry, BTreeMap, HashMap}; -use crate::ReadStorage; use zksync_types::{ block::DeployedContract, get_code_key, get_known_code_key, get_system_context_init_logs, system_contracts::get_system_smart_contracts, L2ChainId, StorageKey, StorageLog, @@ -8,6 +7,8 @@ use zksync_types::{ }; use zksync_utils::u256_to_h256; +use crate::ReadStorage; + /// Network ID we use by default for in memory storage. pub const IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID: u32 = 270; diff --git a/core/lib/state/src/postgres/metrics.rs b/core/lib/state/src/postgres/metrics.rs index 33e5664bb2b..18fb54cdfa3 100644 --- a/core/lib/state/src/postgres/metrics.rs +++ b/core/lib/state/src/postgres/metrics.rs @@ -1,9 +1,9 @@ //! Metrics for `PostgresStorage`. 
-use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; - use std::time::Duration; +use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] #[metrics(label = "stage", rename_all = "snake_case")] pub(super) enum ValuesUpdateStage { diff --git a/core/lib/state/src/postgres/mod.rs b/core/lib/state/src/postgres/mod.rs index 8cc69f7bbbd..7208877abb3 100644 --- a/core/lib/state/src/postgres/mod.rs +++ b/core/lib/state/src/postgres/mod.rs @@ -1,23 +1,22 @@ -use tokio::{runtime::Handle, sync::mpsc}; - use std::{ mem, sync::{Arc, RwLock}, }; +use tokio::{runtime::Handle, sync::mpsc}; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::{L1BatchNumber, MiniblockNumber, StorageKey, StorageValue, H256}; -mod metrics; -#[cfg(test)] -mod tests; - use self::metrics::{Method, ValuesUpdateStage, CACHE_METRICS, STORAGE_METRICS}; use crate::{ cache::{Cache, CacheValue}, ReadStorage, }; +mod metrics; +#[cfg(test)] +mod tests; + /// Type alias for smart contract source code cache. type FactoryDepsCache = Cache>; diff --git a/core/lib/state/src/postgres/tests.rs b/core/lib/state/src/postgres/tests.rs index 213360bb73d..6514da136d5 100644 --- a/core/lib/state/src/postgres/tests.rs +++ b/core/lib/state/src/postgres/tests.rs @@ -1,13 +1,12 @@ //! Tests for `PostgresStorage`. +use std::{collections::HashMap, mem}; + use rand::{ + rngs::StdRng, seq::{IteratorRandom, SliceRandom}, Rng, SeedableRng, }; - -use rand::rngs::StdRng; -use std::{collections::HashMap, mem}; - use zksync_dal::ConnectionPool; use zksync_types::StorageLog; diff --git a/core/lib/state/src/rocksdb/metrics.rs b/core/lib/state/src/rocksdb/metrics.rs index 81b035811d5..997f4b42ed3 100644 --- a/core/lib/state/src/rocksdb/metrics.rs +++ b/core/lib/state/src/rocksdb/metrics.rs @@ -1,9 +1,9 @@ //! Metrics for `RocksdbStorage`. 
-use vise::{Buckets, Gauge, Histogram, Metrics}; - use std::time::Duration; +use vise::{Buckets, Gauge, Histogram, Metrics}; + #[derive(Debug, Metrics)] #[metrics(prefix = "server_state_keeper_secondary_storage")] pub(super) struct RocksdbStorageMetrics { diff --git a/core/lib/state/src/rocksdb/mod.rs b/core/lib/state/src/rocksdb/mod.rs index e3748f3acfd..96d22727144 100644 --- a/core/lib/state/src/rocksdb/mod.rs +++ b/core/lib/state/src/rocksdb/mod.rs @@ -19,19 +19,19 @@ //! | Contracts | address (20 bytes) | `Vec` | Contract contents | //! | Factory deps | hash (32 bytes) | `Vec` | Bytecodes for new contracts that a certain contract may deploy. | -use itertools::{Either, Itertools}; use std::{collections::HashMap, convert::TryInto, mem, path::Path, time::Instant}; +use itertools::{Either, Itertools}; use zksync_dal::StorageProcessor; use zksync_storage::{db::NamedColumnFamily, RocksDB}; use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256, U256}; use zksync_utils::{h256_to_u256, u256_to_h256}; -mod metrics; - use self::metrics::METRICS; use crate::{InMemoryStorage, ReadStorage}; +mod metrics; + fn serialize_block_number(block_number: u32) -> [u8; 4] { block_number.to_le_bytes() } @@ -506,13 +506,13 @@ impl ReadStorage for RocksdbStorage { #[cfg(test)] mod tests { use tempfile::TempDir; + use zksync_dal::ConnectionPool; + use zksync_types::{MiniblockNumber, StorageLog}; use super::*; use crate::test_utils::{ create_l1_batch, create_miniblock, gen_storage_logs, prepare_postgres, }; - use zksync_dal::ConnectionPool; - use zksync_types::{MiniblockNumber, StorageLog}; #[tokio::test] async fn rocksdb_storage_basics() { diff --git a/core/lib/state/src/shadow_storage.rs b/core/lib/state/src/shadow_storage.rs index dea713ba40c..0a2bd0fa43e 100644 --- a/core/lib/state/src/shadow_storage.rs +++ b/core/lib/state/src/shadow_storage.rs @@ -1,7 +1,7 @@ use vise::{Counter, Metrics}; +use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256}; use 
crate::ReadStorage; -use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256}; #[derive(Debug, Metrics)] #[metrics(prefix = "shadow_storage")] diff --git a/core/lib/state/src/storage_view.rs b/core/lib/state/src/storage_view.rs index 8476be78aa9..543b41bc657 100644 --- a/core/lib/state/src/storage_view.rs +++ b/core/lib/state/src/storage_view.rs @@ -1,14 +1,15 @@ -use std::cell::RefCell; -use std::rc::Rc; use std::{ + cell::RefCell, collections::HashMap, fmt, mem, + rc::Rc, time::{Duration, Instant}, }; -use crate::{ReadStorage, WriteStorage}; use zksync_types::{witness_block_state::WitnessBlockState, StorageKey, StorageValue, H256}; +use crate::{ReadStorage, WriteStorage}; + /// Metrics for [`StorageView`]. #[derive(Debug, Default, Clone, Copy)] pub struct StorageViewMetrics { @@ -204,9 +205,10 @@ impl WriteStorage for StorageView { #[cfg(test)] mod test { + use zksync_types::{AccountTreeId, Address, H256}; + use super::*; use crate::InMemoryStorage; - use zksync_types::{AccountTreeId, Address, H256}; #[test] fn test_storage_access() { diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index b9a9d81fc54..3a100c50569 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -1,5 +1,7 @@ //! Shared utils for unit tests. 
+use std::ops; + use zksync_dal::StorageProcessor; use zksync_types::{ block::{BlockGasCount, L1BatchHeader, MiniblockHeader}, @@ -7,8 +9,6 @@ use zksync_types::{ StorageLog, H256, }; -use std::ops; - pub(crate) async fn prepare_postgres(conn: &mut StorageProcessor<'_>) { if conn.blocks_dal().is_genesis_needed().await.unwrap() { conn.protocol_versions_dal() diff --git a/core/lib/state/src/witness.rs b/core/lib/state/src/witness.rs index 72aab8bbe6e..50e2d9b5407 100644 --- a/core/lib/state/src/witness.rs +++ b/core/lib/state/src/witness.rs @@ -1,9 +1,8 @@ use vise::{Counter, Metrics}; +use zksync_types::{witness_block_state::WitnessBlockState, StorageKey, StorageValue, H256}; use crate::ReadStorage; -use zksync_types::{witness_block_state::WitnessBlockState, StorageKey, StorageValue, H256}; - #[derive(Debug, Metrics)] #[metrics(prefix = "witness_storage")] struct WitnessStorageMetrics { diff --git a/core/lib/storage/src/db.rs b/core/lib/storage/src/db.rs index 617d14d272d..f6237d49950 100644 --- a/core/lib/storage/src/db.rs +++ b/core/lib/storage/src/db.rs @@ -1,8 +1,3 @@ -use rocksdb::{ - properties, BlockBasedOptions, Cache, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, - Direction, IteratorMode, Options, PrefixRange, ReadOptions, WriteOptions, DB, -}; - use std::{ collections::{HashMap, HashSet}, ffi::CStr, @@ -15,6 +10,11 @@ use std::{ time::{Duration, Instant}, }; +use rocksdb::{ + properties, BlockBasedOptions, Cache, ColumnFamily, ColumnFamilyDescriptor, DBPinnableSlice, + Direction, IteratorMode, Options, PrefixRange, ReadOptions, WriteOptions, DB, +}; + use crate::metrics::{RocksdbLabels, RocksdbSizeMetrics, METRICS}; /// Number of active RocksDB instances used to determine if it's safe to exit current process. diff --git a/core/lib/storage/src/metrics.rs b/core/lib/storage/src/metrics.rs index 928e735a30c..47b0a52ee98 100644 --- a/core/lib/storage/src/metrics.rs +++ b/core/lib/storage/src/metrics.rs @@ -1,14 +1,14 @@ //! 
General-purpose RocksDB metrics. All metrics code in the crate should be in this module. -use once_cell::sync::Lazy; -use vise::{Buckets, Collector, Counter, EncodeLabelSet, Family, Gauge, Histogram, Metrics, Unit}; - use std::{ collections::HashMap, sync::{Mutex, Weak}, time::Duration, }; +use once_cell::sync::Lazy; +use vise::{Buckets, Collector, Counter, EncodeLabelSet, Family, Gauge, Histogram, Metrics, Unit}; + use crate::db::RocksDBInner; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] diff --git a/core/lib/test_account/src/lib.rs b/core/lib/test_account/src/lib.rs index 00764df6dc4..5a84c84f4f5 100644 --- a/core/lib/test_account/src/lib.rs +++ b/core/lib/test_account/src/lib.rs @@ -1,21 +1,21 @@ use ethabi::Token; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; -use zksync_contracts::{deployer_contract, load_contract}; +use zksync_contracts::{ + deployer_contract, load_contract, test_contracts::LoadnextContractExecutionParams, +}; +use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner, PrivateKeySigner}; use zksync_system_constants::{ CONTRACT_DEPLOYER_ADDRESS, MAX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, }; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::utils::deployed_address_create; use zksync_types::{ + fee::Fee, + l1::{OpProcessingType, PriorityQueueType}, + l2::L2Tx, + utils::deployed_address_create, Address, Execute, ExecuteTransactionCommon, L1TxCommonData, L2ChainId, Nonce, PackedEthSignature, PriorityOpId, Transaction, H256, U256, }; - -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner, PrivateKeySigner}; -use zksync_types::l1::{OpProcessingType, PriorityQueueType}; - use zksync_utils::bytecode::hash_bytecode; + pub const L1_TEST_GAS_PER_PUBDATA_BYTE: u32 = 800; const BASE_FEE: u64 = 2_000_000_000; diff --git a/core/lib/types/src/aggregated_operations.rs b/core/lib/types/src/aggregated_operations.rs 
index 8819460f269..006eca562e7 100644 --- a/core/lib/types/src/aggregated_operations.rs +++ b/core/lib/types/src/aggregated_operations.rs @@ -1,12 +1,12 @@ -use codegen::serialize_proof; - use std::{fmt, ops, str::FromStr}; +use codegen::serialize_proof; use serde::{Deserialize, Serialize}; -use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zkevm_test_harness::bellman::bn256::Bn256; -use zkevm_test_harness::bellman::plonk::better_better_cs::proof::Proof; -use zkevm_test_harness::witness::oracle::VmWitnessOracle; +use zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::{bn256::Bn256, plonk::better_better_cs::proof::Proof}, + witness::oracle::VmWitnessOracle, +}; use zksync_basic_types::{ethabi::Token, L1BatchNumber}; use crate::{commitment::L1BatchWithMetadata, ProtocolVersionId, U256}; diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 1ad54ce6d1a..73fdf199deb 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -1,20 +1,21 @@ use chrono::{DateTime, Utc}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use strum::Display; - use zksync_basic_types::{ web3::types::{Bytes, H160, H256, H64, U256, U64}, L1BatchNumber, }; use zksync_contracts::BaseSystemContractsHashes; -use crate::protocol_version::L1VerifierConfig; pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, }; -use crate::vm_trace::{Call, CallType}; -use crate::web3::types::{AccessList, Index, H2048}; -use crate::{Address, MiniblockNumber, ProtocolVersionId}; +use crate::{ + protocol_version::L1VerifierConfig, + vm_trace::{Call, CallType}, + web3::types::{AccessList, Index, H2048}, + Address, MiniblockNumber, ProtocolVersionId, +}; pub mod en; diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index e61a56d2c91..0aa9d06711c 100644 --- a/core/lib/types/src/block.rs +++ 
b/core/lib/types/src/block.rs @@ -1,10 +1,9 @@ -use serde::{Deserialize, Serialize}; -use zksync_system_constants::SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER; - use std::{fmt, ops}; +use serde::{Deserialize, Serialize}; use zksync_basic_types::{H2048, H256, U256}; use zksync_contracts::BaseSystemContractsHashes; +use zksync_system_constants::SYSTEM_BLOCK_INFO_BLOCK_NUMBER_MULTIPLIER; use crate::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, diff --git a/core/lib/types/src/circuit.rs b/core/lib/types/src/circuit.rs index 940b4ecf273..05f269c451e 100644 --- a/core/lib/types/src/circuit.rs +++ b/core/lib/types/src/circuit.rs @@ -1,5 +1,4 @@ -use zkevm_test_harness::geometry_config::get_geometry_config; -use zkevm_test_harness::toolset::GeometryConfig; +use zkevm_test_harness::{geometry_config::get_geometry_config, toolset::GeometryConfig}; pub const LEAF_SPLITTING_FACTOR: usize = 50; pub const NODE_SPLITTING_FACTOR: usize = 48; diff --git a/core/lib/types/src/commitment.rs b/core/lib/types/src/commitment.rs index 7925a37d92f..a0e3039a59a 100644 --- a/core/lib/types/src/commitment.rs +++ b/core/lib/types/src/commitment.rs @@ -6,15 +6,14 @@ //! required for the rollup to execute L1 batches, it's needed for the proof generation and the Ethereum //! transactions, thus the calculations are done separately and asynchronously. 
-use serde::{Deserialize, Serialize}; -use zksync_utils::u256_to_h256; - use std::{collections::HashMap, convert::TryFrom}; +use serde::{Deserialize, Serialize}; use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_system_constants::{ L2_TO_L1_LOGS_TREE_ROOT_KEY, STATE_DIFF_HASH_KEY, ZKPORTER_IS_AVAILABLE, }; +use zksync_utils::u256_to_h256; use crate::{ block::L1BatchHeader, @@ -666,12 +665,15 @@ mod tests { use serde::{Deserialize, Serialize}; use serde_with::serde_as; - use crate::commitment::{ - L1BatchAuxiliaryOutput, L1BatchCommitment, L1BatchMetaParameters, L1BatchPassThroughData, + use crate::{ + commitment::{ + L1BatchAuxiliaryOutput, L1BatchCommitment, L1BatchMetaParameters, + L1BatchPassThroughData, + }, + l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, + writes::{InitialStorageWrite, RepeatedStorageWrite}, + H256, U256, }; - use crate::l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}; - use crate::writes::{InitialStorageWrite, RepeatedStorageWrite}; - use crate::{H256, U256}; #[serde_as] #[derive(Debug, Serialize, Deserialize)] diff --git a/core/lib/types/src/contract_verification_api.rs b/core/lib/types/src/contract_verification_api.rs index a7feb5116f2..02a5bef727d 100644 --- a/core/lib/types/src/contract_verification_api.rs +++ b/core/lib/types/src/contract_verification_api.rs @@ -6,9 +6,8 @@ use serde::{ Deserialize, Serialize, }; -use crate::{Address, Bytes}; - pub use crate::Execute as ExecuteData; +use crate::{Address, Bytes}; #[derive(Debug, Clone, Serialize)] #[serde(tag = "codeFormat", content = "sourceCode")] diff --git a/core/lib/types/src/eth_sender.rs b/core/lib/types/src/eth_sender.rs index 847662eaeaa..6c8d268888e 100644 --- a/core/lib/types/src/eth_sender.rs +++ b/core/lib/types/src/eth_sender.rs @@ -1,5 +1,4 @@ -use crate::aggregated_operations::AggregatedActionType; -use crate::{Address, Nonce, H256}; +use crate::{aggregated_operations::AggregatedActionType, Address, Nonce, H256}; #[derive(Clone)] pub struct EthTx { diff --git 
a/core/lib/types/src/event.rs b/core/lib/types/src/event.rs index 285567c8911..01561912cce 100644 --- a/core/lib/types/src/event.rs +++ b/core/lib/types/src/event.rs @@ -1,3 +1,10 @@ +use std::fmt::Debug; + +use once_cell::sync::Lazy; +use serde::{Deserialize, Serialize}; +use zksync_basic_types::ethabi::Token; +use zksync_utils::{h256_to_account_address, u256_to_bytes_be, u256_to_h256}; + use crate::{ ethabi, l2_to_l1_log::L2ToL1Log, @@ -5,11 +12,6 @@ use crate::{ Address, L1BatchNumber, CONTRACT_DEPLOYER_ADDRESS, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, U256, }; -use once_cell::sync::Lazy; -use serde::{Deserialize, Serialize}; -use std::fmt::Debug; -use zksync_basic_types::ethabi::Token; -use zksync_utils::{h256_to_account_address, u256_to_bytes_be, u256_to_h256}; #[derive(Default, Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct VmEvent { @@ -348,13 +350,12 @@ mod tests { }; use zksync_utils::u256_to_h256; - use crate::VmEvent; - use super::{ extract_bytecode_publication_requests_from_l1_messenger, extract_l2tol1logs_from_l1_messenger, L1MessengerBytecodePublicationRequest, L1MessengerL2ToL1Log, }; + use crate::VmEvent; fn create_l2_to_l1_log_sent_value( tx_number: U256, diff --git a/core/lib/types/src/l1/mod.rs b/core/lib/types/src/l1/mod.rs index 75d7f71a883..16ce192bf8b 100644 --- a/core/lib/types/src/l1/mod.rs +++ b/core/lib/types/src/l1/mod.rs @@ -1,14 +1,15 @@ //! Definition of zkSync network priority operations: operations initiated from the L1. 
-use serde::{Deserialize, Serialize}; use std::convert::TryFrom; +use serde::{Deserialize, Serialize}; use zksync_basic_types::{ ethabi::{decode, ParamType, Token}, Address, L1BlockNumber, Log, PriorityOpId, H160, H256, U256, }; use zksync_utils::u256_to_account_address; +use super::Transaction; use crate::{ helpers::unix_timestamp_ms, l1::error::L1TxParseError, @@ -18,8 +19,6 @@ use crate::{ ExecuteTransactionCommon, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, }; -use super::Transaction; - pub mod error; #[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Copy)] diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 4c0632c5553..61a505909b2 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -2,26 +2,25 @@ use std::convert::TryFrom; use num_enum::TryFromPrimitive; use rlp::{Rlp, RlpStream}; +use serde::{Deserialize, Serialize}; use self::error::SignError; -use crate::transaction_request::PaymasterParams; -use crate::LEGACY_TX_TYPE; - use crate::{ - api, tx::primitives::PackedEthSignature, tx::Execute, web3::types::U64, Address, Bytes, - EIP712TypedStructure, Eip712Domain, ExecuteTransactionCommon, InputData, L2ChainId, Nonce, - StructBuilder, Transaction, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, EIP_712_TX_TYPE, H256, - PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, U256, + api, + api::TransactionRequest, + fee::{encoding_len, Fee}, + helpers::unix_timestamp_ms, + transaction_request::PaymasterParams, + tx::{primitives::PackedEthSignature, Execute}, + web3::types::U64, + Address, Bytes, EIP712TypedStructure, Eip712Domain, ExecuteTransactionCommon, InputData, + L2ChainId, Nonce, StructBuilder, Transaction, EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, + EIP_712_TX_TYPE, H256, LEGACY_TX_TYPE, PRIORITY_OPERATION_L2_TX_TYPE, PROTOCOL_UPGRADE_TX_TYPE, + U256, }; -use serde::{Deserialize, Serialize}; - pub mod error; -use crate::api::TransactionRequest; -use crate::fee::{encoding_len, Fee}; -use 
crate::helpers::unix_timestamp_ms; - #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, TryFromPrimitive)] #[repr(u32)] pub enum TransactionType { @@ -463,13 +462,12 @@ impl EIP712TypedStructure for L2Tx { mod tests { use zksync_basic_types::{Nonce, U256}; + use super::{L2Tx, TransactionType}; use crate::{ api::TransactionRequest, fee::Fee, transaction_request::PaymasterParams, Execute, L2TxCommonData, }; - use super::{L2Tx, TransactionType}; - #[test] fn test_correct_l2_tx_transaction_request_conversion() { // It is a random valid signature diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs index 670a2b22e81..335e6e740be 100644 --- a/core/lib/types/src/l2_to_l1_log.rs +++ b/core/lib/types/src/l2_to_l1_log.rs @@ -1,10 +1,10 @@ -use crate::commitment::SerializeCommitment; -use crate::{Address, H256}; use serde::{Deserialize, Serialize}; use zk_evm::reference_impls::event_sink::EventMessage; use zk_evm_1_4_0::reference_impls::event_sink::EventMessage as EventMessage_1_4_0; use zksync_utils::u256_to_h256; +use crate::{commitment::SerializeCommitment, Address, H256}; + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default, Eq)] pub struct L2ToL1Log { pub shard_id: u8, @@ -94,11 +94,12 @@ impl From for L2ToL1Log { #[cfg(test)] mod tests { - use super::L2ToL1Log; use zksync_basic_types::U256; use zksync_system_constants::L1_MESSENGER_ADDRESS; use zksync_utils::u256_to_h256; + use super::L2ToL1Log; + #[test] fn l2_to_l1_log_to_bytes() { let expected_log_bytes = [ diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 4715a2f86da..4574824b37f 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -5,33 +5,32 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] +use std::{fmt, fmt::Debug}; + use fee::encoding_len; use serde::{Deserialize, Serialize}; -use std::{fmt, fmt::Debug}; pub use crate::{Nonce, H256, U256, U64}; pub type SerialId = u64; -use 
crate::l2::TransactionType; -use crate::protocol_version::ProtocolUpgradeTxCommonData; pub use event::{VmEvent, VmEventGroupKey}; pub use l1::L1TxCommonData; pub use l2::L2TxCommonData; pub use protocol_version::{ProtocolUpgrade, ProtocolVersion, ProtocolVersionId}; pub use storage::*; -pub use tx::primitives::*; -pub use tx::Execute; +pub use tx::{primitives::*, Execute}; pub use vm_version::VmVersion; pub use zk_evm::{ aux_structures::{LogQuery, Timestamp}, reference_impls::event_sink::EventMessage, zkevm_opcode_defs::FarCallOpcode, }; - pub use zkevm_test_harness; pub use zksync_basic_types::*; +use crate::{l2::TransactionType, protocol_version::ProtocolUpgradeTxCommonData}; + pub mod aggregated_operations; pub mod block; pub mod circuit; diff --git a/core/lib/types/src/priority_op_onchain_data.rs b/core/lib/types/src/priority_op_onchain_data.rs index a729aa27bf4..559bb996388 100644 --- a/core/lib/types/src/priority_op_onchain_data.rs +++ b/core/lib/types/src/priority_op_onchain_data.rs @@ -1,7 +1,7 @@ -use serde::{Deserialize, Serialize}; - use std::cmp::Ordering; +use serde::{Deserialize, Serialize}; + use crate::{ l1::{OpProcessingType, PriorityQueueType}, H256, U256, diff --git a/core/lib/types/src/proofs.rs b/core/lib/types/src/proofs.rs index b28b81b79fb..0067552c829 100644 --- a/core/lib/types/src/proofs.rs +++ b/core/lib/types/src/proofs.rs @@ -1,25 +1,25 @@ -use std::convert::{TryFrom, TryInto}; -use std::fmt::Debug; -use std::net::IpAddr; -use std::ops::Add; -use std::str::FromStr; +use std::{ + convert::{TryFrom, TryInto}, + fmt::Debug, + net::IpAddr, + ops::Add, + str::FromStr, +}; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; -use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zkevm_test_harness::bellman::bn256::Bn256; -use zkevm_test_harness::bellman::plonk::better_better_cs::proof::Proof; -use 
zkevm_test_harness::encodings::{recursion_request::RecursionRequest, QueueSimulator}; -use zkevm_test_harness::witness::full_block_artifact::{ - BlockBasicCircuits, BlockBasicCircuitsPublicInputs, -}; -use zkevm_test_harness::witness::oracle::VmWitnessOracle; use zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::{bn256::Bn256, plonk::better_better_cs::proof::Proof}, + encodings::{recursion_request::RecursionRequest, QueueSimulator}, + witness::{ + full_block_artifact::{BlockBasicCircuits, BlockBasicCircuitsPublicInputs}, + oracle::VmWitnessOracle, + }, LeafAggregationOutputDataWitness, NodeAggregationOutputDataWitness, SchedulerCircuitInstanceWitness, }; - use zksync_basic_types::{L1BatchNumber, H256, U256}; const HASH_LEN: usize = H256::len_bytes(); diff --git a/core/lib/types/src/protocol_version.rs b/core/lib/types/src/protocol_version.rs index 09a722c72cd..b855cb92fdf 100644 --- a/core/lib/types/src/protocol_version.rs +++ b/core/lib/types/src/protocol_version.rs @@ -1,3 +1,10 @@ +use std::convert::{TryFrom, TryInto}; + +use num_enum::TryFromPrimitive; +use serde::{Deserialize, Serialize}; +use zksync_contracts::BaseSystemContractsHashes; +use zksync_utils::u256_to_account_address; + use crate::{ ethabi::{decode, encode, ParamType, Token}, helpers::unix_timestamp_ms, @@ -8,11 +15,6 @@ use crate::{ Address, Execute, ExecuteTransactionCommon, Log, Transaction, TransactionType, VmVersion, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, }; -use num_enum::TryFromPrimitive; -use serde::{Deserialize, Serialize}; -use std::convert::{TryFrom, TryInto}; -use zksync_contracts::BaseSystemContractsHashes; -use zksync_utils::u256_to_account_address; #[repr(u16)] #[derive( diff --git a/core/lib/types/src/prover_server_api/mod.rs b/core/lib/types/src/prover_server_api/mod.rs index dc226f11d26..fdbbd57624f 100644 --- a/core/lib/types/src/prover_server_api/mod.rs +++ b/core/lib/types/src/prover_server_api/mod.rs @@ -1,10 +1,11 @@ use 
serde::{Deserialize, Serialize}; - use zksync_basic_types::L1BatchNumber; -use crate::aggregated_operations::L1BatchProofForL1; -use crate::proofs::PrepareBasicCircuitsJob; -use crate::protocol_version::{FriProtocolVersionId, L1VerifierConfig}; +use crate::{ + aggregated_operations::L1BatchProofForL1, + proofs::PrepareBasicCircuitsJob, + protocol_version::{FriProtocolVersionId, L1VerifierConfig}, +}; #[derive(Debug, Serialize, Deserialize)] pub struct ProofGenerationData { diff --git a/core/lib/types/src/storage/log.rs b/core/lib/types/src/storage/log.rs index aa295a2bade..11756f7175a 100644 --- a/core/lib/types/src/storage/log.rs +++ b/core/lib/types/src/storage/log.rs @@ -1,7 +1,6 @@ -use serde::{Deserialize, Serialize}; - use std::mem; +use serde::{Deserialize, Serialize}; use zk_evm::aux_structures::{LogQuery, Timestamp}; use zksync_basic_types::AccountTreeId; use zksync_utils::u256_to_h256; diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs index 2ba57a9aea0..63ee1ba1c56 100644 --- a/core/lib/types/src/storage/witness_block_state.rs +++ b/core/lib/types/src/storage/witness_block_state.rs @@ -1,7 +1,9 @@ -use crate::{StorageKey, StorageValue}; -use serde::{Deserialize, Serialize}; use std::collections::HashMap; +use serde::{Deserialize, Serialize}; + +use crate::{StorageKey, StorageValue}; + /// Storage data used during Witness Generation. 
#[derive(Debug, Default, Serialize, Deserialize)] pub struct WitnessBlockState { diff --git a/core/lib/types/src/storage/writes/compression.rs b/core/lib/types/src/storage/writes/compression.rs index a325801b8a8..cd0a174fa76 100644 --- a/core/lib/types/src/storage/writes/compression.rs +++ b/core/lib/types/src/storage/writes/compression.rs @@ -210,9 +210,10 @@ pub fn compress_with_best_strategy(prev_value: U256, new_value: U256) -> Vec #[cfg(test)] mod tests { - use super::*; use std::ops::{Add, BitAnd, Shr, Sub}; + use super::*; + #[test] fn test_compress_addition() { let initial_val = U256::from(255438218); diff --git a/core/lib/types/src/storage/writes/mod.rs b/core/lib/types/src/storage/writes/mod.rs index 54393f41785..22400964bf4 100644 --- a/core/lib/types/src/storage/writes/mod.rs +++ b/core/lib/types/src/storage/writes/mod.rs @@ -1,10 +1,10 @@ use std::convert::TryInto; -use crate::H256; use serde::{Deserialize, Serialize}; use zksync_basic_types::{Address, U256}; pub(crate) use self::compression::{compress_with_best_strategy, COMPRESSION_VERSION_NUMBER}; +use crate::H256; pub mod compression; @@ -184,12 +184,13 @@ fn prepend_header(compressed_state_diffs: Vec) -> Vec { #[cfg(test)] mod tests { - use std::ops::{Add, Sub}; - use std::str::FromStr; + use std::{ + ops::{Add, Sub}, + str::FromStr, + }; use super::*; - use crate::commitment::serialize_commitments; - use crate::{H256, U256}; + use crate::{commitment::serialize_commitments, H256, U256}; #[test] fn calculate_hash_for_storage_writes() { diff --git a/core/lib/types/src/storage_writes_deduplicator.rs b/core/lib/types/src/storage_writes_deduplicator.rs index 42ce67e6375..14a5413ee6a 100644 --- a/core/lib/types/src/storage_writes_deduplicator.rs +++ b/core/lib/types/src/storage_writes_deduplicator.rs @@ -2,9 +2,11 @@ use std::collections::HashMap; use zksync_utils::u256_to_h256; -use crate::tx::tx_execution_info::DeduplicatedWritesMetrics; -use crate::writes::compression::compress_with_best_strategy; 
-use crate::{AccountTreeId, StorageKey, StorageLogQuery, StorageLogQueryType, U256}; +use crate::{ + tx::tx_execution_info::DeduplicatedWritesMetrics, + writes::compression::compress_with_best_strategy, AccountTreeId, StorageKey, StorageLogQuery, + StorageLogQueryType, U256, +}; #[derive(Debug, Clone, Copy, PartialEq, Default)] pub struct ModifiedSlot { @@ -219,9 +221,8 @@ impl StorageWritesDeduplicator { mod tests { use zk_evm::aux_structures::{LogQuery, Timestamp}; - use crate::H160; - use super::*; + use crate::H160; fn storage_log_query( key: U256, diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs index 430d8d4701d..7e896b83bc2 100644 --- a/core/lib/types/src/system_contracts.rs +++ b/core/lib/types/src/system_contracts.rs @@ -1,5 +1,6 @@ use std::path::PathBuf; +use once_cell::sync::Lazy; use zksync_basic_types::{AccountTreeId, Address, U256}; use zksync_contracts::{read_sys_contract_bytecode, ContractLanguage, SystemContractsRepo}; use zksync_system_constants::{ @@ -13,7 +14,6 @@ use crate::{ L1_MESSENGER_ADDRESS, L2_ETH_TOKEN_ADDRESS, MSG_VALUE_SIMULATOR_ADDRESS, NONCE_HOLDER_ADDRESS, SHA256_PRECOMPILE_ADDRESS, SYSTEM_CONTEXT_ADDRESS, }; -use once_cell::sync::Lazy; // Note, that in the NONCE_HOLDER_ADDRESS's storage the nonces of accounts // are stored in the following form: diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 85194902e32..e66c2495afe 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -1,17 +1,15 @@ -// Built-in uses use std::convert::{TryFrom, TryInto}; -// External uses use rlp::{DecoderError, Rlp, RlpStream}; use serde::{Deserialize, Serialize}; use thiserror::Error; use zksync_basic_types::H256; - use zksync_system_constants::{MAX_GAS_PER_PUBDATA_BYTE, USED_BOOTLOADER_MEMORY_BYTES}; -use zksync_utils::bytecode::{hash_bytecode, validate_bytecode, InvalidBytecodeError}; -use 
zksync_utils::{concat_and_hash, u256_to_h256}; +use zksync_utils::{ + bytecode::{hash_bytecode, validate_bytecode, InvalidBytecodeError}, + concat_and_hash, u256_to_h256, +}; -// Local uses use super::{EIP_1559_TX_TYPE, EIP_2930_TX_TYPE, EIP_712_TX_TYPE}; use crate::{ fee::Fee, @@ -947,13 +945,14 @@ pub fn validate_factory_deps( #[cfg(test)] mod tests { + use secp256k1::SecretKey; + use super::*; use crate::web3::{ api::Namespace, transports::test::TestTransport, types::{TransactionParameters, H256, U256}, }; - use secp256k1::SecretKey; #[tokio::test] async fn decode_real_tx() { diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index e33dff694fe..50340230cb9 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -1,8 +1,9 @@ -use crate::{web3::ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; use zksync_utils::ZeroPrefixHexSerde; +use crate::{web3::ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; + /// `Execute` transaction executes a previously deployed smart contract in the L2 rollup. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "camelCase")] diff --git a/core/lib/types/src/tx/mod.rs b/core/lib/types/src/tx/mod.rs index 71f188f3217..1371fa74ee7 100644 --- a/core/lib/types/src/tx/mod.rs +++ b/core/lib/types/src/tx/mod.rs @@ -5,19 +5,18 @@ //! with metadata (such as fees and/or signatures) for L1 and L2 separately. 
use std::fmt::Debug; + use zksync_basic_types::{Address, H256}; use zksync_utils::bytecode::CompressedBytecodeInfo; +use self::tx_execution_info::TxExecutionStatus; +pub use self::{execute::Execute, tx_execution_info::ExecutionMetrics}; +use crate::{vm_trace::Call, Transaction}; + pub mod execute; pub mod primitives; pub mod tx_execution_info; -pub use self::execute::Execute; -use crate::vm_trace::Call; -use crate::Transaction; -pub use tx_execution_info::ExecutionMetrics; -use tx_execution_info::TxExecutionStatus; - #[derive(Debug, Clone, PartialEq)] pub struct TransactionExecutionResult { pub transaction: Transaction, diff --git a/core/lib/types/src/tx/primitives/eip712_signature/member_types.rs b/core/lib/types/src/tx/primitives/eip712_signature/member_types.rs index cc4906ef7e8..aecece572dd 100644 --- a/core/lib/types/src/tx/primitives/eip712_signature/member_types.rs +++ b/core/lib/types/src/tx/primitives/eip712_signature/member_types.rs @@ -1,9 +1,10 @@ -use crate::tx::primitives::eip712_signature::typed_structure::{ - EncodedStructureMember, StructMember, -}; -use crate::web3::signing::keccak256; use zksync_basic_types::{Address, H256, U256}; +use crate::{ + tx::primitives::eip712_signature::typed_structure::{EncodedStructureMember, StructMember}, + web3::signing::keccak256, +}; + impl StructMember for String { const MEMBER_TYPE: &'static str = "string"; const IS_REFERENCE_TYPE: bool = false; diff --git a/core/lib/types/src/tx/primitives/eip712_signature/struct_builder.rs b/core/lib/types/src/tx/primitives/eip712_signature/struct_builder.rs index f6189f504df..2093042b9f7 100644 --- a/core/lib/types/src/tx/primitives/eip712_signature/struct_builder.rs +++ b/core/lib/types/src/tx/primitives/eip712_signature/struct_builder.rs @@ -1,5 +1,6 @@ -use serde_json::Value; use std::collections::{BTreeMap, VecDeque}; + +use serde_json::Value; use zksync_basic_types::H256; use crate::tx::primitives::eip712_signature::typed_structure::{ diff --git 
a/core/lib/types/src/tx/primitives/eip712_signature/tests.rs b/core/lib/types/src/tx/primitives/eip712_signature/tests.rs index 70ae415531c..8bfd14b45c4 100644 --- a/core/lib/types/src/tx/primitives/eip712_signature/tests.rs +++ b/core/lib/types/src/tx/primitives/eip712_signature/tests.rs @@ -1,13 +1,20 @@ -use crate::tx::primitives::eip712_signature::{ - struct_builder::StructBuilder, - typed_structure::{EIP712TypedStructure, Eip712Domain}, -}; -use crate::tx::primitives::{eip712_signature::utils::get_eip712_json, PackedEthSignature}; -use crate::web3::signing::keccak256; -use serde::Serialize; use std::str::FromStr; + +use serde::Serialize; use zksync_basic_types::{Address, H256, U256}; +use crate::{ + tx::primitives::{ + eip712_signature::{ + struct_builder::StructBuilder, + typed_structure::{EIP712TypedStructure, Eip712Domain}, + utils::get_eip712_json, + }, + PackedEthSignature, + }, + web3::signing::keccak256, +}; + #[derive(Clone, Serialize)] struct Person { name: String, diff --git a/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs b/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs index 28e9d27f0a6..daf1c9698ee 100644 --- a/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs +++ b/core/lib/types/src/tx/primitives/eip712_signature/typed_structure.rs @@ -1,11 +1,11 @@ -use crate::web3::signing::keccak256; use serde::{Deserialize, Serialize}; use serde_json::Value; -use crate::tx::primitives::eip712_signature::struct_builder::{ - EncodeBuilder, StructBuilder, TypeBuilder, +use crate::{ + tx::primitives::eip712_signature::struct_builder::{EncodeBuilder, StructBuilder, TypeBuilder}, + web3::signing::keccak256, + L2ChainId, H256, U256, }; -use crate::{L2ChainId, H256, U256}; #[derive(Debug, Clone)] pub struct EncodedStructureMember { diff --git a/core/lib/types/src/tx/primitives/eip712_signature/utils.rs b/core/lib/types/src/tx/primitives/eip712_signature/utils.rs index 57db7894321..f338c017e2b 100644 --- 
a/core/lib/types/src/tx/primitives/eip712_signature/utils.rs +++ b/core/lib/types/src/tx/primitives/eip712_signature/utils.rs @@ -1,7 +1,8 @@ +use serde_json::{Map, Value}; + use crate::tx::primitives::eip712_signature::typed_structure::{ EIP712TypedStructure, Eip712Domain, }; -use serde_json::{Map, Value}; /// Formats the data that needs to be signed in json according to the standard eip-712. /// Compatible with `eth_signTypedData` RPC call. diff --git a/core/lib/types/src/tx/primitives/packed_eth_signature.rs b/core/lib/types/src/tx/primitives/packed_eth_signature.rs index 32564829ad8..c165f6a36b2 100644 --- a/core/lib/types/src/tx/primitives/packed_eth_signature.rs +++ b/core/lib/types/src/tx/primitives/packed_eth_signature.rs @@ -1,6 +1,3 @@ -use crate::tx::primitives::eip712_signature::typed_structure::{ - EIP712TypedStructure, Eip712Domain, -}; use ethereum_types_old::H256 as ParityCryptoH256; use parity_crypto::{ publickey::{ @@ -14,6 +11,10 @@ use thiserror::Error; use zksync_basic_types::{Address, H256}; use zksync_utils::ZeroPrefixHexSerde; +use crate::tx::primitives::eip712_signature::typed_structure::{ + EIP712TypedStructure, Eip712Domain, +}; + /// Struct used for working with Ethereum signatures created using eth_sign (using geth, ethers.js, etc) /// message is serialized as 65 bytes long `0x` prefixed string. 
/// diff --git a/core/lib/types/src/tx/tx_execution_info.rs b/core/lib/types/src/tx/tx_execution_info.rs index 0f72172f529..d19757ee970 100644 --- a/core/lib/types/src/tx/tx_execution_info.rs +++ b/core/lib/types/src/tx/tx_execution_info.rs @@ -1,14 +1,15 @@ -use crate::fee::TransactionExecutionMetrics; -use crate::l2_to_l1_log::L2ToL1Log; +use std::ops::{Add, AddAssign}; + use crate::{ commitment::SerializeCommitment, + fee::TransactionExecutionMetrics, + l2_to_l1_log::L2ToL1Log, writes::{ InitialStorageWrite, RepeatedStorageWrite, BYTES_PER_DERIVED_KEY, BYTES_PER_ENUMERATION_INDEX, }, ProtocolVersionId, }; -use std::ops::{Add, AddAssign}; #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum TxExecutionStatus { diff --git a/core/lib/types/src/utils.rs b/core/lib/types/src/utils.rs index 617179c4936..7828f2d1262 100644 --- a/core/lib/types/src/utils.rs +++ b/core/lib/types/src/utils.rs @@ -1,11 +1,11 @@ -use crate::system_contracts::DEPLOYMENT_NONCE_INCREMENT; -use crate::L2_ETH_TOKEN_ADDRESS; -use crate::{web3::signing::keccak256, AccountTreeId, StorageKey, U256}; - use zksync_basic_types::{Address, H256}; - use zksync_utils::{address_to_h256, u256_to_h256}; +use crate::{ + system_contracts::DEPLOYMENT_NONCE_INCREMENT, web3::signing::keccak256, AccountTreeId, + StorageKey, L2_ETH_TOKEN_ADDRESS, U256, +}; + /// Transforms the *full* account nonce into an *account* nonce. 
/// Full nonce is a composite one: it includes both account nonce (number of transactions /// initiated by the account) and deployer nonce (number of smart contracts deployed by the @@ -79,10 +79,11 @@ pub fn deployed_address_create(sender: Address, deploy_nonce: U256) -> Address { #[cfg(test)] mod tests { + use std::str::FromStr; + use crate::{ utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, }; - use std::str::FromStr; #[test] fn test_storage_key_for_eth_token() { diff --git a/core/lib/types/src/vk_transform.rs b/core/lib/types/src/vk_transform.rs index dfa022fb7c1..b19fdaef692 100644 --- a/core/lib/types/src/vk_transform.rs +++ b/core/lib/types/src/vk_transform.rs @@ -1,5 +1,5 @@ -use crate::{ethabi::Token, H256}; use std::str::FromStr; + use zkevm_test_harness::{ abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::{ @@ -14,6 +14,8 @@ use zkevm_test_harness::{ }, }; +use crate::{ethabi::Token, H256}; + /// Calculates commitment for vk from L1 verifier contract. 
pub fn l1_vk_commitment(token: Token) -> H256 { let vk = vk_from_token(token); diff --git a/core/lib/types/src/vm_trace.rs b/core/lib/types/src/vm_trace.rs index 6b37848dc5a..d3a94d51fa5 100644 --- a/core/lib/types/src/vm_trace.rs +++ b/core/lib/types/src/vm_trace.rs @@ -1,12 +1,16 @@ -use crate::{Address, U256}; +use std::{ + collections::{HashMap, HashSet}, + fmt, + fmt::Display, +}; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::collections::{HashMap, HashSet}; -use std::fmt; -use std::fmt::Display; use zk_evm::zkevm_opcode_defs::FarCallOpcode; use zksync_system_constants::BOOTLOADER_ADDRESS; use zksync_utils::u256_to_h256; +use crate::{Address, U256}; + #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub enum VmTrace { ExecutionTrace(VmExecutionTrace), diff --git a/core/lib/utils/src/bytecode.rs b/core/lib/utils/src/bytecode.rs index 4a9d1cd2475..c533642d240 100644 --- a/core/lib/utils/src/bytecode.rs +++ b/core/lib/utils/src/bytecode.rs @@ -1,8 +1,10 @@ +use std::{collections::HashMap, convert::TryInto}; + use itertools::Itertools; -use std::collections::HashMap; -use std::convert::TryInto; -use zksync_basic_types::ethabi::{encode, Token}; -use zksync_basic_types::H256; +use zksync_basic_types::{ + ethabi::{encode, Token}, + H256, +}; use crate::bytes_to_chunks; diff --git a/core/lib/utils/src/convert.rs b/core/lib/utils/src/convert.rs index 973b28cc613..cc4699448e6 100644 --- a/core/lib/utils/src/convert.rs +++ b/core/lib/utils/src/convert.rs @@ -1,3 +1,5 @@ +use std::convert::TryInto; + use bigdecimal::BigDecimal; use num::{ bigint::ToBigInt, @@ -5,7 +7,6 @@ use num::{ traits::{sign::Signed, Pow}, BigUint, }; -use std::convert::TryInto; use zksync_basic_types::{Address, H256, U256}; pub fn u256_to_big_decimal(value: U256) -> BigDecimal { @@ -170,10 +171,12 @@ pub fn u256_to_bytes_be(value: &U256) -> Vec { #[cfg(test)] mod test { - use super::*; - use num::BigInt; use std::str::FromStr; + use num::BigInt; + + use 
super::*; + #[test] fn test_ratio_to_big_decimal() { let ratio = Ratio::from_integer(BigUint::from(0u32)); diff --git a/core/lib/utils/src/http_with_retries.rs b/core/lib/utils/src/http_with_retries.rs index 61742769fd6..15973ee6b2a 100644 --- a/core/lib/utils/src/http_with_retries.rs +++ b/core/lib/utils/src/http_with_retries.rs @@ -1,5 +1,4 @@ -use reqwest::header::HeaderMap; -use reqwest::{Client, Error, Method, Response}; +use reqwest::{header::HeaderMap, Client, Error, Method, Response}; use tokio::time::{sleep, Duration}; #[derive(Debug)] diff --git a/core/lib/utils/src/misc.rs b/core/lib/utils/src/misc.rs index 468e953f83b..887413a6f45 100644 --- a/core/lib/utils/src/misc.rs +++ b/core/lib/utils/src/misc.rs @@ -1,5 +1,4 @@ -use zksync_basic_types::web3::signing::keccak256; -use zksync_basic_types::{H256, U256}; +use zksync_basic_types::{web3::signing::keccak256, H256, U256}; pub const fn ceil_div(a: u64, b: u64) -> u64 { if a == 0 { diff --git a/core/lib/vlog/src/lib.rs b/core/lib/vlog/src/lib.rs index 2c6702ede96..1ea573148c4 100644 --- a/core/lib/vlog/src/lib.rs +++ b/core/lib/vlog/src/lib.rs @@ -1,18 +1,14 @@ //! This module contains the observability subsystem. //! It is responsible for providing a centralized interface for consistent observability configuration. -use std::backtrace::Backtrace; -use std::borrow::Cow; -use std::panic::PanicInfo; +use std::{backtrace::Backtrace, borrow::Cow, panic::PanicInfo}; +// Temporary re-export of `sentry::capture_message` aiming to simplify the transition from `vlog` to using +// crates directly. +pub use sentry::{capture_message, Level as AlertLevel}; use sentry::{types::Dsn, ClientInitGuard}; use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt}; -/// Temporary re-export of `sentry::capture_message` aiming to simplify the transition from `vlog` to using -/// crates directly. 
-pub use sentry::capture_message; -pub use sentry::Level as AlertLevel; - /// Specifies the format of the logs in stdout. #[derive(Debug, Clone, Copy, Default)] pub enum LogFormat { diff --git a/core/lib/web3_decl/src/namespaces/debug.rs b/core/lib/web3_decl/src/namespaces/debug.rs index 7db44f27527..02e75e946b7 100644 --- a/core/lib/web3_decl/src/namespaces/debug.rs +++ b/core/lib/web3_decl/src/namespaces/debug.rs @@ -1,8 +1,10 @@ -use crate::types::H256; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use zksync_types::{ + api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, + transaction_request::CallRequest, +}; -use zksync_types::api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}; -use zksync_types::transaction_request::CallRequest; +use crate::types::H256; #[cfg_attr( all(feature = "client", feature = "server"), diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs index f92f2a56239..861478bf4bf 100644 --- a/core/lib/web3_decl/src/namespaces/eth.rs +++ b/core/lib/web3_decl/src/namespaces/eth.rs @@ -1,20 +1,14 @@ -// External uses use jsonrpsee::{core::RpcResult, proc_macros::rpc}; - -// Workspace uses -use crate::types::{ - Block, Bytes, FeeHistory, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, - U256, U64, -}; - use zksync_types::{ - api::Transaction, - api::{BlockIdVariant, BlockNumber, TransactionVariant}, + api::{BlockIdVariant, BlockNumber, Transaction, TransactionVariant}, transaction_request::CallRequest, Address, H256, }; -// Local uses +use crate::types::{ + Block, Bytes, FeeHistory, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, + U256, U64, +}; #[cfg_attr( all(feature = "client", feature = "server"), diff --git a/core/lib/web3_decl/src/namespaces/mod.rs b/core/lib/web3_decl/src/namespaces/mod.rs index 996cb27267c..26e610c1644 100644 --- a/core/lib/web3_decl/src/namespaces/mod.rs +++ b/core/lib/web3_decl/src/namespaces/mod.rs 
@@ -6,16 +6,13 @@ pub mod net; pub mod web3; pub mod zks; -// Server trait re-exports. -#[cfg(feature = "server")] -pub use self::{ - debug::DebugNamespaceServer, en::EnNamespaceServer, eth::EthNamespaceServer, - net::NetNamespaceServer, web3::Web3NamespaceServer, zks::ZksNamespaceServer, -}; - -// Client trait re-exports. #[cfg(feature = "client")] pub use self::{ debug::DebugNamespaceClient, en::EnNamespaceClient, eth::EthNamespaceClient, net::NetNamespaceClient, web3::Web3NamespaceClient, zks::ZksNamespaceClient, }; +#[cfg(feature = "server")] +pub use self::{ + debug::DebugNamespaceServer, en::EnNamespaceServer, eth::EthNamespaceServer, + net::NetNamespaceServer, web3::Web3NamespaceServer, zks::ZksNamespaceServer, +}; diff --git a/core/lib/web3_decl/src/namespaces/zks.rs b/core/lib/web3_decl/src/namespaces/zks.rs index d3bf43b9a97..e7ee1dffa9f 100644 --- a/core/lib/web3_decl/src/namespaces/zks.rs +++ b/core/lib/web3_decl/src/namespaces/zks.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use bigdecimal::BigDecimal; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; - use zksync_types::{ api::{ BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, Proof, ProtocolVersion, diff --git a/core/lib/web3_decl/src/types.rs b/core/lib/web3_decl/src/types.rs index 7abe34637d6..61a3e10397c 100644 --- a/core/lib/web3_decl/src/types.rs +++ b/core/lib/web3_decl/src/types.rs @@ -5,16 +5,15 @@ //! //! These "extensions" are required to provide more zkSync-specific information while remaining Web3-compilant. 
-use itertools::unfold; -use rlp::Rlp; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; - use core::{ convert::{TryFrom, TryInto}, fmt, marker::PhantomData, }; +use itertools::unfold; +use rlp::Rlp; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; pub use zksync_types::{ api::{Block, BlockNumber, Log, TransactionReceipt, TransactionRequest}, vm_trace::{ContractSourceDebugInfo, VmDebugTrace, VmExecutionStep}, @@ -350,9 +349,10 @@ pub enum PubSubResult { #[cfg(test)] mod tests { - use super::*; use zksync_types::api::{BlockId, BlockIdVariant}; + use super::*; + #[test] fn get_block_number_serde() { let test_vector = &[ diff --git a/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs b/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs index 5f9730d458e..553f6f2ad45 100644 --- a/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs +++ b/core/lib/zksync_core/src/api_server/contract_verification/api_decl.rs @@ -1,5 +1,4 @@ use actix_web::web; - use zksync_dal::connection::ConnectionPool; #[derive(Debug, Clone)] diff --git a/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs b/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs index d107483db01..81c9f7e264c 100644 --- a/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs +++ b/core/lib/zksync_core/src/api_server/contract_verification/api_impl.rs @@ -3,7 +3,6 @@ use actix_web::{ HttpResponse, Result as ActixResult, }; use serde::Serialize; - use zksync_types::{contract_verification_api::VerificationIncomingRequest, Address}; use super::{api_decl::RestApi, metrics::METRICS}; diff --git a/core/lib/zksync_core/src/api_server/contract_verification/metrics.rs b/core/lib/zksync_core/src/api_server/contract_verification/metrics.rs index 1e114f68ff6..4947e48b094 100644 --- a/core/lib/zksync_core/src/api_server/contract_verification/metrics.rs +++ 
b/core/lib/zksync_core/src/api_server/contract_verification/metrics.rs @@ -1,9 +1,9 @@ //! Metrics for contract verification. -use vise::{Buckets, Histogram, LabeledFamily, Metrics}; - use std::time::Duration; +use vise::{Buckets, Histogram, LabeledFamily, Metrics}; + #[derive(Debug, Metrics)] #[metrics(prefix = "api_contract_verification")] pub(super) struct ContractVerificationMetrics { diff --git a/core/lib/zksync_core/src/api_server/contract_verification/mod.rs b/core/lib/zksync_core/src/api_server/contract_verification/mod.rs index 5b59fafa917..a1bf980a49a 100644 --- a/core/lib/zksync_core/src/api_server/contract_verification/mod.rs +++ b/core/lib/zksync_core/src/api_server/contract_verification/mod.rs @@ -1,22 +1,18 @@ use std::{net::SocketAddr, time::Duration}; use actix_cors::Cors; -use actix_web::{ - dev::Server, - {web, App, HttpResponse, HttpServer}, -}; +use actix_web::{dev::Server, web, App, HttpResponse, HttpServer}; use tokio::{sync::watch, task::JoinHandle}; - use zksync_config::configs::api::ContractVerificationApiConfig; use zksync_dal::connection::ConnectionPool; use zksync_utils::panic_notify::{spawn_panic_handler, ThreadPanicNotify}; +use self::api_decl::RestApi; + mod api_decl; mod api_impl; mod metrics; -use self::api_decl::RestApi; - fn start_server(api: RestApi, bind_to: SocketAddr, threads: usize) -> Server { HttpServer::new(move || { let api = api.clone(); diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs index 36ede77abdb..f0488f71190 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/apply.rs @@ -8,11 +8,11 @@ use std::time::{Duration, Instant}; -use multivm::vm_latest::{constants::BLOCK_GAS_LIMIT, HistoryDisabled}; - -use multivm::interface::VmInterface; -use multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv}; -use multivm::VmInstance; +use multivm::{ + 
interface::{L1BatchEnv, L2BlockEnv, SystemEnv, VmInterface}, + vm_latest::{constants::BLOCK_GAS_LIMIT, HistoryDisabled}, + VmInstance, +}; use zksync_dal::{ConnectionPool, SqlxError, StorageProcessor}; use zksync_state::{PostgresStorage, ReadStorage, StorageView, WriteStorage}; use zksync_system_constants::{ diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs index 59e874ade90..c5928cfd847 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/error.rs @@ -1,6 +1,5 @@ -use thiserror::Error; - use multivm::interface::{Halt, TxRevertReason}; +use thiserror::Error; #[derive(Debug, Error)] pub(crate) enum SandboxExecutionError { diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs index 9621adae2d8..c900dd4e5a5 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs @@ -1,13 +1,13 @@ //! Implementation of "executing" methods, e.g. `eth_call`. 
+use multivm::{ + interface::{TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface}, + tracers::StorageInvocations, + vm_latest::constants::ETH_CALL_GAS_LIMIT, + MultiVMTracer, +}; use tracing::{span, Level}; - -use multivm::interface::{TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface}; -use multivm::tracers::StorageInvocations; -use multivm::vm_latest::constants::ETH_CALL_GAS_LIMIT; -use multivm::MultiVMTracer; use zksync_dal::ConnectionPool; - use zksync_types::{ fee::TransactionExecutionMetrics, l2::L2Tx, ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256, diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs index 67feced9d5e..461be71c089 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/mod.rs @@ -1,21 +1,13 @@ -use std::sync::Arc; -use std::time::Duration; -use tokio::runtime::Handle; +use std::{sync::Arc, time::Duration}; +use multivm::vm_latest::utils::fee::derive_base_fee_and_gas_per_pubdata; +use tokio::runtime::Handle; use zksync_dal::{ConnectionPool, SqlxError, StorageProcessor}; use zksync_state::{PostgresStorage, PostgresStorageCaches, ReadStorage, StorageView}; use zksync_system_constants::PUBLISH_BYTECODE_OVERHEAD; use zksync_types::{api, AccountTreeId, L2ChainId, MiniblockNumber, U256}; use zksync_utils::bytecode::{compress_bytecode, hash_bytecode}; -// Note: keep the modules private, and instead re-export functions that make public interface. 
-mod apply; -mod error; -mod execute; -mod tracers; -mod validate; -mod vm_metrics; - use self::vm_metrics::SandboxStage; pub(super) use self::{ error::SandboxExecutionError, @@ -24,7 +16,14 @@ pub(super) use self::{ vm_metrics::{SubmitTxStage, SANDBOX_METRICS}, }; use super::tx_sender::MultiVMBaseSystemContracts; -use multivm::vm_latest::utils::fee::derive_base_fee_and_gas_per_pubdata; + +// Note: keep the modules private, and instead re-export functions that make public interface. +mod apply; +mod error; +mod execute; +mod tracers; +mod validate; +mod vm_metrics; /// Permit to invoke VM code. /// diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs index 9f987a150da..719f6da0b4a 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/tracers.rs @@ -1,9 +1,7 @@ -use multivm::tracers::CallTracer; -use multivm::vm_latest::HistoryMode; -use multivm::{MultiVMTracer, MultiVmTracerPointer}; -use once_cell::sync::OnceCell; - use std::sync::Arc; + +use multivm::{tracers::CallTracer, vm_latest::HistoryMode, MultiVMTracer, MultiVmTracerPointer}; +use once_cell::sync::OnceCell; use zksync_state::WriteStorage; use zksync_types::vm_trace::Call; diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs index 4b9e13084ef..df70d02fe44 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/validate.rs @@ -1,12 +1,14 @@ -use multivm::interface::{ExecutionResult, VmExecutionMode, VmInterface}; -use multivm::MultiVMTracer; use std::collections::HashSet; -use multivm::tracers::{ - validator::{ValidationError, ValidationTracer, ValidationTracerParams}, - StorageInvocations, +use multivm::{ + interface::{ExecutionResult, VmExecutionMode, 
VmInterface}, + tracers::{ + validator::{ValidationError, ValidationTracer, ValidationTracerParams}, + StorageInvocations, + }, + vm_latest::HistoryDisabled, + MultiVMTracer, }; -use multivm::vm_latest::HistoryDisabled; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::{l2::L2Tx, Transaction, TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, U256}; diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs index 138d06a3a7c..6842fe438f8 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/vm_metrics.rs @@ -1,12 +1,13 @@ -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; - use std::time::Duration; use multivm::interface::{VmExecutionResultAndLogs, VmMemoryMetrics}; +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; use zksync_state::StorageViewMetrics; -use zksync_types::event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}; -use zksync_types::fee::TransactionExecutionMetrics; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; +use zksync_types::{ + event::{extract_long_l2_to_l1_messages, extract_published_bytecodes}, + fee::TransactionExecutionMetrics, + storage_writes_deduplicator::StorageWritesDeduplicator, +}; use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::metrics::InteractionType; diff --git a/core/lib/zksync_core/src/api_server/healthcheck.rs b/core/lib/zksync_core/src/api_server/healthcheck.rs index 74495f3439c..58444c30dc9 100644 --- a/core/lib/zksync_core/src/api_server/healthcheck.rs +++ b/core/lib/zksync_core/src/api_server/healthcheck.rs @@ -1,8 +1,7 @@ -use axum::{extract::State, http::StatusCode, routing::get, Json, Router}; -use tokio::sync::watch; - use std::{collections::HashSet, net::SocketAddr, sync::Arc, time::Duration}; 
+use axum::{extract::State, http::StatusCode, routing::get, Json, Router}; +use tokio::sync::watch; use zksync_health_check::{AppHealth, CheckHealth}; type SharedHealthchecks = Arc<[Box]>; diff --git a/core/lib/zksync_core/src/api_server/tree/metrics.rs b/core/lib/zksync_core/src/api_server/tree/metrics.rs index e6b552468d8..d185861d07c 100644 --- a/core/lib/zksync_core/src/api_server/tree/metrics.rs +++ b/core/lib/zksync_core/src/api_server/tree/metrics.rs @@ -1,9 +1,9 @@ //! Metrics for the Merkle tree API. -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics, Unit}; - use std::time::Duration; +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics, Unit}; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "method", rename_all = "snake_case")] pub(super) enum MerkleTreeApiMethod { diff --git a/core/lib/zksync_core/src/api_server/tree/mod.rs b/core/lib/zksync_core/src/api_server/tree/mod.rs index 7b4c9086ac6..00a5fd28554 100644 --- a/core/lib/zksync_core/src/api_server/tree/mod.rs +++ b/core/lib/zksync_core/src/api_server/tree/mod.rs @@ -1,5 +1,7 @@ //! Primitive Merkle tree API used internally to fetch proofs. 
+use std::{fmt, future::Future, net::SocketAddr, pin::Pin}; + use anyhow::Context as _; use async_trait::async_trait; use axum::{ @@ -10,19 +12,16 @@ use axum::{ }; use serde::{Deserialize, Serialize}; use tokio::sync::watch; - -use std::{fmt, future::Future, net::SocketAddr, pin::Pin}; - use zksync_merkle_tree::NoVersionError; use zksync_types::{L1BatchNumber, H256, U256}; +use self::metrics::{MerkleTreeApiMethod, API_METRICS}; +use crate::metadata_calculator::{AsyncTreeReader, MerkleTreeInfo}; + mod metrics; #[cfg(test)] mod tests; -use self::metrics::{MerkleTreeApiMethod, API_METRICS}; -use crate::metadata_calculator::{AsyncTreeReader, MerkleTreeInfo}; - #[derive(Debug, Serialize, Deserialize)] struct TreeProofsRequest { l1_batch_number: L1BatchNumber, diff --git a/core/lib/zksync_core/src/api_server/tree/tests.rs b/core/lib/zksync_core/src/api_server/tree/tests.rs index 2f90b9fabdf..11161805633 100644 --- a/core/lib/zksync_core/src/api_server/tree/tests.rs +++ b/core/lib/zksync_core/src/api_server/tree/tests.rs @@ -1,9 +1,8 @@ //! Tests for the Merkle tree API. -use tempfile::TempDir; - use std::net::Ipv4Addr; +use tempfile::TempDir; use zksync_dal::ConnectionPool; use super::*; diff --git a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs index 12c73800415..3766c8a4ee1 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/mod.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/mod.rs @@ -1,27 +1,23 @@ //! Helper module to submit transactions into the zkSync Network. 
-// External uses +use std::{cmp, num::NonZeroU32, sync::Arc, time::Instant}; + use governor::{ clock::MonotonicClock, middleware::NoOpMiddleware, state::{InMemoryState, NotKeyed}, Quota, RateLimiter, }; - -// Built-in uses -use std::{cmp, num::NonZeroU32, sync::Arc, time::Instant}; - -// Workspace uses - -use multivm::interface::VmExecutionResultAndLogs; -use multivm::vm_latest::{ - constants::{BLOCK_GAS_LIMIT, MAX_PUBDATA_PER_BLOCK}, - utils::{ - fee::derive_base_fee_and_gas_per_pubdata, - overhead::{derive_overhead, OverheadCoefficients}, +use multivm::{ + interface::VmExecutionResultAndLogs, + vm_latest::{ + constants::{BLOCK_GAS_LIMIT, MAX_PUBDATA_PER_BLOCK}, + utils::{ + fee::derive_base_fee_and_gas_per_pubdata, + overhead::{derive_overhead, OverheadCoefficients}, + }, }, }; - use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig}; use zksync_contracts::BaseSystemContracts; use zksync_dal::{transactions_dal::L2TxSubmissionResult, ConnectionPool}; @@ -29,26 +25,24 @@ use zksync_state::PostgresStorageCaches; use zksync_types::{ fee::{Fee, TransactionExecutionMetrics}, get_code_key, get_intrinsic_constants, - l2::error::TxCheckError::TxDuplication, - l2::L2Tx, + l2::{error::TxCheckError::TxDuplication, L2Tx}, utils::storage_key_for_eth_balance, AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, Nonce, PackedEthSignature, ProtocolVersionId, Transaction, H160, H256, MAX_GAS_PER_PUBDATA_BYTE, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, U256, }; - use zksync_utils::h256_to_u256; -// Local uses -use crate::api_server::{ - execution_sandbox::{ - adjust_l1_gas_price_for_tx, execute_tx_eth_call, execute_tx_with_pending_state, - get_pubdata_for_factory_deps, BlockArgs, SubmitTxStage, TxExecutionArgs, TxSharedArgs, - VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS, - }, - tx_sender::result::ApiCallResult, -}; +pub(super) use self::{proxy::TxProxy, result::SubmitTxError}; use crate::{ + api_server::{ + execution_sandbox::{ + 
adjust_l1_gas_price_for_tx, execute_tx_eth_call, execute_tx_with_pending_state, + get_pubdata_for_factory_deps, BlockArgs, SubmitTxStage, TxExecutionArgs, TxSharedArgs, + VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS, + }, + tx_sender::result::ApiCallResult, + }, l1_gas_price::L1GasPriceProvider, metrics::{TxStage, APP_METRICS}, state_keeper::seal_criteria::{ConditionalSealer, SealData}, @@ -57,8 +51,6 @@ use crate::{ mod proxy; mod result; -pub(super) use self::{proxy::TxProxy, result::SubmitTxError}; - /// Type alias for the rate limiter implementation. type TxSenderRateLimiter = RateLimiter>; diff --git a/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs b/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs index 4f70b1d5e50..7a4b928a809 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/proxy.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use tokio::sync::RwLock; +use tokio::sync::RwLock; use zksync_types::{ api::{BlockId, Transaction, TransactionDetails, TransactionId, TransactionReceipt}, l2::L2Tx, diff --git a/core/lib/zksync_core/src/api_server/tx_sender/result.rs b/core/lib/zksync_core/src/api_server/tx_sender/result.rs index b02049f014e..5b5af7b9cac 100644 --- a/core/lib/zksync_core/src/api_server/tx_sender/result.rs +++ b/core/lib/zksync_core/src/api_server/tx_sender/result.rs @@ -1,10 +1,11 @@ -use crate::api_server::execution_sandbox::SandboxExecutionError; +use multivm::{ + interface::{ExecutionResult, VmExecutionResultAndLogs}, + tracers::validator::ValidationError, +}; use thiserror::Error; +use zksync_types::{l2::error::TxCheckError, U256}; -use multivm::interface::{ExecutionResult, VmExecutionResultAndLogs}; -use multivm::tracers::validator::ValidationError; -use zksync_types::l2::error::TxCheckError; -use zksync_types::U256; +use crate::api_server::execution_sandbox::SandboxExecutionError; #[derive(Debug, Error)] pub enum SubmitTxError { diff --git 
a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/batch_limiter_middleware.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/batch_limiter_middleware.rs index f85325c03bc..0192ffe3a5c 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/batch_limiter_middleware.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/batch_limiter_middleware.rs @@ -1,3 +1,5 @@ +use std::{future::Future, num::NonZeroU32, sync::Arc}; + use futures::{future, FutureExt}; use governor::{ clock::DefaultClock, @@ -12,8 +14,6 @@ use jsonrpc_core::{ use jsonrpc_pubsub::Session; use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; -use std::{future::Future, num::NonZeroU32, sync::Arc}; - /// Configures the rate limiting for the WebSocket API. /// Rate limiting is applied per active connection, e.g. a single connected user may not send more than X requests /// per minute. diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs index 4a30961c453..e750d05d9b5 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/error.rs @@ -1,8 +1,8 @@ +use std::fmt; + use jsonrpc_core::{Error, ErrorCode}; use zksync_web3_decl::error::Web3Error; -use std::fmt; - use crate::api_server::web3::metrics::API_METRICS; pub fn into_jsrpc_error(err: Web3Error) -> Error { diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs index 3775da78e41..57ae0bb7116 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/debug.rs @@ -1,15 +1,15 @@ -// External uses -use crate::api_server::web3::backend_jsonrpc::error::into_jsrpc_error; -use 
crate::api_server::web3::namespaces::DebugNamespace; use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; - use zksync_types::{ api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, transaction_request::CallRequest, H256, }; +use crate::api_server::web3::{ + backend_jsonrpc::error::into_jsrpc_error, namespaces::DebugNamespace, +}; + #[rpc] pub trait DebugNamespaceT { #[rpc(name = "debug_traceBlockByNumber")] diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/en.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/en.rs index e75d7caade2..2fc08eafcac 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/en.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/en.rs @@ -1,13 +1,7 @@ -// Built-in uses - -// External uses use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; - -// Workspace uses use zksync_types::{api::en::SyncBlock, MiniblockNumber}; -// Local uses use crate::{ api_server::web3::{backend_jsonrpc::error::into_jsrpc_error, EnNamespace}, l1_gas_price::L1GasPriceProvider, diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs index 00ba9379ae5..706701cfcf3 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs @@ -1,10 +1,5 @@ -// Built-in uses - -// External uses use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; - -// Workspace uses use zksync_types::{ api::{ BlockId, BlockIdVariant, BlockNumber, Transaction, TransactionId, TransactionReceipt, @@ -16,9 +11,10 @@ use zksync_types::{ }; use zksync_web3_decl::types::{Block, Filter, FilterChanges, Log}; -// Local uses -use crate::web3::namespaces::EthNamespace; -use crate::{l1_gas_price::L1GasPriceProvider, 
web3::backend_jsonrpc::error::into_jsrpc_error}; +use crate::{ + l1_gas_price::L1GasPriceProvider, + web3::{backend_jsonrpc::error::into_jsrpc_error, namespaces::EthNamespace}, +}; #[rpc] pub trait EthNamespaceT { diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/net.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/net.rs index 89abd3177c8..4dbd9fb7a75 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/net.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/net.rs @@ -1,13 +1,7 @@ -// Built-in uses - -// External uses use jsonrpc_core::Result; use jsonrpc_derive::rpc; - -// Workspace uses use zksync_types::U256; -// Local uses use crate::web3::namespaces::NetNamespace; #[rpc] diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs index bf700a64156..48f413fba05 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/zks.rs @@ -1,12 +1,8 @@ -// Built-in uses use std::collections::HashMap; -// External uses use bigdecimal::BigDecimal; use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; - -// Workspace uses use zksync_types::{ api::{ BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, Proof, ProtocolVersion, @@ -18,9 +14,10 @@ use zksync_types::{ }; use zksync_web3_decl::types::{Filter, Log, Token}; -// Local uses -use crate::web3::namespaces::ZksNamespace; -use crate::{l1_gas_price::L1GasPriceProvider, web3::backend_jsonrpc::error::into_jsrpc_error}; +use crate::{ + l1_gas_price::L1GasPriceProvider, + web3::{backend_jsonrpc::error::into_jsrpc_error, namespaces::ZksNamespace}, +}; #[rpc] pub trait ZksNamespaceT { diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs 
b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs index 1b11919abde..e54249f84c3 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/pub_sub.rs @@ -2,9 +2,7 @@ use std::sync::Arc; use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_derive::rpc; -use jsonrpc_pubsub::typed; -use jsonrpc_pubsub::{Session, SubscriptionId}; - +use jsonrpc_pubsub::{typed, Session, SubscriptionId}; use zksync_web3_decl::types::PubSubResult; use super::{super::EthSubscribe, batch_limiter_middleware::RateLimitMetadata}; diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs index 04f6102066f..2c4f2a3ce91 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/mod.rs @@ -3,8 +3,11 @@ //! namespace structures defined in `zksync_core`. 
use std::error::Error; -use zksync_web3_decl::error::Web3Error; -use zksync_web3_decl::jsonrpsee::types::{error::ErrorCode, ErrorObjectOwned}; + +use zksync_web3_decl::{ + error::Web3Error, + jsonrpsee::types::{error::ErrorCode, ErrorObjectOwned}, +}; pub mod namespaces; diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs index 6b6ed67c3c6..93a95790666 100644 --- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/zks.rs @@ -1,7 +1,6 @@ -use bigdecimal::BigDecimal; - use std::collections::HashMap; +use bigdecimal::BigDecimal; use zksync_types::{ api::{ BlockDetails, BridgeAddresses, L1BatchDetails, L2ToL1LogProof, Proof, ProtocolVersion, diff --git a/core/lib/zksync_core/src/api_server/web3/metrics.rs b/core/lib/zksync_core/src/api_server/web3/metrics.rs index 2df24f9dd60..60d41c6ea32 100644 --- a/core/lib/zksync_core/src/api_server/web3/metrics.rs +++ b/core/lib/zksync_core/src/api_server/web3/metrics.rs @@ -1,18 +1,18 @@ //! Metrics for the JSON-RPC server. 
-use vise::{ - Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LabeledFamily, - LatencyObserver, Metrics, Unit, -}; - use std::{ fmt, time::{Duration, Instant}, }; -use super::{ApiTransport, TypedFilter}; +use vise::{ + Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LabeledFamily, + LatencyObserver, Metrics, Unit, +}; use zksync_types::api; +use super::{ApiTransport, TypedFilter}; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "scheme", rename_all = "UPPERCASE")] pub(super) enum ApiTransportLabel { diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs index 2904d5af79d..411c04112c9 100644 --- a/core/lib/zksync_core/src/api_server/web3/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/mod.rs @@ -1,16 +1,17 @@ +use std::{net::SocketAddr, sync::Arc, time::Duration}; + use anyhow::Context as _; +use chrono::NaiveDateTime; use futures::future; use jsonrpc_core::MetaIoHandler; use jsonrpc_http_server::hyper; use jsonrpc_pubsub::PubSubHandler; use serde::Deserialize; -use tokio::sync::{mpsc, oneshot, watch, Mutex}; +use tokio::{ + sync::{mpsc, oneshot, watch, Mutex}, + task::JoinHandle, +}; use tower_http::{cors::CorsLayer, metrics::InFlightRequestsLayer}; - -use chrono::NaiveDateTime; -use std::{net::SocketAddr, sync::Arc, time::Duration}; -use tokio::task::JoinHandle; - use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_health_check::{HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_types::{api, MiniblockNumber}; @@ -27,6 +28,23 @@ use zksync_web3_decl::{ types::Filter, }; +use self::{ + backend_jsonrpc::{ + batch_limiter_middleware::{LimitMiddleware, Transport}, + error::internal_error, + namespaces::{ + debug::DebugNamespaceT, en::EnNamespaceT, eth::EthNamespaceT, net::NetNamespaceT, + web3::Web3NamespaceT, zks::ZksNamespaceT, + }, + pub_sub::Web3PubSub, + }, + 
metrics::API_METRICS, + namespaces::{ + DebugNamespace, EnNamespace, EthNamespace, NetNamespace, Web3Namespace, ZksNamespace, + }, + pubsub::{EthSubscribe, PubSubEvent}, + state::{Filters, InternalApiConfig, RpcState, SealedMiniblockNumber}, +}; use crate::{ api_server::{ execution_sandbox::VmConcurrencyBarrier, tree::TreeApiHttpClient, tx_sender::TxSender, @@ -45,22 +63,6 @@ pub mod state; #[cfg(test)] pub(crate) mod tests; -use self::backend_jsonrpc::{ - batch_limiter_middleware::{LimitMiddleware, Transport}, - error::internal_error, - namespaces::{ - debug::DebugNamespaceT, en::EnNamespaceT, eth::EthNamespaceT, net::NetNamespaceT, - web3::Web3NamespaceT, zks::ZksNamespaceT, - }, - pub_sub::Web3PubSub, -}; -use self::metrics::API_METRICS; -use self::namespaces::{ - DebugNamespace, EnNamespace, EthNamespace, NetNamespace, Web3Namespace, ZksNamespace, -}; -use self::pubsub::{EthSubscribe, PubSubEvent}; -use self::state::{Filters, InternalApiConfig, RpcState, SealedMiniblockNumber}; - /// Timeout for graceful shutdown logic within API servers. 
const GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(5); diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs index d59c25ddbe9..76e4f01ee47 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/debug.rs @@ -1,9 +1,7 @@ -use multivm::vm_latest::constants::BLOCK_GAS_LIMIT; -use once_cell::sync::OnceCell; use std::sync::Arc; -use multivm::interface::ExecutionResult; - +use multivm::{interface::ExecutionResult, vm_latest::constants::BLOCK_GAS_LIMIT}; +use once_cell::sync::OnceCell; use zksync_dal::ConnectionPool; use zksync_state::PostgresStorageCaches; use zksync_types::{ @@ -15,19 +13,21 @@ use zksync_types::{ }; use zksync_web3_decl::error::Web3Error; -use crate::api_server::{ - execution_sandbox::{ - execute_tx_eth_call, ApiTracer, BlockArgs, TxSharedArgs, VmConcurrencyLimiter, - }, - tx_sender::ApiContracts, - web3::{ - backend_jsonrpc::error::internal_error, - metrics::API_METRICS, - resolve_block, - state::{RpcState, SealedMiniblockNumber}, +use crate::{ + api_server::{ + execution_sandbox::{ + execute_tx_eth_call, ApiTracer, BlockArgs, TxSharedArgs, VmConcurrencyLimiter, + }, + tx_sender::ApiContracts, + web3::{ + backend_jsonrpc::error::internal_error, + metrics::API_METRICS, + resolve_block, + state::{RpcState, SealedMiniblockNumber}, + }, }, + l1_gas_price::L1GasPriceProvider, }; -use crate::l1_gas_price::L1GasPriceProvider; #[derive(Debug, Clone)] pub struct DebugNamespace { diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs index b43f5523938..97c6eb9e768 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/en.rs @@ -2,7 +2,7 @@ use zksync_types::{api::en::SyncBlock, MiniblockNumber}; use zksync_web3_decl::error::Web3Error; use 
crate::{ - api_server::{web3::backend_jsonrpc::error::internal_error, web3::state::RpcState}, + api_server::web3::{backend_jsonrpc::error::internal_error, state::RpcState}, l1_gas_price::L1GasPriceProvider, }; diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs index 9e3a90dde04..7f575c57414 100644 --- a/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs +++ b/core/lib/zksync_core/src/api_server/web3/namespaces/zks.rs @@ -2,7 +2,6 @@ use std::{collections::HashMap, convert::TryInto}; use bigdecimal::{BigDecimal, Zero}; use zksync_dal::StorageProcessor; - use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_types::{ api::{ @@ -25,11 +24,13 @@ use zksync_web3_decl::{ types::{Address, Filter, Log, Token, H256}, }; -use crate::api_server::{ - tree::TreeApiClient, - web3::{backend_jsonrpc::error::internal_error, metrics::API_METRICS, RpcState}, +use crate::{ + api_server::{ + tree::TreeApiClient, + web3::{backend_jsonrpc::error::internal_error, metrics::API_METRICS, RpcState}, + }, + l1_gas_price::L1GasPriceProvider, }; -use crate::l1_gas_price::L1GasPriceProvider; #[derive(Debug)] pub struct ZksNamespace { diff --git a/core/lib/zksync_core/src/api_server/web3/pubsub.rs b/core/lib/zksync_core/src/api_server/web3/pubsub.rs index 946c0744ba4..57e54cad10f 100644 --- a/core/lib/zksync_core/src/api_server/web3/pubsub.rs +++ b/core/lib/zksync_core/src/api_server/web3/pubsub.rs @@ -1,5 +1,7 @@ //! (Largely) backend-agnostic logic for dealing with Web3 subscriptions. 
+use std::{collections::HashMap, sync::Arc}; + use anyhow::Context as _; use jsonrpc_core::error::{Error, ErrorCode}; use jsonrpc_pubsub::{typed, SubscriptionId}; @@ -8,9 +10,6 @@ use tokio::{ task::JoinHandle, time::{interval, Duration}, }; - -use std::{collections::HashMap, sync::Arc}; - use zksync_dal::ConnectionPool; use zksync_types::{MiniblockNumber, H128, H256}; use zksync_web3_decl::types::{BlockHeader, Log, PubSubFilter, PubSubResult}; diff --git a/core/lib/zksync_core/src/api_server/web3/state.rs b/core/lib/zksync_core/src/api_server/web3/state.rs index ea52b2ae61c..75b41d58b20 100644 --- a/core/lib/zksync_core/src/api_server/web3/state.rs +++ b/core/lib/zksync_core/src/api_server/web3/state.rs @@ -1,5 +1,3 @@ -use zksync_utils::h256_to_u256; - use std::{ collections::HashMap, convert::TryFrom, @@ -10,9 +8,9 @@ use std::{ }, time::{Duration, Instant}, }; + use tokio::sync::Mutex; use vise::GaugeGuard; - use zksync_config::configs::{api::Web3JsonRpcConfig, chain::NetworkConfig, ContractsConfig}; use zksync_dal::ConnectionPool; use zksync_types::{ @@ -23,6 +21,7 @@ use zksync_types::{ AccountTreeId, Address, L1BatchNumber, L1ChainId, L2ChainId, MiniblockNumber, StorageKey, H256, SYSTEM_CONTEXT_ADDRESS, U256, U64, VIRTUIAL_BLOCK_UPGRADE_INFO_POSITION, }; +use zksync_utils::h256_to_u256; use zksync_web3_decl::{ error::Web3Error, types::{Filter, Log}, diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs index 1bb14df52fa..8743330710c 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -1,9 +1,8 @@ +use std::{sync::Arc, time::Instant}; + use assert_matches::assert_matches; use async_trait::async_trait; use tokio::sync::watch; - -use std::{sync::Arc, time::Instant}; - use zksync_config::configs::{ api::Web3JsonRpcConfig, chain::{NetworkConfig, StateKeeperConfig}, @@ -23,8 +22,6 @@ use zksync_web3_decl::{ 
types::FilterChanges, }; -mod ws; - use super::{metrics::ApiTransportLabel, *}; use crate::{ api_server::tx_sender::TxSenderConfig, @@ -32,6 +29,8 @@ use crate::{ state_keeper::tests::create_l2_transaction, }; +mod ws; + const TEST_TIMEOUT: Duration = Duration::from_secs(10); const POLL_INTERVAL: Duration = Duration::from_millis(50); diff --git a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs index 704dfef6700..58fcebeda0d 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs @@ -2,7 +2,6 @@ use async_trait::async_trait; use tokio::sync::watch; - use zksync_config::configs::chain::NetworkConfig; use zksync_dal::ConnectionPool; use zksync_types::{api, Address, L1BatchNumber, H256, U64}; diff --git a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs index e4d605d2545..d9295b413fc 100644 --- a/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs +++ b/core/lib/zksync_core/src/basic_witness_input_producer/mod.rs @@ -1,24 +1,22 @@ -use anyhow::Context; -use std::sync::Arc; -use std::time::Instant; +use std::{sync::Arc, time::Instant}; +use anyhow::Context; +use async_trait::async_trait; +use multivm::interface::{L2BlockEnv, VmInterface}; +use tokio::{runtime::Handle, task::JoinHandle}; use zksync_dal::{basic_witness_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool}; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_queued_job_processor::JobProcessor; -use zksync_types::witness_block_state::WitnessBlockState; -use zksync_types::{L1BatchNumber, L2ChainId}; +use zksync_types::{witness_block_state::WitnessBlockState, L1BatchNumber, L2ChainId}; -use async_trait::async_trait; -use multivm::interface::{L2BlockEnv, VmInterface}; -use tokio::runtime::Handle; -use tokio::task::JoinHandle; +use self::{ + metrics::METRICS, + 
vm_interactions::{create_vm, execute_tx}, +}; mod metrics; mod vm_interactions; -use self::metrics::METRICS; -use self::vm_interactions::{create_vm, execute_tx}; - /// Component that extracts all data (from DB) necessary to run a Basic Witness Generator. /// Does this by rerunning an entire L1Batch and extracting information from both the VM run and DB. /// This component will upload Witness Inputs to the object store. diff --git a/core/lib/zksync_core/src/basic_witness_input_producer/vm_interactions.rs b/core/lib/zksync_core/src/basic_witness_input_producer/vm_interactions.rs index 464ab1f92d0..e655112fade 100644 --- a/core/lib/zksync_core/src/basic_witness_input_producer/vm_interactions.rs +++ b/core/lib/zksync_core/src/basic_witness_input_producer/vm_interactions.rs @@ -1,15 +1,16 @@ use anyhow::{anyhow, Context}; - -use crate::state_keeper::io::common::load_l1_batch_params; - -use multivm::interface::{VmInterface, VmInterfaceHistoryEnabled}; -use multivm::vm_latest::HistoryEnabled; -use multivm::VmInstance; +use multivm::{ + interface::{VmInterface, VmInterfaceHistoryEnabled}, + vm_latest::HistoryEnabled, + VmInstance, +}; use tokio::runtime::Handle; use zksync_dal::StorageProcessor; use zksync_state::{PostgresStorage, StoragePtr, StorageView, WriteStorage}; use zksync_types::{L1BatchNumber, L2ChainId, Transaction}; +use crate::state_keeper::io::common::load_l1_batch_params; + pub(super) type VmAndStorage<'a> = ( VmInstance>, HistoryEnabled>, StoragePtr>>, diff --git a/core/lib/zksync_core/src/block_reverter/mod.rs b/core/lib/zksync_core/src/block_reverter/mod.rs index 1170af9d5ba..09358ec9abc 100644 --- a/core/lib/zksync_core/src/block_reverter/mod.rs +++ b/core/lib/zksync_core/src/block_reverter/mod.rs @@ -1,27 +1,26 @@ +use std::{path::Path, time::Duration}; + use bitflags::bitflags; use serde::Serialize; use tokio::time::sleep; - -use std::path::Path; -use std::time::Duration; - use zksync_config::{ContractsConfig, ETHSenderConfig}; use 
zksync_contracts::zksync_contract; use zksync_dal::ConnectionPool; +use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; use zksync_merkle_tree::domain::ZkSyncTree; use zksync_state::RocksdbStorage; use zksync_storage::RocksDB; -use zksync_types::aggregated_operations::AggregatedActionType; -use zksync_types::ethabi::Token; -use zksync_types::web3::{ - contract::{Contract, Options}, - transports::Http, - types::{BlockId, BlockNumber}, - Web3, +use zksync_types::{ + aggregated_operations::AggregatedActionType, + ethabi::Token, + web3::{ + contract::{Contract, Options}, + transports::Http, + types::{BlockId, BlockNumber}, + Web3, + }, + L1BatchNumber, PackedEthSignature, H160, H256, U256, }; -use zksync_types::{L1BatchNumber, PackedEthSignature, H160, H256, U256}; - -use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; bitflags! { pub struct BlockReverterFlags: u32 { diff --git a/core/lib/zksync_core/src/consensus/payload.rs b/core/lib/zksync_core/src/consensus/payload.rs index dbe276196b0..8051d87ca58 100644 --- a/core/lib/zksync_core/src/consensus/payload.rs +++ b/core/lib/zksync_core/src/consensus/payload.rs @@ -1,9 +1,7 @@ use anyhow::Context as _; - use zksync_consensus_roles::validator; use zksync_protobuf::{required, ProtoFmt}; -use zksync_types::api::en::SyncBlock; -use zksync_types::{Address, L1BatchNumber, Transaction, H256}; +use zksync_types::{api::en::SyncBlock, Address, L1BatchNumber, Transaction, H256}; /// L2 block (= miniblock) payload. #[derive(Debug, PartialEq)] diff --git a/core/lib/zksync_core/src/data_fetchers/mod.rs b/core/lib/zksync_core/src/data_fetchers/mod.rs index f04a80c315e..850f2814d5e 100644 --- a/core/lib/zksync_core/src/data_fetchers/mod.rs +++ b/core/lib/zksync_core/src/data_fetchers/mod.rs @@ -9,8 +9,7 @@ //! Every data fetcher is represented by an autonomic routine, which spend most of the time sleeping; //! 
once in the configurable interval it fetches the data from an API and store it into the database. -use tokio::sync::watch; -use tokio::task::JoinHandle; +use tokio::{sync::watch, task::JoinHandle}; use zksync_config::FetcherConfig; use zksync_dal::ConnectionPool; diff --git a/core/lib/zksync_core/src/data_fetchers/token_list/mock.rs b/core/lib/zksync_core/src/data_fetchers/token_list/mock.rs index c813888cf52..4b2aaefafb8 100644 --- a/core/lib/zksync_core/src/data_fetchers/token_list/mock.rs +++ b/core/lib/zksync_core/src/data_fetchers/token_list/mock.rs @@ -2,16 +2,14 @@ use std::{collections::HashMap, fs::read_to_string, path::PathBuf, str::FromStr} use async_trait::async_trait; use serde::{Deserialize, Serialize}; - -use zksync_types::network::Network; use zksync_types::{ + network::Network, tokens::{TokenMetadata, ETHEREUM_ADDRESS}, Address, }; -use crate::data_fetchers::error::ApiFetchError; - use super::FetcherImpl; +use crate::data_fetchers::error::ApiFetchError; #[derive(Debug, Clone)] pub struct MockTokenListFetcher { diff --git a/core/lib/zksync_core/src/data_fetchers/token_list/mod.rs b/core/lib/zksync_core/src/data_fetchers/token_list/mod.rs index 3981ea8ea40..e213cf89a06 100644 --- a/core/lib/zksync_core/src/data_fetchers/token_list/mod.rs +++ b/core/lib/zksync_core/src/data_fetchers/token_list/mod.rs @@ -15,11 +15,9 @@ use std::{ use async_trait::async_trait; use tokio::sync::watch; - use zksync_config::{configs::fetcher::TokenListSource, FetcherConfig}; use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_types::network::Network; -use zksync_types::{tokens::TokenMetadata, Address}; +use zksync_types::{network::Network, tokens::TokenMetadata, Address}; use super::error::{ApiFetchError, ErrorAnalyzer}; diff --git a/core/lib/zksync_core/src/data_fetchers/token_list/one_inch.rs b/core/lib/zksync_core/src/data_fetchers/token_list/one_inch.rs index 1d022e4700d..a21b942b472 100644 --- 
a/core/lib/zksync_core/src/data_fetchers/token_list/one_inch.rs +++ b/core/lib/zksync_core/src/data_fetchers/token_list/one_inch.rs @@ -3,13 +3,11 @@ use std::{collections::HashMap, str::FromStr}; use async_trait::async_trait; use reqwest::{Client, Url}; use serde::{Deserialize, Serialize}; - use zksync_config::FetcherConfig; use zksync_types::{tokens::TokenMetadata, Address}; -use crate::data_fetchers::error::ApiFetchError; - use super::FetcherImpl; +use crate::data_fetchers::error::ApiFetchError; #[derive(Debug, Clone)] pub struct OneInchTokenListFetcher { diff --git a/core/lib/zksync_core/src/data_fetchers/token_price/coingecko.rs b/core/lib/zksync_core/src/data_fetchers/token_price/coingecko.rs index a046c23ea2d..686410eed08 100644 --- a/core/lib/zksync_core/src/data_fetchers/token_price/coingecko.rs +++ b/core/lib/zksync_core/src/data_fetchers/token_price/coingecko.rs @@ -7,7 +7,6 @@ use itertools::Itertools; use num::{rational::Ratio, BigUint}; use reqwest::{Client, Url}; use serde::{Deserialize, Serialize}; - use zksync_config::FetcherConfig; use zksync_types::{ tokens::{TokenPrice, ETHEREUM_ADDRESS}, @@ -15,9 +14,8 @@ use zksync_types::{ }; use zksync_utils::UnsignedRatioSerializeAsDecimal; -use crate::data_fetchers::error::ApiFetchError; - use super::FetcherImpl; +use crate::data_fetchers::error::ApiFetchError; #[derive(Debug, Clone)] pub struct CoinGeckoFetcher { diff --git a/core/lib/zksync_core/src/data_fetchers/token_price/mock.rs b/core/lib/zksync_core/src/data_fetchers/token_price/mock.rs index 6e5f4893e53..3fde09f65f4 100644 --- a/core/lib/zksync_core/src/data_fetchers/token_price/mock.rs +++ b/core/lib/zksync_core/src/data_fetchers/token_price/mock.rs @@ -8,9 +8,8 @@ use zksync_types::{ Address, }; -use crate::data_fetchers::error::ApiFetchError; - use super::FetcherImpl; +use crate::data_fetchers::error::ApiFetchError; #[derive(Debug, Default, Clone)] pub struct MockPriceFetcher; diff --git 
a/core/lib/zksync_core/src/data_fetchers/token_price/mod.rs b/core/lib/zksync_core/src/data_fetchers/token_price/mod.rs index 8e7d5575f69..074f8d81aa6 100644 --- a/core/lib/zksync_core/src/data_fetchers/token_price/mod.rs +++ b/core/lib/zksync_core/src/data_fetchers/token_price/mod.rs @@ -3,15 +3,14 @@ use std::{collections::HashMap, time::Duration}; use async_trait::async_trait; - +use bigdecimal::FromPrimitive; +use num::{rational::Ratio, BigUint}; +use tokio::sync::watch; use zksync_config::{configs::fetcher::TokenPriceSource, FetcherConfig}; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::{tokens::TokenPrice, Address}; use super::error::{ApiFetchError, ErrorAnalyzer}; -use bigdecimal::FromPrimitive; -use num::{rational::Ratio, BigUint}; -use tokio::sync::watch; pub mod coingecko; pub mod mock; diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs index 4cb40c475f9..43b1e51da10 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_aggregator.rs @@ -1,7 +1,6 @@ use std::convert::TryInto; use tokio::sync::watch; - use zksync_config::configs::eth_sender::SenderConfig; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::{ConnectionPool, StorageProcessor}; @@ -17,13 +16,15 @@ use zksync_types::{ Address, ProtocolVersionId, H256, U256, }; -use crate::eth_sender::{ - metrics::{PubdataKind, METRICS}, - zksync_functions::ZkSyncFunctions, - Aggregator, ETHSenderError, +use crate::{ + eth_sender::{ + metrics::{PubdataKind, METRICS}, + zksync_functions::ZkSyncFunctions, + Aggregator, ETHSenderError, + }, + gas_tracker::agg_l1_batch_base_cost, + metrics::BlockL1Stage, }; -use crate::gas_tracker::agg_l1_batch_base_cost; -use crate::metrics::BlockL1Stage; /// Data queried from L1 using multicall contract. 
#[derive(Debug)] diff --git a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs index 5aab4a2903c..2ef9ea87a7c 100644 --- a/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs +++ b/core/lib/zksync_core/src/eth_sender/eth_tx_manager.rs @@ -1,9 +1,7 @@ +use std::{sync::Arc, time::Duration}; + use anyhow::Context as _; use tokio::sync::watch; - -use std::sync::Arc; -use std::time::Duration; - use zksync_config::configs::eth_sender::SenderConfig; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::{ @@ -22,8 +20,7 @@ use zksync_types::{ use zksync_utils::time::seconds_since_epoch; use super::{metrics::METRICS, ETHSenderError}; -use crate::l1_gas_price::L1TxParamsProvider; -use crate::metrics::BlockL1Stage; +use crate::{l1_gas_price::L1TxParamsProvider, metrics::BlockL1Stage}; #[derive(Debug)] struct EthFee { diff --git a/core/lib/zksync_core/src/eth_sender/metrics.rs b/core/lib/zksync_core/src/eth_sender/metrics.rs index 950ff8bf6f7..4bce1bf1a1f 100644 --- a/core/lib/zksync_core/src/eth_sender/metrics.rs +++ b/core/lib/zksync_core/src/eth_sender/metrics.rs @@ -1,9 +1,8 @@ //! Metrics for the Ethereum sender component. 
-use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; - use std::{fmt, time::Duration}; +use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; use zksync_dal::StorageProcessor; use zksync_types::{aggregated_operations::AggregatedActionType, eth_sender::EthTx}; use zksync_utils::time::seconds_since_epoch; diff --git a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs index 33fd33ad577..85f6a46c960 100644 --- a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs +++ b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs @@ -1,8 +1,7 @@ -use async_trait::async_trait; -use chrono::Utc; - use std::fmt; +use async_trait::async_trait; +use chrono::Utc; use zksync_dal::StorageProcessor; use zksync_types::{ aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, L1BatchNumber, diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs index 51166fc794a..01781a424f5 100644 --- a/core/lib/zksync_core/src/eth_sender/tests.rs +++ b/core/lib/zksync_core/src/eth_sender/tests.rs @@ -1,8 +1,7 @@ -use assert_matches::assert_matches; use std::sync::{atomic::Ordering, Arc}; +use assert_matches::assert_matches; use once_cell::sync::Lazy; - use zksync_config::{ configs::eth_sender::{ProofSendingMode, SenderConfig}, ContractsConfig, ETHSenderConfig, GasAdjusterConfig, @@ -23,10 +22,12 @@ use zksync_types::{ Address, L1BatchNumber, L1BlockNumber, ProtocolVersionId, H256, }; -use crate::eth_sender::{ - eth_tx_manager::L1BlockNumbers, Aggregator, ETHSenderError, EthTxAggregator, EthTxManager, +use crate::{ + eth_sender::{ + eth_tx_manager::L1BlockNumbers, Aggregator, ETHSenderError, EthTxAggregator, EthTxManager, + }, + l1_gas_price::GasAdjuster, }; -use crate::l1_gas_price::GasAdjuster; // Alias to conveniently call static methods of ETHSender. 
type MockEthTxManager = EthTxManager, GasAdjuster>>; diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs b/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs index b43fc6fb050..2f7e2e86b2c 100644 --- a/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs +++ b/core/lib/zksync_core/src/eth_watch/event_processors/governance_upgrades.rs @@ -1,15 +1,16 @@ -use crate::eth_watch::{ - client::{Error, EthClient}, - event_processors::EventProcessor, -}; -use std::convert::TryFrom; -use std::time::Instant; +use std::{convert::TryFrom, time::Instant}; + use zksync_dal::StorageProcessor; use zksync_types::{ ethabi::Contract, protocol_version::GovernanceOperation, web3::types::Log, Address, ProtocolUpgrade, ProtocolVersionId, H256, }; +use crate::eth_watch::{ + client::{Error, EthClient}, + event_processors::EventProcessor, +}; + /// Listens to operation events coming from the governance contract and saves new protocol upgrade proposals to the database. 
#[derive(Debug)] pub struct GovernanceUpgradesEventProcessor { diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs b/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs index 84ea1eeb04c..202b7efb586 100644 --- a/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs +++ b/core/lib/zksync_core/src/eth_watch/event_processors/mod.rs @@ -1,7 +1,8 @@ -use crate::eth_watch::client::{Error, EthClient}; use zksync_dal::StorageProcessor; use zksync_types::{web3::types::Log, H256}; +use crate::eth_watch::client::{Error, EthClient}; + pub mod governance_upgrades; pub mod priority_ops; pub mod upgrades; diff --git a/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs b/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs index 210b540c48e..497cb705ee1 100644 --- a/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs +++ b/core/lib/zksync_core/src/eth_watch/event_processors/upgrades.rs @@ -1,4 +1,5 @@ use std::convert::TryFrom; + use zksync_dal::StorageProcessor; use zksync_types::{web3::types::Log, ProtocolUpgrade, ProtocolVersionId, H256}; diff --git a/core/lib/zksync_core/src/eth_watch/metrics.rs b/core/lib/zksync_core/src/eth_watch/metrics.rs index e5166f137ca..c96b8c08483 100644 --- a/core/lib/zksync_core/src/eth_watch/metrics.rs +++ b/core/lib/zksync_core/src/eth_watch/metrics.rs @@ -1,9 +1,9 @@ //! Metrics for Ethereum watcher. 
-use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; - use std::time::Duration; +use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "stage", rename_all = "snake_case")] pub(super) enum PollStage { diff --git a/core/lib/zksync_core/src/eth_watch/mod.rs b/core/lib/zksync_core/src/eth_watch/mod.rs index fdb629bce28..69f447e7fba 100644 --- a/core/lib/zksync_core/src/eth_watch/mod.rs +++ b/core/lib/zksync_core/src/eth_watch/mod.rs @@ -4,10 +4,9 @@ //! Poll interval is configured using the `ETH_POLL_INTERVAL` constant. //! Number of confirmations is configured using the `CONFIRMATIONS_FOR_ETH_EVENT` environment variable. -use tokio::{sync::watch, task::JoinHandle}; - use std::time::Duration; +use tokio::{sync::watch, task::JoinHandle}; use zksync_config::ETHWatchConfig; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_eth_client::EthInterface; @@ -17,12 +16,6 @@ use zksync_types::{ ProtocolVersionId, }; -mod client; -mod event_processors; -mod metrics; -#[cfg(test)] -mod tests; - use self::{ client::{Error, EthClient, EthHttpQueryClient, RETRY_LIMIT}, event_processors::{ @@ -32,6 +25,12 @@ use self::{ metrics::{PollStage, METRICS}, }; +mod client; +mod event_processors; +mod metrics; +#[cfg(test)] +mod tests; + #[derive(Debug)] struct EthWatchState { last_seen_version_id: ProtocolVersionId, diff --git a/core/lib/zksync_core/src/eth_watch/tests.rs b/core/lib/zksync_core/src/eth_watch/tests.rs index d7627a56c13..31c46741929 100644 --- a/core/lib/zksync_core/src/eth_watch/tests.rs +++ b/core/lib/zksync_core/src/eth_watch/tests.rs @@ -1,17 +1,13 @@ -use std::collections::HashMap; -use std::convert::TryInto; -use std::sync::Arc; +use std::{collections::HashMap, convert::TryInto, sync::Arc}; use tokio::sync::RwLock; - use zksync_contracts::{governance_contract, zksync_contract}; 
use zksync_dal::{ConnectionPool, StorageProcessor}; -use zksync_types::protocol_version::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}; -use zksync_types::web3::types::{Address, BlockNumber}; use zksync_types::{ ethabi::{encode, Hash, Token}, l1::{L1Tx, OpProcessingType, PriorityQueueType}, - web3::types::Log, + protocol_version::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, + web3::types::{Address, BlockNumber, Log}, Execute, L1TxCommonData, PriorityOpId, ProtocolUpgrade, ProtocolVersion, ProtocolVersionId, Transaction, H256, U256, }; diff --git a/core/lib/zksync_core/src/genesis.rs b/core/lib/zksync_core/src/genesis.rs index 39a8645767d..01d4628caac 100644 --- a/core/lib/zksync_core/src/genesis.rs +++ b/core/lib/zksync_core/src/genesis.rs @@ -3,14 +3,13 @@ //! setups the required databases, and outputs the data required to initialize a smart contract. use anyhow::Context as _; - use zksync_contracts::BaseSystemContracts; use zksync_dal::StorageProcessor; use zksync_merkle_tree::domain::ZkSyncTree; - use zksync_types::{ - block::DeployedContract, - block::{legacy_miniblock_hash, BlockGasCount, L1BatchHeader, MiniblockHeader}, + block::{ + legacy_miniblock_hash, BlockGasCount, DeployedContract, L1BatchHeader, MiniblockHeader, + }, commitment::{L1BatchCommitment, L1BatchMetadata}, get_code_key, get_system_context_init_logs, protocol_version::{L1VerifierConfig, ProtocolVersion}, @@ -19,8 +18,7 @@ use zksync_types::{ AccountTreeId, Address, L1BatchNumber, L2ChainId, LogQuery, MiniblockNumber, ProtocolVersionId, StorageKey, StorageLog, StorageLogKind, Timestamp, H256, }; -use zksync_utils::{be_words_to_bytes, h256_to_u256}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; +use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use crate::metadata_calculator::L1BatchWithLogs; diff --git a/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs b/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs 
index 6ba94cbac6d..190764ec57d 100644 --- a/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs +++ b/core/lib/zksync_core/src/house_keeper/blocks_state_reporter.rs @@ -1,5 +1,4 @@ use async_trait::async_trait; - use zksync_dal::ConnectionPool; use zksync_prover_utils::periodic_job::PeriodicJob; use zksync_utils::time::seconds_since_epoch; diff --git a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs index 769792b6a58..73c752b6955 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_proof_compressor_queue_monitor.rs @@ -1,8 +1,7 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; -use zksync_types::proofs::JobCountStatistics; - use zksync_prover_utils::periodic_job::PeriodicJob; +use zksync_types::proofs::JobCountStatistics; const PROOF_COMPRESSOR_SERVICE_NAME: &str = "proof_compressor"; diff --git a/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs b/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs index ab9eba1fc66..0adfdb47055 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_scheduler_circuit_queuer.rs @@ -1,6 +1,5 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; - use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] diff --git a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs index 15b56e16553..67f81295b44 100644 --- a/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/fri_witness_generator_queue_monitor.rs @@ -2,9 +2,8 @@ use std::collections::HashMap; use async_trait::async_trait; use zksync_dal::ConnectionPool; -use 
zksync_types::proofs::{AggregationRound, JobCountStatistics}; - use zksync_prover_utils::periodic_job::PeriodicJob; +use zksync_types::proofs::{AggregationRound, JobCountStatistics}; const FRI_WITNESS_GENERATOR_SERVICE_NAME: &str = "fri_witness_generator"; diff --git a/core/lib/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs index 7ddb1bd75dd..ab96b52bedc 100644 --- a/core/lib/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/gpu_prover_queue_monitor.rs @@ -1,6 +1,5 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; - use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] diff --git a/core/lib/zksync_core/src/house_keeper/prover_job_retry_manager.rs b/core/lib/zksync_core/src/house_keeper/prover_job_retry_manager.rs index 4142f1d5766..f7b630475ea 100644 --- a/core/lib/zksync_core/src/house_keeper/prover_job_retry_manager.rs +++ b/core/lib/zksync_core/src/house_keeper/prover_job_retry_manager.rs @@ -2,7 +2,6 @@ use std::time::Duration; use async_trait::async_trait; use zksync_dal::ConnectionPool; - use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] diff --git a/core/lib/zksync_core/src/house_keeper/prover_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/prover_queue_monitor.rs index 5b41ee74ac9..e0f598d5a59 100644 --- a/core/lib/zksync_core/src/house_keeper/prover_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/prover_queue_monitor.rs @@ -1,9 +1,7 @@ use async_trait::async_trait; use zksync_config::configs::ProverGroupConfig; use zksync_dal::ConnectionPool; -use zksync_prover_utils::circuit_name_to_numeric_index; - -use zksync_prover_utils::periodic_job::PeriodicJob; +use zksync_prover_utils::{circuit_name_to_numeric_index, periodic_job::PeriodicJob}; #[derive(Debug)] pub struct ProverStatsReporter { diff --git 
a/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs b/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs index 2fd00bcd6f6..1292ee3f44f 100644 --- a/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs +++ b/core/lib/zksync_core/src/house_keeper/waiting_to_queued_fri_witness_job_mover.rs @@ -1,6 +1,5 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; - use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] diff --git a/core/lib/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs b/core/lib/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs index c99603676ec..4521b4bfc47 100644 --- a/core/lib/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs +++ b/core/lib/zksync_core/src/house_keeper/waiting_to_queued_witness_job_mover.rs @@ -1,6 +1,5 @@ use async_trait::async_trait; use zksync_dal::ConnectionPool; - use zksync_prover_utils::periodic_job::PeriodicJob; #[derive(Debug)] diff --git a/core/lib/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs b/core/lib/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs index 40a8e2a6613..da816da3c66 100644 --- a/core/lib/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs +++ b/core/lib/zksync_core/src/house_keeper/witness_generator_queue_monitor.rs @@ -2,9 +2,8 @@ use std::collections::HashMap; use async_trait::async_trait; use zksync_dal::ConnectionPool; -use zksync_types::proofs::{AggregationRound, JobCountStatistics}; - use zksync_prover_utils::periodic_job::PeriodicJob; +use zksync_types::proofs::{AggregationRound, JobCountStatistics}; const WITNESS_GENERATOR_SERVICE_NAME: &str = "witness_generator"; diff --git a/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs b/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs index 9a8825190ee..dd9806f998c 100644 --- 
a/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/mod.rs @@ -1,23 +1,22 @@ //! This module determines the fees to pay in txs containing blocks submitted to the L1. -use tokio::sync::watch; - use std::{ collections::VecDeque, sync::{Arc, RwLock}, }; +use tokio::sync::watch; use zksync_config::GasAdjusterConfig; use zksync_eth_client::{types::Error, EthInterface}; +use self::metrics::METRICS; +use super::{L1GasPriceProvider, L1TxParamsProvider}; + pub mod bounded_gas_adjuster; mod metrics; #[cfg(test)] mod tests; -use self::metrics::METRICS; -use super::{L1GasPriceProvider, L1TxParamsProvider}; - /// This component keeps track of the median base_fee from the last `max_base_fee_samples` blocks. /// It is used to adjust the base_fee of transactions sent to L1. #[derive(Debug)] diff --git a/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs b/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs index a0c6dac365c..84ace37ecec 100644 --- a/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/lib/zksync_core/src/l1_gas_price/gas_adjuster/tests.rs @@ -1,9 +1,10 @@ -use super::{GasAdjuster, GasStatisticsInner}; -use std::collections::VecDeque; -use std::sync::Arc; +use std::{collections::VecDeque, sync::Arc}; + use zksync_config::GasAdjusterConfig; use zksync_eth_client::clients::mock::MockEthereum; +use super::{GasAdjuster, GasStatisticsInner}; + /// Check that we compute the median correctly #[test] fn median() { diff --git a/core/lib/zksync_core/src/l1_gas_price/main_node_fetcher.rs b/core/lib/zksync_core/src/l1_gas_price/main_node_fetcher.rs index 2244607a47e..a3f7b92f0e4 100644 --- a/core/lib/zksync_core/src/l1_gas_price/main_node_fetcher.rs +++ b/core/lib/zksync_core/src/l1_gas_price/main_node_fetcher.rs @@ -7,7 +7,6 @@ use std::{ }; use tokio::sync::watch::Receiver; - use zksync_web3_decl::{ jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, 
namespaces::ZksNamespaceClient, diff --git a/core/lib/zksync_core/src/l1_gas_price/mod.rs b/core/lib/zksync_core/src/l1_gas_price/mod.rs index 45e228d79c5..bab30c03584 100644 --- a/core/lib/zksync_core/src/l1_gas_price/mod.rs +++ b/core/lib/zksync_core/src/l1_gas_price/mod.rs @@ -1,7 +1,6 @@ //! This module determines the fees to pay in txs containing blocks submitted to the L1. -pub use gas_adjuster::bounded_gas_adjuster::BoundedGasAdjuster; -pub use gas_adjuster::GasAdjuster; +pub use gas_adjuster::{bounded_gas_adjuster::BoundedGasAdjuster, GasAdjuster}; pub use main_node_fetcher::MainNodeGasPriceFetcher; pub use singleton::GasAdjusterSingleton; diff --git a/core/lib/zksync_core/src/l1_gas_price/singleton.rs b/core/lib/zksync_core/src/l1_gas_price/singleton.rs index 4808dee548b..0c70ba2466c 100644 --- a/core/lib/zksync_core/src/l1_gas_price/singleton.rs +++ b/core/lib/zksync_core/src/l1_gas_price/singleton.rs @@ -1,11 +1,15 @@ -use crate::l1_gas_price::{BoundedGasAdjuster, GasAdjuster}; -use anyhow::Context as _; use std::sync::Arc; -use tokio::sync::{watch, OnceCell}; -use tokio::task::JoinHandle; + +use anyhow::Context as _; +use tokio::{ + sync::{watch, OnceCell}, + task::JoinHandle, +}; use zksync_config::GasAdjusterConfig; use zksync_eth_client::clients::http::QueryClient; +use crate::l1_gas_price::{BoundedGasAdjuster, GasAdjuster}; + /// Special struct for creating a singleton of `GasAdjuster`. /// This is needed only for running the server. 
#[derive(Debug)] diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 2389d576173..5406f0bbd89 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -7,7 +7,6 @@ use futures::channel::oneshot; use prometheus_exporter::PrometheusExporterConfig; use temp_config_store::TempConfigStore; use tokio::{sync::watch, task::JoinHandle}; - use zksync_circuit_breaker::{ l1_txs::FailedL1TransactionChecker, replication_lag::ReplicationLagChecker, CircuitBreaker, CircuitBreakerChecker, CircuitBreakerError, @@ -26,9 +25,10 @@ use zksync_config::{ }; use zksync_contracts::{governance_contract, BaseSystemContracts}; use zksync_dal::{healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; -use zksync_eth_client::clients::http::QueryClient; -use zksync_eth_client::EthInterface; -use zksync_eth_client::{clients::http::PKSigningClient, BoundEthInterface}; +use zksync_eth_client::{ + clients::http::{PKSigningClient, QueryClient}, + BoundEthInterface, EthInterface, +}; use zksync_health_check::{CheckHealth, HealthStatus, ReactiveHealthCheck}; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_prover_utils::periodic_job::PeriodicJob; @@ -42,6 +42,46 @@ use zksync_types::{ }; use zksync_verification_key_server::get_cached_commitments; +use crate::{ + api_server::{ + contract_verification, + execution_sandbox::{VmConcurrencyBarrier, VmConcurrencyLimiter}, + healthcheck::HealthCheckHandle, + tx_sender::{ApiContracts, TxSender, TxSenderBuilder, TxSenderConfig}, + web3, + web3::{state::InternalApiConfig, ApiServerHandles, Namespace}, + }, + basic_witness_input_producer::BasicWitnessInputProducer, + data_fetchers::run_data_fetchers, + eth_sender::{Aggregator, EthTxAggregator, EthTxManager}, + eth_watch::start_eth_watch, + house_keeper::{ + blocks_state_reporter::L1BatchMetricsReporter, + fri_proof_compressor_job_retry_manager::FriProofCompressorJobRetryManager, + 
fri_proof_compressor_queue_monitor::FriProofCompressorStatsReporter, + fri_prover_job_retry_manager::FriProverJobRetryManager, + fri_prover_queue_monitor::FriProverStatsReporter, + fri_scheduler_circuit_queuer::SchedulerCircuitQueuer, + fri_witness_generator_jobs_retry_manager::FriWitnessGeneratorJobRetryManager, + fri_witness_generator_queue_monitor::FriWitnessGeneratorStatsReporter, + gpu_prover_queue_monitor::GpuProverQueueMonitor, + prover_job_retry_manager::ProverJobRetryManager, prover_queue_monitor::ProverStatsReporter, + waiting_to_queued_fri_witness_job_mover::WaitingToQueuedFriWitnessJobMover, + waiting_to_queued_witness_job_mover::WaitingToQueuedWitnessJobMover, + witness_generator_queue_monitor::WitnessGeneratorStatsReporter, + }, + l1_gas_price::{GasAdjusterSingleton, L1GasPriceProvider}, + metadata_calculator::{ + MetadataCalculator, MetadataCalculatorConfig, MetadataCalculatorModeConfig, + }, + metrics::{InitStage, APP_METRICS}, + state_keeper::{create_state_keeper, MempoolFetcher, MempoolGuard, MiniblockSealer}, + witness_generator::{ + basic_circuits::BasicWitnessGenerator, leaf_aggregation::LeafAggregationWitnessGenerator, + node_aggregation::NodeAggregationWitnessGenerator, scheduler::SchedulerWitnessGenerator, + }, +}; + pub mod api_server; pub mod basic_witness_input_producer; pub mod block_reverter; @@ -63,47 +103,6 @@ pub mod sync_layer; pub mod temp_config_store; pub mod witness_generator; -use crate::api_server::healthcheck::HealthCheckHandle; -use crate::api_server::tx_sender::{TxSender, TxSenderBuilder, TxSenderConfig}; -use crate::api_server::web3::{state::InternalApiConfig, ApiServerHandles, Namespace}; -use crate::basic_witness_input_producer::BasicWitnessInputProducer; -use crate::eth_sender::{Aggregator, EthTxManager}; -use crate::house_keeper::fri_proof_compressor_job_retry_manager::FriProofCompressorJobRetryManager; -use crate::house_keeper::fri_proof_compressor_queue_monitor::FriProofCompressorStatsReporter; -use 
crate::house_keeper::fri_prover_job_retry_manager::FriProverJobRetryManager; -use crate::house_keeper::fri_prover_queue_monitor::FriProverStatsReporter; -use crate::house_keeper::fri_scheduler_circuit_queuer::SchedulerCircuitQueuer; -use crate::house_keeper::fri_witness_generator_jobs_retry_manager::FriWitnessGeneratorJobRetryManager; -use crate::house_keeper::fri_witness_generator_queue_monitor::FriWitnessGeneratorStatsReporter; -use crate::house_keeper::{ - blocks_state_reporter::L1BatchMetricsReporter, gpu_prover_queue_monitor::GpuProverQueueMonitor, - prover_job_retry_manager::ProverJobRetryManager, prover_queue_monitor::ProverStatsReporter, - waiting_to_queued_fri_witness_job_mover::WaitingToQueuedFriWitnessJobMover, - waiting_to_queued_witness_job_mover::WaitingToQueuedWitnessJobMover, - witness_generator_queue_monitor::WitnessGeneratorStatsReporter, -}; -use crate::l1_gas_price::{GasAdjusterSingleton, L1GasPriceProvider}; -use crate::metadata_calculator::{ - MetadataCalculator, MetadataCalculatorConfig, MetadataCalculatorModeConfig, -}; -use crate::state_keeper::{create_state_keeper, MempoolFetcher, MempoolGuard, MiniblockSealer}; -use crate::witness_generator::{ - basic_circuits::BasicWitnessGenerator, leaf_aggregation::LeafAggregationWitnessGenerator, - node_aggregation::NodeAggregationWitnessGenerator, scheduler::SchedulerWitnessGenerator, -}; -use crate::{ - api_server::{ - contract_verification, - execution_sandbox::{VmConcurrencyBarrier, VmConcurrencyLimiter}, - tx_sender::ApiContracts, - web3, - }, - data_fetchers::run_data_fetchers, - eth_sender::EthTxAggregator, - eth_watch::start_eth_watch, - metrics::{InitStage, APP_METRICS}, -}; - /// Inserts the initial information about zkSync tokens into the database. 
pub async fn genesis_init( postgres_config: &PostgresConfig, diff --git a/core/lib/zksync_core/src/metadata_calculator/helpers.rs b/core/lib/zksync_core/src/metadata_calculator/helpers.rs index 9ae936febfe..790beca3070 100644 --- a/core/lib/zksync_core/src/metadata_calculator/helpers.rs +++ b/core/lib/zksync_core/src/metadata_calculator/helpers.rs @@ -1,9 +1,5 @@ //! Various helpers for the metadata calculator. -use serde::{Deserialize, Serialize}; -#[cfg(test)] -use tokio::sync::mpsc; - use std::{ collections::BTreeMap, future::Future, @@ -11,6 +7,9 @@ use std::{ time::Duration, }; +use serde::{Deserialize, Serialize}; +#[cfg(test)] +use tokio::sync::mpsc; use zksync_config::configs::database::MerkleTreeMode; use zksync_dal::StorageProcessor; use zksync_health_check::{Health, HealthStatus}; @@ -327,7 +326,6 @@ impl L1BatchWithLogs { #[cfg(test)] mod tests { use tempfile::TempDir; - use zksync_dal::ConnectionPool; use zksync_types::{proofs::PrepareBasicCircuitsJob, L2ChainId, StorageKey, StorageLog}; diff --git a/core/lib/zksync_core/src/metadata_calculator/metrics.rs b/core/lib/zksync_core/src/metadata_calculator/metrics.rs index f8ef8f85b64..da4995bdbf9 100644 --- a/core/lib/zksync_core/src/metadata_calculator/metrics.rs +++ b/core/lib/zksync_core/src/metadata_calculator/metrics.rs @@ -1,11 +1,10 @@ //! Metrics for `MetadataCalculator`. +use std::time::{Duration, Instant}; + use vise::{ Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LatencyObserver, Metrics, }; - -use std::time::{Duration, Instant}; - use zksync_types::block::L1BatchHeader; use zksync_utils::time::seconds_since_epoch; diff --git a/core/lib/zksync_core/src/metadata_calculator/mod.rs b/core/lib/zksync_core/src/metadata_calculator/mod.rs index 7289347fec0..31b39a90952 100644 --- a/core/lib/zksync_core/src/metadata_calculator/mod.rs +++ b/core/lib/zksync_core/src/metadata_calculator/mod.rs @@ -1,10 +1,9 @@ //! 
This module applies updates to the ZkSyncTree, calculates metadata for sealed blocks, and //! stores them in the DB. -use tokio::sync::watch; - use std::time::Duration; +use tokio::sync::watch; use zksync_config::configs::{ chain::OperationsManagerConfig, database::{MerkleTreeConfig, MerkleTreeMode}, @@ -19,12 +18,6 @@ use zksync_types::{ H256, }; -mod helpers; -mod metrics; -#[cfg(test)] -pub(crate) mod tests; -mod updater; - pub(crate) use self::helpers::{AsyncTreeReader, L1BatchWithLogs, MerkleTreeInfo}; use self::{ helpers::Delayer, @@ -33,6 +26,12 @@ use self::{ }; use crate::gas_tracker::commit_gas_count_for_l1_batch; +mod helpers; +mod metrics; +#[cfg(test)] +pub(crate) mod tests; +mod updater; + /// Part of [`MetadataCalculator`] related to the operation mode of the Merkle tree. #[derive(Debug, Clone, Copy)] pub enum MetadataCalculatorModeConfig<'a> { diff --git a/core/lib/zksync_core/src/metadata_calculator/tests.rs b/core/lib/zksync_core/src/metadata_calculator/tests.rs index 5e86db6087b..85d179fe3b0 100644 --- a/core/lib/zksync_core/src/metadata_calculator/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/tests.rs @@ -1,10 +1,9 @@ +use std::{future::Future, ops, panic, path::Path, time::Duration}; + use assert_matches::assert_matches; use itertools::Itertools; use tempfile::TempDir; use tokio::sync::{mpsc, watch}; - -use std::{future::Future, ops, panic, path::Path, time::Duration}; - use zksync_config::configs::{chain::OperationsManagerConfig, database::MerkleTreeConfig}; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, StorageProcessor}; diff --git a/core/lib/zksync_core/src/metadata_calculator/updater.rs b/core/lib/zksync_core/src/metadata_calculator/updater.rs index ed38dae14ed..87127947fbd 100644 --- a/core/lib/zksync_core/src/metadata_calculator/updater.rs +++ b/core/lib/zksync_core/src/metadata_calculator/updater.rs @@ -1,11 +1,10 @@ //! Tree updater trait and its implementations. 
+use std::{ops, time::Instant}; + use anyhow::Context as _; use futures::{future, FutureExt}; use tokio::sync::watch; - -use std::{ops, time::Instant}; - use zksync_commitment_utils::{bootloader_initial_content_commitment, events_queue_commitment}; use zksync_config::configs::database::MerkleTreeMode; use zksync_dal::{ConnectionPool, StorageProcessor}; diff --git a/core/lib/zksync_core/src/metrics.rs b/core/lib/zksync_core/src/metrics.rs index 539cbbbb2fb..0206c264759 100644 --- a/core/lib/zksync_core/src/metrics.rs +++ b/core/lib/zksync_core/src/metrics.rs @@ -1,9 +1,8 @@ //! Application-wide metrics. -use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; - use std::{fmt, time::Duration}; +use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; use zksync_dal::transactions_dal::L2TxSubmissionResult; use zksync_types::{aggregated_operations::AggregatedActionType, proofs::AggregationRound}; diff --git a/core/lib/zksync_core/src/proof_data_handler/mod.rs b/core/lib/zksync_core/src/proof_data_handler/mod.rs index 898ac4652ba..f1227d8298c 100644 --- a/core/lib/zksync_core/src/proof_data_handler/mod.rs +++ b/core/lib/zksync_core/src/proof_data_handler/mod.rs @@ -1,8 +1,7 @@ -use crate::proof_data_handler::request_processor::RequestProcessor; -use anyhow::Context as _; -use axum::extract::Path; -use axum::{routing::post, Json, Router}; use std::net::SocketAddr; + +use anyhow::Context as _; +use axum::{extract::Path, routing::post, Json, Router}; use tokio::sync::watch; use zksync_config::{ configs::{proof_data_handler::ProtocolVersionLoadingMode, ProofDataHandlerConfig}, @@ -16,6 +15,8 @@ use zksync_types::{ H256, }; +use crate::proof_data_handler::request_processor::RequestProcessor; + mod request_processor; fn fri_l1_verifier_config(contracts_config: &ContractsConfig) -> L1VerifierConfig { diff --git a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs 
b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs index 866990b31c9..5a3302ee926 100644 --- a/core/lib/zksync_core/src/proof_data_handler/request_processor.rs +++ b/core/lib/zksync_core/src/proof_data_handler/request_processor.rs @@ -1,26 +1,27 @@ -use axum::extract::Path; -use axum::response::Response; -use axum::{http::StatusCode, response::IntoResponse, Json}; -use std::convert::TryFrom; -use std::sync::Arc; +use std::{convert::TryFrom, sync::Arc}; + +use axum::{ + extract::Path, + http::StatusCode, + response::{IntoResponse, Response}, + Json, +}; use zksync_config::configs::{ proof_data_handler::ProtocolVersionLoadingMode, ProofDataHandlerConfig, }; -use zksync_types::commitment::serialize_commitments; -use zksync_types::web3::signing::keccak256; -use zksync_utils::u256_to_h256; - use zksync_dal::{ConnectionPool, SqlxError}; use zksync_object_store::{ObjectStore, ObjectStoreError}; -use zksync_types::protocol_version::FriProtocolVersionId; use zksync_types::{ - protocol_version::L1VerifierConfig, + commitment::serialize_commitments, + protocol_version::{FriProtocolVersionId, L1VerifierConfig}, prover_server_api::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, SubmitProofRequest, SubmitProofResponse, }, + web3::signing::keccak256, L1BatchNumber, H256, }; +use zksync_utils::u256_to_h256; #[derive(Clone)] pub(crate) struct RequestProcessor { diff --git a/core/lib/zksync_core/src/reorg_detector/mod.rs b/core/lib/zksync_core/src/reorg_detector/mod.rs index 16137d40af6..cd399716c20 100644 --- a/core/lib/zksync_core/src/reorg_detector/mod.rs +++ b/core/lib/zksync_core/src/reorg_detector/mod.rs @@ -4,8 +4,10 @@ use tokio::sync::watch; use zksync_dal::ConnectionPool; use zksync_types::{L1BatchNumber, MiniblockNumber}; use zksync_web3_decl::{ - jsonrpsee::core::Error as RpcError, - jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, + jsonrpsee::{ + core::Error as RpcError, + http_client::{HttpClient, 
HttpClientBuilder}, + }, namespaces::{EthNamespaceClient, ZksNamespaceClient}, RpcResult, }; diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs index 389677b0439..2267792297f 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/mod.rs @@ -1,13 +1,6 @@ -use async_trait::async_trait; -use once_cell::sync::OnceCell; -use tokio::{ - sync::{mpsc, oneshot}, - task::JoinHandle, -}; - -use multivm::MultiVMTracer; use std::{fmt, sync::Arc}; +use async_trait::async_trait; use multivm::{ interface::{ ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, @@ -15,16 +8,18 @@ use multivm::{ }, tracers::CallTracer, vm_latest::HistoryEnabled, - VmInstance, + MultiVMTracer, VmInstance, +}; +use once_cell::sync::OnceCell; +use tokio::{ + sync::{mpsc, oneshot}, + task::JoinHandle, }; use zksync_dal::ConnectionPool; use zksync_state::{RocksdbStorage, StorageView, WriteStorage}; use zksync_types::{vm_trace::Call, witness_block_state::WitnessBlockState, Transaction, U256}; use zksync_utils::bytecode::CompressedBytecodeInfo; -#[cfg(test)] -mod tests; - use crate::{ gas_tracker::{gas_count_from_metrics, gas_count_from_tx_and_metrics}, metrics::{InteractionType, TxStage, APP_METRICS}, @@ -34,6 +29,9 @@ use crate::{ }, }; +#[cfg(test)] +mod tests; + /// Representation of a transaction executed in the virtual machine. 
#[derive(Debug, Clone)] pub(crate) enum TxExecutionResult { diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs index 05a8220bb83..362afe20437 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs @@ -1,15 +1,13 @@ use assert_matches::assert_matches; - use zksync_dal::ConnectionPool; +use zksync_test_account::Account; use zksync_types::PriorityOpId; -mod tester; - use self::tester::Tester; use super::TxExecutionResult; use crate::state_keeper::batch_executor::tests::tester::{AccountLoadNextExecutable, TestConfig}; -use zksync_test_account::Account; +mod tester; /// Ensures that the transaction was executed successfully. fn assert_executed(execution_result: &TxExecutionResult) { diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index cd72f3eeb07..6417c65a5f8 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -1,11 +1,11 @@ //! Testing harness for the batch executor. //! Contains helper functionality to initialize test context and perform tests without too much boilerplate. 
+use multivm::{ + interface::{L1BatchEnv, SystemEnv}, + vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, +}; use tempfile::TempDir; - -use multivm::interface::{L1BatchEnv, SystemEnv}; -use multivm::vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES; - use zksync_config::configs::chain::StateKeeperConfig; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; use zksync_dal::ConnectionPool; @@ -19,10 +19,12 @@ use zksync_types::{ }; use zksync_utils::u256_to_h256; -use crate::genesis::create_genesis_l1_batch; -use crate::state_keeper::{ - batch_executor::BatchExecutorHandle, - tests::{default_l1_batch_env, default_system_env, BASE_SYSTEM_CONTRACTS}, +use crate::{ + genesis::create_genesis_l1_batch, + state_keeper::{ + batch_executor::BatchExecutorHandle, + tests::{default_l1_batch_env, default_system_env, BASE_SYSTEM_CONTRACTS}, + }, }; const DEFAULT_GAS_PER_PUBDATA: u32 = 100; diff --git a/core/lib/zksync_core/src/state_keeper/extractors.rs b/core/lib/zksync_core/src/state_keeper/extractors.rs index e542b5b0959..e31020734f5 100644 --- a/core/lib/zksync_core/src/state_keeper/extractors.rs +++ b/core/lib/zksync_core/src/state_keeper/extractors.rs @@ -1,13 +1,12 @@ //! Pure functions that convert data as required by the state keeper. 
-use chrono::{DateTime, TimeZone, Utc}; - use std::{ convert::TryFrom, fmt, time::{Duration, Instant}, }; +use chrono::{DateTime, TimeZone, Utc}; use zksync_dal::StorageProcessor; use zksync_types::{L1BatchNumber, U256}; use zksync_utils::h256_to_u256; diff --git a/core/lib/zksync_core/src/state_keeper/io/common.rs b/core/lib/zksync_core/src/state_keeper/io/common.rs index c99508322ef..857c7618d11 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common.rs @@ -1,7 +1,9 @@ use std::time::Duration; -use multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; -use multivm::vm_latest::constants::BLOCK_GAS_LIMIT; +use multivm::{ + interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + vm_latest::constants::BLOCK_GAS_LIMIT, +}; use zksync_contracts::BaseSystemContracts; use zksync_dal::StorageProcessor; use zksync_types::{ diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index 1d3ad506df6..f5a4df1c333 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -1,5 +1,3 @@ -use async_trait::async_trait; - use std::{ cmp, collections::HashMap, @@ -7,9 +5,11 @@ use std::{ time::{Duration, Instant}, }; -use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; -use multivm::vm_latest::utils::fee::derive_base_fee_and_gas_per_pubdata; - +use async_trait::async_trait; +use multivm::{ + interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}, + vm_latest::utils::fee::derive_base_fee_and_gas_per_pubdata, +}; use zksync_config::configs::chain::StateKeeperConfig; use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; @@ -526,7 +526,6 @@ impl MempoolIO { #[cfg(test)] mod tests { use tokio::time::timeout_at; - use zksync_utils::time::seconds_since_epoch; use super::*; diff --git a/core/lib/zksync_core/src/state_keeper/io/mod.rs 
b/core/lib/zksync_core/src/state_keeper/io/mod.rs index 313c363418d..d1366858116 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mod.rs @@ -1,13 +1,11 @@ -use async_trait::async_trait; -use tokio::sync::{mpsc, oneshot}; - use std::{ fmt, time::{Duration, Instant}, }; +use async_trait::async_trait; use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; - +use tokio::sync::{mpsc, oneshot}; use zksync_dal::ConnectionPool; use zksync_types::{ block::MiniblockExecutionData, protocol_version::ProtocolUpgradeTx, @@ -15,10 +13,6 @@ use zksync_types::{ Transaction, }; -pub(crate) mod common; -pub(crate) mod mempool; -pub(crate) mod seal_logic; - pub(crate) use self::mempool::MempoolIO; use super::{ metrics::{MiniblockQueueStage, MINIBLOCK_METRICS}, @@ -26,6 +20,9 @@ use super::{ updates::{MiniblockSealCommand, UpdatesManager}, }; +pub(crate) mod common; +pub(crate) mod mempool; +pub(crate) mod seal_logic; #[cfg(test)] mod tests; diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs index 4501be62f78..e152709cff5 100644 --- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs @@ -1,26 +1,21 @@ //! This module is a source-of-truth on what is expected to be done when sealing a block. //! It contains the logic of the block sealing, which is used by both the mempool-based and external node IO. 
-use itertools::Itertools; use std::{ collections::HashMap, time::{Duration, Instant}, }; +use itertools::Itertools; use multivm::interface::{FinishedL1Batch, L1BatchEnv}; -use zksync_dal::blocks_dal::ConsensusBlockFields; -use zksync_dal::StorageProcessor; +use zksync_dal::{blocks_dal::ConsensusBlockFields, StorageProcessor}; use zksync_system_constants::ACCOUNT_CODE_STORAGE_ADDRESS; use zksync_types::{ - block::unpack_block_info, - l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_ADDRESS, -}; -use zksync_types::{ - block::{L1BatchHeader, MiniblockHeader}, + block::{unpack_block_info, L1BatchHeader, MiniblockHeader}, event::{extract_added_tokens, extract_long_l2_to_l1_messages}, l1::L1Tx, l2::L2Tx, + l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, protocol_version::ProtocolUpgradeTx, storage_writes_deduplicator::{ModifiedSlot, StorageWritesDeduplicator}, tx::{ @@ -30,7 +25,7 @@ use zksync_types::{ zkevm_test_harness::witness::sort_storage_access::sort_storage_access_queries, AccountTreeId, Address, ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, LogQuery, MiniblockNumber, StorageKey, StorageLog, StorageLogQuery, StorageValue, Transaction, VmEvent, - H256, + CURRENT_VIRTUAL_BLOCK_INFO_POSITION, H256, SYSTEM_CONTEXT_ADDRESS, }; // TODO (SMA-1206): use seconds instead of milliseconds. 
use zksync_utils::{h256_to_u256, time::millis_since_epoch, u256_to_h256}; diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 2b924554f27..ef8c5424854 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -1,7 +1,6 @@ -use futures::FutureExt; - use std::time::Duration; +use futures::FutureExt; use multivm::vm_latest::utils::fee::derive_base_fee_and_gas_per_pubdata; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::ConnectionPool; @@ -12,22 +11,19 @@ use zksync_types::{ }; use zksync_utils::time::seconds_since_epoch; -use crate::state_keeper::tests::{create_l1_batch_metadata, default_l1_batch_env}; - +use self::tester::Tester; use crate::state_keeper::{ io::{MiniblockParams, MiniblockSealer, StateKeeperIO}, mempool_actor::l2_tx_filter, tests::{ - create_execution_result, create_transaction, create_updates_manager, - default_vm_block_result, Query, + create_execution_result, create_l1_batch_metadata, create_transaction, + create_updates_manager, default_l1_batch_env, default_vm_block_result, Query, }, updates::{MiniblockSealCommand, MiniblockUpdates, UpdatesManager}, }; mod tester; -use self::tester::Tester; - /// Ensure that MempoolIO.filter is correctly initialized right after mempool initialization. #[tokio::test] async fn test_filter_initialization() { diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs index 875bf89e048..5528ae9f206 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs @@ -1,14 +1,13 @@ //! Testing harness for the IO. 
-use multivm::vm_latest::constants::BLOCK_GAS_LIMIT; use std::{sync::Arc, time::Duration}; -use zksync_object_store::ObjectStoreFactory; -use zksync_config::configs::chain::StateKeeperConfig; -use zksync_config::GasAdjusterConfig; +use multivm::vm_latest::constants::BLOCK_GAS_LIMIT; +use zksync_config::{configs::chain::StateKeeperConfig, GasAdjusterConfig}; use zksync_contracts::BaseSystemContracts; use zksync_dal::ConnectionPool; use zksync_eth_client::clients::mock::MockEthereum; +use zksync_object_store::ObjectStoreFactory; use zksync_types::{ block::{L1BatchHeader, MiniblockHeader}, protocol_version::L1VerifierConfig, diff --git a/core/lib/zksync_core/src/state_keeper/keeper.rs b/core/lib/zksync_core/src/state_keeper/keeper.rs index 761e186e7ae..3cc153120c2 100644 --- a/core/lib/zksync_core/src/state_keeper/keeper.rs +++ b/core/lib/zksync_core/src/state_keeper/keeper.rs @@ -1,10 +1,11 @@ -use anyhow::Context as _; -use tokio::sync::watch; - -use std::convert::Infallible; -use std::time::{Duration, Instant}; +use std::{ + convert::Infallible, + time::{Duration, Instant}, +}; +use anyhow::Context as _; use multivm::interface::{Halt, L1BatchEnv, SystemEnv}; +use tokio::sync::watch; use zksync_types::{ block::MiniblockExecutionData, l2::TransactionType, protocol_version::ProtocolUpgradeTx, storage_writes_deduplicator::StorageWritesDeduplicator, Transaction, diff --git a/core/lib/zksync_core/src/state_keeper/mempool_actor.rs b/core/lib/zksync_core/src/state_keeper/mempool_actor.rs index 2c369d35a0f..4797ed0006c 100644 --- a/core/lib/zksync_core/src/state_keeper/mempool_actor.rs +++ b/core/lib/zksync_core/src/state_keeper/mempool_actor.rs @@ -1,8 +1,7 @@ -use tokio::sync::watch; - use std::{sync::Arc, time::Duration}; use multivm::vm_latest::utils::fee::derive_base_fee_and_gas_per_pubdata; +use tokio::sync::watch; use zksync_config::configs::chain::MempoolConfig; use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; diff --git 
a/core/lib/zksync_core/src/state_keeper/metrics.rs b/core/lib/zksync_core/src/state_keeper/metrics.rs index 72b89c4a2b8..8daccb5a3aa 100644 --- a/core/lib/zksync_core/src/state_keeper/metrics.rs +++ b/core/lib/zksync_core/src/state_keeper/metrics.rs @@ -1,15 +1,14 @@ //! General-purpose state keeper metrics. -use vise::{ - Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LatencyObserver, - Metrics, -}; - use std::{ sync::{Mutex, Weak}, time::Duration, }; +use vise::{ + Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LatencyObserver, + Metrics, +}; use zksync_mempool::MempoolStore; use super::seal_criteria::SealResolution; diff --git a/core/lib/zksync_core/src/state_keeper/mod.rs b/core/lib/zksync_core/src/state_keeper/mod.rs index 5ec395267df..ee71a93bcf4 100644 --- a/core/lib/zksync_core/src/state_keeper/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/mod.rs @@ -1,15 +1,25 @@ -use tokio::sync::watch; -use zksync_object_store::ObjectStore; - use std::sync::Arc; +use tokio::sync::watch; use zksync_config::{ configs::chain::{MempoolConfig, NetworkConfig, StateKeeperConfig}, ContractsConfig, DBConfig, }; use zksync_dal::ConnectionPool; +use zksync_object_store::ObjectStore; use zksync_system_constants::MAX_TXS_IN_BLOCK; +use self::io::MempoolIO; +pub use self::{ + batch_executor::{L1BatchExecutorBuilder, MainBatchExecutorBuilder}, + io::{MiniblockSealer, MiniblockSealerHandle}, + keeper::ZkSyncStateKeeper, +}; +pub(crate) use self::{ + mempool_actor::MempoolFetcher, seal_criteria::ConditionalSealer, types::MempoolGuard, +}; +use crate::l1_gas_price::L1GasPriceProvider; + mod batch_executor; pub(crate) mod extractors; pub(crate) mod io; @@ -22,18 +32,6 @@ pub(crate) mod tests; pub(crate) mod types; pub(crate) mod updates; -pub use self::{ - batch_executor::{L1BatchExecutorBuilder, MainBatchExecutorBuilder}, - io::{MiniblockSealer, MiniblockSealerHandle}, - keeper::ZkSyncStateKeeper, -}; -pub(crate) use 
self::{ - mempool_actor::MempoolFetcher, seal_criteria::ConditionalSealer, types::MempoolGuard, -}; - -use self::io::MempoolIO; -use crate::l1_gas_price::L1GasPriceProvider; - #[allow(clippy::too_many_arguments)] pub(crate) async fn create_state_keeper( contracts_config: &ContractsConfig, diff --git a/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs b/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs index 1ec0c66e4d7..9f99554e58a 100644 --- a/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs +++ b/core/lib/zksync_core/src/state_keeper/seal_criteria/criteria/geometry_seal_criteria.rs @@ -1,5 +1,6 @@ -use multivm::vm_latest::constants::{ERGS_PER_CIRCUIT, MAX_CYCLES_FOR_TX}; use std::fmt; + +use multivm::vm_latest::constants::{ERGS_PER_CIRCUIT, MAX_CYCLES_FOR_TX}; use zksync_config::configs::chain::StateKeeperConfig; use zksync_types::{ circuit::{GEOMETRY_CONFIG, SCHEDULER_UPPER_BOUND}, diff --git a/core/lib/zksync_core/src/state_keeper/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/tests/mod.rs index c5841fd8b1b..06411ecaa55 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/mod.rs @@ -1,5 +1,3 @@ -use once_cell::sync::Lazy; - use std::{ sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, @@ -8,11 +6,14 @@ use std::{ time::Instant, }; -use multivm::interface::{ - CurrentExecutionState, ExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, Refunds, - SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmExecutionStatistics, +use multivm::{ + interface::{ + CurrentExecutionState, ExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, Refunds, + SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmExecutionStatistics, + }, + vm_latest::{constants::BLOCK_GAS_LIMIT, VmExecutionLogs}, }; -use multivm::vm_latest::{constants::BLOCK_GAS_LIMIT, VmExecutionLogs}; +use 
once_cell::sync::Lazy; use zksync_config::configs::chain::StateKeeperConfig; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; @@ -28,24 +29,26 @@ use zksync_types::{ StorageLogQuery, StorageLogQueryType, Timestamp, Transaction, H256, U256, }; -mod tester; - pub(crate) use self::tester::TestBatchExecutorBuilder; use self::tester::{ bootloader_tip_out_of_gas, pending_batch_data, random_tx, rejected_exec, successful_exec, successful_exec_with_metrics, TestScenario, }; -use crate::gas_tracker::l1_batch_base_cost; -use crate::state_keeper::{ - keeper::POLL_WAIT_DURATION, - seal_criteria::{ - criteria::{GasCriterion, SlotsCriterion}, - ConditionalSealer, +use crate::{ + gas_tracker::l1_batch_base_cost, + state_keeper::{ + keeper::POLL_WAIT_DURATION, + seal_criteria::{ + criteria::{GasCriterion, SlotsCriterion}, + ConditionalSealer, + }, + types::ExecutionMetricsForCriteria, + updates::UpdatesManager, }, - types::ExecutionMetricsForCriteria, - updates::UpdatesManager, }; +mod tester; + pub(super) static BASE_SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); diff --git a/core/lib/zksync_core/src/state_keeper/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/tests/tester.rs index 8d0d1fb047e..a2dc7f05c5e 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/tester.rs @@ -1,6 +1,3 @@ -use async_trait::async_trait; -use tokio::sync::{mpsc, watch}; - use std::{ collections::{HashMap, HashSet, VecDeque}, convert::TryInto, @@ -8,11 +5,15 @@ use std::{ time::{Duration, Instant}, }; -use multivm::interface::{ - ExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, - VmExecutionResultAndLogs, +use async_trait::async_trait; +use multivm::{ + interface::{ + ExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, + VmExecutionResultAndLogs, + }, + 
vm_latest::constants::BLOCK_GAS_LIMIT, }; -use multivm::vm_latest::constants::BLOCK_GAS_LIMIT; +use tokio::sync::{mpsc, watch}; use zksync_types::{ block::MiniblockExecutionData, protocol_version::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, Address, L1BatchNumber, L2ChainId, MiniblockNumber, diff --git a/core/lib/zksync_core/src/state_keeper/updates/l1_batch_updates.rs b/core/lib/zksync_core/src/state_keeper/updates/l1_batch_updates.rs index fdaa0b036f9..584a0c835e7 100644 --- a/core/lib/zksync_core/src/state_keeper/updates/l1_batch_updates.rs +++ b/core/lib/zksync_core/src/state_keeper/updates/l1_batch_updates.rs @@ -1,9 +1,12 @@ +use zksync_types::{ + block::BlockGasCount, + priority_op_onchain_data::PriorityOpOnchainData, + tx::{tx_execution_info::ExecutionMetrics, TransactionExecutionResult}, + ExecuteTransactionCommon, +}; + use super::miniblock_updates::MiniblockUpdates; use crate::gas_tracker::new_block_gas_count; -use zksync_types::block::BlockGasCount; -use zksync_types::priority_op_onchain_data::PriorityOpOnchainData; -use zksync_types::tx::tx_execution_info::ExecutionMetrics; -use zksync_types::{tx::TransactionExecutionResult, ExecuteTransactionCommon}; #[derive(Debug, Clone, PartialEq)] pub struct L1BatchUpdates { @@ -44,6 +47,7 @@ impl L1BatchUpdates { #[cfg(test)] mod tests { + use multivm::vm_latest::TransactionVmExt; use zksync_types::{ProtocolVersionId, H256}; use super::*; @@ -51,7 +55,6 @@ mod tests { gas_tracker::new_block_gas_count, state_keeper::tests::{create_execution_result, create_transaction}, }; - use multivm::vm_latest::TransactionVmExt; #[test] fn apply_miniblock_with_empty_tx() { diff --git a/core/lib/zksync_core/src/state_keeper/updates/miniblock_updates.rs b/core/lib/zksync_core/src/state_keeper/updates/miniblock_updates.rs index d0a4f035f51..0c8591a2898 100644 --- a/core/lib/zksync_core/src/state_keeper/updates/miniblock_updates.rs +++ b/core/lib/zksync_core/src/state_keeper/updates/miniblock_updates.rs @@ -1,18 
+1,21 @@ -use multivm::interface::{ExecutionResult, L2BlockEnv, VmExecutionResultAndLogs}; -use multivm::vm_latest::TransactionVmExt; use std::collections::HashMap; -use zksync_types::l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}; +use multivm::{ + interface::{ExecutionResult, L2BlockEnv, VmExecutionResultAndLogs}, + vm_latest::TransactionVmExt, +}; use zksync_types::{ block::{legacy_miniblock_hash, miniblock_hash, BlockGasCount}, event::extract_bytecodes_marked_as_known, - tx::tx_execution_info::TxExecutionStatus, - tx::{ExecutionMetrics, TransactionExecutionResult}, + l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, + tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, vm_trace::Call, MiniblockNumber, ProtocolVersionId, StorageLogQuery, Transaction, VmEvent, H256, }; -use zksync_utils::bytecode::{hash_bytecode, CompressedBytecodeInfo}; -use zksync_utils::concat_and_hash; +use zksync_utils::{ + bytecode::{hash_bytecode, CompressedBytecodeInfo}, + concat_and_hash, +}; #[derive(Debug, Clone, PartialEq)] pub struct MiniblockUpdates { @@ -168,9 +171,10 @@ impl MiniblockUpdates { #[cfg(test)] mod tests { + use multivm::vm_latest::TransactionVmExt; + use super::*; use crate::state_keeper::tests::{create_execution_result, create_transaction}; - use multivm::vm_latest::TransactionVmExt; #[test] fn apply_empty_l2_tx() { diff --git a/core/lib/zksync_core/src/state_keeper/updates/mod.rs b/core/lib/zksync_core/src/state_keeper/updates/mod.rs index 3f09f7be30b..c34164557b5 100644 --- a/core/lib/zksync_core/src/state_keeper/updates/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/updates/mod.rs @@ -1,22 +1,19 @@ use multivm::interface::{L1BatchEnv, VmExecutionResultAndLogs}; - use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::blocks_dal::ConsensusBlockFields; -use zksync_types::vm_trace::Call; use zksync_types::{ block::BlockGasCount, storage_writes_deduplicator::StorageWritesDeduplicator, - 
tx::tx_execution_info::ExecutionMetrics, Address, L1BatchNumber, MiniblockNumber, - ProtocolVersionId, Transaction, + tx::tx_execution_info::ExecutionMetrics, vm_trace::Call, Address, L1BatchNumber, + MiniblockNumber, ProtocolVersionId, Transaction, }; use zksync_utils::bytecode::CompressedBytecodeInfo; -pub mod l1_batch_updates; -pub mod miniblock_updates; - pub(crate) use self::{l1_batch_updates::L1BatchUpdates, miniblock_updates::MiniblockUpdates}; - use super::io::MiniblockParams; +pub mod l1_batch_updates; +pub mod miniblock_updates; + /// Most of the information needed to seal the l1 batch/mini-block is contained within the VM, /// things that are not captured there are accumulated externally. /// `MiniblockUpdates` keeps updates for the pending mini-block. diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs index 8e7ebe7a985..8924fa5c5db 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater.rs @@ -1,8 +1,7 @@ -use chrono::{DateTime, Utc}; -use tokio::sync::watch::Receiver; - use std::time::Duration; +use chrono::{DateTime, Utc}; +use tokio::sync::watch::Receiver; use zksync_dal::ConnectionPool; use zksync_types::{ aggregated_operations::AggregatedActionType, api::BlockDetails, L1BatchNumber, MiniblockNumber, diff --git a/core/lib/zksync_core/src/sync_layer/client.rs b/core/lib/zksync_core/src/sync_layer/client.rs index 5d4f61a4f2a..a13fba2d65c 100644 --- a/core/lib/zksync_core/src/sync_layer/client.rs +++ b/core/lib/zksync_core/src/sync_layer/client.rs @@ -1,10 +1,9 @@ //! Client abstractions for syncing between the external node and the main node. 
-use anyhow::Context as _; -use async_trait::async_trait; - use std::{collections::HashMap, convert::TryInto, fmt}; +use anyhow::Context as _; +use async_trait::async_trait; use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SystemContractCode}; use zksync_system_constants::ACCOUNT_CODE_STORAGE_ADDRESS; use zksync_types::{ diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index 4e870b95674..d751cdc8d01 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -1,8 +1,7 @@ -use async_trait::async_trait; -use futures::future; - use std::{collections::HashMap, convert::TryInto, iter::FromIterator, time::Duration}; +use async_trait::async_trait; +use futures::future; use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::ConnectionPool; diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index 3adbc8920bf..2989b6b70a3 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -1,8 +1,7 @@ -use anyhow::Context as _; -use tokio::sync::watch; - use std::time::Duration; +use anyhow::Context as _; +use tokio::sync::watch; use zksync_dal::{blocks_dal::ConsensusBlockFields, StorageProcessor}; use zksync_types::{ api::en::SyncBlock, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, diff --git a/core/lib/zksync_core/src/sync_layer/genesis.rs b/core/lib/zksync_core/src/sync_layer/genesis.rs index 4f7501fb0c3..77678a3b412 100644 --- a/core/lib/zksync_core/src/sync_layer/genesis.rs +++ b/core/lib/zksync_core/src/sync_layer/genesis.rs @@ -1,5 +1,4 @@ use anyhow::Context as _; - use zksync_dal::StorageProcessor; use zksync_types::{ block::DeployedContract, protocol_version::L1VerifierConfig, diff --git 
a/core/lib/zksync_core/src/sync_layer/gossip/buffered/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/buffered/mod.rs index 41ca50e1cf2..5f2308930a3 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/buffered/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/buffered/mod.rs @@ -1,9 +1,8 @@ //! Buffered [`BlockStore`] implementation. -use async_trait::async_trait; - use std::{collections::BTreeMap, ops, time::Instant}; +use async_trait::async_trait; #[cfg(test)] use zksync_concurrency::ctx::channel; use zksync_concurrency::{ @@ -13,14 +12,14 @@ use zksync_concurrency::{ use zksync_consensus_roles::validator::{BlockNumber, FinalBlock}; use zksync_consensus_storage::{BlockStore, StorageError, StorageResult, WriteBlockStore}; -#[cfg(test)] -mod tests; - use super::{ metrics::{BlockResponseKind, METRICS}, utils::MissingBlockNumbers, }; +#[cfg(test)] +mod tests; + /// [`BlockStore`] variation that upholds additional invariants as to how blocks are processed. /// /// The invariants are as follows: diff --git a/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs index 62c81bca7ca..c5fd860ab87 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs @@ -1,12 +1,11 @@ //! Tests for buffered storage. +use std::{iter, ops}; + use assert_matches::assert_matches; use async_trait::async_trait; use rand::{rngs::StdRng, seq::SliceRandom, Rng}; use test_casing::test_casing; - -use std::{iter, ops}; - use zksync_concurrency::{ ctx::{self, channel}, scope, diff --git a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs index 00c6c651452..616a4283c73 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs @@ -1,6 +1,6 @@ //! 
Conversion logic between server and consensus types. -use anyhow::Context as _; +use anyhow::Context as _; use zksync_consensus_roles::validator::{BlockHeader, BlockNumber, FinalBlock}; use zksync_dal::blocks_dal::ConsensusBlockFields; use zksync_types::{api::en, MiniblockNumber, ProtocolVersionId}; diff --git a/core/lib/zksync_core/src/sync_layer/gossip/metrics.rs b/core/lib/zksync_core/src/sync_layer/gossip/metrics.rs index f67c150b99c..73caf510269 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/metrics.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/metrics.rs @@ -1,9 +1,9 @@ //! Metrics for gossip-powered syncing. -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit}; - use std::time::Duration; +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit}; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "kind", rename_all = "snake_case")] pub(super) enum BlockResponseKind { diff --git a/core/lib/zksync_core/src/sync_layer/gossip/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/mod.rs index 2ec9ca5b60e..9d769ab65f3 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/mod.rs @@ -1,15 +1,17 @@ //! Consensus adapter for EN synchronization logic. 
-use anyhow::Context as _; -use tokio::sync::watch; - use std::sync::Arc; +use anyhow::Context as _; +use tokio::sync::watch; use zksync_concurrency::{ctx, scope}; use zksync_consensus_executor::{Executor, ExecutorConfig}; use zksync_consensus_roles::node; use zksync_dal::ConnectionPool; +use self::{buffered::Buffered, storage::PostgresBlockStorage}; +use super::{fetcher::FetcherCursor, sync_action::ActionQueueSender}; + mod buffered; mod conversions; mod metrics; @@ -18,9 +20,6 @@ mod storage; mod tests; mod utils; -use self::{buffered::Buffered, storage::PostgresBlockStorage}; -use super::{fetcher::FetcherCursor, sync_action::ActionQueueSender}; - /// Starts fetching L2 blocks using peer-to-peer gossip network. pub async fn run_gossip_fetcher( pool: ConnectionPool, diff --git a/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs index db36f71d35c..1e35d17daaf 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs @@ -1,10 +1,9 @@ //! Storage implementation based on DAL. 
-use anyhow::Context as _; -use async_trait::async_trait; - use std::ops; +use anyhow::Context as _; +use async_trait::async_trait; use zksync_concurrency::{ ctx, sync::{self, watch, Mutex}, @@ -12,13 +11,9 @@ use zksync_concurrency::{ }; use zksync_consensus_roles::validator::{BlockNumber, FinalBlock}; use zksync_consensus_storage::{BlockStore, StorageError, StorageResult}; -use zksync_dal::blocks_dal::ConsensusBlockFields; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{blocks_dal::ConsensusBlockFields, ConnectionPool, StorageProcessor}; use zksync_types::{api::en::SyncBlock, Address, MiniblockNumber}; -#[cfg(test)] -mod tests; - use super::{buffered::ContiguousBlockStore, conversions::sync_block_to_consensus_block}; use crate::{ consensus, @@ -28,6 +23,9 @@ use crate::{ }, }; +#[cfg(test)] +mod tests; + #[derive(Debug)] struct CursorWithCachedBlock { inner: FetcherCursor, diff --git a/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs index cfd14f78411..c7e53f6456e 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs @@ -1,7 +1,6 @@ //! Tests for Postgres storage implementation. use rand::{thread_rng, Rng}; - use zksync_concurrency::{scope, testonly::abort_on_panic}; use zksync_consensus_roles::validator; use zksync_types::L2ChainId; diff --git a/core/lib/zksync_core/src/sync_layer/gossip/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/tests.rs index ddb97484968..338fc9016f4 100644 --- a/core/lib/zksync_core/src/sync_layer/gossip/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/gossip/tests.rs @@ -1,16 +1,14 @@ //! Tests for consensus adapters for EN synchronization logic. 
-use assert_matches::assert_matches; -use test_casing::{test_casing, Product}; - use std::ops; +use assert_matches::assert_matches; +use test_casing::{test_casing, Product}; use zksync_concurrency::{ctx, scope, testonly::abort_on_panic, time}; use zksync_consensus_executor::testonly::FullValidatorConfig; use zksync_consensus_roles::validator::{self, FinalBlock}; use zksync_consensus_storage::{InMemoryStorage, WriteBlockStore}; -use zksync_dal::blocks_dal::ConsensusBlockFields; -use zksync_dal::{ConnectionPool, StorageProcessor}; +use zksync_dal::{blocks_dal::ConsensusBlockFields, ConnectionPool, StorageProcessor}; use zksync_types::{ api::en::SyncBlock, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, }; diff --git a/core/lib/zksync_core/src/sync_layer/metrics.rs b/core/lib/zksync_core/src/sync_layer/metrics.rs index c3082c51052..3a431294b25 100644 --- a/core/lib/zksync_core/src/sync_layer/metrics.rs +++ b/core/lib/zksync_core/src/sync_layer/metrics.rs @@ -1,9 +1,9 @@ //! Metrics for the synchronization layer of external node. -use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; - use std::time::Duration; +use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics}; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "stage", rename_all = "snake_case")] pub(super) enum FetchStage { diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 10582c7d9f9..3c76e05d93f 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -1,14 +1,13 @@ //! High-level sync layer tests. 
-use async_trait::async_trait; -use tokio::{sync::watch, task::JoinHandle}; - use std::{ collections::{HashMap, VecDeque}, iter, time::{Duration, Instant}, }; +use async_trait::async_trait; +use tokio::{sync::watch, task::JoinHandle}; use zksync_config::configs::chain::NetworkConfig; use zksync_contracts::{BaseSystemContractsHashes, SystemContractCode}; use zksync_dal::{ConnectionPool, StorageProcessor}; diff --git a/core/lib/zksync_core/src/witness_generator/basic_circuits.rs b/core/lib/zksync_core/src/witness_generator/basic_circuits.rs index c700d59120a..e4d8b01357d 100644 --- a/core/lib/zksync_core/src/witness_generator/basic_circuits.rs +++ b/core/lib/zksync_core/src/witness_generator/basic_circuits.rs @@ -1,7 +1,3 @@ -use async_trait::async_trait; -use rand::Rng; -use serde::{Deserialize, Serialize}; - use std::{ collections::{hash_map::DefaultHasher, HashMap, HashSet}, hash::{Hash, Hasher}, @@ -9,9 +5,12 @@ use std::{ time::Instant, }; +use async_trait::async_trait; use multivm::vm_latest::{ constants::MAX_CYCLES_FOR_TX, HistoryDisabled, SimpleMemory, StorageOracle as VmStorageOracle, }; +use rand::Rng; +use serde::{Deserialize, Serialize}; use zksync_config::configs::{ witness_generator::BasicWitnessGeneratorDataSource, WitnessGeneratorConfig, }; @@ -23,12 +22,14 @@ use zksync_system_constants::BOOTLOADER_ADDRESS; use zksync_types::{ circuit::GEOMETRY_CONFIG, proofs::{AggregationRound, BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob}, - zkevm_test_harness::toolset::GeometryConfig, zkevm_test_harness::{ abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, - witness::full_block_artifact::{BlockBasicCircuits, BlockBasicCircuitsPublicInputs}, - witness::oracle::VmWitnessOracle, + toolset::GeometryConfig, + witness::{ + full_block_artifact::{BlockBasicCircuits, BlockBasicCircuitsPublicInputs}, + oracle::VmWitnessOracle, + }, SchedulerCircuitInstanceWitness, }, Address, L1BatchNumber, ProtocolVersionId, H256, U256, 
USED_BOOTLOADER_MEMORY_BYTES, diff --git a/core/lib/zksync_core/src/witness_generator/leaf_aggregation.rs b/core/lib/zksync_core/src/witness_generator/leaf_aggregation.rs index 4c9201b65f6..94082c42158 100644 --- a/core/lib/zksync_core/src/witness_generator/leaf_aggregation.rs +++ b/core/lib/zksync_core/src/witness_generator/leaf_aggregation.rs @@ -1,7 +1,6 @@ -use async_trait::async_trait; - use std::{collections::HashMap, time::Instant}; +use async_trait::async_trait; use zksync_config::configs::WitnessGeneratorConfig; use zksync_dal::ConnectionPool; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; @@ -10,10 +9,12 @@ use zksync_types::{ circuit::LEAF_SPLITTING_FACTOR, proofs::{AggregationRound, PrepareLeafAggregationCircuitsJob, WitnessGeneratorJobMetadata}, zkevm_test_harness::{ - abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, - bellman::plonk::better_better_cs::setup::VerificationKey, - encodings::recursion_request::RecursionRequest, encodings::QueueSimulator, witness, - witness::oracle::VmWitnessOracle, LeafAggregationOutputDataWitness, + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::{bn256::Bn256, plonk::better_better_cs::setup::VerificationKey}, + encodings::{recursion_request::RecursionRequest, QueueSimulator}, + witness, + witness::oracle::VmWitnessOracle, + LeafAggregationOutputDataWitness, }, L1BatchNumber, ProtocolVersionId, }; diff --git a/core/lib/zksync_core/src/witness_generator/mod.rs b/core/lib/zksync_core/src/witness_generator/mod.rs index 18b23866056..2fa941f0bda 100644 --- a/core/lib/zksync_core/src/witness_generator/mod.rs +++ b/core/lib/zksync_core/src/witness_generator/mod.rs @@ -38,10 +38,9 @@ //! Note that the very first input table (`basic_circuit_witness_jobs` (TODO SMA-1362: will be renamed from `witness_inputs`)) //! 
is populated by the tree (as the input artifact for the `WitnessGeneratorJobType::BasicCircuits` is the merkle proofs) -use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; - use std::{fmt, time::Duration}; +use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; use zksync_types::proofs::AggregationRound; pub mod basic_circuits; diff --git a/core/lib/zksync_core/src/witness_generator/node_aggregation.rs b/core/lib/zksync_core/src/witness_generator/node_aggregation.rs index 6d884563c9d..8ca86be00a5 100644 --- a/core/lib/zksync_core/src/witness_generator/node_aggregation.rs +++ b/core/lib/zksync_core/src/witness_generator/node_aggregation.rs @@ -1,7 +1,6 @@ -use async_trait::async_trait; - use std::{collections::HashMap, env, time::Instant}; +use async_trait::async_trait; use zksync_config::configs::WitnessGeneratorConfig; use zksync_dal::ConnectionPool; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; @@ -13,8 +12,7 @@ use zksync_types::{ proofs::{AggregationRound, PrepareNodeAggregationCircuitJob, WitnessGeneratorJobMetadata}, zkevm_test_harness::{ abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, - bellman::bn256::Bn256, - bellman::plonk::better_better_cs::setup::VerificationKey, + bellman::{bn256::Bn256, plonk::better_better_cs::setup::VerificationKey}, ff::to_hex, witness::{ self, diff --git a/core/lib/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs b/core/lib/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs index 96705de7e91..73f714d7314 100644 --- a/core/lib/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs +++ b/core/lib/zksync_core/src/witness_generator/precalculated_merkle_paths_provider.rs @@ -1,9 +1,13 @@ use serde::{Deserialize, Serialize}; -use zksync_types::proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}; -use zksync_types::zkevm_test_harness::blake2::Blake2s256; -use 
zksync_types::zkevm_test_harness::witness::tree::BinaryHasher; -use zksync_types::zkevm_test_harness::witness::tree::{ - BinarySparseStorageTree, EnumeratedBinaryLeaf, LeafQuery, ZkSyncStorageLeaf, +use zksync_types::{ + proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}, + zkevm_test_harness::{ + blake2::Blake2s256, + witness::tree::{ + BinaryHasher, BinarySparseStorageTree, EnumeratedBinaryLeaf, LeafQuery, + ZkSyncStorageLeaf, + }, + }, }; #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] diff --git a/core/lib/zksync_core/src/witness_generator/scheduler.rs b/core/lib/zksync_core/src/witness_generator/scheduler.rs index ae8c2daff73..a0f1b6b6d7a 100644 --- a/core/lib/zksync_core/src/witness_generator/scheduler.rs +++ b/core/lib/zksync_core/src/witness_generator/scheduler.rs @@ -1,7 +1,6 @@ -use async_trait::async_trait; - use std::{collections::HashMap, slice, time::Instant}; +use async_trait::async_trait; use zksync_config::configs::WitnessGeneratorConfig; use zksync_dal::ConnectionPool; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; diff --git a/core/lib/zksync_core/src/witness_generator/storage_oracle.rs b/core/lib/zksync_core/src/witness_generator/storage_oracle.rs index 112b4eb5988..f0b3203686f 100644 --- a/core/lib/zksync_core/src/witness_generator/storage_oracle.rs +++ b/core/lib/zksync_core/src/witness_generator/storage_oracle.rs @@ -1,7 +1,7 @@ -use zksync_types::zkevm_test_harness::zk_evm::abstractions::{ - RefundType, RefundedAmounts, Storage, +use zksync_types::{ + zkevm_test_harness::zk_evm::abstractions::{RefundType, RefundedAmounts, Storage}, + LogQuery, Timestamp, }; -use zksync_types::{LogQuery, Timestamp}; #[derive(Debug)] pub(super) struct StorageOracle { diff --git a/core/lib/zksync_core/src/witness_generator/tests.rs b/core/lib/zksync_core/src/witness_generator/tests.rs index 38a77331fa3..fb7b285b119 100644 --- a/core/lib/zksync_core/src/witness_generator/tests.rs +++ 
b/core/lib/zksync_core/src/witness_generator/tests.rs @@ -1,7 +1,11 @@ -use crate::witness_generator::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider; use std::convert::TryInto; -use zksync_types::proofs::StorageLogMetadata; -use zksync_types::zkevm_test_harness::witness::tree::{BinarySparseStorageTree, ZkSyncStorageLeaf}; + +use zksync_types::{ + proofs::StorageLogMetadata, + zkevm_test_harness::witness::tree::{BinarySparseStorageTree, ZkSyncStorageLeaf}, +}; + +use crate::witness_generator::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider; #[test] fn test_filter_renumerate_all_first_writes() { diff --git a/core/lib/zksync_core/src/witness_generator/utils.rs b/core/lib/zksync_core/src/witness_generator/utils.rs index 2135eddb3cc..35f5fd431ce 100644 --- a/core/lib/zksync_core/src/witness_generator/utils.rs +++ b/core/lib/zksync_core/src/witness_generator/utils.rs @@ -1,8 +1,12 @@ use zksync_object_store::{CircuitKey, ObjectStore}; -use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; -use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; -use zksync_types::{proofs::AggregationRound, L1BatchNumber}; +use zksync_types::{ + proofs::AggregationRound, + zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, + witness::oracle::VmWitnessOracle, + }, + L1BatchNumber, +}; pub async fn save_prover_input_artifacts( block_number: L1BatchNumber, diff --git a/core/tests/cross_external_nodes_checker/src/checker.rs b/core/tests/cross_external_nodes_checker/src/checker.rs index 61421816c60..be1dbd6faf6 100644 --- a/core/tests/cross_external_nodes_checker/src/checker.rs +++ b/core/tests/cross_external_nodes_checker/src/checker.rs @@ -7,7 +7,6 @@ use std::{ use serde_json::Value; use tokio::{sync::watch::Receiver, time::sleep}; - use zksync_types::{ api::{BlockDetails, 
BlockNumber, L1BatchDetails}, web3::types::U64, @@ -15,15 +14,17 @@ use zksync_types::{ }; use zksync_utils::wait_for_tasks::wait_for_tasks; use zksync_web3_decl::{ - jsonrpsee::core::Error, - jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, + jsonrpsee::{ + core::Error, + http_client::{HttpClient, HttpClientBuilder}, + }, namespaces::{EnNamespaceClient, EthNamespaceClient, ZksNamespaceClient}, types::FilterBuilder, RpcResult, }; -use crate::config::{CheckerConfig, RpcMode}; use crate::{ + config::{CheckerConfig, RpcMode}, divergence::{Divergence, DivergenceDetails}, helpers::compare_json, }; diff --git a/core/tests/cross_external_nodes_checker/src/config.rs b/core/tests/cross_external_nodes_checker/src/config.rs index 6273b3405a0..636a4fd9ae5 100644 --- a/core/tests/cross_external_nodes_checker/src/config.rs +++ b/core/tests/cross_external_nodes_checker/src/config.rs @@ -116,9 +116,10 @@ fn default_subscription_duration() -> Option { #[cfg(test)] mod tests { - use super::*; use std::env; + use super::*; + #[test] fn success() { let config = r#" diff --git a/core/tests/cross_external_nodes_checker/src/divergence.rs b/core/tests/cross_external_nodes_checker/src/divergence.rs index 7f18f5fa605..18c910349f7 100644 --- a/core/tests/cross_external_nodes_checker/src/divergence.rs +++ b/core/tests/cross_external_nodes_checker/src/divergence.rs @@ -1,4 +1,5 @@ use std::fmt; + use zksync_types::{web3::types::U64, MiniblockNumber}; #[derive(Debug, Clone)] diff --git a/core/tests/cross_external_nodes_checker/src/helpers.rs b/core/tests/cross_external_nodes_checker/src/helpers.rs index 14843e55868..6247b5e8c8a 100644 --- a/core/tests/cross_external_nodes_checker/src/helpers.rs +++ b/core/tests/cross_external_nodes_checker/src/helpers.rs @@ -1,7 +1,7 @@ +use std::{collections::HashMap, future::Future, time::Duration}; + use futures::channel::oneshot; use serde_json::{Map, Value}; -use std::future::Future; -use std::{collections::HashMap, time::Duration}; use 
tokio::time::sleep; /// Sets up an interrupt handler and returns a future that resolves once an interrupt signal is received. @@ -132,9 +132,10 @@ impl ExponentialBackoff { #[cfg(test)] mod tests { - use super::*; use serde_json::json; + use super::*; + #[test] fn test_same_json() { let json1 = json!({ diff --git a/core/tests/cross_external_nodes_checker/src/main.rs b/core/tests/cross_external_nodes_checker/src/main.rs index 45192fe20fa..7199c1cbd32 100644 --- a/core/tests/cross_external_nodes_checker/src/main.rs +++ b/core/tests/cross_external_nodes_checker/src/main.rs @@ -1,4 +1,8 @@ -extern crate core; +use tokio::sync::watch; +use zksync_utils::wait_for_tasks::wait_for_tasks; + +use self::{checker::Checker, pubsub_checker::PubSubChecker}; +use crate::{config::CheckerConfig, helpers::setup_sigint_handler}; mod checker; mod config; @@ -6,13 +10,6 @@ mod divergence; mod helpers; mod pubsub_checker; -use crate::config::CheckerConfig; -use crate::helpers::setup_sigint_handler; -use checker::Checker; -use pubsub_checker::PubSubChecker; -use tokio::sync::watch; -use zksync_utils::wait_for_tasks::wait_for_tasks; - #[tokio::main] async fn main() -> anyhow::Result<()> { #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach. 
diff --git a/core/tests/cross_external_nodes_checker/src/pubsub_checker.rs b/core/tests/cross_external_nodes_checker/src/pubsub_checker.rs index 78860210297..8a3bc765ace 100644 --- a/core/tests/cross_external_nodes_checker/src/pubsub_checker.rs +++ b/core/tests/cross_external_nodes_checker/src/pubsub_checker.rs @@ -1,14 +1,10 @@ -use crate::{ - config::CheckerConfig, - divergence::{Divergence, DivergenceDetails}, - helpers::{compare_json, ExponentialBackoff}, -}; -use anyhow::Context as _; use std::{ collections::HashMap, sync::Arc, time::{Duration, Instant}, }; + +use anyhow::Context as _; use tokio::{ select, spawn, sync::{watch::Receiver, Mutex as TokioMutex}, @@ -28,6 +24,12 @@ use zksync_web3_decl::{ types::{BlockHeader, PubSubResult}, }; +use crate::{ + config::CheckerConfig, + divergence::{Divergence, DivergenceDetails}, + helpers::{compare_json, ExponentialBackoff}, +}; + const MAX_RETRIES: u32 = 6; const GRACE_PERIOD: Duration = Duration::from_secs(60); const SUBSCRIPTION_TIMEOUT: Duration = Duration::from_secs(120); diff --git a/core/tests/loadnext/src/account/api_request_executor.rs b/core/tests/loadnext/src/account/api_request_executor.rs index e1e09004d4e..18d25a1da9c 100644 --- a/core/tests/loadnext/src/account/api_request_executor.rs +++ b/core/tests/loadnext/src/account/api_request_executor.rs @@ -2,7 +2,6 @@ use std::time::Instant; use rand::seq::IteratorRandom; use regex::Regex; - use zksync::{ error::{ClientError, RpcError}, types::FilterBuilder, diff --git a/core/tests/loadnext/src/account/mod.rs b/core/tests/loadnext/src/account/mod.rs index 14aa23b5031..42afd28d87e 100644 --- a/core/tests/loadnext/src/account/mod.rs +++ b/core/tests/loadnext/src/account/mod.rs @@ -1,18 +1,16 @@ -use futures::{channel::mpsc, SinkExt}; use std::{ collections::VecDeque, sync::Arc, time::{Duration, Instant}, }; -use tokio::sync::RwLock; +use futures::{channel::mpsc, SinkExt}; +use tokio::sync::RwLock; use zksync::{error::ClientError, 
operations::SyncTransactionHandle, HttpClient}; +use zksync_contracts::test_contracts::LoadnextContractExecutionParams; use zksync_types::{api::TransactionReceipt, Address, Nonce, H256, U256, U64}; use zksync_web3_decl::jsonrpsee::core::Error as CoreError; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; - -use crate::utils::format_gwei; use crate::{ account::tx_command_executor::SubmitResult, account_pool::{AddressPool, TestWallet}, @@ -20,6 +18,7 @@ use crate::{ config::{LoadtestConfig, RequestLimiters}, constants::{MAX_L1_TRANSACTIONS, POLLING_INTERVAL}, report::{Report, ReportBuilder, ReportLabel}, + utils::format_gwei, }; mod api_request_executor; diff --git a/core/tests/loadnext/src/account/pubsub_executor.rs b/core/tests/loadnext/src/account/pubsub_executor.rs index 2dec5dbd8c6..d3c9d7144f1 100644 --- a/core/tests/loadnext/src/account/pubsub_executor.rs +++ b/core/tests/loadnext/src/account/pubsub_executor.rs @@ -1,9 +1,7 @@ -use futures::{stream, TryStreamExt}; - use std::time::{Duration, Instant}; -use zksync::error::ClientError; -use zksync::types::PubSubFilterBuilder; +use futures::{stream, TryStreamExt}; +use zksync::{error::ClientError, types::PubSubFilterBuilder}; use zksync_web3_decl::{ jsonrpsee::{ core::client::{Subscription, SubscriptionClientT}, diff --git a/core/tests/loadnext/src/account/tx_command_executor.rs b/core/tests/loadnext/src/account/tx_command_executor.rs index 9fb8631fdc1..f1ace035547 100644 --- a/core/tests/loadnext/src/account/tx_command_executor.rs +++ b/core/tests/loadnext/src/account/tx_command_executor.rs @@ -1,12 +1,13 @@ use std::time::Instant; -use zksync::web3::ethabi; -use zksync::EthNamespaceClient; + use zksync::{ error::ClientError, ethereum::PriorityOpHolder, utils::{ get_approval_based_paymaster_input, get_approval_based_paymaster_input_for_estimation, }, + web3::ethabi, + EthNamespaceClient, }; use zksync_eth_client::EthInterface; use zksync_system_constants::MAX_L1_TRANSACTION_GAS_LIMIT; @@ 
-16,14 +17,13 @@ use zksync_types::{ Address, H256, U256, }; -use crate::account::ExecutionType; -use crate::utils::format_gwei; use crate::{ - account::AccountLifespan, + account::{AccountLifespan, ExecutionType}, command::{IncorrectnessModifier, TxCommand, TxType}, constants::{ETH_CONFIRMATION_TIMEOUT, ETH_POLLING_INTERVAL}, corrupted_tx::Corrupted, report::ReportLabel, + utils::format_gwei, }; #[derive(Debug)] diff --git a/core/tests/loadnext/src/account_pool.rs b/core/tests/loadnext/src/account_pool.rs index e4ded62dcf1..730a6d07b48 100644 --- a/core/tests/loadnext/src/account_pool.rs +++ b/core/tests/loadnext/src/account_pool.rs @@ -3,7 +3,6 @@ use std::{collections::VecDeque, convert::TryFrom, str::FromStr, sync::Arc, time use once_cell::sync::OnceCell; use rand::Rng; use tokio::time::timeout; - use zksync::{signer::Signer, HttpClient, HttpClientBuilder, Wallet, ZksNamespaceClient}; use zksync_eth_signer::PrivateKeySigner; use zksync_types::{tx::primitives::PackedEthSignature, Address, L2ChainId, H256}; diff --git a/core/tests/loadnext/src/command/api.rs b/core/tests/loadnext/src/command/api.rs index 1e520d7c195..76ed5db5747 100644 --- a/core/tests/loadnext/src/command/api.rs +++ b/core/tests/loadnext/src/command/api.rs @@ -1,6 +1,5 @@ use num::Integer; use rand::RngCore; - use zksync::EthNamespaceClient; use zksync_types::api; diff --git a/core/tests/loadnext/src/command/tx_command.rs b/core/tests/loadnext/src/command/tx_command.rs index 945a7ca16bb..84e07d1f0d2 100644 --- a/core/tests/loadnext/src/command/tx_command.rs +++ b/core/tests/loadnext/src/command/tx_command.rs @@ -1,7 +1,6 @@ use once_cell::sync::OnceCell; use rand::Rng; use static_assertions::const_assert; - use zksync_types::{Address, U256}; use crate::{ diff --git a/core/tests/loadnext/src/config.rs b/core/tests/loadnext/src/config.rs index d62f4cdb63e..b31cb5d3d8a 100644 --- a/core/tests/loadnext/src/config.rs +++ b/core/tests/loadnext/src/config.rs @@ -1,12 +1,9 @@ +use std::{path::PathBuf, 
time::Duration}; + use serde::Deserialize; use tokio::sync::Semaphore; - -use std::path::PathBuf; -use std::time::Duration; - use zksync_contracts::test_contracts::LoadnextContractExecutionParams; -use zksync_types::network::Network; -use zksync_types::{Address, L2ChainId, H160}; +use zksync_types::{network::Network, Address, L2ChainId, H160}; use crate::fs_utils::read_tokens; diff --git a/core/tests/loadnext/src/corrupted_tx.rs b/core/tests/loadnext/src/corrupted_tx.rs index c3ada60472e..c51b0c88d02 100644 --- a/core/tests/loadnext/src/corrupted_tx.rs +++ b/core/tests/loadnext/src/corrupted_tx.rs @@ -1,13 +1,13 @@ use async_trait::async_trait; - use zksync::signer::Signer; -use zksync_eth_signer::{error::SignerError, EthereumSigner}; -use zksync_types::{Address, EIP712TypedStructure, Eip712Domain, PackedEthSignature, H256}; +use zksync_eth_signer::{ + error::SignerError, raw_ethereum_tx::TransactionParameters, EthereumSigner, +}; +use zksync_types::{ + fee::Fee, l2::L2Tx, Address, EIP712TypedStructure, Eip712Domain, PackedEthSignature, H256, +}; use crate::command::IncorrectnessModifier; -use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; /// Trait that exists solely to extend the signed zkSync transaction interface, providing the ability /// to modify transaction in a way that will make it invalid. 
@@ -94,14 +94,14 @@ impl EthereumSigner for CorruptedSigner { #[cfg(test)] mod tests { - use super::*; use zksync_eth_signer::PrivateKeySigner; - use zksync_types::fee::Fee; - use zksync_types::L2ChainId; use zksync_types::{ - tokens::ETHEREUM_ADDRESS, tx::primitives::PackedEthSignature, Address, Nonce, H256, + fee::Fee, tokens::ETHEREUM_ADDRESS, tx::primitives::PackedEthSignature, Address, L2ChainId, + Nonce, H256, }; + use super::*; + const AMOUNT: u64 = 100; const FEE: u64 = 100; const NONCE: Nonce = Nonce(1); diff --git a/core/tests/loadnext/src/executor.rs b/core/tests/loadnext/src/executor.rs index 08d1ce47d6a..5c64f1b61be 100644 --- a/core/tests/loadnext/src/executor.rs +++ b/core/tests/loadnext/src/executor.rs @@ -1,14 +1,15 @@ -use anyhow::anyhow; -use futures::{channel::mpsc, future, SinkExt}; - use std::sync::Arc; -use zksync::ethereum::{PriorityOpHolder, DEFAULT_PRIORITY_FEE}; -use zksync::utils::{ - get_approval_based_paymaster_input, get_approval_based_paymaster_input_for_estimation, +use anyhow::anyhow; +use futures::{channel::mpsc, future, SinkExt}; +use zksync::{ + ethereum::{PriorityOpHolder, DEFAULT_PRIORITY_FEE}, + utils::{ + get_approval_based_paymaster_input, get_approval_based_paymaster_input_for_estimation, + }, + web3::{contract::Options, types::TransactionReceipt}, + EthNamespaceClient, EthereumProvider, ZksNamespaceClient, }; -use zksync::web3::{contract::Options, types::TransactionReceipt}; -use zksync::{EthNamespaceClient, EthereumProvider, ZksNamespaceClient}; use zksync_eth_client::{BoundEthInterface, EthInterface}; use zksync_eth_signer::PrivateKeySigner; use zksync_system_constants::MAX_L1_TRANSACTION_GAS_LIMIT; @@ -17,14 +18,14 @@ use zksync_types::{ REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, U64, }; -use crate::report::ReportBuilder; -use crate::utils::format_eth; use crate::{ account::AccountLifespan, account_pool::AccountPool, config::{ExecutionConfig, LoadtestConfig, RequestLimiters}, constants::*, + report::ReportBuilder, 
report_collector::{LoadtestResult, ReportCollector}, + utils::format_eth, }; /// Executor is the entity capable of running the loadtest flow. diff --git a/core/tests/loadnext/src/fs_utils.rs b/core/tests/loadnext/src/fs_utils.rs index d5b92b3c7a9..9fee9916f91 100644 --- a/core/tests/loadnext/src/fs_utils.rs +++ b/core/tests/loadnext/src/fs_utils.rs @@ -1,14 +1,10 @@ //! Utilities used for reading tokens, contracts bytecode and ABI from the //! filesystem. -use std::fs::File; -use std::io::BufReader; -use std::path::Path; +use std::{fs::File, io::BufReader, path::Path}; use serde::Deserialize; - -use zksync_types::network::Network; -use zksync_types::{ethabi::Contract, Address}; +use zksync_types::{ethabi::Contract, network::Network, Address}; /// A token stored in `etc/tokens/{network}.json` files. #[derive(Debug, Deserialize)] @@ -93,9 +89,10 @@ pub fn loadnext_contract(path: &Path) -> anyhow::Result { #[cfg(test)] mod tests { - use super::*; use std::path::PathBuf; + use super::*; + #[test] fn check_read_test_contract() { let test_contracts_path = { diff --git a/core/tests/loadnext/src/main.rs b/core/tests/loadnext/src/main.rs index 5d35c4e7f79..595532706c7 100644 --- a/core/tests/loadnext/src/main.rs +++ b/core/tests/loadnext/src/main.rs @@ -4,19 +4,17 @@ //! Without required variables provided, test is launched in the localhost/development mode with some hard-coded //! values to check the local zkSync deployment. 
-use tokio::sync::watch; - use std::time::Duration; -use prometheus_exporter::PrometheusExporterConfig; -use zksync_config::configs::api::PrometheusConfig; - use loadnext::{ command::TxType, config::{ExecutionConfig, LoadtestConfig}, executor::Executor, report_collector::LoadtestResult, }; +use prometheus_exporter::PrometheusExporterConfig; +use tokio::sync::watch; +use zksync_config::configs::api::PrometheusConfig; #[tokio::main] async fn main() -> anyhow::Result<()> { diff --git a/core/tests/loadnext/src/report.rs b/core/tests/loadnext/src/report.rs index 0ea86a49de0..e6c6bfdb551 100644 --- a/core/tests/loadnext/src/report.rs +++ b/core/tests/loadnext/src/report.rs @@ -2,8 +2,8 @@ use std::time::Duration; use zksync_types::Address; -use crate::account::ExecutionType; use crate::{ + account::ExecutionType, all::All, command::{ApiRequest, ApiRequestType, SubscriptionType, TxCommand, TxType}, }; diff --git a/core/tests/loadnext/src/report_collector/mod.rs b/core/tests/loadnext/src/report_collector/mod.rs index a7798e0bea7..6a7a5de39ba 100644 --- a/core/tests/loadnext/src/report_collector/mod.rs +++ b/core/tests/loadnext/src/report_collector/mod.rs @@ -1,8 +1,8 @@ +use std::time::{Duration, Instant}; + use futures::{channel::mpsc::Receiver, StreamExt}; use operation_results_collector::OperationResultsCollector; -use std::time::{Duration, Instant}; - use crate::{ report::{ActionType, Report, ReportLabel}, report_collector::metrics_collector::MetricsCollector, diff --git a/core/tests/loadnext/src/report_collector/operation_results_collector.rs b/core/tests/loadnext/src/report_collector/operation_results_collector.rs index 63f2bb7dbf9..ab460af839b 100644 --- a/core/tests/loadnext/src/report_collector/operation_results_collector.rs +++ b/core/tests/loadnext/src/report_collector/operation_results_collector.rs @@ -1,7 +1,7 @@ -use crate::report::{ActionType, ReportLabel}; - use std::{fmt, time::Duration}; +use crate::report::{ActionType, ReportLabel}; + /// Collector that 
analyzes the outcomes of the performed operations. /// Currently it's solely capable of deciding whether test was failed or not. /// API requests are counted separately. diff --git a/core/tests/loadnext/src/rng.rs b/core/tests/loadnext/src/rng.rs index 4d5ab84c714..3612a7c2a84 100644 --- a/core/tests/loadnext/src/rng.rs +++ b/core/tests/loadnext/src/rng.rs @@ -1,7 +1,6 @@ use std::convert::TryInto; use rand::{rngs::SmallRng, seq::SliceRandom, thread_rng, RngCore, SeedableRng}; - use zksync::web3::signing::keccak256; use zksync_types::H256; diff --git a/core/tests/loadnext/src/utils.rs b/core/tests/loadnext/src/utils.rs index 3f528e97e34..95f61c8cee8 100644 --- a/core/tests/loadnext/src/utils.rs +++ b/core/tests/loadnext/src/utils.rs @@ -1,4 +1,5 @@ use std::ops::Div; + use zksync_types::U256; pub fn format_eth(value: U256) -> String { diff --git a/core/tests/vm-benchmark/benches/diy_benchmark.rs b/core/tests/vm-benchmark/benches/diy_benchmark.rs index 8f5b6cd685b..b99837d8eab 100644 --- a/core/tests/vm-benchmark/benches/diy_benchmark.rs +++ b/core/tests/vm-benchmark/benches/diy_benchmark.rs @@ -1,5 +1,6 @@ -use criterion::black_box; use std::time::{Duration, Instant}; + +use criterion::black_box; use vm_benchmark_harness::{cut_to_allowed_bytecode_size, get_deploy_tx, BenchmarkingVm}; fn main() { diff --git a/core/tests/vm-benchmark/harness/src/lib.rs b/core/tests/vm-benchmark/harness/src/lib.rs index b7da44aed92..00da5bcca9f 100644 --- a/core/tests/vm-benchmark/harness/src/lib.rs +++ b/core/tests/vm-benchmark/harness/src/lib.rs @@ -1,9 +1,12 @@ -use multivm::interface::{ - L2BlockEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, +use std::{cell::RefCell, rc::Rc}; + +use multivm::{ + interface::{ + L2BlockEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, + }, + vm_latest::{constants::BLOCK_GAS_LIMIT, HistoryEnabled, Vm}, }; -use multivm::vm_latest::{constants::BLOCK_GAS_LIMIT, HistoryEnabled, Vm}; use 
once_cell::sync::Lazy; -use std::{cell::RefCell, rc::Rc}; use zksync_contracts::{deployer_contract, BaseSystemContracts}; use zksync_state::{InMemoryStorage, StorageView}; use zksync_system_constants::ethereum::MAX_GAS_PER_PUBDATA_BYTE; @@ -133,9 +136,10 @@ pub fn get_deploy_tx(code: &[u8]) -> Transaction { #[cfg(test)] mod tests { - use crate::*; use zksync_contracts::read_bytecode; + use crate::*; + #[test] fn can_deploy_contract() { let test_contract = read_bytecode( diff --git a/core/tests/vm-benchmark/src/compare_iai_results.rs b/core/tests/vm-benchmark/src/compare_iai_results.rs index d67d7238683..d903d727117 100644 --- a/core/tests/vm-benchmark/src/compare_iai_results.rs +++ b/core/tests/vm-benchmark/src/compare_iai_results.rs @@ -1,6 +1,5 @@ -use std::collections::HashMap; -use std::fs::File; -use std::io::BufReader; +use std::{collections::HashMap, fs::File, io::BufReader}; + use vm_benchmark::parse_iai::parse_iai; fn main() { diff --git a/core/tests/vm-benchmark/src/find_slowest.rs b/core/tests/vm-benchmark/src/find_slowest.rs index 947f944541c..2bc2a894d2d 100644 --- a/core/tests/vm-benchmark/src/find_slowest.rs +++ b/core/tests/vm-benchmark/src/find_slowest.rs @@ -2,6 +2,7 @@ use std::{ io::Write, time::{Duration, Instant}, }; + use vm_benchmark_harness::*; fn main() { diff --git a/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs b/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs index dc3c8f6d98f..396d59948a8 100644 --- a/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs +++ b/core/tests/vm-benchmark/src/iai_results_to_prometheus.rs @@ -1,4 +1,5 @@ use std::io::BufReader; + use vm_benchmark::parse_iai::IaiResult; fn main() { diff --git a/core/tests/vm-benchmark/src/with_prometheus.rs b/core/tests/vm-benchmark/src/with_prometheus.rs index e9d4f2e57ed..1fcf5652c6d 100644 --- a/core/tests/vm-benchmark/src/with_prometheus.rs +++ b/core/tests/vm-benchmark/src/with_prometheus.rs @@ -1,6 +1,7 @@ -use 
metrics_exporter_prometheus::PrometheusBuilder; use std::time::Duration; +use metrics_exporter_prometheus::PrometheusBuilder; + pub fn with_prometheus(f: F) { println!("Pushing results to Prometheus"); diff --git a/infrastructure/zk/src/fmt.ts b/infrastructure/zk/src/fmt.ts index fa8b5e79691..896ef28dbbd 100644 --- a/infrastructure/zk/src/fmt.ts +++ b/infrastructure/zk/src/fmt.ts @@ -50,7 +50,12 @@ async function prettierSystemContracts(check: boolean = false) { export async function rustfmt(check: boolean = false) { process.chdir(process.env.ZKSYNC_HOME as string); - const command = check ? 'cargo fmt -- --check' : 'cargo fmt'; + + // We rely on a supposedly undocumented bug/feature of `rustfmt` that allows us to use unstable features on stable Rust. + // Please note that this only works with CLI flags, and if you happened to visit this place after things suddenly stopped working, + // it is certainly possible that the feature was deemed a bug and was fixed. Then welp. + const config = '--config imports_granularity=Crate --config group_imports=StdExternalCrate'; + const command = check ? 
`cargo fmt -- --check ${config}` : `cargo fmt -- ${config}`; await utils.spawn(command); process.chdir('./prover'); await utils.spawn(command); diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 95ff42d7052..d6ea72d2c8b 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -971,12 +971,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5241cd7938b1b415942e943ea96f615953d500b50347b505b0b507080bad5a6f" -[[package]] -name = "const-oid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" - [[package]] name = "const-oid" version = "0.9.5" @@ -1245,16 +1239,6 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-bigint" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "crypto-bigint" version = "0.4.9" @@ -1454,24 +1438,13 @@ dependencies = [ "uuid", ] -[[package]] -name = "der" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" -dependencies = [ - "const-oid 0.7.1", - "crypto-bigint 0.3.2", - "pem-rfc7468", -] - [[package]] name = "der" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" dependencies = [ - "const-oid 0.9.5", + "const-oid", "zeroize", ] @@ -1481,7 +1454,8 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" dependencies = [ - "const-oid 
0.9.5", + "const-oid", + "pem-rfc7468", "zeroize", ] @@ -2186,9 +2160,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "google-cloud-auth" -version = "0.11.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644f40175857d0b8d7b6cad6cd9594284da5041387fa2ddff30ab6d8faef65eb" +checksum = "af1087f1fbd2dd3f58c17c7574ddd99cd61cbbbc2c4dc81114b8687209b196cb" dependencies = [ "async-trait", "base64 0.21.5", @@ -2208,9 +2182,9 @@ dependencies = [ [[package]] name = "google-cloud-metadata" -version = "0.3.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96e4ad0802d3f416f62e7ce01ac1460898ee0efc98f8b45cd4aab7611607012f" +checksum = "cc279bfb50487d7bcd900e8688406475fc750fe474a835b2ab9ade9eb1fc90e2" dependencies = [ "reqwest", "thiserror", @@ -2219,11 +2193,12 @@ dependencies = [ [[package]] name = "google-cloud-storage" -version = "0.12.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "215abab97e07d144428425509c1dad07e57ea72b84b21bcdb6a8a5f12a5c4932" +checksum = "ac04b29849ebdeb9fb008988cc1c4d1f0c9d121b4c7f1ddeb8061df124580e93" dependencies = [ "async-stream", + "async-trait", "base64 0.21.5", "bytes", "futures-util", @@ -2233,10 +2208,10 @@ dependencies = [ "hex", "once_cell", "percent-encoding", + "pkcs8 0.10.2", "regex", "reqwest", - "ring 0.16.20", - "rsa", + "ring 0.17.5", "serde", "serde_json", "sha2 0.10.8", @@ -2811,9 +2786,6 @@ name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" -dependencies = [ - "spin 0.5.2", -] [[package]] name = "lazycell" @@ -3323,23 +3295,6 @@ dependencies = [ "serde", ] -[[package]] -name = "num-bigint-dig" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" -dependencies = [ - "byteorder", - "lazy_static", - "libm", - "num-integer", - "num-iter", - "num-traits", - "rand 0.8.5", - "smallvec", - "zeroize", -] - [[package]] name = "num-complex" version = "0.3.1" @@ -3801,9 +3756,9 @@ dependencies = [ [[package]] name = "pem-rfc7468" -version = "0.3.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01de5d978f34aa4b2296576379fcc416034702fd94117c56ffd8a1a767cefb30" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" dependencies = [ "base64ct", ] @@ -3901,28 +3856,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs1" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78f66c04ccc83dd4486fd46c33896f4e17b24a7a3a6400dedc48ed0ddd72320" -dependencies = [ - "der 0.5.1", - "pkcs8 0.8.0", - "zeroize", -] - -[[package]] -name = "pkcs8" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der 0.5.1", - "spki 0.5.4", - "zeroize", -] - [[package]] name = "pkcs8" version = "0.9.0" @@ -4808,26 +4741,6 @@ dependencies = [ "librocksdb-sys", ] -[[package]] -name = "rsa" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cf22754c49613d2b3b119f0e5d46e34a2c628a937e3024b8762de4e7d8c710b" -dependencies = [ - "byteorder", - "digest 0.10.7", - "num-bigint-dig", - "num-integer", - "num-iter", - "num-traits", - "pkcs1", - "pkcs8 0.8.0", - "rand_core 0.6.4", - "smallvec", - "subtle", - "zeroize", -] - [[package]] name = "rustc-demangle" version = "0.1.23" @@ -5515,16 +5428,6 @@ version = "0.9.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -[[package]] -name = "spki" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" -dependencies = [ - "base64ct", - "der 0.5.1", -] - [[package]] name = "spki" version = "0.6.0" @@ -7168,6 +7071,7 @@ dependencies = [ "itertools 0.10.5", "num 0.3.1", "once_cell", + "prost", "rand 0.8.5", "serde", "serde_json", @@ -7178,8 +7082,11 @@ dependencies = [ "tracing", "url", "vise", + "zksync_consensus_roles", "zksync_contracts", "zksync_health_check", + "zksync_protobuf", + "zksync_protobuf_build", "zksync_system_constants", "zksync_types", "zksync_utils", @@ -7539,7 +7446,6 @@ dependencies = [ "num_enum", "once_cell", "parity-crypto", - "prost", "rlp", "serde", "serde_json", diff --git a/prover/circuit_synthesizer/src/circuit_synthesizer.rs b/prover/circuit_synthesizer/src/circuit_synthesizer.rs index 55da03949a7..96a164c69c1 100644 --- a/prover/circuit_synthesizer/src/circuit_synthesizer.rs +++ b/prover/circuit_synthesizer/src/circuit_synthesizer.rs @@ -1,34 +1,40 @@ -use std::option::Option; -use std::time::Duration; -use std::time::Instant; +use std::{ + option::Option, + time::{Duration, Instant}, +}; use anyhow::Context as _; use local_ip_address::local_ip; -use prover_service::prover::{Prover, ProvingAssembly}; -use prover_service::remote_synth::serialize_job; -use tokio::task::JoinHandle; -use tokio::time::sleep; -use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zkevm_test_harness::bellman::plonk::better_better_cs::cs::Circuit; -use zkevm_test_harness::pairing::bn256::Bn256; -use zkevm_test_harness::witness::oracle::VmWitnessOracle; - -use crate::metrics::METRICS; -use zksync_config::configs::prover_group::ProverGroupConfig; -use zksync_config::configs::CircuitSynthesizerConfig; -use 
zksync_config::ProverConfigs; +use prover_service::{ + prover::{Prover, ProvingAssembly}, + remote_synth::serialize_job, +}; +use tokio::{task::JoinHandle, time::sleep}; +use zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::plonk::better_better_cs::cs::Circuit, pairing::bn256::Bn256, + witness::oracle::VmWitnessOracle, +}; +use zksync_config::{ + configs::{prover_group::ProverGroupConfig, CircuitSynthesizerConfig}, + ProverConfigs, +}; use zksync_dal::ConnectionPool; use zksync_env_config::FromEnv; use zksync_object_store::{CircuitKey, ObjectStore, ObjectStoreError, ObjectStoreFactory}; use zksync_prover_fri_utils::socket_utils::send_assembly; -use zksync_prover_utils::numeric_index_to_circuit_name; -use zksync_prover_utils::region_fetcher::{get_region, get_zone}; +use zksync_prover_utils::{ + numeric_index_to_circuit_name, + region_fetcher::{get_region, get_zone}, +}; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{ proofs::{GpuProverInstanceStatus, SocketAddress}, protocol_version::L1VerifierConfig, }; +use crate::metrics::METRICS; + #[derive(thiserror::Error, Debug)] pub enum CircuitSynthesizerError { #[error("InvalidaGroupCircuits: {0}")] diff --git a/prover/circuit_synthesizer/src/main.rs b/prover/circuit_synthesizer/src/main.rs index 5592885dcdd..a4ac19e18d7 100644 --- a/prover/circuit_synthesizer/src/main.rs +++ b/prover/circuit_synthesizer/src/main.rs @@ -1,8 +1,7 @@ use anyhow::Context as _; use prometheus_exporter::PrometheusExporterConfig; use structopt::StructOpt; -use tokio::{sync::oneshot, sync::watch}; - +use tokio::sync::{oneshot, watch}; use zksync_config::configs::{ AlertsConfig, CircuitSynthesizerConfig, ObjectStoreConfig, PostgresConfig, ProverGroupConfig, }; diff --git a/prover/circuit_synthesizer/src/metrics.rs b/prover/circuit_synthesizer/src/metrics.rs index b9ee5b10c15..78049d6cf78 100644 --- a/prover/circuit_synthesizer/src/metrics.rs +++ 
b/prover/circuit_synthesizer/src/metrics.rs @@ -1,4 +1,5 @@ use std::time::Duration; + use vise::{Buckets, Histogram, LabeledFamily, Metrics}; #[derive(Debug, Metrics)] diff --git a/prover/proof_fri_compressor/src/compressor.rs b/prover/proof_fri_compressor/src/compressor.rs index f0f8efc6102..b4346305b9f 100644 --- a/prover/proof_fri_compressor/src/compressor.rs +++ b/prover/proof_fri_compressor/src/compressor.rs @@ -1,30 +1,37 @@ +use std::time::Instant; + use anyhow::Context as _; use async_trait::async_trait; -use std::time::Instant; use tokio::task::JoinHandle; - -use crate::metrics::METRICS; use zkevm_test_harness::proof_wrapper_utils::{wrap_proof, WrapperConfig}; use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ - ZkSyncRecursionLayerProof, ZkSyncRecursionLayerStorageType, +use zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::field::goldilocks::GoldilocksField, + circuit_definitions::recursion_layer::{ + ZkSyncRecursionLayerProof, ZkSyncRecursionLayerStorageType, + }, + zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness, + }, + get_current_pod_name, AuxOutputWitnessWrapper, FriProofWrapper, }; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness; -use zksync_prover_fri_types::{get_current_pod_name, AuxOutputWitnessWrapper, FriProofWrapper}; use zksync_queued_job_processor::JobProcessor; -use zksync_types::aggregated_operations::L1BatchProofForL1; -use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncVerificationKey; -use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::{ - ZkSyncCircuit, ZkSyncProof, +use zksync_types::{ + aggregated_operations::L1BatchProofForL1, + zkevm_test_harness::{ + 
abstract_zksync_circuit::concrete_circuits::{ + ZkSyncCircuit, ZkSyncProof, ZkSyncVerificationKey, + }, + bellman::{bn256::Bn256, plonk::better_better_cs::proof::Proof}, + witness::oracle::VmWitnessOracle, + }, + L1BatchNumber, }; -use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; -use zksync_types::zkevm_test_harness::bellman::plonk::better_better_cs::proof::Proof; -use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle; -use zksync_types::L1BatchNumber; use zksync_vk_setup_data_server_fri::{get_recursive_layer_vk_for_circuit_type, get_snark_vk}; +use crate::metrics::METRICS; + pub struct ProofCompressor { blob_store: Box, pool: ConnectionPool, diff --git a/prover/proof_fri_compressor/src/main.rs b/prover/proof_fri_compressor/src/main.rs index c8396803339..04f2935ead0 100644 --- a/prover/proof_fri_compressor/src/main.rs +++ b/prover/proof_fri_compressor/src/main.rs @@ -1,11 +1,9 @@ -use anyhow::Context as _; -use std::env; -use structopt::StructOpt; -use tokio::{sync::oneshot, sync::watch}; - -use std::time::Duration; +use std::{env, time::Duration}; +use anyhow::Context as _; use prometheus_exporter::PrometheusExporterConfig; +use structopt::StructOpt; +use tokio::sync::{oneshot, watch}; use zksync_config::configs::{FriProofCompressorConfig, PostgresConfig}; use zksync_dal::ConnectionPool; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; diff --git a/prover/proof_fri_compressor/src/metrics.rs b/prover/proof_fri_compressor/src/metrics.rs index 5891da2f416..724da6d73b8 100644 --- a/prover/proof_fri_compressor/src/metrics.rs +++ b/prover/proof_fri_compressor/src/metrics.rs @@ -1,4 +1,5 @@ use std::time::Duration; + use vise::{Buckets, Histogram, Metrics}; #[derive(Debug, Metrics)] diff --git a/prover/prover/src/artifact_provider.rs b/prover/prover/src/artifact_provider.rs index 8ee065be5a5..9af365d95f4 100644 --- a/prover/prover/src/artifact_provider.rs +++ b/prover/prover/src/artifact_provider.rs @@ -1,8 +1,10 @@ 
+use std::io::Read; + use anyhow::Context as _; use prover_service::ArtifactProvider; -use std::io::Read; -use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncVerificationKey; -use zkevm_test_harness::pairing::bn256::Bn256; +use zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncVerificationKey, pairing::bn256::Bn256, +}; use zksync_setup_key_server::get_setup_for_circuit_type; use zksync_verification_key_server::get_vk_for_circuit_type; diff --git a/prover/prover/src/metrics.rs b/prover/prover/src/metrics.rs index 4544ae9bfa7..ab18c59bcf7 100644 --- a/prover/prover/src/metrics.rs +++ b/prover/prover/src/metrics.rs @@ -1,4 +1,5 @@ use std::time::Duration; + use vise::{Buckets, Counter, Histogram, LabeledFamily, Metrics}; const PROVER_LATENCY_BUCKETS: Buckets = Buckets::values(&[ diff --git a/prover/prover/src/prover.rs b/prover/prover/src/prover.rs index 1885d815332..efb570050b5 100644 --- a/prover/prover/src/prover.rs +++ b/prover/prover/src/prover.rs @@ -1,21 +1,21 @@ -use anyhow::Context as _; use std::{env, time::Duration}; +use anyhow::Context as _; use prover_service::{ JobReporter, JobResult::{self, Failure, ProofGenerated}, }; use tokio::runtime::Handle; -use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncProof; -use zkevm_test_harness::pairing::bn256::Bn256; - -use crate::metrics::METRICS; +use zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncProof, pairing::bn256::Bn256, +}; use zksync_config::{PostgresConfig, ProverConfig}; -use zksync_dal::ConnectionPool; -use zksync_dal::StorageProcessor; +use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_object_store::{Bucket, ObjectStore, ObjectStoreFactory}; use zksync_types::proofs::ProverJobMetadata; +use crate::metrics::METRICS; + #[derive(Debug)] pub struct ProverReporter { rt_handle: Handle, diff --git a/prover/prover/src/prover_params.rs b/prover/prover/src/prover_params.rs index 558e9058ed6..fc59b88ddf7 
100644 --- a/prover/prover/src/prover_params.rs +++ b/prover/prover/src/prover_params.rs @@ -1,7 +1,6 @@ use std::time::Duration; use prover_service::Params; - use zksync_config::ProverConfig; #[derive(Debug)] diff --git a/prover/prover/src/run.rs b/prover/prover/src/run.rs index 9342cd554e6..9784b2f1b66 100644 --- a/prover/prover/src/run.rs +++ b/prover/prover/src/run.rs @@ -1,11 +1,10 @@ -use anyhow::Context as _; use std::{env, future::Future, sync::Arc, time::Instant}; -use tokio::sync::{oneshot, Mutex}; +use anyhow::Context as _; use local_ip_address::local_ip; -use queues::Buffer; - use prometheus_exporter::PrometheusExporterConfig; +use queues::Buffer; +use tokio::sync::{oneshot, Mutex}; use zksync_config::{ configs::{ api::PrometheusConfig, prover_group::ProverGroupConfig, AlertsConfig, ObjectStoreConfig, @@ -19,12 +18,11 @@ use zksync_prover_utils::region_fetcher::{get_region, get_zone}; use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; use zksync_utils::wait_for_tasks::wait_for_tasks; -use crate::artifact_provider::ProverArtifactProvider; -use crate::metrics::METRICS; -use crate::prover::ProverReporter; -use crate::prover_params::ProverParams; -use crate::socket_listener::incoming_socket_listener; -use crate::synthesized_circuit_provider::SynthesizedCircuitProvider; +use crate::{ + artifact_provider::ProverArtifactProvider, metrics::METRICS, prover::ProverReporter, + prover_params::ProverParams, socket_listener::incoming_socket_listener, + synthesized_circuit_provider::SynthesizedCircuitProvider, +}; async fn graceful_shutdown() -> anyhow::Result> { let postgres_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?; diff --git a/prover/prover/src/socket_listener.rs b/prover/prover/src/socket_listener.rs index d8dbaff74dc..95a369e7078 100644 --- a/prover/prover/src/socket_listener.rs +++ b/prover/prover/src/socket_listener.rs @@ -1,15 +1,18 @@ -use crate::synthesized_circuit_provider::SharedAssemblyQueue; -use 
queues::IsQueue; -use std::net::{IpAddr, SocketAddr}; -use std::time::Instant; -use zksync_dal::ConnectionPool; -use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; +use std::{ + net::{IpAddr, SocketAddr}, + time::Instant, +}; use anyhow::Context as _; +use queues::IsQueue; use tokio::{ io::copy, net::{TcpListener, TcpStream}, }; +use zksync_dal::ConnectionPool; +use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; + +use crate::synthesized_circuit_provider::SharedAssemblyQueue; #[allow(clippy::too_many_arguments)] pub async fn incoming_socket_listener( diff --git a/prover/prover/src/synthesized_circuit_provider.rs b/prover/prover/src/synthesized_circuit_provider.rs index 3c6939dc6aa..e1cec64162b 100644 --- a/prover/prover/src/synthesized_circuit_provider.rs +++ b/prover/prover/src/synthesized_circuit_provider.rs @@ -1,16 +1,16 @@ -use std::io::Cursor; -use std::io::Read; -use std::sync::Arc; -use tokio::sync::Mutex; +use std::{ + io::{Cursor, Read}, + sync::Arc, +}; use prover_service::RemoteSynthesizer; use queues::{Buffer, IsQueue}; - -use crate::metrics::METRICS; -use tokio::runtime::Handle; +use tokio::{runtime::Handle, sync::Mutex}; use zksync_dal::ConnectionPool; use zksync_types::proofs::SocketAddress; +use crate::metrics::METRICS; + pub type SharedAssemblyQueue = Arc>>>; pub struct SynthesizedCircuitProvider { diff --git a/prover/prover_fri/src/gpu_prover_job_processor.rs b/prover/prover_fri/src/gpu_prover_job_processor.rs index 9d7eda1202a..b56388bce05 100644 --- a/prover/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/prover_fri/src/gpu_prover_job_processor.rs @@ -1,39 +1,42 @@ #[cfg(feature = "gpu")] pub mod gpu_prover { - use std::collections::HashMap; - use std::{sync::Arc, time::Instant}; + use std::{collections::HashMap, sync::Arc, time::Instant}; use anyhow::Context as _; + use shivini::{gpu_prove_from_external_witness_data, ProverContext}; use tokio::task::JoinHandle; - use 
zksync_prover_fri_types::circuit_definitions::base_layer_proof_config; - use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::round_function::AbsorptionModeOverwrite; - use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::sponge::GoldilocksPoseidon2Sponge; - use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::pow::NoPow; - - use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::transcript::GoldilocksPoisedon2Transcript; - use zksync_prover_fri_types::circuit_definitions::boojum::worker::Worker; - use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerProof; - use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerProof; - use zksync_prover_fri_types::WitnessVectorArtifacts; - - use crate::metrics::METRICS; - use zksync_config::configs::fri_prover_group::FriProverGroupConfig; - use zksync_config::configs::FriProverConfig; + use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig}; use zksync_dal::ConnectionPool; use zksync_env_config::FromEnv; use zksync_object_store::ObjectStore; - use zksync_prover_fri_types::{CircuitWrapper, FriProofWrapper, ProverServiceDataKey}; + use zksync_prover_fri_types::{ + circuit_definitions::{ + base_layer_proof_config, + boojum::{ + algebraic_props::{ + round_function::AbsorptionModeOverwrite, sponge::GoldilocksPoseidon2Sponge, + }, + cs::implementations::{pow::NoPow, transcript::GoldilocksPoisedon2Transcript}, + worker::Worker, + }, + circuit_definitions::{ + base_layer::ZkSyncBaseLayerProof, recursion_layer::ZkSyncRecursionLayerProof, + }, + }, + CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts, + }; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{basic_fri_types::CircuitIdRoundTuple, proofs::SocketAddress}; - use 
zksync_vk_setup_data_server_fri::get_setup_data_for_circuit_type; - use { - shivini::gpu_prove_from_external_witness_data, shivini::ProverContext, - zksync_vk_setup_data_server_fri::GoldilocksGpuProverSetupData, + use zksync_vk_setup_data_server_fri::{ + get_setup_data_for_circuit_type, GoldilocksGpuProverSetupData, }; - use crate::utils::{ - get_setup_data_key, save_proof, setup_metadata_to_setup_data_key, verify_proof, - GpuProverJob, ProverArtifacts, SharedWitnessVectorQueue, + use crate::{ + metrics::METRICS, + utils::{ + get_setup_data_key, save_proof, setup_metadata_to_setup_data_key, verify_proof, + GpuProverJob, ProverArtifacts, SharedWitnessVectorQueue, + }, }; type DefaultTranscript = GoldilocksPoisedon2Transcript; diff --git a/prover/prover_fri/src/main.rs b/prover/prover_fri/src/main.rs index ab0994a3648..ff59fdbabfc 100644 --- a/prover/prover_fri/src/main.rs +++ b/prover/prover_fri/src/main.rs @@ -1,13 +1,16 @@ #![feature(generic_const_exprs)] -use anyhow::Context as _; use std::future::Future; -use tokio::sync::oneshot; -use tokio::sync::watch::Receiver; -use tokio::task::JoinHandle; +use anyhow::Context as _; +use local_ip_address::local_ip; use prometheus_exporter::PrometheusExporterConfig; -use zksync_config::configs::fri_prover_group::FriProverGroupConfig; -use zksync_config::configs::{FriProverConfig, PostgresConfig, ProverGroupConfig}; +use tokio::{ + sync::{oneshot, watch::Receiver}, + task::JoinHandle, +}; +use zksync_config::configs::{ + fri_prover_group::FriProverGroupConfig, FriProverConfig, PostgresConfig, ProverGroupConfig, +}; use zksync_dal::ConnectionPool; use zksync_env_config::{ object_store::{ProverObjectStoreConfig, PublicObjectStoreConfig}, @@ -15,22 +18,20 @@ use zksync_env_config::{ }; use zksync_object_store::{ObjectStore, ObjectStoreFactory}; use zksync_prover_fri_utils::get_all_circuit_id_round_tuples_for; - -use local_ip_address::local_ip; use zksync_prover_utils::region_fetcher::get_zone; use 
zksync_queued_job_processor::JobProcessor; -use zksync_types::basic_fri_types::CircuitIdRoundTuple; -use zksync_types::proofs::GpuProverInstanceStatus; -use zksync_types::proofs::SocketAddress; +use zksync_types::{ + basic_fri_types::CircuitIdRoundTuple, + proofs::{GpuProverInstanceStatus, SocketAddress}, +}; use zksync_utils::wait_for_tasks::wait_for_tasks; mod gpu_prover_job_processor; +mod metrics; mod prover_job_processor; mod socket_listener; mod utils; -mod metrics; - async fn graceful_shutdown(port: u16) -> anyhow::Result> { let postgres_config = PostgresConfig::from_env().context("PostgresConfig::from_env()")?; let pool = ConnectionPool::singleton(postgres_config.prover_url()?) @@ -170,9 +171,10 @@ async fn get_prover_tasks( pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, ) -> anyhow::Result>>> { - use crate::prover_job_processor::{load_setup_data_cache, Prover}; use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments; + use crate::prover_job_processor::{load_setup_data_cache, Prover}; + let vk_commitments = get_cached_commitments(); tracing::info!( @@ -203,9 +205,10 @@ async fn get_prover_tasks( pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, ) -> anyhow::Result>>> { + use std::sync::Arc; + use gpu_prover_job_processor::gpu_prover; use socket_listener::gpu_socket_listener; - use std::sync::Arc; use tokio::sync::Mutex; use zksync_prover_fri_types::queue::FixedSizeQueue; diff --git a/prover/prover_fri/src/metrics.rs b/prover/prover_fri/src/metrics.rs index 27ddce54d6c..f6f7adb817d 100644 --- a/prover/prover_fri/src/metrics.rs +++ b/prover/prover_fri/src/metrics.rs @@ -1,4 +1,5 @@ use std::time::Duration; + use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, LabeledFamily, Metrics}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet)] diff --git a/prover/prover_fri/src/prover_job_processor.rs b/prover/prover_fri/src/prover_job_processor.rs index 30beae5bc82..dbe4bee0c86 
100644 --- a/prover/prover_fri/src/prover_job_processor.rs +++ b/prover/prover_fri/src/prover_job_processor.rs @@ -1,31 +1,27 @@ -use std::collections::HashMap; -use std::{sync::Arc, time::Instant}; +use std::{collections::HashMap, sync::Arc, time::Instant}; use anyhow::Context as _; use tokio::task::JoinHandle; -use zksync_prover_fri_types::circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; -use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::pow::NoPow; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; -use zksync_prover_fri_types::circuit_definitions::boojum::worker::Worker; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::{ - ZkSyncBaseLayerCircuit, ZkSyncBaseLayerProof, -}; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ - ZkSyncRecursionLayerProof, ZkSyncRecursiveLayerCircuit, -}; -use zksync_prover_fri_types::circuit_definitions::{ - base_layer_proof_config, recursion_layer_proof_config, ZkSyncDefaultRoundFunction, -}; - use zkevm_test_harness::prover_utils::{prove_base_layer_circuit, prove_recursion_layer_circuit}; - -use crate::metrics::{CircuitLabels, Layer, METRICS}; -use zksync_config::configs::fri_prover_group::FriProverGroupConfig; -use zksync_config::configs::FriProverConfig; +use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig}; use zksync_dal::ConnectionPool; use zksync_env_config::FromEnv; use zksync_object_store::ObjectStore; -use zksync_prover_fri_types::{CircuitWrapper, FriProofWrapper, ProverJob, ProverServiceDataKey}; +use zksync_prover_fri_types::{ + circuit_definitions::{ + aux_definitions::witness_oracle::VmWitnessOracle, + base_layer_proof_config, + boojum::{ + cs::implementations::pow::NoPow, field::goldilocks::GoldilocksField, worker::Worker, + }, + circuit_definitions::{ + base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerProof}, + 
recursion_layer::{ZkSyncRecursionLayerProof, ZkSyncRecursiveLayerCircuit}, + }, + recursion_layer_proof_config, ZkSyncDefaultRoundFunction, + }, + CircuitWrapper, FriProofWrapper, ProverJob, ProverServiceDataKey, +}; use zksync_prover_fri_utils::fetch_next_circuit; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{basic_fri_types::CircuitIdRoundTuple, protocol_version::L1VerifierConfig}; @@ -33,8 +29,12 @@ use zksync_vk_setup_data_server_fri::{ get_cpu_setup_data_for_circuit_type, GoldilocksProverSetupData, }; -use crate::utils::{ - get_setup_data_key, save_proof, setup_metadata_to_setup_data_key, verify_proof, ProverArtifacts, +use crate::{ + metrics::{CircuitLabels, Layer, METRICS}, + utils::{ + get_setup_data_key, save_proof, setup_metadata_to_setup_data_key, verify_proof, + ProverArtifacts, + }, }; pub enum SetupLoadMode { diff --git a/prover/prover_fri/src/socket_listener.rs b/prover/prover_fri/src/socket_listener.rs index e9ecbd1e60b..653fb4eb8da 100644 --- a/prover/prover_fri/src/socket_listener.rs +++ b/prover/prover_fri/src/socket_listener.rs @@ -1,27 +1,28 @@ #[cfg(feature = "gpu")] pub mod gpu_socket_listener { + use std::{net::SocketAddr, time::Instant}; + + use anyhow::Context as _; use shivini::synthesis_utils::{ init_base_layer_cs_for_repeated_proving, init_recursive_layer_cs_for_repeated_proving, }; - use std::net::SocketAddr; - use std::time::Instant; - use zksync_dal::ConnectionPool; - use zksync_types::proofs::AggregationRound; - use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; - use zksync_vk_setup_data_server_fri::{ - get_finalization_hints, get_round_for_recursive_circuit_type, - }; - - use crate::metrics::METRICS; - use crate::utils::{GpuProverJob, ProvingAssembly, SharedWitnessVectorQueue}; - use anyhow::Context as _; - use tokio::sync::watch; use tokio::{ io::copy, net::{TcpListener, TcpStream}, + sync::watch, }; + use zksync_dal::ConnectionPool; use zksync_object_store::bincode; use 
zksync_prover_fri_types::{CircuitWrapper, ProverServiceDataKey, WitnessVectorArtifacts}; + use zksync_types::proofs::{AggregationRound, GpuProverInstanceStatus, SocketAddress}; + use zksync_vk_setup_data_server_fri::{ + get_finalization_hints, get_round_for_recursive_circuit_type, + }; + + use crate::{ + metrics::METRICS, + utils::{GpuProverJob, ProvingAssembly, SharedWitnessVectorQueue}, + }; pub(crate) struct SocketListener { address: SocketAddress, diff --git a/prover/prover_fri/src/utils.rs b/prover/prover_fri/src/utils.rs index c67ee9149f1..d86adbf4e89 100644 --- a/prover/prover_fri/src/utils.rs +++ b/prover/prover_fri/src/utils.rs @@ -1,33 +1,35 @@ #![cfg_attr(not(feature = "gpu"), allow(unused_imports))] -use std::sync::Arc; -use std::time::Instant; -use zksync_prover_fri_types::circuit_definitions::boojum::config::ProvingCSConfig; -use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::reference_cs::CSReferenceAssembly; +use std::{sync::Arc, time::Instant}; use tokio::sync::Mutex; use zkevm_test_harness::prover_utils::{verify_base_layer_proof, verify_recursion_layer_proof}; use zksync_dal::StorageProcessor; use zksync_object_store::ObjectStore; -use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::round_function::AbsorptionModeOverwrite; -use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::sponge::GoldilocksPoseidon2Sponge; -use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::pow::NoPow; -use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::proof::Proof; -use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::verifier::VerificationKey; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::{ - GoldilocksExt2, GoldilocksField, -}; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ - ZkSyncRecursionLayerProof, ZkSyncRecursionLayerStorageType, -}; -use 
zksync_prover_fri_types::queue::FixedSizeQueue; use zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::{ + algebraic_props::{ + round_function::AbsorptionModeOverwrite, sponge::GoldilocksPoseidon2Sponge, + }, + config::ProvingCSConfig, + cs::implementations::{ + pow::NoPow, proof::Proof, reference_cs::CSReferenceAssembly, + verifier::VerificationKey, + }, + field::goldilocks::{GoldilocksExt2, GoldilocksField}, + }, + circuit_definitions::recursion_layer::{ + ZkSyncRecursionLayerProof, ZkSyncRecursionLayerStorageType, + }, + }, + queue::FixedSizeQueue, CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts, }; use zksync_prover_fri_utils::get_base_layer_circuit_id_for_recursive_layer; +use zksync_types::{basic_fri_types::CircuitIdRoundTuple, proofs::AggregationRound, L1BatchNumber}; use crate::metrics::METRICS; -use zksync_types::{basic_fri_types::CircuitIdRoundTuple, proofs::AggregationRound, L1BatchNumber}; pub type F = GoldilocksField; pub type H = GoldilocksPoseidon2Sponge; diff --git a/prover/prover_fri/tests/basic_test.rs b/prover/prover_fri/tests/basic_test.rs index 133598d66db..89089ac8249 100644 --- a/prover/prover_fri/tests/basic_test.rs +++ b/prover/prover_fri/tests/basic_test.rs @@ -1,16 +1,13 @@ -use anyhow::Context as _; use std::sync::Arc; -use zksync_config::configs::FriProverConfig; -use zksync_config::ObjectStoreConfig; +use anyhow::Context as _; +use serde::Serialize; +use zksync_config::{configs::FriProverConfig, ObjectStoreConfig}; use zksync_env_config::FromEnv; use zksync_object_store::{bincode, FriCircuitKey, ObjectStoreFactory}; -use zksync_types::proofs::AggregationRound; -use zksync_types::L1BatchNumber; - -use serde::Serialize; use zksync_prover_fri::prover_job_processor::Prover; use zksync_prover_fri_types::{CircuitWrapper, ProverJob, ProverServiceDataKey}; +use zksync_types::{proofs::AggregationRound, L1BatchNumber}; use zksync_vk_setup_data_server_fri::generate_cpu_base_layer_setup_data; fn 
compare_serialized(expected: &T, actual: &T) { diff --git a/prover/prover_fri_gateway/src/api_data_fetcher.rs b/prover/prover_fri_gateway/src/api_data_fetcher.rs index 339a7bec9e6..f56a9af4cc8 100644 --- a/prover/prover_fri_gateway/src/api_data_fetcher.rs +++ b/prover/prover_fri_gateway/src/api_data_fetcher.rs @@ -1,14 +1,14 @@ use std::time::Duration; -use crate::metrics::METRICS; use async_trait::async_trait; use reqwest::Client; use serde::{de::DeserializeOwned, Serialize}; -use tokio::sync::watch; -use tokio::time::sleep; +use tokio::{sync::watch, time::sleep}; use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; +use crate::metrics::METRICS; + /// The path to the API endpoint that returns the next proof generation data. pub(crate) const PROOF_GENERATION_DATA_PATH: &str = "/proof_generation_data"; diff --git a/prover/prover_fri_gateway/src/main.rs b/prover/prover_fri_gateway/src/main.rs index 3a3f8b42ae0..15329ce955a 100644 --- a/prover/prover_fri_gateway/src/main.rs +++ b/prover/prover_fri_gateway/src/main.rs @@ -1,9 +1,7 @@ use anyhow::Context as _; -use reqwest::Client; -use tokio::{sync::oneshot, sync::watch}; - -use crate::api_data_fetcher::{PeriodicApiStruct, PROOF_GENERATION_DATA_PATH, SUBMIT_PROOF_PATH}; use prometheus_exporter::PrometheusExporterConfig; +use reqwest::Client; +use tokio::sync::{oneshot, watch}; use zksync_config::configs::{FriProverGatewayConfig, PostgresConfig}; use zksync_dal::ConnectionPool; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; @@ -11,6 +9,8 @@ use zksync_object_store::ObjectStoreFactory; use zksync_types::prover_server_api::{ProofGenerationDataRequest, SubmitProofRequest}; use zksync_utils::wait_for_tasks::wait_for_tasks; +use crate::api_data_fetcher::{PeriodicApiStruct, PROOF_GENERATION_DATA_PATH, SUBMIT_PROOF_PATH}; + mod api_data_fetcher; mod metrics; mod proof_gen_data_fetcher; diff --git a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs 
b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs index 1f00c7f7429..a25d447ad22 100644 --- a/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs +++ b/prover/prover_fri_gateway/src/proof_gen_data_fetcher.rs @@ -1,5 +1,4 @@ use async_trait::async_trait; - use zksync_types::prover_server_api::{ ProofGenerationData, ProofGenerationDataRequest, ProofGenerationDataResponse, }; diff --git a/prover/prover_fri_gateway/src/proof_submitter.rs b/prover/prover_fri_gateway/src/proof_submitter.rs index 86b2e4004b3..78c7a6a6d8e 100644 --- a/prover/prover_fri_gateway/src/proof_submitter.rs +++ b/prover/prover_fri_gateway/src/proof_submitter.rs @@ -1,8 +1,9 @@ use async_trait::async_trait; use zksync_dal::fri_proof_compressor_dal::ProofCompressionJobStatus; - -use zksync_types::prover_server_api::{SubmitProofRequest, SubmitProofResponse}; -use zksync_types::L1BatchNumber; +use zksync_types::{ + prover_server_api::{SubmitProofRequest, SubmitProofResponse}, + L1BatchNumber, +}; use crate::api_data_fetcher::{PeriodicApi, PeriodicApiStruct}; diff --git a/prover/prover_fri_types/src/lib.rs b/prover/prover_fri_types/src/lib.rs index 9e84c02f057..c244cb99f5a 100644 --- a/prover/prover_fri_types/src/lib.rs +++ b/prover/prover_fri_types/src/lib.rs @@ -1,24 +1,20 @@ -pub mod queue; +use std::env; pub use circuit_definitions; -use std::env; +use circuit_definitions::{ + aux_definitions::witness_oracle::VmWitnessOracle, + boojum::{cs::implementations::witness::WitnessVec, field::goldilocks::GoldilocksField}, + circuit_definitions::{ + base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerProof}, + recursion_layer::{ZkSyncRecursionLayerProof, ZkSyncRecursiveLayerCircuit}, + }, + zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness, + ZkSyncDefaultRoundFunction, +}; +use zksync_object_store::{serialize_using_bincode, Bucket, FriCircuitKey, StoredObject}; +use zksync_types::{proofs::AggregationRound, L1BatchNumber}; -use 
circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; -use circuit_definitions::boojum::cs::implementations::witness::WitnessVec; -use circuit_definitions::boojum::field::goldilocks::GoldilocksField; -use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit; -use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerProof; -use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerProof; -use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursiveLayerCircuit; -use circuit_definitions::zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness; -use circuit_definitions::ZkSyncDefaultRoundFunction; - -use zksync_object_store::serialize_using_bincode; -use zksync_object_store::Bucket; -use zksync_object_store::FriCircuitKey; -use zksync_object_store::StoredObject; -use zksync_types::proofs::AggregationRound; -use zksync_types::L1BatchNumber; +pub mod queue; #[derive(serde::Serialize, serde::Deserialize, Clone)] #[allow(clippy::large_enum_variant)] diff --git a/prover/prover_fri_utils/src/lib.rs b/prover/prover_fri_utils/src/lib.rs index f0edcd07902..eee7293b591 100644 --- a/prover/prover_fri_utils/src/lib.rs +++ b/prover/prover_fri_utils/src/lib.rs @@ -2,18 +2,21 @@ use std::time::Instant; use zksync_dal::StorageProcessor; use zksync_object_store::{FriCircuitKey, ObjectStore}; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; -use zksync_types::basic_fri_types::CircuitIdRoundTuple; - use zksync_prover_fri_types::{ + circuit_definitions::{ + circuit_definitions::recursion_layer::{ + base_circuit_type_into_recursive_leaf_circuit_type, 
ZkSyncRecursionLayerStorageType, + }, + zkevm_circuits::scheduler::aux::BaseLayerCircuitType, + }, get_current_pod_name, CircuitWrapper, ProverJob, ProverServiceDataKey, }; +use zksync_types::{ + basic_fri_types::CircuitIdRoundTuple, proofs::AggregationRound, + protocol_version::L1VerifierConfig, +}; use crate::metrics::{CircuitLabels, PROVER_FRI_UTILS_METRICS}; -use zksync_types::proofs::AggregationRound; -use zksync_types::protocol_version::L1VerifierConfig; pub mod metrics; pub mod socket_utils; diff --git a/prover/prover_fri_utils/src/metrics.rs b/prover/prover_fri_utils/src/metrics.rs index 767e2c25fc1..acb48bacb3e 100644 --- a/prover/prover_fri_utils/src/metrics.rs +++ b/prover/prover_fri_utils/src/metrics.rs @@ -1,4 +1,5 @@ use std::time::Duration; + use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; use zksync_types::proofs::AggregationRound; diff --git a/prover/prover_fri_utils/src/socket_utils.rs b/prover/prover_fri_utils/src/socket_utils.rs index 14e31e56f00..c0c5ddcbcb9 100644 --- a/prover/prover_fri_utils/src/socket_utils.rs +++ b/prover/prover_fri_utils/src/socket_utils.rs @@ -1,9 +1,9 @@ -use std::io::copy; -use std::io::ErrorKind; -use std::io::Read; -use std::net::SocketAddr; -use std::net::TcpStream; -use std::time::{Duration, Instant}; +use std::{ + io::{copy, ErrorKind, Read}, + net::{SocketAddr, TcpStream}, + time::{Duration, Instant}, +}; + use zksync_types::proofs::SocketAddress; pub fn send_assembly( diff --git a/prover/setup_key_generator_and_server/src/lib.rs b/prover/setup_key_generator_and_server/src/lib.rs index a2e4f0998f8..34b4896cefe 100644 --- a/prover/setup_key_generator_and_server/src/lib.rs +++ b/prover/setup_key_generator_and_server/src/lib.rs @@ -1,18 +1,20 @@ -use anyhow::Context as _; -use std::fs::File; -use std::io::Read; -use std::path::Path; - -use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zkevm_test_harness::bellman::bn256::Bn256; -use 
zkevm_test_harness::witness::oracle::VmWitnessOracle; -use zkevm_test_harness::witness::recursive_aggregation::padding_aggregations; -use zkevm_test_harness::witness::vk_set_generator::circuits_for_vk_generation; -use zksync_types::circuit::GEOMETRY_CONFIG; +use std::{fs::File, io::Read, path::Path}; +use anyhow::Context as _; +use zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::bn256::Bn256, + witness::{ + oracle::VmWitnessOracle, recursive_aggregation::padding_aggregations, + vk_set_generator::circuits_for_vk_generation, + }, +}; use zksync_config::ProverConfigs; use zksync_env_config::FromEnv; -use zksync_types::circuit::{LEAF_SPLITTING_FACTOR, NODE_SPLITTING_FACTOR, SCHEDULER_UPPER_BOUND}; +use zksync_types::circuit::{ + GEOMETRY_CONFIG, LEAF_SPLITTING_FACTOR, NODE_SPLITTING_FACTOR, SCHEDULER_UPPER_BOUND, +}; + pub fn get_setup_for_circuit_type(circuit_type: u8) -> anyhow::Result> { let filepath = get_setup_key_file_path(circuit_type).context("get_setup_key_file_path()")?; tracing::info!("Fetching setup key from path: {}", filepath); diff --git a/prover/setup_key_generator_and_server/src/main.rs b/prover/setup_key_generator_and_server/src/main.rs index ea56a15aed7..9eee0aa5c09 100644 --- a/prover/setup_key_generator_and_server/src/main.rs +++ b/prover/setup_key_generator_and_server/src/main.rs @@ -1,12 +1,13 @@ #![cfg_attr(not(feature = "gpu"), allow(unused_imports))] +use std::{env, fs::File}; + use anyhow::Context as _; -use std::env; -use std::fs::File; use structopt::StructOpt; -use zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zkevm_test_harness::bellman::bn256::Bn256; -use zkevm_test_harness::witness::oracle::VmWitnessOracle; +use zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, bellman::bn256::Bn256, + witness::oracle::VmWitnessOracle, +}; use zksync_setup_key_server::{get_circuits_for_vk, get_setup_key_write_file_path}; #[cfg(feature = 
"gpu")] diff --git a/prover/vk_setup_data_generator_server_fri/src/commitment_utils.rs b/prover/vk_setup_data_generator_server_fri/src/commitment_utils.rs index d04943dd1d6..d6efbb29c71 100644 --- a/prover/vk_setup_data_generator_server_fri/src/commitment_utils.rs +++ b/prover/vk_setup_data_generator_server_fri/src/commitment_utils.rs @@ -1,16 +1,21 @@ -use crate::get_recursive_layer_vk_for_circuit_type; -use crate::utils::get_leaf_vk_params; +use std::str::FromStr; + use anyhow::Context as _; use once_cell::sync::Lazy; -use std::str::FromStr; use structopt::lazy_static::lazy_static; use zkevm_test_harness::witness::recursive_aggregation::{ compute_leaf_vks_and_params_commitment, compute_node_vk_commitment, }; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType; -use zksync_types::protocol_version::{L1VerifierConfig, VerifierParams}; -use zksync_types::H256; +use zksync_prover_fri_types::circuit_definitions::{ + boojum::field::goldilocks::GoldilocksField, + circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType, +}; +use zksync_types::{ + protocol_version::{L1VerifierConfig, VerifierParams}, + H256, +}; + +use crate::{get_recursive_layer_vk_for_circuit_type, utils::get_leaf_vk_params}; lazy_static! { // TODO: do not initialize a static const with data read in runtime. 
diff --git a/prover/vk_setup_data_generator_server_fri/src/lib.rs b/prover/vk_setup_data_generator_server_fri/src/lib.rs index 75125aa8150..19fab9af470 100644 --- a/prover/vk_setup_data_generator_server_fri/src/lib.rs +++ b/prover/vk_setup_data_generator_server_fri/src/lib.rs @@ -1,62 +1,65 @@ #![feature(generic_const_exprs)] #![feature(allocator_api)] +use std::{fs, fs::File, io::Read}; + use anyhow::Context as _; use circuit_definitions::circuit_definitions::aux_layer::{ ZkSyncCompressionLayerStorageType, ZkSyncSnarkWrapperVK, }; -use std::fs; -use std::fs::File; -use std::io::Read; -use zksync_prover_fri_types::circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; -use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::round_function::AbsorptionModeOverwrite; -use zksync_prover_fri_types::circuit_definitions::boojum::algebraic_props::sponge::GenericAlgebraicSponge; - -use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::hints::{ - DenseVariablesCopyHint, DenseWitnessCopyHint, -}; -use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::polynomial_storage::{ - SetupBaseStorage, SetupStorage, -}; -use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::setup::FinalizationHintsForProver; -use zksync_prover_fri_types::circuit_definitions::boojum::cs::implementations::verifier::VerificationKey; -use zksync_prover_fri_types::circuit_definitions::boojum::cs::oracle::merkle_tree::MerkleTreeWithCap; -use zksync_prover_fri_types::circuit_definitions::boojum::cs::oracle::TreeHasher; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; -use zksync_prover_fri_types::circuit_definitions::boojum::field::{PrimeField, SmallField}; - -use zksync_prover_fri_types::circuit_definitions::boojum::field::traits::field_like::PrimeFieldLikeVectorized; -use 
zksync_prover_fri_types::circuit_definitions::boojum::implementations::poseidon2::Poseidon2Goldilocks; -use zksync_prover_fri_types::circuit_definitions::boojum::worker::Worker; - -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::{ - ZkSyncBaseLayerCircuit, ZkSyncBaseLayerVerificationKey, -}; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ - ZkSyncRecursionLayerStorageType, ZkSyncRecursionLayerVerificationKey, -}; -use zksync_prover_fri_types::circuit_definitions::{ - ZkSyncDefaultRoundFunction, BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR, -}; - -use serde::de::DeserializeOwned; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use zkevm_test_harness::prover_utils::create_base_layer_setup_data; use zksync_config::configs::FriProverConfig; use zksync_env_config::FromEnv; -use zksync_types::proofs::AggregationRound; -use zksync_types::zkevm_test_harness::abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit; -use zksync_types::zkevm_test_harness::bellman::bn256::Bn256; -use zksync_types::zkevm_test_harness::bellman::plonk::better_better_cs::setup::VerificationKey as SnarkVerificationKey; -use zksync_types::zkevm_test_harness::witness::oracle::VmWitnessOracle as SnarkWitnessOracle; +use zksync_prover_fri_types::{ + circuit_definitions::{ + aux_definitions::witness_oracle::VmWitnessOracle, + boojum::{ + algebraic_props::{ + round_function::AbsorptionModeOverwrite, sponge::GenericAlgebraicSponge, + }, + cs::{ + implementations::{ + hints::{DenseVariablesCopyHint, DenseWitnessCopyHint}, + polynomial_storage::{SetupBaseStorage, SetupStorage}, + setup::FinalizationHintsForProver, + verifier::VerificationKey, + }, + oracle::{merkle_tree::MerkleTreeWithCap, TreeHasher}, + }, + field::{ + goldilocks::GoldilocksField, traits::field_like::PrimeFieldLikeVectorized, + PrimeField, SmallField, + }, + implementations::poseidon2::Poseidon2Goldilocks, + 
worker::Worker, + }, + circuit_definitions::{ + base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerVerificationKey}, + recursion_layer::{ + ZkSyncRecursionLayerStorageType, ZkSyncRecursionLayerVerificationKey, + }, + }, + ZkSyncDefaultRoundFunction, BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR, + }, + ProverServiceDataKey, +}; +use zksync_types::{ + proofs::AggregationRound, + zkevm_test_harness::{ + abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit, + bellman::{ + bn256::Bn256, plonk::better_better_cs::setup::VerificationKey as SnarkVerificationKey, + }, + witness::oracle::VmWitnessOracle as SnarkWitnessOracle, + }, +}; +#[cfg(feature = "gpu")] +use {shivini::cs::GpuSetup, std::alloc::Global}; pub mod commitment_utils; pub mod utils; -use zksync_prover_fri_types::ProverServiceDataKey; -#[cfg(feature = "gpu")] -use {shivini::cs::GpuSetup, std::alloc::Global}; - #[derive(Debug, Serialize, Deserialize)] #[serde( bound = "F: serde::Serialize + serde::de::DeserializeOwned, P: serde::Serialize + serde::de::DeserializeOwned" diff --git a/prover/vk_setup_data_generator_server_fri/src/main.rs b/prover/vk_setup_data_generator_server_fri/src/main.rs index 6d2e1d0712a..158a4390a96 100644 --- a/prover/vk_setup_data_generator_server_fri/src/main.rs +++ b/prover/vk_setup_data_generator_server_fri/src/main.rs @@ -1,17 +1,19 @@ #![feature(generic_const_exprs)] + use anyhow::Context as _; use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerVerificationKey; -use zkevm_test_harness::compute_setups::{ - generate_base_layer_vks_and_proofs, generate_recursive_layer_vks_and_proofs, +use zkevm_test_harness::{ + compute_setups::{generate_base_layer_vks_and_proofs, generate_recursive_layer_vks_and_proofs}, + data_source::{in_memory_data_source::InMemoryDataSource, SetupDataSource}, + proof_wrapper_utils::{get_wrapper_setup_and_vk_from_scheduler_vk, WrapperConfig}, }; -use zkevm_test_harness::data_source::in_memory_data_source::InMemoryDataSource; 
-use zkevm_test_harness::data_source::SetupDataSource; -use zkevm_test_harness::proof_wrapper_utils::{ - get_wrapper_setup_and_vk_from_scheduler_vk, WrapperConfig, +use zksync_prover_fri_types::{ + circuit_definitions::{ + circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType, + zkevm_circuits::scheduler::aux::BaseLayerCircuitType, + }, + ProverServiceDataKey, }; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; -use zksync_prover_fri_types::ProverServiceDataKey; use zksync_types::proofs::AggregationRound; use zksync_vk_setup_data_server_fri::{ get_round_for_recursive_circuit_type, save_base_layer_vk, save_finalization_hints, diff --git a/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs b/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs index 354594a556a..5df4b75b3a6 100644 --- a/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs +++ b/prover/vk_setup_data_generator_server_fri/src/setup_data_generator.rs @@ -1,28 +1,28 @@ use anyhow::Context as _; -use zksync_prover_fri_types::circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; -use zksync_prover_fri_types::circuit_definitions::boojum::worker::Worker; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursiveLayerCircuit; -use zksync_prover_fri_types::circuit_definitions::{ - ZkSyncDefaultRoundFunction, BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR, -}; - use structopt::StructOpt; -use zkevm_test_harness::geometry_config::get_geometry_config; -use 
zkevm_test_harness::prover_utils::create_recursive_layer_setup_data; -use zksync_types::proofs::AggregationRound; -use zksync_vk_setup_data_server_fri::generate_cpu_base_layer_setup_data; -use zksync_vk_setup_data_server_fri::utils::{ - get_basic_circuits, get_leaf_circuits, get_node_circuit, get_scheduler_circuit, CYCLE_LIMIT, +use zkevm_test_harness::{ + geometry_config::get_geometry_config, prover_utils::create_recursive_layer_setup_data, }; +use zksync_prover_fri_types::{ + circuit_definitions::{ + aux_definitions::witness_oracle::VmWitnessOracle, + boojum::{field::goldilocks::GoldilocksField, worker::Worker}, + circuit_definitions::{ + base_layer::ZkSyncBaseLayerCircuit, recursion_layer::ZkSyncRecursiveLayerCircuit, + }, + ZkSyncDefaultRoundFunction, BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR, + }, + ProverServiceDataKey, +}; +use zksync_types::proofs::AggregationRound; use zksync_vk_setup_data_server_fri::{ - get_finalization_hints, get_recursive_layer_vk_for_circuit_type, - get_round_for_recursive_circuit_type, save_setup_data, GoldilocksProverSetupData, - ProverSetupData, + generate_cpu_base_layer_setup_data, get_finalization_hints, + get_recursive_layer_vk_for_circuit_type, get_round_for_recursive_circuit_type, save_setup_data, + utils::{ + get_basic_circuits, get_leaf_circuits, get_node_circuit, get_scheduler_circuit, CYCLE_LIMIT, + }, + GoldilocksProverSetupData, ProverSetupData, }; - -use zksync_prover_fri_types::ProverServiceDataKey; #[cfg(feature = "gpu")] use { shivini::cs::setup::GpuSetup, shivini::ProverContext, diff --git a/prover/vk_setup_data_generator_server_fri/src/tests.rs b/prover/vk_setup_data_generator_server_fri/src/tests.rs index 46cdc94562b..3d6a203ed97 100644 --- a/prover/vk_setup_data_generator_server_fri/src/tests.rs +++ b/prover/vk_setup_data_generator_server_fri/src/tests.rs @@ -1,9 +1,13 @@ use proptest::prelude::*; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ - 
base_circuit_type_into_recursive_leaf_circuit_type, ZkSyncRecursionLayerStorageType, +use zksync_prover_fri_types::{ + circuit_definitions::{ + circuit_definitions::recursion_layer::{ + base_circuit_type_into_recursive_leaf_circuit_type, ZkSyncRecursionLayerStorageType, + }, + zkevm_circuits::scheduler::aux::BaseLayerCircuitType, + }, + ProverServiceDataKey, }; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; -use zksync_prover_fri_types::ProverServiceDataKey; use zksync_types::proofs::AggregationRound; use zksync_vk_setup_data_server_fri::{ get_base_layer_vk_for_circuit_type, get_base_path, get_file_path, get_finalization_hints, diff --git a/prover/vk_setup_data_generator_server_fri/src/utils.rs b/prover/vk_setup_data_generator_server_fri/src/utils.rs index 03c0fb4a210..01b40de3394 100644 --- a/prover/vk_setup_data_generator_server_fri/src/utils.rs +++ b/prover/vk_setup_data_generator_server_fri/src/utils.rs @@ -1,57 +1,74 @@ -use crate::{ - get_base_layer_vk_for_circuit_type, get_base_path, get_recursive_layer_vk_for_circuit_type, -}; -use zksync_prover_fri_types::circuit_definitions::aux_definitions::witness_oracle::VmWitnessOracle; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; -use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::queue::full_state_queue::FullStateCircuitQueueRawWitness; -use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; -use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::traits::allocatable::CSAllocatable; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::leaf_layer::ZkSyncLeafLayerRecursiveCircuit; -use 
zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::node_layer::ZkSyncNodeLayerRecursiveCircuit; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::scheduler::SchedulerCircuit; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ - base_circuit_type_into_recursive_leaf_circuit_type, ZkSyncRecursionLayerStorageType, - ZkSyncRecursionProof, ZkSyncRecursiveLayerCircuit, RECURSION_ARITY, SCHEDULER_CAPACITY, -}; -use zksync_prover_fri_types::circuit_definitions::zk_evm::bytecode_to_code_hash; -use zksync_prover_fri_types::circuit_definitions::zk_evm::testing::storage::InMemoryStorage; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::{ - RecursionLeafInput, RecursionLeafInstanceWitness, -}; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::leaf_layer::LeafLayerRecursionConfig; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::node_layer::input::{ - RecursionNodeInput, RecursionNodeInstanceWitness, -}; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::node_layer::NodeLayerRecursionConfig; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::SchedulerConfig; -use zksync_prover_fri_types::circuit_definitions::{ - base_layer_proof_config, recursion_layer_proof_config, zk_evm, ZkSyncDefaultRoundFunction, +use std::{ + collections::{HashMap, VecDeque}, + fs, }; + use anyhow::Context as _; use itertools::Itertools; -use std::collections::{HashMap, VecDeque}; -use std::fs; 
-use zkevm_test_harness::compute_setups::{ - generate_base_layer_vks_and_proofs, generate_recursive_layer_vks_and_proofs, +use zkevm_test_harness::{ + compute_setups::{generate_base_layer_vks_and_proofs, generate_recursive_layer_vks_and_proofs}, + data_source::{in_memory_data_source::InMemoryDataSource, BlockDataSource}, + ethereum_types::{Address, U256}, + external_calls::run, + helper::artifact_utils::{save_predeployed_contracts, TestArtifact}, + sha3::{Digest, Keccak256}, + toolset::GeometryConfig, + witness::{ + full_block_artifact::{ + BlockBasicCircuits, BlockBasicCircuitsPublicCompactFormsWitnesses, + BlockBasicCircuitsPublicInputs, + }, + recursive_aggregation::compute_leaf_params, + tree::{BinarySparseStorageTree, ZKSyncTestingTree}, + }, }; -use zkevm_test_harness::data_source::BlockDataSource; -use zkevm_test_harness::ethereum_types::{Address, U256}; -use zkevm_test_harness::external_calls::run; -use zkevm_test_harness::helper::artifact_utils::{save_predeployed_contracts, TestArtifact}; -use zkevm_test_harness::sha3::{Digest, Keccak256}; -use zkevm_test_harness::toolset::GeometryConfig; -use zkevm_test_harness::witness::full_block_artifact::{ - BlockBasicCircuits, BlockBasicCircuitsPublicCompactFormsWitnesses, - BlockBasicCircuitsPublicInputs, +use zksync_prover_fri_types::circuit_definitions::{ + aux_definitions::witness_oracle::VmWitnessOracle, + base_layer_proof_config, + boojum::{ + field::goldilocks::{GoldilocksExt2, GoldilocksField}, + gadgets::{ + queue::full_state_queue::FullStateCircuitQueueRawWitness, + recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, + traits::allocatable::CSAllocatable, + }, + }, + circuit_definitions::{ + base_layer::ZkSyncBaseLayerCircuit, + recursion_layer::{ + base_circuit_type_into_recursive_leaf_circuit_type, + leaf_layer::ZkSyncLeafLayerRecursiveCircuit, + node_layer::ZkSyncNodeLayerRecursiveCircuit, scheduler::SchedulerCircuit, + ZkSyncRecursionLayerStorageType, ZkSyncRecursionProof, 
ZkSyncRecursiveLayerCircuit, + RECURSION_ARITY, SCHEDULER_CAPACITY, + }, + }, + recursion_layer_proof_config, zk_evm, + zk_evm::{bytecode_to_code_hash, testing::storage::InMemoryStorage}, + zkevm_circuits::{ + recursion::{ + leaf_layer::{ + input::{ + RecursionLeafInput, RecursionLeafInstanceWitness, + RecursionLeafParametersWitness, + }, + LeafLayerRecursionConfig, + }, + node_layer::{ + input::{RecursionNodeInput, RecursionNodeInstanceWitness}, + NodeLayerRecursionConfig, + }, + }, + scheduler::{ + aux::BaseLayerCircuitType, input::SchedulerCircuitInstanceWitness, SchedulerConfig, + }, + }, + ZkSyncDefaultRoundFunction, }; -use zkevm_test_harness::witness::recursive_aggregation::compute_leaf_params; -use zkevm_test_harness::witness::tree::{BinarySparseStorageTree, ZKSyncTestingTree}; -use zkevm_test_harness::data_source::in_memory_data_source::InMemoryDataSource; +use crate::{ + get_base_layer_vk_for_circuit_type, get_base_path, get_recursive_layer_vk_for_circuit_type, +}; pub const CYCLE_LIMIT: usize = 20000; diff --git a/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs b/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs index 2a94b53eb63..2b633bc6d08 100644 --- a/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs +++ b/prover/vk_setup_data_generator_server_fri/src/vk_generator.rs @@ -1,23 +1,26 @@ use anyhow::Context as _; -use zkevm_test_harness::geometry_config::get_geometry_config; -use zkevm_test_harness::prover_utils::{ - create_base_layer_setup_data, create_recursive_layer_setup_data, +use zkevm_test_harness::{ + geometry_config::get_geometry_config, + prover_utils::{create_base_layer_setup_data, create_recursive_layer_setup_data}, }; -use zksync_prover_fri_types::circuit_definitions::boojum::worker::Worker; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerVerificationKey; -use zksync_prover_fri_types::circuit_definitions::{ - BASE_LAYER_CAP_SIZE, 
BASE_LAYER_FRI_LDE_FACTOR, +use zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::worker::Worker, + circuit_definitions::{ + base_layer::ZkSyncBaseLayerVerificationKey, + recursion_layer::ZkSyncRecursionLayerVerificationKey, + }, + BASE_LAYER_CAP_SIZE, BASE_LAYER_FRI_LDE_FACTOR, + }, + ProverServiceDataKey, }; -use zksync_vk_setup_data_server_fri::utils::{get_basic_circuits, get_leaf_circuits, CYCLE_LIMIT}; +use zksync_types::proofs::AggregationRound; use zksync_vk_setup_data_server_fri::{ get_round_for_recursive_circuit_type, save_base_layer_vk, save_finalization_hints, save_recursive_layer_vk, + utils::{get_basic_circuits, get_leaf_circuits, CYCLE_LIMIT}, }; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerVerificationKey; -use zksync_prover_fri_types::ProverServiceDataKey; -use zksync_types::proofs::AggregationRound; - #[allow(dead_code)] fn main() -> anyhow::Result<()> { tracing::info!("starting vk generator"); diff --git a/prover/witness_generator/src/basic_circuits.rs b/prover/witness_generator/src/basic_circuits.rs index 963ec034b26..b883c4bb35a 100644 --- a/prover/witness_generator/src/basic_circuits.rs +++ b/prover/witness_generator/src/basic_circuits.rs @@ -1,54 +1,61 @@ -use std::hash::Hash; -use std::sync::Arc; use std::{ collections::{hash_map::DefaultHasher, HashMap, HashSet}, - hash::Hasher, + hash::{Hash, Hasher}, + sync::Arc, time::Instant, }; use anyhow::Context as _; use async_trait::async_trait; -use zksync_prover_fri_types::circuit_definitions::ZkSyncDefaultRoundFunction; -use rand::Rng; -use serde::{Deserialize, Serialize}; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; -use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; -use zkevm_test_harness::geometry_config::get_geometry_config; -use 
zkevm_test_harness::toolset::GeometryConfig; -use zkevm_test_harness::witness::full_block_artifact::{ - BlockBasicCircuits, BlockBasicCircuitsPublicCompactFormsWitnesses, - BlockBasicCircuitsPublicInputs, -}; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::block_header::BlockAuxilaryOutputWitness; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; -use zksync_prover_fri_types::{AuxOutputWitnessWrapper, get_current_pod_name}; - -use crate::metrics::WITNESS_GENERATOR_METRICS; -use crate::storage_oracle::StorageOracle; use multivm::vm_latest::{ constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle, }; +use rand::Rng; +use serde::{Deserialize, Serialize}; +use zkevm_test_harness::{ + geometry_config::get_geometry_config, + toolset::GeometryConfig, + witness::full_block_artifact::{ + BlockBasicCircuits, BlockBasicCircuitsPublicCompactFormsWitnesses, + BlockBasicCircuitsPublicInputs, + }, +}; use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_dal::fri_witness_generator_dal::FriWitnessJobStatus; -use zksync_dal::ConnectionPool; +use zksync_dal::{fri_witness_generator_dal::FriWitnessJobStatus, ConnectionPool}; use zksync_object_store::{ Bucket, ClosedFormInputKey, ObjectStore, ObjectStoreFactory, StoredObject, }; +use zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::{ + field::goldilocks::{GoldilocksExt2, GoldilocksField}, + gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, + }, + zkevm_circuits::scheduler::{ + block_header::BlockAuxilaryOutputWitness, input::SchedulerCircuitInstanceWitness, + }, + ZkSyncDefaultRoundFunction, + }, + get_current_pod_name, AuxOutputWitnessWrapper, +}; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_queued_job_processor::JobProcessor; use zksync_state::{PostgresStorage, StorageView}; -use zksync_types::proofs::AggregationRound; 
-use zksync_types::protocol_version::FriProtocolVersionId; use zksync_types::{ - proofs::{BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob}, + proofs::{AggregationRound, BasicCircuitWitnessGeneratorInput, PrepareBasicCircuitsJob}, + protocol_version::FriProtocolVersionId, Address, L1BatchNumber, BOOTLOADER_ADDRESS, H256, U256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; -use crate::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider; -use crate::utils::{ - expand_bootloader_contents, save_base_prover_input_artifacts, ClosedFormInputWrapper, - SchedulerPartialInputWrapper, +use crate::{ + metrics::WITNESS_GENERATOR_METRICS, + precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider, + storage_oracle::StorageOracle, + utils::{ + expand_bootloader_contents, save_base_prover_input_artifacts, ClosedFormInputWrapper, + SchedulerPartialInputWrapper, + }, }; pub struct BasicCircuitArtifacts { diff --git a/prover/witness_generator/src/leaf_aggregation.rs b/prover/witness_generator/src/leaf_aggregation.rs index d90520a19e6..f190aeb2164 100644 --- a/prover/witness_generator/src/leaf_aggregation.rs +++ b/prover/witness_generator/src/leaf_aggregation.rs @@ -1,36 +1,46 @@ -use zkevm_test_harness::witness::recursive_aggregation::{ - compute_leaf_params, create_leaf_witnesses, -}; - -use anyhow::Context as _; use std::time::Instant; +use anyhow::Context as _; use async_trait::async_trait; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::{ - ZkSyncBaseLayerClosedFormInput, ZkSyncBaseLayerProof, ZkSyncBaseLayerVerificationKey, +use zkevm_test_harness::witness::recursive_aggregation::{ + compute_leaf_params, create_leaf_witnesses, +}; +use zksync_config::configs::FriWitnessGeneratorConfig; +use zksync_dal::ConnectionPool; +use zksync_object_store::{ClosedFormInputKey, ObjectStore, 
ObjectStoreFactory}; +use zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::field::goldilocks::GoldilocksField, + circuit_definitions::{ + base_layer::{ + ZkSyncBaseLayerClosedFormInput, ZkSyncBaseLayerProof, + ZkSyncBaseLayerVerificationKey, + }, + recursion_layer::ZkSyncRecursiveLayerCircuit, + }, + encodings::recursion_request::RecursionQueueSimulator, + zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, + }, + get_current_pod_name, FriProofWrapper, }; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursiveLayerCircuit; -use zksync_prover_fri_types::circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; -use zksync_prover_fri_types::{get_current_pod_name, FriProofWrapper}; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::{ + proofs::{AggregationRound, LeafAggregationJobMetadata}, + protocol_version::FriProtocolVersionId, + L1BatchNumber, +}; use zksync_vk_setup_data_server_fri::{ get_base_layer_vk_for_circuit_type, get_recursive_layer_vk_for_circuit_type, }; -use crate::metrics::WITNESS_GENERATOR_METRICS; -use crate::utils::{ - load_proofs_for_job_ids, save_node_aggregations_artifacts, - save_recursive_layer_prover_input_artifacts, ClosedFormInputWrapper, +use crate::{ + metrics::WITNESS_GENERATOR_METRICS, + utils::{ + load_proofs_for_job_ids, save_node_aggregations_artifacts, + save_recursive_layer_prover_input_artifacts, ClosedFormInputWrapper, + }, }; -use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_dal::ConnectionPool; -use zksync_object_store::{ClosedFormInputKey, ObjectStore, ObjectStoreFactory}; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; -use zksync_queued_job_processor::JobProcessor; -use zksync_types::proofs::{AggregationRound, 
LeafAggregationJobMetadata}; -use zksync_types::protocol_version::FriProtocolVersionId; -use zksync_types::L1BatchNumber; pub struct LeafAggregationArtifacts { circuit_id: u8, diff --git a/prover/witness_generator/src/main.rs b/prover/witness_generator/src/main.rs index a8d0bda48f2..c977f216104 100644 --- a/prover/witness_generator/src/main.rs +++ b/prover/witness_generator/src/main.rs @@ -1,37 +1,37 @@ #![feature(generic_const_exprs)] +use std::time::Instant; + use anyhow::{anyhow, Context as _}; use prometheus_exporter::PrometheusExporterConfig; -use std::time::Instant; use structopt::StructOpt; use tokio::sync::watch; -use zksync_config::configs::{FriWitnessGeneratorConfig, PostgresConfig, PrometheusConfig}; -use zksync_config::ObjectStoreConfig; +use zksync_config::{ + configs::{FriWitnessGeneratorConfig, PostgresConfig, PrometheusConfig}, + ObjectStoreConfig, +}; use zksync_dal::ConnectionPool; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; use zksync_object_store::ObjectStoreFactory; use zksync_prover_utils::get_stop_signal_receiver; use zksync_queued_job_processor::JobProcessor; -use zksync_types::proofs::AggregationRound; -use zksync_types::web3::futures::StreamExt; +use zksync_types::{proofs::AggregationRound, web3::futures::StreamExt}; use zksync_utils::wait_for_tasks::wait_for_tasks; use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments; -use crate::basic_circuits::BasicWitnessGenerator; -use crate::leaf_aggregation::LeafAggregationWitnessGenerator; -use crate::metrics::SERVER_METRICS; -use crate::node_aggregation::NodeAggregationWitnessGenerator; -use crate::scheduler::SchedulerWitnessGenerator; +use crate::{ + basic_circuits::BasicWitnessGenerator, leaf_aggregation::LeafAggregationWitnessGenerator, + metrics::SERVER_METRICS, node_aggregation::NodeAggregationWitnessGenerator, + scheduler::SchedulerWitnessGenerator, +}; mod basic_circuits; mod leaf_aggregation; +mod metrics; mod node_aggregation; mod 
precalculated_merkle_paths_provider; mod scheduler; mod storage_oracle; - -mod metrics; - mod utils; #[derive(Debug, StructOpt)] diff --git a/prover/witness_generator/src/metrics.rs b/prover/witness_generator/src/metrics.rs index 3bddefc00c4..f0497dd23a1 100644 --- a/prover/witness_generator/src/metrics.rs +++ b/prover/witness_generator/src/metrics.rs @@ -1,4 +1,5 @@ use std::time::Duration; + use vise::{Buckets, Counter, Family, Gauge, Histogram, LabeledFamily, Metrics}; use zksync_prover_fri_utils::metrics::StageLabel; diff --git a/prover/witness_generator/src/node_aggregation.rs b/prover/witness_generator/src/node_aggregation.rs index 1ae5a255197..be9e5d0d622 100644 --- a/prover/witness_generator/src/node_aggregation.rs +++ b/prover/witness_generator/src/node_aggregation.rs @@ -2,33 +2,41 @@ use std::time::Instant; use anyhow::Context as _; use async_trait::async_trait; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ - ZkSyncRecursionLayerProof, ZkSyncRecursionLayerStorageType, - ZkSyncRecursionLayerVerificationKey, ZkSyncRecursiveLayerCircuit, -}; -use zksync_prover_fri_types::circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; - use zkevm_test_harness::witness::recursive_aggregation::{ compute_node_vk_commitment, create_node_witnesses, }; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness; -use zksync_vk_setup_data_server_fri::get_recursive_layer_vk_for_circuit_type; -use zksync_vk_setup_data_server_fri::utils::get_leaf_vk_params; - -use crate::metrics::WITNESS_GENERATOR_METRICS; -use crate::utils::{ - load_proofs_for_job_ids, save_node_aggregations_artifacts, - save_recursive_layer_prover_input_artifacts, AggregationWrapper, -}; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::ConnectionPool; use 
zksync_object_store::{AggregationsKey, ObjectStore, ObjectStoreFactory}; -use zksync_prover_fri_types::{get_current_pod_name, FriProofWrapper}; +use zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::field::goldilocks::GoldilocksField, + circuit_definitions::recursion_layer::{ + ZkSyncRecursionLayerProof, ZkSyncRecursionLayerStorageType, + ZkSyncRecursionLayerVerificationKey, ZkSyncRecursiveLayerCircuit, + }, + encodings::recursion_request::RecursionQueueSimulator, + zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, + }, + get_current_pod_name, FriProofWrapper, +}; use zksync_queued_job_processor::JobProcessor; -use zksync_types::proofs::NodeAggregationJobMetadata; -use zksync_types::protocol_version::FriProtocolVersionId; -use zksync_types::{proofs::AggregationRound, L1BatchNumber}; +use zksync_types::{ + proofs::{AggregationRound, NodeAggregationJobMetadata}, + protocol_version::FriProtocolVersionId, + L1BatchNumber, +}; +use zksync_vk_setup_data_server_fri::{ + get_recursive_layer_vk_for_circuit_type, utils::get_leaf_vk_params, +}; + +use crate::{ + metrics::WITNESS_GENERATOR_METRICS, + utils::{ + load_proofs_for_job_ids, save_node_aggregations_artifacts, + save_recursive_layer_prover_input_artifacts, AggregationWrapper, + }, +}; pub struct NodeAggregationArtifacts { circuit_id: u8, diff --git a/prover/witness_generator/src/precalculated_merkle_paths_provider.rs b/prover/witness_generator/src/precalculated_merkle_paths_provider.rs index 01bfd78fb28..89f5ca408aa 100644 --- a/prover/witness_generator/src/precalculated_merkle_paths_provider.rs +++ b/prover/witness_generator/src/precalculated_merkle_paths_provider.rs @@ -1,12 +1,9 @@ use serde::{Deserialize, Serialize}; - -use zkevm_test_harness::witness::tree::{BinaryHasher, EnumeratedBinaryLeaf, LeafQuery}; - -use zksync_types::proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}; - use zk_evm::blake2::Blake2s256; -use 
zkevm_test_harness::witness::tree::BinarySparseStorageTree; -use zkevm_test_harness::witness::tree::ZkSyncStorageLeaf; +use zkevm_test_harness::witness::tree::{ + BinaryHasher, BinarySparseStorageTree, EnumeratedBinaryLeaf, LeafQuery, ZkSyncStorageLeaf, +}; +use zksync_types::proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}; #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)] pub struct PrecalculatedMerklePathsProvider { diff --git a/prover/witness_generator/src/scheduler.rs b/prover/witness_generator/src/scheduler.rs index 5036aa188ec..921ba68f402 100644 --- a/prover/witness_generator/src/scheduler.rs +++ b/prover/witness_generator/src/scheduler.rs @@ -1,31 +1,37 @@ -use std::convert::TryInto; -use std::time::Instant; +use std::{convert::TryInto, time::Instant}; use anyhow::Context as _; use async_trait::async_trait; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::{GoldilocksExt2, GoldilocksField}; -use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::scheduler::SchedulerCircuit; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ - ZkSyncRecursionLayerStorageType, ZkSyncRecursionLayerVerificationKey, - ZkSyncRecursiveLayerCircuit, SCHEDULER_CAPACITY, -}; -use zksync_prover_fri_types::circuit_definitions::recursion_layer_proof_config; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::SchedulerConfig; -use zksync_vk_setup_data_server_fri::get_recursive_layer_vk_for_circuit_type; -use zksync_vk_setup_data_server_fri::utils::get_leaf_vk_params; - -use crate::metrics::WITNESS_GENERATOR_METRICS; -use crate::utils::{load_proofs_for_job_ids, SchedulerPartialInputWrapper}; use 
zksync_config::configs::FriWitnessGeneratorConfig; use zksync_dal::ConnectionPool; use zksync_object_store::{FriCircuitKey, ObjectStore, ObjectStoreFactory}; -use zksync_prover_fri_types::{get_current_pod_name, CircuitWrapper, FriProofWrapper}; +use zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::{ + field::goldilocks::{GoldilocksExt2, GoldilocksField}, + gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, + }, + circuit_definitions::recursion_layer::{ + scheduler::SchedulerCircuit, ZkSyncRecursionLayerStorageType, + ZkSyncRecursionLayerVerificationKey, ZkSyncRecursiveLayerCircuit, SCHEDULER_CAPACITY, + }, + recursion_layer_proof_config, + zkevm_circuits::scheduler::{input::SchedulerCircuitInstanceWitness, SchedulerConfig}, + }, + get_current_pod_name, CircuitWrapper, FriProofWrapper, +}; use zksync_queued_job_processor::JobProcessor; -use zksync_types::proofs::AggregationRound; -use zksync_types::protocol_version::FriProtocolVersionId; -use zksync_types::L1BatchNumber; +use zksync_types::{ + proofs::AggregationRound, protocol_version::FriProtocolVersionId, L1BatchNumber, +}; +use zksync_vk_setup_data_server_fri::{ + get_recursive_layer_vk_for_circuit_type, utils::get_leaf_vk_params, +}; + +use crate::{ + metrics::WITNESS_GENERATOR_METRICS, + utils::{load_proofs_for_job_ids, SchedulerPartialInputWrapper}, +}; pub struct SchedulerArtifacts { pub scheduler_circuit: ZkSyncRecursiveLayerCircuit, diff --git a/prover/witness_generator/src/storage_oracle.rs b/prover/witness_generator/src/storage_oracle.rs index a23a08aa6ee..6771a025213 100644 --- a/prover/witness_generator/src/storage_oracle.rs +++ b/prover/witness_generator/src/storage_oracle.rs @@ -1,7 +1,7 @@ -use zksync_types::zkevm_test_harness::zk_evm::abstractions::{ - RefundType, RefundedAmounts, Storage, +use zksync_types::{ + zkevm_test_harness::zk_evm::abstractions::{RefundType, RefundedAmounts, Storage}, + LogQuery, Timestamp, }; -use zksync_types::{LogQuery, Timestamp}; 
#[derive(Debug)] pub struct StorageOracle { diff --git a/prover/witness_generator/src/tests.rs b/prover/witness_generator/src/tests.rs index 27f5637c0b3..7fd95a7c7d8 100644 --- a/prover/witness_generator/src/tests.rs +++ b/prover/witness_generator/src/tests.rs @@ -1,11 +1,13 @@ -use const_decoder::Decoder::Hex; - use std::iter; -use super::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider; +use const_decoder::Decoder::Hex; use zkevm_test_harness::witness::tree::{BinarySparseStorageTree, ZkSyncStorageLeaf}; -use zksync_types::proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}; -use zksync_types::U256; +use zksync_types::{ + proofs::{PrepareBasicCircuitsJob, StorageLogMetadata}, + U256, +}; + +use super::precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider; // Sample `StorageLogMetadata` entries. Since we cannot allocate in constants, we store // the only Merkle path hash separately. diff --git a/prover/witness_generator/src/utils.rs b/prover/witness_generator/src/utils.rs index 0a70858977f..6efa333a819 100644 --- a/prover/witness_generator/src/utils.rs +++ b/prover/witness_generator/src/utils.rs @@ -1,26 +1,28 @@ -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksExt2; -use zksync_prover_fri_types::circuit_definitions::boojum::gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::base_layer::{ - ZkSyncBaseLayerClosedFormInput, +use zkevm_test_harness::{ + boojum::field::goldilocks::GoldilocksField, witness::full_block_artifact::BlockBasicCircuits, }; -use zksync_prover_fri_types::circuit_definitions::circuit_definitions::recursion_layer::{ - ZkSyncRecursiveLayerCircuit, -}; - -use zksync_prover_fri_types::circuit_definitions::encodings::recursion_request::RecursionQueueSimulator; - -use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField; -use 
zkevm_test_harness::witness::full_block_artifact::BlockBasicCircuits; use zksync_object_store::{ serialize_using_bincode, AggregationsKey, Bucket, ClosedFormInputKey, FriCircuitKey, ObjectStore, StoredObject, }; -use zksync_prover_fri_types::circuit_definitions::zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness; -use zksync_prover_fri_types::circuit_definitions::ZkSyncDefaultRoundFunction; -use zksync_prover_fri_types::{CircuitWrapper, FriProofWrapper}; +use zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::{ + field::goldilocks::GoldilocksExt2, + gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, + }, + circuit_definitions::{ + base_layer::ZkSyncBaseLayerClosedFormInput, + recursion_layer::ZkSyncRecursiveLayerCircuit, + }, + encodings::recursion_request::RecursionQueueSimulator, + zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness, + ZkSyncDefaultRoundFunction, + }, + CircuitWrapper, FriProofWrapper, +}; use zksync_system_constants::USED_BOOTLOADER_MEMORY_BYTES; -use zksync_types::proofs::AggregationRound; -use zksync_types::{L1BatchNumber, U256}; +use zksync_types::{proofs::AggregationRound, L1BatchNumber, U256}; pub fn expand_bootloader_contents(packed: &[(usize, U256)]) -> Vec { let mut result = vec![0u8; USED_BOOTLOADER_MEMORY_BYTES]; diff --git a/prover/witness_generator/tests/basic_test.rs b/prover/witness_generator/tests/basic_test.rs index 1c8d00ff35e..16cce19929d 100644 --- a/prover/witness_generator/tests/basic_test.rs +++ b/prover/witness_generator/tests/basic_test.rs @@ -4,20 +4,20 @@ use serde::Serialize; use zksync_config::ObjectStoreConfig; use zksync_env_config::FromEnv; use zksync_object_store::{AggregationsKey, FriCircuitKey, ObjectStoreFactory}; -use zksync_types::proofs::{ - AggregationRound, LeafAggregationJobMetadata, NodeAggregationJobMetadata, -}; -use zksync_types::L1BatchNumber; - use zksync_prover_fri_types::CircuitWrapper; use 
zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; -use zksync_witness_generator::leaf_aggregation::{ - prepare_leaf_aggregation_job, LeafAggregationWitnessGenerator, +use zksync_types::{ + proofs::{AggregationRound, LeafAggregationJobMetadata, NodeAggregationJobMetadata}, + L1BatchNumber, +}; +use zksync_witness_generator::{ + leaf_aggregation::{prepare_leaf_aggregation_job, LeafAggregationWitnessGenerator}, + node_aggregation, + node_aggregation::NodeAggregationWitnessGenerator, + scheduler, + scheduler::SchedulerWitnessGenerator, + utils::AggregationWrapper, }; -use zksync_witness_generator::node_aggregation::NodeAggregationWitnessGenerator; -use zksync_witness_generator::scheduler::SchedulerWitnessGenerator; -use zksync_witness_generator::utils::AggregationWrapper; -use zksync_witness_generator::{node_aggregation, scheduler}; fn compare_serialized(expected: &T, actual: &T) { let serialized_expected = bincode::serialize(expected).unwrap(); diff --git a/prover/witness_vector_generator/src/generator.rs b/prover/witness_vector_generator/src/generator.rs index ee17e0edaa7..74e25b38988 100644 --- a/prover/witness_vector_generator/src/generator.rs +++ b/prover/witness_vector_generator/src/generator.rs @@ -2,24 +2,27 @@ use std::time::{Duration, Instant}; use anyhow::Context as _; use async_trait::async_trait; -use tokio::task::JoinHandle; - -use crate::metrics::METRICS; -use tokio::time::sleep; +use tokio::{task::JoinHandle, time::sleep}; use zksync_config::configs::FriWitnessVectorGeneratorConfig; use zksync_dal::ConnectionPool; use zksync_object_store::ObjectStore; -use zksync_prover_fri_types::circuit_definitions::boojum::field::goldilocks::GoldilocksField; -use zksync_prover_fri_types::{CircuitWrapper, ProverJob, WitnessVectorArtifacts}; -use zksync_prover_fri_utils::fetch_next_circuit; -use zksync_prover_fri_utils::get_numeric_circuit_id; -use zksync_prover_fri_utils::socket_utils::send_assembly; +use zksync_prover_fri_types::{ + 
circuit_definitions::boojum::field::goldilocks::GoldilocksField, CircuitWrapper, ProverJob, + WitnessVectorArtifacts, +}; +use zksync_prover_fri_utils::{ + fetch_next_circuit, get_numeric_circuit_id, socket_utils::send_assembly, +}; use zksync_queued_job_processor::JobProcessor; -use zksync_types::basic_fri_types::CircuitIdRoundTuple; -use zksync_types::proofs::{GpuProverInstanceStatus, SocketAddress}; -use zksync_types::protocol_version::L1VerifierConfig; +use zksync_types::{ + basic_fri_types::CircuitIdRoundTuple, + proofs::{GpuProverInstanceStatus, SocketAddress}, + protocol_version::L1VerifierConfig, +}; use zksync_vk_setup_data_server_fri::get_finalization_hints; +use crate::metrics::METRICS; + pub struct WitnessVectorGenerator { blob_store: Box, pool: ConnectionPool, diff --git a/prover/witness_vector_generator/src/main.rs b/prover/witness_vector_generator/src/main.rs index 7c1aa8e0b89..1358680d3a1 100644 --- a/prover/witness_vector_generator/src/main.rs +++ b/prover/witness_vector_generator/src/main.rs @@ -3,12 +3,10 @@ use anyhow::Context as _; use prometheus_exporter::PrometheusExporterConfig; use structopt::StructOpt; -use tokio::{sync::oneshot, sync::watch}; - -use crate::generator::WitnessVectorGenerator; -use zksync_config::configs::fri_prover_group::FriProverGroupConfig; +use tokio::sync::{oneshot, watch}; use zksync_config::configs::{ - FriProverConfig, FriWitnessVectorGeneratorConfig, PostgresConfig, ProverGroupConfig, + fri_prover_group::FriProverGroupConfig, FriProverConfig, FriWitnessVectorGeneratorConfig, + PostgresConfig, ProverGroupConfig, }; use zksync_dal::ConnectionPool; use zksync_env_config::{object_store::ProverObjectStoreConfig, FromEnv}; @@ -19,6 +17,8 @@ use zksync_queued_job_processor::JobProcessor; use zksync_utils::wait_for_tasks::wait_for_tasks; use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments; +use crate::generator::WitnessVectorGenerator; + mod generator; mod metrics; diff --git 
a/prover/witness_vector_generator/src/metrics.rs b/prover/witness_vector_generator/src/metrics.rs index 4bc11ff401b..7c8284244b6 100644 --- a/prover/witness_vector_generator/src/metrics.rs +++ b/prover/witness_vector_generator/src/metrics.rs @@ -1,4 +1,5 @@ use std::time::Duration; + use vise::{Buckets, Histogram, LabeledFamily, Metrics}; #[derive(Debug, Metrics)] diff --git a/prover/witness_vector_generator/tests/basic_test.rs b/prover/witness_vector_generator/tests/basic_test.rs index 5ed0769d416..648b1ee4d9e 100644 --- a/prover/witness_vector_generator/tests/basic_test.rs +++ b/prover/witness_vector_generator/tests/basic_test.rs @@ -1,7 +1,7 @@ use std::fs; + use zksync_prover_fri_types::{CircuitWrapper, ProverJob, ProverServiceDataKey}; -use zksync_types::proofs::AggregationRound; -use zksync_types::L1BatchNumber; +use zksync_types::{proofs::AggregationRound, L1BatchNumber}; use zksync_witness_vector_generator::generator::WitnessVectorGenerator; #[test] diff --git a/sdk/zksync-rs/src/ethereum/mod.rs b/sdk/zksync-rs/src/ethereum/mod.rs index 3371c720149..ac91e358983 100644 --- a/sdk/zksync-rs/src/ethereum/mod.rs +++ b/sdk/zksync-rs/src/ethereum/mod.rs @@ -1,32 +1,34 @@ //! Utilities for the on-chain operations, such as `Deposit` and `FullExit`. 
-use core::{convert::TryFrom, time::Duration}; +use std::{ + convert::TryFrom, + time::{Duration, Instant}, +}; + use serde_json::{Map, Value}; -use std::time::Instant; +use zksync_eth_client::{ + clients::http::SigningClient, types::Error, BoundEthInterface, EthInterface, +}; +use zksync_eth_signer::EthereumSigner; use zksync_types::{ api::BridgeAddresses, + l1::L1Tx, + network::Network, web3::{ contract::{tokens::Tokenize, Options}, ethabi, transports::Http, types::{TransactionReceipt, H160, H256, U256}, }, - L1ChainId, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, + Address, L1ChainId, L1TxCommonData, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, }; use zksync_web3_decl::namespaces::{EthNamespaceClient, ZksNamespaceClient}; -use zksync_eth_client::{ - clients::http::SigningClient, types::Error, BoundEthInterface, EthInterface, -}; -use zksync_eth_signer::EthereumSigner; -use zksync_types::network::Network; -use zksync_types::{l1::L1Tx, Address, L1TxCommonData}; - -use crate::web3::ethabi::Bytes; use crate::{ error::ClientError, operations::SyncTransactionHandle, utils::{is_token_eth, load_contract}, + web3::ethabi::Bytes, }; const IERC20_INTERFACE: &str = include_str!("../abi/IERC20.json"); diff --git a/sdk/zksync-rs/src/lib.rs b/sdk/zksync-rs/src/lib.rs index 4ee03e8ff9d..aa4a158ef06 100644 --- a/sdk/zksync-rs/src/lib.rs +++ b/sdk/zksync-rs/src/lib.rs @@ -1,18 +1,15 @@ +pub use zksync_types::{self, network::Network, web3}; +pub use zksync_web3_decl::{ + jsonrpsee::http_client::*, + namespaces::{EthNamespaceClient, NetNamespaceClient, Web3NamespaceClient, ZksNamespaceClient}, + types, +}; + +pub use crate::{ethereum::EthereumProvider, wallet::Wallet}; + pub mod error; pub mod ethereum; pub mod operations; pub mod signer; pub mod utils; pub mod wallet; - -pub use crate::{ethereum::EthereumProvider, wallet::Wallet}; -pub use zksync_types::network::Network; - -pub use zksync_types; -pub use zksync_types::web3; - -pub use zksync_web3_decl::{ - jsonrpsee::http_client::*, - 
namespaces::{EthNamespaceClient, NetNamespaceClient, Web3NamespaceClient, ZksNamespaceClient}, - types, -}; diff --git a/sdk/zksync-rs/src/operations/mod.rs b/sdk/zksync-rs/src/operations/mod.rs index a59bd57a2dd..36a0d2b29b8 100644 --- a/sdk/zksync-rs/src/operations/mod.rs +++ b/sdk/zksync-rs/src/operations/mod.rs @@ -2,10 +2,9 @@ use std::time::{Duration, Instant}; -use crate::{error::ClientError, EthNamespaceClient}; -use zksync_types::l2::L2Tx; use zksync_types::{ api::{BlockNumber, TransactionReceipt}, + l2::L2Tx, Bytes, L2ChainId, H256, }; @@ -15,6 +14,7 @@ pub use self::{ transfer::{create_transfer_calldata, TransferBuilder}, withdraw::WithdrawBuilder, }; +use crate::{error::ClientError, EthNamespaceClient}; mod deploy_contract; mod execute_contract; diff --git a/sdk/zksync-rs/src/operations/transfer.rs b/sdk/zksync-rs/src/operations/transfer.rs index 85d90c61fd2..f0f7525e8f6 100644 --- a/sdk/zksync-rs/src/operations/transfer.rs +++ b/sdk/zksync-rs/src/operations/transfer.rs @@ -1,13 +1,14 @@ use zksync_eth_signer::EthereumSigner; -use zksync_types::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{fee::Fee, l2::L2Tx, Address, Nonce, U256}; +use zksync_types::{fee::Fee, l2::L2Tx, Address, Nonce, L2_ETH_TOKEN_ADDRESS, U256}; -use crate::ethereum::ierc20_contract; -use crate::web3::contract::tokens::Tokenize; -use crate::zksync_types::{transaction_request::PaymasterParams, Execute, L2TxCommonData}; use crate::{ - error::ClientError, operations::SyncTransactionHandle, wallet::Wallet, EthNamespaceClient, - ZksNamespaceClient, + error::ClientError, + ethereum::ierc20_contract, + operations::SyncTransactionHandle, + wallet::Wallet, + web3::contract::tokens::Tokenize, + zksync_types::{transaction_request::PaymasterParams, Execute, L2TxCommonData}, + EthNamespaceClient, ZksNamespaceClient, }; pub struct TransferBuilder<'a, S: EthereumSigner, P> { diff --git a/sdk/zksync-rs/src/operations/withdraw.rs b/sdk/zksync-rs/src/operations/withdraw.rs index 0037deacd63..a580a0c35e0 
100644 --- a/sdk/zksync-rs/src/operations/withdraw.rs +++ b/sdk/zksync-rs/src/operations/withdraw.rs @@ -1,9 +1,7 @@ use zksync_eth_signer::EthereumSigner; - -use zksync_types::l2::L2Tx; use zksync_types::{ - fee::Fee, tokens::ETHEREUM_ADDRESS, transaction_request::PaymasterParams, web3::ethabi, - Address, Nonce, L2_ETH_TOKEN_ADDRESS, U256, + fee::Fee, l2::L2Tx, tokens::ETHEREUM_ADDRESS, transaction_request::PaymasterParams, + web3::ethabi, Address, Nonce, L2_ETH_TOKEN_ADDRESS, U256, }; use crate::{ diff --git a/sdk/zksync-rs/src/signer.rs b/sdk/zksync-rs/src/signer.rs index 0c92a354ce5..445c5172ff6 100644 --- a/sdk/zksync-rs/src/signer.rs +++ b/sdk/zksync-rs/src/signer.rs @@ -1,15 +1,12 @@ -// Built-in imports use std::fmt::Debug; -// Workspace uses + use zksync_eth_signer::{error::SignerError, EthereumSigner}; -use zksync_types::L2_ETH_TOKEN_ADDRESS; use zksync_types::{ fee::Fee, l2::L2Tx, transaction_request::PaymasterParams, Address, Eip712Domain, L2ChainId, - Nonce, PackedEthSignature, U256, + Nonce, PackedEthSignature, L2_ETH_TOKEN_ADDRESS, U256, }; -// Local imports -use crate::operations::create_transfer_calldata; -use crate::types::TransactionRequest; + +use crate::{operations::create_transfer_calldata, types::TransactionRequest}; fn signing_failed_error(err: impl ToString) -> SignerError { SignerError::SigningFailed(err.to_string()) diff --git a/sdk/zksync-rs/src/utils.rs b/sdk/zksync-rs/src/utils.rs index c97fe42d47e..5137dbca6db 100644 --- a/sdk/zksync-rs/src/utils.rs +++ b/sdk/zksync-rs/src/utils.rs @@ -1,7 +1,6 @@ use std::str::FromStr; use num::BigUint; - use zksync_types::{transaction_request::PaymasterParams, Address, U256}; use crate::web3::ethabi::{Contract, Token}; diff --git a/sdk/zksync-rs/src/wallet.rs b/sdk/zksync-rs/src/wallet.rs index 7d665b4f42e..dd2cdf14208 100644 --- a/sdk/zksync-rs/src/wallet.rs +++ b/sdk/zksync-rs/src/wallet.rs @@ -1,23 +1,22 @@ use zksync_eth_signer::EthereumSigner; -use 
zksync_types::transaction_request::CallRequest; use zksync_types::{ api::{BlockIdVariant, BlockNumber, TransactionRequest}, l2::L2Tx, tokens::ETHEREUM_ADDRESS, + transaction_request::CallRequest, Address, Bytes, Eip712Domain, U256, }; - use zksync_web3_decl::{ jsonrpsee::http_client::{HttpClient, HttpClientBuilder}, namespaces::{EthNamespaceClient, NetNamespaceClient, Web3NamespaceClient, ZksNamespaceClient}, }; -use crate::web3::contract::tokens::Tokenizable; use crate::{ error::ClientError, ethereum::{ierc20_contract, EthereumProvider}, operations::*, signer::Signer, + web3::contract::tokens::Tokenizable, }; #[derive(Debug)] From dd9b308be9b0a6e37aad75f6f54b98e30a2ae14e Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 7 Dec 2023 12:07:00 +0200 Subject: [PATCH 115/115] fix(job-processor): `max_attepts_reached` metric (#626) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ `max_attepts_reached` metric is now reported on job start rather failure. With this change metric will be reported not only if last attempt failed but also if it got stuck/stopped/etc. ## Why ❔ Reporting `max_attepts_reached` metric for all cases. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. - [x] Spellcheck has been run via `cargo spellcheck --cfg=./spellcheck/era.cfg --code 1`. 
--- core/lib/queued_job_processor/src/lib.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/core/lib/queued_job_processor/src/lib.rs b/core/lib/queued_job_processor/src/lib.rs index 2966fba49ca..49ec8b348ee 100644 --- a/core/lib/queued_job_processor/src/lib.rs +++ b/core/lib/queued_job_processor/src/lib.rs @@ -109,6 +109,16 @@ pub trait JobProcessor: Sync + Send { task: JoinHandle>, ) -> anyhow::Result<()> { let attempts = self.get_job_attempts(&job_id).await?; + let max_attempts = self.max_attempts(); + if attempts == max_attempts { + METRICS.max_attempts_reached[&(Self::SERVICE_NAME, format!("{job_id:?}"))].inc(); + tracing::error!( + "Max attempts ({max_attempts}) reached for {} job {:?}", + Self::SERVICE_NAME, + job_id, + ); + } + let result = loop { tracing::trace!( "Polling {} task with id {:?}. Is finished: {}", @@ -144,15 +154,6 @@ pub trait JobProcessor: Sync + Send { error_message ); - let max_attempts = self.max_attempts(); - if attempts == max_attempts { - METRICS.max_attempts_reached[&(Self::SERVICE_NAME, format!("{job_id:?}"))].inc(); - tracing::error!( - "Max attempts ({max_attempts}) reached for {} job {:?}", - Self::SERVICE_NAME, - job_id, - ); - } self.save_failure(job_id, started_at, error_message).await; Ok(()) }