From ebca913b1b0ed89b755c0ca6d170cf2613374861 Mon Sep 17 00:00:00 2001
From: ilitteri
Date: Fri, 2 Feb 2024 21:36:17 -0300
Subject: [PATCH] Reintroduce L1BatchCommitDataGenerator trait

---
 core/bin/external_node/src/main.rs            |  3 +-
 .../src/i_executor/methods/commit_batches.rs  | 13 ++-
 .../structures/commit_batch_info.rs           | 65 ++-------------
 .../src/l1_batch_commit_data_generator.rs     | 80 +++++++++++++++++++
 core/lib/types/src/lib.rs                     |  2 +
 .../src/consistency_checker/mod.rs            | 11 +--
 .../src/consistency_checker/tests/mod.rs      | 27 ++++---
 .../zksync_core/src/eth_sender/aggregator.rs  | 19 ++---
 .../src/eth_sender/publish_criterion.rs       | 17 ++--
 core/lib/zksync_core/src/eth_sender/tests.rs  | 32 ++++----
 core/lib/zksync_core/src/lib.rs               | 19 ++++-
 11 files changed, 168 insertions(+), 120 deletions(-)
 create mode 100644 core/lib/types/src/l1_batch_commit_data_generator.rs

diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs
index ee15d1a63ca..d298e7ed05e 100644
--- a/core/bin/external_node/src/main.rs
+++ b/core/bin/external_node/src/main.rs
@@ -36,6 +36,7 @@ use zksync_dal::{healthcheck::ConnectionPoolHealthCheck, ConnectionPool};
 use zksync_health_check::CheckHealth;
 use zksync_state::PostgresStorageCaches;
 use zksync_storage::RocksDB;
+use zksync_types::l1_batch_commit_data_generator::RollupModeL1BatchCommitDataGenerator;
 use zksync_utils::wait_for_tasks::wait_for_tasks;
 
 mod config;
@@ -267,7 +268,7 @@ async fn init_tasks(
         .context("failed to build a tree_pool")?;
     let tree_handle = task::spawn(metadata_calculator.run(tree_pool, tree_stop_receiver));
 
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
 
     let consistency_checker_handle = tokio::spawn(
         consistency_checker.run(stop_receiver.clone(), l1_batch_commit_data_generator),
diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs
index 43a8152499f..eee6a83710f 100644
--- a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs
+++ b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs
@@ -1,5 +1,9 @@
-use zksync_config::configs::chain::L1BatchCommitDataGeneratorMode;
-use zksync_types::{commitment::L1BatchWithMetadata, ethabi::Token};
+use std::sync::Arc;
+
+use zksync_types::{
+    commitment::L1BatchWithMetadata, ethabi::Token,
+    l1_batch_commit_data_generator::L1BatchCommitDataGenerator,
+};
 
 use crate::{
     i_executor::structures::{CommitBatchInfo, StoredBatchInfo},
@@ -11,7 +15,7 @@ use crate::{
 pub struct CommitBatches {
     pub last_committed_l1_batch: L1BatchWithMetadata,
     pub l1_batches: Vec<L1BatchWithMetadata>,
-    pub l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+    pub l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
 }
 
 impl Tokenize for CommitBatches {
@@ -21,7 +25,8 @@ impl Tokenize for CommitBatches {
             .l1_batches
             .iter()
             .map(|batch| {
-                CommitBatchInfo::new(batch, self.l1_batch_commit_data_generator).into_token()
+                CommitBatchInfo::new(batch, self.l1_batch_commit_data_generator.clone())
+                    .into_token()
             })
             .collect();
 
diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
index b711aa4fc12..8352c816a95 100644
--- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
+++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
@@ -1,8 +1,9 @@
-use zksync_config::configs::chain::L1BatchCommitDataGeneratorMode;
+use std::sync::Arc;
+
 use zksync_types::{
     commitment::L1BatchWithMetadata,
     ethabi::Token,
-    utils,
+    l1_batch_commit_data_generator::L1BatchCommitDataGenerator,
     web3::{contract::Error as Web3ContractError, error::Error as Web3ApiError},
     U256,
 };
@@ -13,13 +14,13 @@ use crate::Tokenizable;
 #[derive(Debug)]
 pub struct CommitBatchInfo<'a> {
     pub l1_batch_with_metadata: &'a L1BatchWithMetadata,
-    pub l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+    pub l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
 }
 
 impl<'a> CommitBatchInfo<'a> {
     pub fn new(
         l1_batch_with_metadata: &'a L1BatchWithMetadata,
-        l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> Self {
         Self {
             l1_batch_with_metadata,
@@ -51,14 +52,8 @@ impl<'a> Tokenizable for CommitBatchInfo<'a> {
         {
             pre_boojum_into_token(self.l1_batch_with_metadata)
         } else {
-            match self.l1_batch_commit_data_generator {
-                L1BatchCommitDataGeneratorMode::Rollup => {
-                    Token::Tuple(rollup_mode_l1_commit_data(self.l1_batch_with_metadata))
-                }
-                L1BatchCommitDataGeneratorMode::Validium => {
-                    Token::Tuple(validium_mode_l1_commit_data(self.l1_batch_with_metadata))
-                }
-            }
+            self.l1_batch_commit_data_generator
+                .l1_commit_data(self.l1_batch_with_metadata)
         }
     }
 }
@@ -93,49 +88,3 @@ fn pre_boojum_into_token<'a>(l1_batch_commit_with_metadata: &'a L1BatchWithMetad
         ),
     ])
 }
-
-fn validium_mode_l1_commit_data<'a>(l1_batch_with_metadata: &'a L1BatchWithMetadata) -> Vec<Token> {
-    let header = &l1_batch_with_metadata.header;
-    let metadata = &l1_batch_with_metadata.metadata;
-    let commit_data = vec![
-        // `batchNumber`
-        Token::Uint(U256::from(header.number.0)),
-        // `timestamp`
-        Token::Uint(U256::from(header.timestamp)),
-        // `indexRepeatedStorageChanges`
-        Token::Uint(U256::from(metadata.rollup_last_leaf_index)),
-        // `newStateRoot`
-        Token::FixedBytes(metadata.merkle_root_hash.as_bytes().to_vec()),
-        // `numberOfLayer1Txs`
-        Token::Uint(U256::from(header.l1_tx_count)),
-        // `priorityOperationsHash`
-        Token::FixedBytes(header.priority_ops_onchain_data_hash().as_bytes().to_vec()),
-        // `bootloaderHeapInitialContentsHash`
-        Token::FixedBytes(
-            metadata
-                .bootloader_initial_content_commitment
-                .unwrap()
-                .as_bytes()
-                .to_vec(),
-        ),
-        // `eventsQueueStateHash`
-        Token::FixedBytes(
-            metadata
-                .events_queue_commitment
-                .unwrap()
-                .as_bytes()
-                .to_vec(),
-        ),
-        // `systemLogs`
-        Token::Bytes(metadata.l2_l1_messages_compressed.clone()),
-    ];
-    commit_data
-}
-
-fn rollup_mode_l1_commit_data<'a>(l1_batch_with_metadata: &'a L1BatchWithMetadata) -> Vec<Token> {
-    let mut commit_data = validium_mode_l1_commit_data(l1_batch_with_metadata);
-    commit_data.push(Token::Bytes(utils::construct_pubdata(
-        l1_batch_with_metadata,
-    )));
-    commit_data
-}
diff --git a/core/lib/types/src/l1_batch_commit_data_generator.rs b/core/lib/types/src/l1_batch_commit_data_generator.rs
new file mode 100644
index 00000000000..2c2ef93b73c
--- /dev/null
+++ b/core/lib/types/src/l1_batch_commit_data_generator.rs
@@ -0,0 +1,80 @@
+use zksync_basic_types::{ethabi::Token, U256};
+
+use crate::{commitment::L1BatchWithMetadata, utils};
+
+pub trait L1BatchCommitDataGenerator
+where
+    Self: std::fmt::Debug + Send + Sync,
+{
+    fn l1_commit_data(&self, l1_batch_with_metadata: &L1BatchWithMetadata) -> Token;
+    fn l1_commit_data_size(&self, l1_batch_with_metadata: &L1BatchWithMetadata) -> usize {
+        crate::ethabi::encode(&[Token::Array(vec![
+            self.l1_commit_data(l1_batch_with_metadata)
+        ])])
+        .len()
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct RollupModeL1BatchCommitDataGenerator {}
+
+#[derive(Debug, Clone)]
+pub struct ValidiumModeL1BatchCommitDataGenerator {}
+
+impl L1BatchCommitDataGenerator for RollupModeL1BatchCommitDataGenerator {
+    fn l1_commit_data(&self, l1_batch_with_metadata: &L1BatchWithMetadata) -> Token {
+        Token::Tuple(rollup_mode_l1_commit_data(l1_batch_with_metadata))
+    }
+}
+
+impl L1BatchCommitDataGenerator for ValidiumModeL1BatchCommitDataGenerator {
+    fn l1_commit_data(&self, l1_batch_with_metadata: &L1BatchWithMetadata) -> Token {
+        Token::Tuple(validium_mode_l1_commit_data(l1_batch_with_metadata))
+    }
+}
+
+fn validium_mode_l1_commit_data(l1_batch_with_metadata: &L1BatchWithMetadata) -> Vec<Token> {
+    let header = &l1_batch_with_metadata.header;
+    let metadata = &l1_batch_with_metadata.metadata;
+    let commit_data = vec![
+        // `batchNumber`
+        Token::Uint(U256::from(header.number.0)),
+        // `timestamp`
+        Token::Uint(U256::from(header.timestamp)),
+        // `indexRepeatedStorageChanges`
+        Token::Uint(U256::from(metadata.rollup_last_leaf_index)),
+        // `newStateRoot`
+        Token::FixedBytes(metadata.merkle_root_hash.as_bytes().to_vec()),
+        // `numberOfLayer1Txs`
+        Token::Uint(U256::from(header.l1_tx_count)),
+        // `priorityOperationsHash`
+        Token::FixedBytes(header.priority_ops_onchain_data_hash().as_bytes().to_vec()),
+        // `bootloaderHeapInitialContentsHash`
+        Token::FixedBytes(
+            metadata
+                .bootloader_initial_content_commitment
+                .unwrap()
+                .as_bytes()
+                .to_vec(),
+        ),
+        // `eventsQueueStateHash`
+        Token::FixedBytes(
+            metadata
+                .events_queue_commitment
+                .unwrap()
+                .as_bytes()
+                .to_vec(),
+        ),
+        // `systemLogs`
+        Token::Bytes(metadata.l2_l1_messages_compressed.clone()),
+    ];
+    commit_data
+}
+
+fn rollup_mode_l1_commit_data(l1_batch_with_metadata: &L1BatchWithMetadata) -> Vec<Token> {
+    let mut commit_data = validium_mode_l1_commit_data(l1_batch_with_metadata);
+    commit_data.push(Token::Bytes(utils::construct_pubdata(
+        l1_batch_with_metadata,
+    )));
+    commit_data
+}
diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs
index 27cffb360a3..775ea2cd89c 100644
--- a/core/lib/types/src/lib.rs
+++ b/core/lib/types/src/lib.rs
@@ -55,6 +55,8 @@ pub mod transaction_request;
 pub mod utils;
 pub mod vm_version;
 
+pub mod l1_batch_commit_data_generator;
+
 /// Denotes the first byte of the special zkSync's EIP-712-signed transaction.
 pub const EIP_712_TX_TYPE: u8 = 0x71;
 
diff --git a/core/lib/zksync_core/src/consistency_checker/mod.rs b/core/lib/zksync_core/src/consistency_checker/mod.rs
index 08d53d63fe4..657ab9e20b5 100644
--- a/core/lib/zksync_core/src/consistency_checker/mod.rs
+++ b/core/lib/zksync_core/src/consistency_checker/mod.rs
@@ -1,13 +1,14 @@
-use std::{fmt, time::Duration};
+use std::{fmt, sync::Arc, time::Duration};
 
 use anyhow::Context as _;
 use tokio::sync::watch;
-use zksync_config::configs::chain::L1BatchCommitDataGeneratorMode;
 use zksync_contracts::PRE_BOOJUM_COMMIT_FUNCTION;
 use zksync_dal::{ConnectionPool, StorageProcessor};
 use zksync_eth_client::{clients::QueryClient, Error as L1ClientError, EthInterface};
 use zksync_l1_contract_interface::{i_executor::structures::CommitBatchInfo, Tokenizable};
-use zksync_types::{web3::ethabi, L1BatchNumber, H256};
+use zksync_types::{
+    l1_batch_commit_data_generator::L1BatchCommitDataGenerator, web3::ethabi, L1BatchNumber, H256,
+};
 
 use crate::{
     metrics::{CheckerComponent, EN_METRICS},
@@ -68,7 +69,7 @@ impl LocalL1BatchCommitData {
     async fn new(
         storage: &mut StorageProcessor<'_>,
         batch_number: L1BatchNumber,
-        l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> anyhow::Result<Option<Self>> {
         let Some(storage_l1_batch) = storage
             .blocks_dal()
@@ -254,7 +255,7 @@ impl ConsistencyChecker {
     pub async fn run(
         mut self,
         mut stop_receiver: watch::Receiver<bool>,
-        l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> anyhow::Result<()> {
         // It doesn't make sense to start the checker until we have at least one L1 batch with metadata.
         let earliest_l1_batch_number =
diff --git a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs
index 03f11815932..f56fdcfe053 100644
--- a/core/lib/zksync_core/src/consistency_checker/tests/mod.rs
+++ b/core/lib/zksync_core/src/consistency_checker/tests/mod.rs
@@ -5,13 +5,13 @@ use std::{collections::HashMap, slice};
 use assert_matches::assert_matches;
 use test_casing::{test_casing, Product};
 use tokio::sync::mpsc;
-use zksync_config::configs::chain::L1BatchCommitDataGeneratorMode;
 use zksync_dal::StorageProcessor;
 use zksync_eth_client::clients::MockEthereum;
 use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo;
 use zksync_types::{
     aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata,
-    web3::contract::Options, L2ChainId, ProtocolVersion, ProtocolVersionId, H256,
+    l1_batch_commit_data_generator::RollupModeL1BatchCommitDataGenerator, web3::contract::Options,
+    L2ChainId, ProtocolVersion, ProtocolVersionId, H256,
 };
 
 use super::*;
@@ -45,11 +45,11 @@ fn create_pre_boojum_l1_batch_with_metadata(number: u32) -> L1BatchWithMetadata
 
 fn build_commit_tx_input_data(
     batches: &[L1BatchWithMetadata],
-    l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+    l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
 ) -> Vec<u8> {
-    let commit_tokens = batches
-        .iter()
-        .map(|batch| CommitBatchInfo::new(batch, l1_batch_commit_data_generator).into_token());
+    let commit_tokens = batches.iter().map(|batch| {
+        CommitBatchInfo::new(batch, l1_batch_commit_data_generator.clone()).into_token()
+    });
     let commit_tokens = ethabi::Token::Array(commit_tokens.collect());
 
     let mut encoded = vec![];
@@ -89,7 +89,7 @@ fn build_commit_tx_input_data_is_correct() {
         create_l1_batch_with_metadata(1),
         create_l1_batch_with_metadata(2),
     ];
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
 
     let commit_tx_input_data =
         build_commit_tx_input_data(&batches, l1_batch_commit_data_generator.clone());
@@ -103,7 +103,8 @@ fn build_commit_tx_input_data_is_correct() {
             .unwrap();
         assert_eq!(
             commit_data,
-            CommitBatchInfo::new(batch, l1_batch_commit_data_generator).into_token()
+            CommitBatchInfo::new(batch, l1_batch_commit_data_generator.clone().clone())
+                .into_token()
         );
     }
 }
@@ -309,7 +310,7 @@ async fn normal_checker_function(
     let mut commit_tx_hash_by_l1_batch = HashMap::with_capacity(l1_batches.len());
     let client = MockEthereum::default();
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
 
     for (i, l1_batches) in l1_batches.chunks(batches_per_transaction).enumerate() {
         let input_data =
             build_commit_tx_input_data(l1_batches, l1_batch_commit_data_generator.clone());
@@ -390,7 +391,7 @@ async fn checker_processes_pre_boojum_batches(
     let mut commit_tx_hash_by_l1_batch = HashMap::with_capacity(l1_batches.len());
     let client = MockEthereum::default();
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
 
     for (i, l1_batch) in l1_batches.iter().enumerate() {
         let input_data = build_commit_tx_input_data(
             slice::from_ref(l1_batch),
@@ -452,7 +453,7 @@ async fn checker_functions_after_snapshot_recovery(delay_batch_insertion: bool)
 
     let l1_batch = create_l1_batch_with_metadata(99);
 
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
 
     let commit_tx_input_data = build_commit_tx_input_data(
         slice::from_ref(&l1_batch),
@@ -535,7 +536,7 @@ impl IncorrectDataKind {
         self,
         client: &MockEthereum,
         l1_batch: &L1BatchWithMetadata,
-        l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> H256 {
         let (commit_tx_input_data, successful_status) = match self {
             Self::MissingStatus => {
@@ -614,7 +615,7 @@ async fn checker_detects_incorrect_tx_data(kind: IncorrectDataKind, snapshot_rec
     }
 
     let l1_batch = create_l1_batch_with_metadata(if snapshot_recovery { 99 } else { 1 });
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
     let client = MockEthereum::default();
     let commit_tx_hash = kind
         .apply(&client, &l1_batch, l1_batch_commit_data_generator.clone())
diff --git a/core/lib/zksync_core/src/eth_sender/aggregator.rs b/core/lib/zksync_core/src/eth_sender/aggregator.rs
index a7c6388fa41..eca0a87c353 100644
--- a/core/lib/zksync_core/src/eth_sender/aggregator.rs
+++ b/core/lib/zksync_core/src/eth_sender/aggregator.rs
@@ -1,9 +1,6 @@
 use std::sync::Arc;
 
-use zksync_config::configs::{
-    chain::L1BatchCommitDataGeneratorMode,
-    eth_sender::{ProofLoadingMode, ProofSendingMode, SenderConfig},
-};
+use zksync_config::configs::eth_sender::{ProofLoadingMode, ProofSendingMode, SenderConfig};
 use zksync_contracts::BaseSystemContractsHashes;
 use zksync_dal::StorageProcessor;
 use zksync_l1_contract_interface::i_executor::methods::{
@@ -13,8 +10,8 @@ use zksync_object_store::{ObjectStore, ObjectStoreError};
 use zksync_prover_interface::outputs::L1BatchProofForL1;
 use zksync_types::{
     aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata,
-    helpers::unix_timestamp_ms, protocol_version::L1VerifierConfig, L1BatchNumber,
-    ProtocolVersionId,
+    helpers::unix_timestamp_ms, l1_batch_commit_data_generator::L1BatchCommitDataGenerator,
+    protocol_version::L1VerifierConfig, L1BatchNumber, ProtocolVersionId,
 };
 
 use super::{
@@ -32,14 +29,14 @@ pub struct Aggregator {
     execute_criteria: Vec<Box<dyn L1BatchPublishCriterion>>,
     config: SenderConfig,
     blob_store: Arc<dyn ObjectStore>,
-    l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+    l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
 }
 
 impl Aggregator {
     pub fn new(
         config: SenderConfig,
         blob_store: Arc<dyn ObjectStore>,
-        l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> Self {
         Self {
             commit_criteria: vec![
@@ -234,7 +231,7 @@ impl Aggregator {
         batches.map(|batches| CommitBatches {
             last_committed_l1_batch,
             l1_batches: batches,
-            l1_batch_commit_data_generator: self.l1_batch_commit_data_generator,
+            l1_batch_commit_data_generator: self.l1_batch_commit_data_generator.clone(),
         })
     }
 
@@ -324,7 +321,7 @@ impl Aggregator {
         storage: &mut StorageProcessor<'_>,
         ready_for_proof_l1_batches: Vec<L1BatchWithMetadata>,
         last_sealed_l1_batch: L1BatchNumber,
-        l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> Option<ProveBatches> {
         let batches = extract_ready_subrange(
             storage,
@@ -418,7 +415,7 @@ async fn extract_ready_subrange(
     publish_criteria: &mut [Box<dyn L1BatchPublishCriterion>],
     unpublished_l1_batches: Vec<L1BatchWithMetadata>,
     last_sealed_l1_batch: L1BatchNumber,
-    l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+    l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
 ) -> Option<Vec<L1BatchWithMetadata>> {
     let mut last_l1_batch: Option<L1BatchNumber> = None;
     for criterion in publish_criteria {
diff --git a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs
index c8b261cfd9c..b1f070d9133 100644
--- a/core/lib/zksync_core/src/eth_sender/publish_criterion.rs
+++ b/core/lib/zksync_core/src/eth_sender/publish_criterion.rs
@@ -1,13 +1,12 @@
-use std::fmt;
+use std::{fmt, sync::Arc};
 
 use async_trait::async_trait;
 use chrono::Utc;
-use zksync_config::configs::chain::L1BatchCommitDataGeneratorMode;
 use zksync_dal::StorageProcessor;
 use zksync_l1_contract_interface::{i_executor::structures::CommitBatchInfo, Tokenizable};
 use zksync_types::{
     aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, ethabi,
-    L1BatchNumber,
+    l1_batch_commit_data_generator::L1BatchCommitDataGenerator, L1BatchNumber,
 };
 
 use super::metrics::METRICS;
@@ -25,7 +24,7 @@ pub trait L1BatchPublishCriterion: fmt::Debug + Send + Sync {
         storage: &mut StorageProcessor<'_>,
         consecutive_l1_batches: &[L1BatchWithMetadata],
         last_sealed_l1_batch: L1BatchNumber,
-        _l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        _l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> Option<L1BatchNumber>;
 }
 
@@ -47,7 +46,7 @@ impl L1BatchPublishCriterion for NumberCriterion {
         _storage: &mut StorageProcessor<'_>,
         consecutive_l1_batches: &[L1BatchWithMetadata],
         _last_sealed_l1_batch: L1BatchNumber,
-        _l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        _l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> Option<L1BatchNumber> {
         let mut batch_numbers = consecutive_l1_batches
             .iter()
@@ -94,7 +93,7 @@ impl L1BatchPublishCriterion for TimestampDeadlineCriterion {
         _storage: &mut StorageProcessor<'_>,
         consecutive_l1_batches: &[L1BatchWithMetadata],
         last_sealed_l1_batch: L1BatchNumber,
-        _l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        _l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> Option<L1BatchNumber> {
         let first_l1_batch = consecutive_l1_batches.iter().next()?;
         let last_l1_batch_number = consecutive_l1_batches.iter().last()?.header.number.0;
@@ -159,7 +158,7 @@ impl L1BatchPublishCriterion for GasCriterion {
         storage: &mut StorageProcessor<'_>,
         consecutive_l1_batches: &[L1BatchWithMetadata],
         _last_sealed_l1_batch: L1BatchNumber,
-        _l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        _l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> Option<L1BatchNumber> {
         let base_cost = agg_l1_batch_base_cost(self.op);
         assert!(
@@ -217,7 +216,7 @@ impl L1BatchPublishCriterion for DataSizeCriterion {
         _storage: &mut StorageProcessor<'_>,
         consecutive_l1_batches: &[L1BatchWithMetadata],
         _last_sealed_l1_batch: L1BatchNumber,
-        l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> Option<L1BatchNumber> {
         const STORED_BLOCK_INFO_SIZE: usize = 96; // size of `StoredBlockInfo` solidity struct
         let mut data_size_left = self.data_limit - STORED_BLOCK_INFO_SIZE;
@@ -227,7 +226,7 @@ impl L1BatchPublishCriterion for DataSizeCriterion {
             let l1_commit_data_size =
                 ethabi::encode(&[ethabi::Token::Array(vec![CommitBatchInfo::new(
                     l1_batch,
-                    l1_batch_commit_data_generator,
+                    l1_batch_commit_data_generator.clone(),
                 )
                 .into_token()])])
                 .len();
diff --git a/core/lib/zksync_core/src/eth_sender/tests.rs b/core/lib/zksync_core/src/eth_sender/tests.rs
index 79ffed85f79..0784752afff 100644
--- a/core/lib/zksync_core/src/eth_sender/tests.rs
+++ b/core/lib/zksync_core/src/eth_sender/tests.rs
@@ -3,10 +3,7 @@ use std::sync::Arc;
 use assert_matches::assert_matches;
 use once_cell::sync::Lazy;
 use zksync_config::{
-    configs::{
-        chain::L1BatchCommitDataGeneratorMode,
-        eth_sender::{ProofSendingMode, SenderConfig},
-    },
+    configs::eth_sender::{ProofSendingMode, SenderConfig},
     ContractsConfig, ETHSenderConfig, GasAdjusterConfig,
 };
 use zksync_dal::{ConnectionPool, StorageProcessor};
@@ -20,6 +17,9 @@ use zksync_types::{
     commitment::{L1BatchMetaParameters, L1BatchMetadata, L1BatchWithMetadata},
     ethabi::Token,
     helpers::unix_timestamp_ms,
+    l1_batch_commit_data_generator::{
+        L1BatchCommitDataGenerator, RollupModeL1BatchCommitDataGenerator,
+    },
     web3::contract::Error,
     Address, L1BatchNumber, L1BlockNumber, ProtocolVersionId, H256,
 };
@@ -63,7 +63,7 @@ impl EthSenderTester {
         connection_pool: ConnectionPool,
         history: Vec<u64>,
         non_ordering_confirmations: bool,
-        l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+        l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
     ) -> Self {
         let eth_sender_config = ETHSenderConfig::for_tests();
         let contracts_config = ContractsConfig::for_tests();
@@ -152,7 +152,7 @@ impl EthSenderTester {
 #[tokio::test]
 async fn confirm_many() -> anyhow::Result<()> {
     let connection_pool = ConnectionPool::test_pool().await;
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
     let mut tester = EthSenderTester::new(
         connection_pool,
         vec![10; 100],
@@ -235,7 +235,7 @@ async fn confirm_many() -> anyhow::Result<()> {
 #[tokio::test]
 async fn resend_each_block() -> anyhow::Result<()> {
     let connection_pool = ConnectionPool::test_pool().await;
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
     let mut tester = EthSenderTester::new(
         connection_pool,
         vec![7, 6, 5, 5, 5, 2, 1],
@@ -353,7 +353,7 @@ async fn resend_each_block() -> anyhow::Result<()> {
 #[tokio::test]
 async fn dont_resend_already_mined() -> anyhow::Result<()> {
     let connection_pool = ConnectionPool::test_pool().await;
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
     let mut tester = EthSenderTester::new(
         connection_pool,
         vec![100; 100],
@@ -431,7 +431,7 @@ async fn dont_resend_already_mined() -> anyhow::Result<()> {
 #[tokio::test]
 async fn three_scenarios() -> anyhow::Result<()> {
     let connection_pool = ConnectionPool::test_pool().await;
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
     let mut tester = EthSenderTester::new(
         connection_pool.clone(),
         vec![100; 100],
@@ -510,7 +510,7 @@ async fn three_scenarios() -> anyhow::Result<()> {
 #[tokio::test]
 async fn failed_eth_tx() {
     let connection_pool = ConnectionPool::test_pool().await;
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
     let mut tester = EthSenderTester::new(
         connection_pool.clone(),
         vec![100; 100],
@@ -589,7 +589,7 @@ fn l1_batch_with_metadata(header: L1BatchHeader) -> L1BatchWithMetadata {
 #[tokio::test]
 async fn correct_order_for_confirmations() -> anyhow::Result<()> {
     let connection_pool = ConnectionPool::test_pool().await;
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
     let mut tester = EthSenderTester::new(
         connection_pool,
         vec![100; 100],
@@ -659,7 +659,7 @@ async fn correct_order_for_confirmations() -> anyhow::Result<()> {
 #[tokio::test]
 async fn skipped_l1_batch_at_the_start() -> anyhow::Result<()> {
     let connection_pool = ConnectionPool::test_pool().await;
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
     let mut tester = EthSenderTester::new(
         connection_pool,
         vec![100; 100],
@@ -763,7 +763,7 @@ async fn skipped_l1_batch_at_the_start() -> anyhow::Result<()> {
 #[tokio::test]
 async fn skipped_l1_batch_in_the_middle() -> anyhow::Result<()> {
     let connection_pool = ConnectionPool::test_pool().await;
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
     let mut tester = EthSenderTester::new(
         connection_pool,
         vec![100; 100],
@@ -861,7 +861,7 @@ async fn skipped_l1_batch_in_the_middle() -> anyhow::Result<()> {
 #[tokio::test]
 async fn test_parse_multicall_data() {
     let connection_pool = ConnectionPool::test_pool().await;
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
     let tester = EthSenderTester::new(
         connection_pool,
         vec![100; 100],
@@ -948,7 +948,7 @@ async fn test_parse_multicall_data() {
 #[tokio::test]
 async fn get_multicall_data() {
     let connection_pool = ConnectionPool::test_pool().await;
-    let l1_batch_commit_data_generator = L1BatchCommitDataGeneratorMode::Rollup;
+    let l1_batch_commit_data_generator = Arc::new(RollupModeL1BatchCommitDataGenerator {});
     let mut tester = EthSenderTester::new(
         connection_pool,
         vec![100; 100],
@@ -1026,7 +1026,7 @@ async fn commit_l1_batch(
     last_committed_l1_batch: L1BatchHeader,
     l1_batch: L1BatchHeader,
     confirm: bool,
-    l1_batch_commit_data_generator: L1BatchCommitDataGeneratorMode,
+    l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator>,
 ) -> H256 {
     let operation = AggregatedOperation::Commit(CommitBatches {
         last_committed_l1_batch: l1_batch_with_metadata(last_committed_l1_batch),
diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs
index cf2df943f58..e6119d48ae0 100644
--- a/core/lib/zksync_core/src/lib.rs
+++ b/core/lib/zksync_core/src/lib.rs
@@ -17,8 +17,8 @@ use zksync_config::{
     configs::{
         api::{MerkleTreeApiConfig, Web3JsonRpcConfig},
         chain::{
-            CircuitBreakerConfig, MempoolConfig, NetworkConfig, OperationsManagerConfig,
-            StateKeeperConfig,
+            CircuitBreakerConfig, L1BatchCommitDataGeneratorMode, MempoolConfig, NetworkConfig,
+            OperationsManagerConfig, StateKeeperConfig,
         },
         contracts::ProverAtGenesis,
         database::{MerkleTreeConfig, MerkleTreeMode},
@@ -37,6 +37,10 @@ use zksync_queued_job_processor::JobProcessor;
 use zksync_state::PostgresStorageCaches;
 use zksync_types::{
     fee_model::FeeModelConfig,
+    l1_batch_commit_data_generator::{
+        L1BatchCommitDataGenerator, RollupModeL1BatchCommitDataGenerator,
+        ValidiumModeL1BatchCommitDataGenerator,
+    },
     protocol_version::{L1VerifierConfig, VerifierParams},
     system_contracts::get_system_smart_contracts,
     web3::contract::tokens::Detokenize,
@@ -591,12 +595,21 @@ pub async fn initialize_components(
         .state_keeper_config
         .clone()
         .context("state_keeper_config")?;
+    let l1_batch_commit_data_generator: Arc<dyn L1BatchCommitDataGenerator> =
+        match state_keeper_config.l1_batch_commit_data_generator_mode {
+            L1BatchCommitDataGeneratorMode::Rollup => {
+                Arc::new(RollupModeL1BatchCommitDataGenerator {})
+            }
+            L1BatchCommitDataGeneratorMode::Validium => {
+                Arc::new(ValidiumModeL1BatchCommitDataGenerator {})
+            }
+        };
     let eth_tx_aggregator_actor = EthTxAggregator::new(
         eth_sender.sender.clone(),
         Aggregator::new(
             eth_sender.sender.clone(),
             store_factory.create_store().await,
-            state_keeper_config.l1_batch_commit_data_generator_mode,
+            l1_batch_commit_data_generator,
         ),
         Arc::new(eth_client),
         contracts_config.validator_timelock_addr,
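
Usage sketch (not part of the patch): the snippet below illustrates how the reintroduced trait is meant to be wired, mirroring the `match` added to `initialize_components` and the per-batch tokenization in `CommitBatches::into_tokens`. The helper names `generator_for_mode` and `commit_token_for` are illustrative assumptions, not items from this change.

// Illustrative sketch only; `generator_for_mode` and `commit_token_for` are
// hypothetical helpers, not part of the patch.
use std::sync::Arc;

use zksync_config::configs::chain::L1BatchCommitDataGeneratorMode;
use zksync_l1_contract_interface::{i_executor::structures::CommitBatchInfo, Tokenizable};
use zksync_types::{
    commitment::L1BatchWithMetadata,
    ethabi::Token,
    l1_batch_commit_data_generator::{
        L1BatchCommitDataGenerator, RollupModeL1BatchCommitDataGenerator,
        ValidiumModeL1BatchCommitDataGenerator,
    },
};

/// Picks the concrete generator from config, mirroring the `match` added to
/// `initialize_components` in `core/lib/zksync_core/src/lib.rs`.
fn generator_for_mode(
    mode: L1BatchCommitDataGeneratorMode,
) -> Arc<dyn L1BatchCommitDataGenerator> {
    match mode {
        L1BatchCommitDataGeneratorMode::Rollup => {
            Arc::new(RollupModeL1BatchCommitDataGenerator {})
        }
        L1BatchCommitDataGeneratorMode::Validium => {
            Arc::new(ValidiumModeL1BatchCommitDataGenerator {})
        }
    }
}

/// Builds the commit token for a single batch the way `CommitBatches::into_tokens`
/// does for a range: the generator is shared behind `Arc` and cloned per batch.
fn commit_token_for(
    batch: &L1BatchWithMetadata,
    generator: &Arc<dyn L1BatchCommitDataGenerator>,
) -> Token {
    CommitBatchInfo::new(batch, generator.clone()).into_token()
}

Once the mode is read from `StateKeeperConfig::l1_batch_commit_data_generator_mode`, the same `Arc<dyn L1BatchCommitDataGenerator>` can be handed to the consistency checker, the aggregator, and the publish criteria, which is why the fields above switch from the `L1BatchCommitDataGeneratorMode` enum to the trait object.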