From 9127d0e0ff0f8de7b52867297808fe5035c76d10 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com>
Date: Tue, 7 Jan 2025 14:36:02 +0000
Subject: [PATCH 001/113] feat(root): compare trie updates of state root task
 with regular root (#13704)

---
 crates/engine/tree/src/tree/mod.rs          |  21 +-
 crates/engine/tree/src/tree/trie_updates.rs | 208 ++++++++++++++++++++
 2 files changed, 226 insertions(+), 3 deletions(-)
 create mode 100644 crates/engine/tree/src/tree/trie_updates.rs

diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs
index 5b65e49f39233..fea93c8a53481 100644
--- a/crates/engine/tree/src/tree/mod.rs
+++ b/crates/engine/tree/src/tree/mod.rs
@@ -27,6 +27,7 @@ use reth_chain_state::{
     CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain,
 };
 use reth_consensus::{Consensus, FullConsensus, PostExecutionInput};
+pub use reth_engine_primitives::InvalidBlockHook;
 use reth_engine_primitives::{
     BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes,
     EngineValidator, ForkchoiceStateTracker, OnForkChoiceUpdated,
@@ -82,12 +83,13 @@ pub mod config;
 mod invalid_block_hook;
 mod metrics;
 mod persistence_state;
+pub mod root;
+mod trie_updates;
+
 pub use config::TreeConfig;
 pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook};
 pub use persistence_state::PersistenceState;
-pub use reth_engine_primitives::InvalidBlockHook;
-
-pub mod root;
+use trie_updates::compare_trie_updates;
 
 /// Keeps track of the state of the tree.
 ///
@@ -2352,6 +2354,19 @@ where
                     task_elapsed = ?time_from_last_update,
                     "Task state root finished"
                 );
+
+                if task_state_root != block.header().state_root() {
+                    debug!(target: "engine::tree", "Task state root does not match block state root");
+                    let (regular_root, regular_updates) =
+                        state_provider.state_root_with_updates(hashed_state.clone())?;
+
+                    if regular_root == block.header().state_root() {
+                        compare_trie_updates(&task_trie_updates, &regular_updates);
+                    } else {
+                        debug!(target: "engine::tree", "Regular state root does not match block state root");
+                    }
+                }
+
                 (task_state_root, task_trie_updates, time_from_last_update)
             }
             Err(error) => {
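The `trie_updates.rs` module added below diffs two `TrieUpdates` by walking the union of both key sets, so entries present on only one side are still reported. A standalone sketch of that idiom over plain `std` maps; the `diff_maps` helper is illustrative and not part of the patch:

```rust
use std::{
    collections::{HashMap, HashSet},
    hash::Hash,
};

// Visit the union of both key sets exactly once, so entries present on only
// one side are still reported; returns the keys whose values differ.
fn diff_maps<K: Eq + Hash + Clone, V: PartialEq>(
    left: &HashMap<K, V>,
    right: &HashMap<K, V>,
) -> Vec<K> {
    left.keys()
        .chain(right.keys())
        .cloned()
        .collect::<HashSet<_>>() // dedupe keys that appear in both maps
        .into_iter()
        .filter(|key| left.get(key) != right.get(key))
        .collect()
}
```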
diff --git a/crates/engine/tree/src/tree/trie_updates.rs b/crates/engine/tree/src/tree/trie_updates.rs
new file mode 100644
index 0000000000000..ea78aca13b87a
--- /dev/null
+++ b/crates/engine/tree/src/tree/trie_updates.rs
@@ -0,0 +1,208 @@
+use alloy_primitives::{
+    map::{HashMap, HashSet},
+    B256,
+};
+use reth_trie::{
+    updates::{StorageTrieUpdates, TrieUpdates},
+    BranchNodeCompact, Nibbles,
+};
+use tracing::debug;
+
+#[derive(Debug, Default)]
+struct TrieUpdatesDiff {
+    account_nodes: HashMap<Nibbles, (Option<BranchNodeCompact>, Option<BranchNodeCompact>)>,
+    removed_nodes: HashMap<Nibbles, (bool, bool)>,
+    storage_tries: HashMap<B256, StorageTrieDiffEntry>,
+}
+
+impl TrieUpdatesDiff {
+    fn has_differences(&self) -> bool {
+        !self.account_nodes.is_empty() ||
+            !self.removed_nodes.is_empty() ||
+            !self.storage_tries.is_empty()
+    }
+
+    pub(super) fn log_differences(mut self) {
+        if self.has_differences() {
+            for (path, (task, regular)) in &mut self.account_nodes {
+                debug!(target: "engine::tree", ?path, ?task, ?regular, "Difference in account trie updates");
+            }
+
+            for (path, (task, regular)) in &self.removed_nodes {
+                debug!(target: "engine::tree", ?path, ?task, ?regular, "Difference in removed account trie nodes");
+            }
+
+            for (address, storage_diff) in self.storage_tries {
+                storage_diff.log_differences(address);
+            }
+        }
+    }
+}
+
+#[derive(Debug)]
+enum StorageTrieDiffEntry {
+    /// Storage Trie entry exists for one of the task or regular trie updates, but not the other.
+    Existence(bool, bool),
+    /// Storage Trie entries exist for both task and regular trie updates, but their values
+    /// differ.
+    Value(StorageTrieUpdatesDiff),
+}
+
+impl StorageTrieDiffEntry {
+    fn log_differences(self, address: B256) {
+        match self {
+            Self::Existence(task, regular) => {
+                debug!(target: "engine::tree", ?address, ?task, ?regular, "Difference in storage trie existence");
+            }
+            Self::Value(mut storage_diff) => {
+                if let Some((task, regular)) = storage_diff.is_deleted {
+                    debug!(target: "engine::tree", ?address, ?task, ?regular, "Difference in storage trie deletion");
+                }
+
+                for (path, (task, regular)) in &mut storage_diff.storage_nodes {
+                    debug!(target: "engine::tree", ?address, ?path, ?task, ?regular, "Difference in storage trie updates");
+                }
+
+                for (path, (task, regular)) in &storage_diff.removed_nodes {
+                    debug!(target: "engine::tree", ?address, ?path, ?task, ?regular, "Difference in removed storage trie nodes");
+                }
+            }
+        }
+    }
+}
+
+#[derive(Debug, Default)]
+struct StorageTrieUpdatesDiff {
+    is_deleted: Option<(bool, bool)>,
+    storage_nodes: HashMap<Nibbles, (Option<BranchNodeCompact>, Option<BranchNodeCompact>)>,
+    removed_nodes: HashMap<Nibbles, (bool, bool)>,
+}
+
+impl StorageTrieUpdatesDiff {
+    fn has_differences(&self) -> bool {
+        self.is_deleted.is_some() ||
+            !self.storage_nodes.is_empty() ||
+            !self.removed_nodes.is_empty()
+    }
+}
+
+/// Compares the trie updates from state root task and regular state root calculation, and logs
+/// the differences if there are any.
+pub(super) fn compare_trie_updates(task: &TrieUpdates, regular: &TrieUpdates) {
+    let mut diff = TrieUpdatesDiff::default();
+
+    // compare account nodes
+    for key in task
+        .account_nodes
+        .keys()
+        .chain(regular.account_nodes.keys())
+        .cloned()
+        .collect::<HashSet<_>>()
+    {
+        let (left, right) = (task.account_nodes.get(&key), regular.account_nodes.get(&key));
+
+        if !branch_nodes_equal(left, right) {
+            diff.account_nodes.insert(key, (left.cloned(), right.cloned()));
+        }
+    }
+
+    // compare removed nodes
+    for key in task
+        .removed_nodes
+        .iter()
+        .chain(regular.removed_nodes.iter())
+        .cloned()
+        .collect::<HashSet<_>>()
+    {
+        let (left, right) =
+            (task.removed_nodes.contains(&key), regular.removed_nodes.contains(&key));
+        if left != right {
+            diff.removed_nodes.insert(key, (left, right));
+        }
+    }
+
+    // compare storage tries
+    for key in task
+        .storage_tries
+        .keys()
+        .chain(regular.storage_tries.keys())
+        .copied()
+        .collect::<HashSet<_>>()
+    {
+        let (left, right) = (task.storage_tries.get(&key), regular.storage_tries.get(&key));
+        if left != right {
+            if let Some((left, right)) = left.zip(right) {
+                let storage_diff = compare_storage_trie_updates(left, right);
+                if storage_diff.has_differences() {
+                    diff.storage_tries.insert(key, StorageTrieDiffEntry::Value(storage_diff));
+                }
+            } else {
+                diff.storage_tries
+                    .insert(key, StorageTrieDiffEntry::Existence(left.is_some(), right.is_some()));
+            }
+        }
+    }
+
+    // log differences
+    diff.log_differences();
+}
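A usage sketch for the comparison entry point above, assuming `TrieUpdates: Default` with public fields as in `reth_trie`; the `demo_compare` function is illustrative only. Equal inputs log nothing, while any one-sided entry is reported via `debug!`:

```rust
use reth_trie::{updates::TrieUpdates, Nibbles};

// Illustrative only: exercise the comparison with an artificial one-sided
// difference and watch for the "Difference in removed account trie nodes"
// debug log. Assumes `TrieUpdates: Default` and public field access.
fn demo_compare() {
    let task = TrieUpdates::default();
    let mut regular = TrieUpdates::default();
    // a node removed only by the regular calculation
    regular.removed_nodes.insert(Nibbles::default());

    // identical updates would log nothing; this logs one removed-node diff
    compare_trie_updates(&task, &regular);
}
```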
+fn compare_storage_trie_updates(
+    task: &StorageTrieUpdates,
+    regular: &StorageTrieUpdates,
+) -> StorageTrieUpdatesDiff {
+    let mut diff = StorageTrieUpdatesDiff {
+        is_deleted: (task.is_deleted != regular.is_deleted)
+            .then_some((task.is_deleted, regular.is_deleted)),
+        ..Default::default()
+    };
+
+    // compare storage nodes
+    for key in task
+        .storage_nodes
+        .keys()
+        .chain(regular.storage_nodes.keys())
+        .cloned()
+        .collect::<HashSet<_>>()
+    {
+        let (left, right) = (task.storage_nodes.get(&key), regular.storage_nodes.get(&key));
+        if !branch_nodes_equal(left, right) {
+            diff.storage_nodes.insert(key, (left.cloned(), right.cloned()));
+        }
+    }
+
+    // compare removed nodes
+    for key in task
+        .removed_nodes
+        .iter()
+        .chain(regular.removed_nodes.iter())
+        .cloned()
+        .collect::<HashSet<_>>()
+    {
+        let (left, right) =
+            (task.removed_nodes.contains(&key), regular.removed_nodes.contains(&key));
+        if left != right {
+            diff.removed_nodes.insert(key, (left, right));
+        }
+    }
+
+    diff
+}
+
+/// Compares the branch nodes from state root task and regular state root calculation.
+///
+/// Returns `true` if they are equal.
+fn branch_nodes_equal(
+    task: Option<&BranchNodeCompact>,
+    regular: Option<&BranchNodeCompact>,
+) -> bool {
+    if let (Some(task), Some(regular)) = (task.as_ref(), regular.as_ref()) {
+        task.state_mask == regular.state_mask &&
+            // We do not compare the tree mask because it is known to be mismatching
+            task.hash_mask == regular.hash_mask &&
+            task.hashes == regular.hashes &&
+            task.root_hash == regular.root_hash
+    } else {
+        task == regular
+    }
+}

From b2c00418dfe1734cc44b68818287d20e88b608c1 Mon Sep 17 00:00:00 2001
From: Emilia Hane
Date: Tue, 7 Jan 2025 16:52:50 +0100
Subject: [PATCH 002/113] fix(ci): Enable wasm check for
 `reth-optimism-primitives` (#13693)

Co-authored-by: Matthias Seitz
---
 .github/assets/check_wasm.sh | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh
index 738b932ef4b4e..2c6b97101b379 100755
--- a/.github/assets/check_wasm.sh
+++ b/.github/assets/check_wasm.sh
@@ -45,7 +45,6 @@ exclude_crates=(
   reth-optimism-node
   reth-optimism-payload-builder
   reth-optimism-rpc
-  reth-optimism-primitives
   reth-rpc
   reth-rpc-api
   reth-rpc-api-testing-util

From f7f201215631ad05774b8a9e97b704ef27a01718 Mon Sep 17 00:00:00 2001
From: Tin Chung <56880684+chungquantin@users.noreply.github.com>
Date: Tue, 7 Jan 2025 23:11:42 +0700
Subject: [PATCH 003/113] refactor: `Consensus` trait error type (#13655)

---
 bin/reth/src/commands/debug_cmd/build_block.rs  |  4 ++--
 bin/reth/src/commands/debug_cmd/execution.rs    |  5 +++--
 bin/reth/src/commands/debug_cmd/merkle.rs       |  4 ++--
 .../src/commands/debug_cmd/replay_engine.rs     |  4 ++--
 crates/blockchain-tree/src/externals.rs         |  6 +++---
 crates/cli/commands/src/import.rs               |  4 ++--
 .../consensus/beacon/src/engine/test_utils.rs   | 15 ++++++++-------
 crates/consensus/consensus/src/lib.rs           | 12 +++++++-----
 crates/consensus/consensus/src/noop.rs          | 11 +++++------
 crates/consensus/consensus/src/test_utils.rs    |  9 ++++-----
 crates/engine/local/src/service.rs              |  4 ++--
 crates/engine/service/src/service.rs            |  4 ++--
 crates/engine/tree/src/download.rs              |  4 ++--
 crates/engine/tree/src/tree/mod.rs              |  6 +++---
 crates/ethereum/consensus/src/lib.rs            |  9 ++++-----
 crates/ethereum/node/src/node.rs                |  3 ++-
 crates/net/downloaders/src/bodies/bodies.rs     |  6 +++---
 crates/net/downloaders/src/bodies/queue.rs      |  4 ++--
 crates/net/downloaders/src/bodies/request.rs    |  6 +++---
 crates/net/downloaders/src/bodies/task.rs       |  4 ++--
 crates/net/p2p/src/full_block.rs                | 10 +++++-----
 crates/net/p2p/src/test_utils/headers.rs        |  8 ++++++--
 crates/node/api/src/node.rs                     |  7 +++++--
 crates/node/builder/src/components/builder.rs   |  7 +++++--
 crates/node/builder/src/components/consensus.rs |  5 +++--
 crates/node/builder/src/components/mod.rs       | 12 +++++++++---
 crates/node/builder/src/setup.rs                |  6 +++---
 crates/node/core/src/utils.rs                   |  4 ++--
 crates/optimism/consensus/src/lib.rs            |  2 ++
 crates/rpc/rpc-builder/src/lib.rs               | 17 ++++++++++-------
 crates/rpc/rpc/src/validation.rs                |  4 ++--
 crates/stages/stages/src/lib.rs                 |  4 ++--
crates/stages/stages/src/sets.rs | 10 +++++----- 33 files changed, 122 insertions(+), 98 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index edf85b3c58989..1e95c1e33c708 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -21,7 +21,7 @@ use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use reth_cli_runner::CliContext; use reth_consensus::{Consensus, FullConsensus}; -use reth_errors::RethResult; +use reth_errors::{ConsensusError, RethResult}; use reth_ethereum_payload_builder::EthereumBuilderConfig; use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; @@ -128,7 +128,7 @@ impl> Command { ) -> eyre::Result<()> { let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; - let consensus: Arc = + let consensus: Arc> = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index e25bb6afff669..a7af54d573b7e 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -18,6 +18,7 @@ use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_errors::ConsensusError; use reth_exex::ExExManagerHandle; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; @@ -64,7 +65,7 @@ impl> Command { &self, config: &Config, client: Client, - consensus: Arc, + consensus: Arc>, provider_factory: ProviderFactory, task_executor: &TaskExecutor, static_file_producer: StaticFileProducer>, @@ -172,7 +173,7 @@ impl> Command { let Environment { provider_factory, config, data_dir } = self.env.init::(AccessRights::RW)?; - let consensus: Arc = + let consensus: Arc> = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); // Configure and build network diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 59fe2bafaf6cf..bb79068bd5479 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -10,7 +10,7 @@ use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, Environ use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_db::tables; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_evm::execute::{BatchExecutor, BlockExecutorProvider}; @@ -129,7 +129,7 @@ impl> Command { info!(target: "reth::cli", target_block_number=self.to, "Finished downloading tip of block range"); // build the full block client - let consensus: Arc = + let consensus: Arc> = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let block_range_client = FullBlockClient::new(fetch_client, consensus); diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs index 3d17ea456526c..80d60cfb39bea 100644 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ b/bin/reth/src/commands/debug_cmd/replay_engine.rs @@ -12,7 +12,7 @@ use reth_cli_commands::common::{AccessRights, CliNodeTypes, 
Environment, Environ use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; use reth_config::Config; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_db::DatabaseEnv; use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage}; use reth_ethereum_payload_builder::EthereumBuilderConfig; @@ -97,7 +97,7 @@ impl> Command { let Environment { provider_factory, config, data_dir } = self.env.init::(AccessRights::RW)?; - let consensus: Arc = + let consensus: Arc> = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index 9e72008e838f9..ad22417a91d7a 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -1,7 +1,7 @@ //! Blockchain tree externals. use alloy_primitives::{BlockHash, BlockNumber}; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_db::{static_file::BlockHashMask, tables}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_node_types::NodeTypesWithDB; @@ -28,7 +28,7 @@ pub struct TreeExternals { /// The provider factory, used to commit the canonical chain, or unwind it. pub(crate) provider_factory: ProviderFactory, /// The consensus engine. - pub(crate) consensus: Arc, + pub(crate) consensus: Arc>, /// The executor factory to execute blocks with. pub(crate) executor_factory: E, } @@ -37,7 +37,7 @@ impl TreeExternals { /// Create new tree externals. pub fn new( provider_factory: ProviderFactory, - consensus: Arc, + consensus: Arc>, executor_factory: E, ) -> Self { Self { provider_factory, consensus, executor_factory } diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index adb973815731d..a73322a903f4d 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -7,7 +7,7 @@ use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_config::Config; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_db::tables; use reth_db_api::transaction::DbTx; use reth_downloaders::{ @@ -169,7 +169,7 @@ pub fn build_import_pipeline( ) -> eyre::Result<(Pipeline, impl Stream>)> where N: ProviderNodeTypes + CliNodeTypes, - C: Consensus, BodyTy> + 'static, + C: Consensus, BodyTy, Error = ConsensusError> + 'static, E: BlockExecutorProvider, { if !file_client.has_canonical_blocks() { diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs index ae627cae6961c..56de724aded22 100644 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ b/crates/consensus/beacon/src/engine/test_utils.rs @@ -13,7 +13,7 @@ use reth_blockchain_tree::{ }; use reth_chainspec::ChainSpec; use reth_config::config::StageConfig; -use reth_consensus::{test_utils::TestConsensus, FullConsensus}; +use reth_consensus::{test_utils::TestConsensus, ConsensusError, FullConsensus}; use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, @@ -332,12 +332,13 @@ where let provider_factory = create_test_provider_factory_with_chain_spec(self.base_config.chain_spec.clone()); - let consensus: Arc = match self.base_config.consensus { - TestConsensusConfig::Real => 
{ - Arc::new(EthBeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) - } - TestConsensusConfig::Test => Arc::new(TestConsensus::default()), - }; + let consensus: Arc> = + match self.base_config.consensus { + TestConsensusConfig::Real => { + Arc::new(EthBeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) + } + TestConsensusConfig::Test => Arc::new(TestConsensus::default()), + }; let payload_builder = spawn_test_payload_service::(); // use either noop client or a user provided client (for example TestFullBlockClient) diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index ba1b1321e7768..1de99d8278f59 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -66,12 +66,15 @@ pub trait FullConsensus: /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] pub trait Consensus: AsHeaderValidator { + /// The error type related to consensus. + type Error; + /// Ensures that body field values match the header. fn validate_body_against_header( &self, body: &B, header: &SealedHeader, - ) -> Result<(), ConsensusError>; + ) -> Result<(), Self::Error>; /// Validate a block disregarding world state, i.e. things that can be checked before sender /// recovery and execution. @@ -82,8 +85,7 @@ pub trait Consensus: AsHeaderValidator { /// **This should not be called for the genesis block**. /// /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_pre_execution(&self, block: &SealedBlock) - -> Result<(), ConsensusError>; + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), Self::Error>; } /// HeaderValidator is a protocol that validates headers and their relationships. 
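After this hunk, `Consensus` carries an associated `Error` type instead of hard-coding `ConsensusError` in its return types. A sketch of what an implementation and a call site look like after the refactor; `MyConsensus` and `mainnet_consensus` are hypothetical, and the `HeaderValidator` impl required by the `AsHeaderValidator` supertrait is assumed and omitted:

```rust
use std::sync::Arc;
use reth_beacon_consensus::EthBeaconConsensus;
use reth_chainspec::MAINNET;
use reth_consensus::{Consensus, ConsensusError, HeaderValidator};
use reth_primitives::{SealedBlock, SealedHeader};

// Hypothetical downstream type; its `HeaderValidator<H>` impl is omitted.
struct MyConsensus;

impl<H, B> Consensus<H, B> for MyConsensus
where
    Self: HeaderValidator<H>,
{
    // The associated type replaces the hard-coded `ConsensusError` returns;
    // every in-tree implementation in this patch keeps `ConsensusError` here.
    type Error = ConsensusError;

    fn validate_body_against_header(
        &self,
        _body: &B,
        _header: &SealedHeader<H>,
    ) -> Result<(), Self::Error> {
        Ok(())
    }

    fn validate_block_pre_execution(&self, _block: &SealedBlock<H, B>) -> Result<(), Self::Error> {
        Ok(())
    }
}

// Call-site pattern threaded through the rest of this patch: trait objects now
// pin the associated error type, so downloaders, stage sets, and pipelines
// that report `ConsensusError` keep type-checking.
fn mainnet_consensus() -> Arc<dyn Consensus<Error = ConsensusError>> {
    Arc::new(EthBeaconConsensus::new(MAINNET.clone()))
}
```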
@@ -170,13 +172,13 @@ impl, H> AsHeaderValidator for T { /// Helper trait to cast `Arc` to `Arc` pub trait AsConsensus: Consensus { /// Converts the [`Arc`] of self to [`Arc`] of [`HeaderValidator`] - fn as_consensus<'a>(self: Arc) -> Arc + 'a> + fn as_consensus<'a>(self: Arc) -> Arc + 'a> where Self: 'a; } impl, H, B> AsConsensus for T { - fn as_consensus<'a>(self: Arc) -> Arc + 'a> + fn as_consensus<'a>(self: Arc) -> Arc + 'a> where Self: 'a, { diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index c56e9867a2560..ea269c07dada9 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -30,18 +30,17 @@ impl HeaderValidator for NoopConsensus { } impl Consensus for NoopConsensus { + type Error = ConsensusError; + fn validate_body_against_header( &self, _body: &B, _header: &SealedHeader, - ) -> Result<(), ConsensusError> { + ) -> Result<(), Self::Error> { Ok(()) } - fn validate_block_pre_execution( - &self, - _block: &SealedBlock, - ) -> Result<(), ConsensusError> { + fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), Self::Error> { Ok(()) } } @@ -51,7 +50,7 @@ impl FullConsensus for NoopConsensus { &self, _block: &BlockWithSenders, _input: PostExecutionInput<'_, N::Receipt>, - ) -> Result<(), ConsensusError> { + ) -> Result<(), Self::Error> { Ok(()) } } diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index 082c8ca8bb5a6..3f26222c4b905 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -61,11 +61,13 @@ impl FullConsensus for TestConsensus { } impl Consensus for TestConsensus { + type Error = ConsensusError; + fn validate_body_against_header( &self, _body: &B, _header: &SealedHeader, - ) -> Result<(), ConsensusError> { + ) -> Result<(), Self::Error> { if self.fail_body_against_header() { Err(ConsensusError::BaseFeeMissing) } else { @@ -73,10 +75,7 @@ impl Consensus for TestConsensus { } } - fn validate_block_pre_execution( - &self, - _block: &SealedBlock, - ) -> Result<(), ConsensusError> { + fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), Self::Error> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 3c7bc72baed56..12c24bd6816af 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -18,7 +18,7 @@ use crate::miner::{LocalMiner, MiningMode}; use futures_util::{Stream, StreamExt}; use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_service::service::EngineMessageStream; use reth_engine_tree::{ @@ -64,7 +64,7 @@ where /// Constructor for [`LocalEngineService`]. 
#[allow(clippy::too_many_arguments)] pub fn new( - consensus: Arc>, + consensus: Arc>, executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, blockchain_db: BlockchainProvider2, diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index aeaf364a8cdcf..5d60182b6e994 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -2,7 +2,7 @@ use futures::{Stream, StreamExt}; use pin_project::pin_project; use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes}; use reth_chainspec::EthChainSpec; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_primitives::{BeaconEngineMessage, EngineValidator}; use reth_engine_tree::{ backfill::PipelineSync, @@ -69,7 +69,7 @@ where /// Constructor for `EngineService`. #[allow(clippy::too_many_arguments)] pub fn new( - consensus: Arc>, + consensus: Arc>, executor_factory: E, chain_spec: Arc, client: Client, diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index 1e42e25477b1a..262c642f0a87f 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -4,7 +4,7 @@ use crate::{engine::DownloadRequest, metrics::BlockDownloaderMetrics}; use alloy_consensus::BlockHeader; use alloy_primitives::B256; use futures::FutureExt; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_network_p2p::{ full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, BlockClient, @@ -84,7 +84,7 @@ where /// Create a new instance pub fn new( client: Client, - consensus: Arc>, + consensus: Arc>, ) -> Self { Self { full_block_client: FullBlockClient::new(client, consensus), diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index fea93c8a53481..27e402bb44a93 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -496,7 +496,7 @@ where { provider: P, executor_provider: E, - consensus: Arc>, + consensus: Arc>, payload_validator: V, /// Keeps track of internals such as executed and buffered blocks. 
state: EngineApiTreeState, @@ -585,7 +585,7 @@ where pub fn new( provider: P, executor_provider: E, - consensus: Arc>, + consensus: Arc>, payload_validator: V, outgoing: UnboundedSender>, state: EngineApiTreeState, @@ -643,7 +643,7 @@ where pub fn spawn_new( provider: P, executor_provider: E, - consensus: Arc>, + consensus: Arc>, payload_validator: V, persistence: PersistenceHandle, payload_builder: PayloadBuilderHandle, diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index c1ba56b8c6242..c31be45e22103 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -116,18 +116,17 @@ where H: BlockHeader, B: BlockBody, { + type Error = ConsensusError; + fn validate_body_against_header( &self, body: &B, header: &SealedHeader, - ) -> Result<(), ConsensusError> { + ) -> Result<(), Self::Error> { validate_body_against_header(body, header.header()) } - fn validate_block_pre_execution( - &self, - block: &SealedBlock, - ) -> Result<(), ConsensusError> { + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), Self::Error> { validate_block_pre_execution(block, &self.chain_spec) } } diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index f1a1f56ec048d..f37e08ef553f1 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -3,6 +3,7 @@ use crate::{EthEngineTypes, EthEvmConfig}; use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_ethereum_engine_primitives::{ EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, }; @@ -252,7 +253,7 @@ impl ConsensusBuilder for EthereumConsensusBuilder where Node: FullNodeTypes>, { - type Consensus = Arc; + type Consensus = Arc>; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 47a816f4ce6b3..454f6bffc562e 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -5,7 +5,7 @@ use alloy_primitives::BlockNumber; use futures::Stream; use futures_util::StreamExt; use reth_config::BodiesConfig; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_network_p2p::{ bodies::{ client::BodiesClient, @@ -39,7 +39,7 @@ pub struct BodiesDownloader { /// The bodies client client: Arc, /// The consensus client - consensus: Arc>, + consensus: Arc>, /// The database handle provider: Provider, /// The maximum number of non-empty blocks per one request @@ -579,7 +579,7 @@ impl BodiesDownloaderBuilder { pub fn build( self, client: B, - consensus: Arc>, + consensus: Arc>, provider: Provider, ) -> BodiesDownloader where diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs index 892eae14cbb1f..b9f63b143ac2a 100644 --- a/crates/net/downloaders/src/bodies/queue.rs +++ b/crates/net/downloaders/src/bodies/queue.rs @@ -4,7 +4,7 @@ use alloy_consensus::BlockHeader; use alloy_primitives::BlockNumber; use futures::{stream::FuturesUnordered, Stream}; use futures_util::StreamExt; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_network_p2p::{ bodies::{client::BodiesClient, response::BlockResponse}, error::DownloadResult, @@ -59,7 +59,7 @@ where pub(crate) fn push_new_request( &mut self, 
client: Arc, - consensus: Arc>, + consensus: Arc>, request: Vec>, ) { // Set last max requested block number diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index a3ad1f3b9dc2d..79b76f2dbf58d 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -2,7 +2,7 @@ use crate::metrics::{BodyDownloaderMetrics, ResponseMetrics}; use alloy_consensus::BlockHeader; use alloy_primitives::B256; use futures::{Future, FutureExt}; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_network_p2p::{ bodies::{client::BodiesClient, response::BlockResponse}, error::{DownloadError, DownloadResult}, @@ -40,7 +40,7 @@ use std::{ /// and eventually disconnected. pub(crate) struct BodiesRequestFuture { client: Arc, - consensus: Arc>, + consensus: Arc>, metrics: BodyDownloaderMetrics, /// Metrics for individual responses. This can be used to observe how the size (in bytes) of /// responses change while bodies are being downloaded. @@ -62,7 +62,7 @@ where /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request. pub(crate) fn new( client: Arc, - consensus: Arc>, + consensus: Arc>, metrics: BodyDownloaderMetrics, ) -> Self { Self { diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 9377be78676c7..863c889532c3c 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -43,7 +43,7 @@ impl TaskDow /// # Example /// /// ``` - /// use reth_consensus::Consensus; + /// use reth_consensus::{Consensus, ConsensusError}; /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader}; /// use reth_network_p2p::bodies::client::BodiesClient; /// use reth_primitives_traits::InMemorySize; @@ -55,7 +55,7 @@ impl TaskDow /// Provider: HeaderProvider
+ Unpin + 'static, /// >( /// client: Arc, - /// consensus: Arc>, + /// consensus: Arc>, /// provider: Provider, /// ) { /// let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider); diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 62981ad5d9ab5..fdee01ab99889 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -7,7 +7,7 @@ use crate::{ }; use alloy_consensus::BlockHeader; use alloy_primitives::{Sealable, B256}; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::WithPeerId; use reth_primitives::{SealedBlock, SealedHeader}; @@ -30,7 +30,7 @@ where Client: BlockClient, { client: Client, - consensus: Arc>, + consensus: Arc>, } impl FullBlockClient @@ -40,7 +40,7 @@ where /// Creates a new instance of `FullBlockClient`. pub fn new( client: Client, - consensus: Arc>, + consensus: Arc>, ) -> Self { Self { client, consensus } } @@ -118,7 +118,7 @@ where Client: BlockClient, { client: Client, - consensus: Arc>, + consensus: Arc>, hash: B256, request: FullBlockRequest, header: Option>, @@ -330,7 +330,7 @@ where /// The client used to fetch headers and bodies. client: Client, /// The consensus instance used to validate the blocks. - consensus: Arc>, + consensus: Arc>, /// The block hash to start fetching from (inclusive). start_hash: B256, /// How many blocks to fetch: `len([start_hash, ..]) == count` diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index 5809ad6bdd403..6e20b335a1078 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -12,7 +12,7 @@ use crate::{ }; use alloy_consensus::Header; use futures::{Future, FutureExt, Stream, StreamExt}; -use reth_consensus::{test_utils::TestConsensus, Consensus}; +use reth_consensus::{test_utils::TestConsensus, Consensus, ConsensusError}; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; use reth_primitives::SealedHeader; @@ -147,7 +147,11 @@ impl Stream for TestDownload { let empty: SealedHeader = SealedHeader::default(); if let Err(error) = - >::validate_header_against_parent(&this.consensus, &empty, &empty) + >::validate_header_against_parent( + &this.consensus, + &empty, + &empty, + ) { this.done = true; return Poll::Ready(Some(Err(DownloadError::HeaderValidation { diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 8db75480d11a4..66c131581892c 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -3,7 +3,7 @@ use crate::ConfigureEvm; use alloy_rpc_types_engine::JwtSecret; use reth_beacon_consensus::BeaconConsensusEngineHandle; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_db_api::{ database_metrics::{DatabaseMetadata, DatabaseMetrics}, Database, @@ -58,7 +58,10 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the node. - type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; + type Consensus: FullConsensus<::Primitives, Error = ConsensusError> + + Clone + + Unpin + + 'static; /// Network API. 
type Network: FullNetwork; diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index ce24c8bff8dfb..977381b6582b2 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -7,7 +7,7 @@ use crate::{ }, BuilderContext, ConfigureEvm, FullNodeTypes, }; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkPrimitives; use reth_node_api::{BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; @@ -402,7 +402,10 @@ where + 'static, EVM: ConfigureEvm
, Transaction = TxTy>, Executor: BlockExecutorProvider::Primitives>, - Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, + Cons: FullConsensus<::Primitives, Error = ConsensusError> + + Clone + + Unpin + + 'static, { type Components = Components; diff --git a/crates/node/builder/src/components/consensus.rs b/crates/node/builder/src/components/consensus.rs index 074080d337b15..0620b2507d2a9 100644 --- a/crates/node/builder/src/components/consensus.rs +++ b/crates/node/builder/src/components/consensus.rs @@ -1,4 +1,5 @@ //! Consensus component for the node builder. +use reth_consensus::{ConsensusError, FullConsensus}; use reth_node_api::NodeTypes; use crate::{BuilderContext, FullNodeTypes}; @@ -7,7 +8,7 @@ use std::future::Future; /// A type that knows how to build the consensus implementation. pub trait ConsensusBuilder: Send { /// The consensus implementation to build. - type Consensus: reth_consensus::FullConsensus<::Primitives> + type Consensus: FullConsensus<::Primitives, Error = ConsensusError> + Clone + Unpin + 'static; @@ -22,7 +23,7 @@ pub trait ConsensusBuilder: Send { impl ConsensusBuilder for F where Node: FullNodeTypes, - Consensus: reth_consensus::FullConsensus<::Primitives> + Consensus: FullConsensus<::Primitives, Error = ConsensusError> + Clone + Unpin + 'static, diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index 892380a4c6ca9..c5ac67e5cbc79 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -23,7 +23,7 @@ pub use pool::*; use reth_network_p2p::BlockClient; use crate::{ConfigureEvm, FullNodeTypes}; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_evm::execute::BlockExecutorProvider; use reth_network::{NetworkHandle, NetworkPrimitives}; use reth_network_api::FullNetwork; @@ -47,7 +47,10 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati type Executor: BlockExecutorProvider::Primitives>; /// The consensus type of the node. - type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; + type Consensus: FullConsensus<::Primitives, Error = ConsensusError> + + Clone + + Unpin + + 'static; /// Network API. type Network: FullNetwork< @@ -106,7 +109,10 @@ where + 'static, EVM: ConfigureEvm
, Transaction = TxTy>, Executor: BlockExecutorProvider::Primitives>, - Cons: FullConsensus<::Primitives> + Clone + Unpin + 'static, + Cons: FullConsensus<::Primitives, Error = ConsensusError> + + Clone + + Unpin + + 'static, { type Pool = Pool; type Evm = EVM; diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 62cfbac9bea8c..610ca7bbc7990 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use alloy_primitives::{BlockNumber, B256}; use reth_config::{config::StageConfig, PruneConfig}; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, @@ -27,7 +27,7 @@ use tokio::sync::watch; pub fn build_networked_pipeline( config: &StageConfig, client: Client, - consensus: Arc>, + consensus: Arc>, provider_factory: ProviderFactory, task_executor: &TaskExecutor, metrics_tx: reth_stages::MetricEventsSender, @@ -75,7 +75,7 @@ pub fn build_pipeline( stage_config: &StageConfig, header_downloader: H, body_downloader: B, - consensus: Arc>, + consensus: Arc>, max_block: Option, metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index 1db9c1f6b9ff7..31d847da7fbd5 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -5,7 +5,7 @@ use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_rpc_types_engine::{JwtError, JwtSecret}; use eyre::Result; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::HeadersClient, priority::Priority, }; @@ -72,7 +72,7 @@ where pub async fn get_single_body( client: Client, header: SealedHeader, - consensus: impl Consensus, + consensus: impl Consensus, ) -> Result> where Client: BodiesClient, diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 01f8f9a72f50e..7d54b8a049bb2 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -61,6 +61,8 @@ impl FullConsensus for OpBeaconConsensus { } impl Consensus for OpBeaconConsensus { + type Error = ConsensusError; + fn validate_body_against_header( &self, body: &OpBlockBody, diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 10dab2ab5b318..1e758522e4868 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -16,6 +16,7 @@ //! Configure only an http server with a selection of [`RethRpcModule`]s //! //! ``` +//! use reth_consensus::{ConsensusError, FullConsensus}; //! use reth_engine_primitives::PayloadValidator; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; @@ -67,7 +68,7 @@ //! CanonStateSubscriptions + Clone + 'static, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, -//! Consensus: reth_consensus::FullConsensus + Clone + 'static, +//! Consensus: FullConsensus + Clone + 'static, //! Validator: PayloadValidator, //! { //! // configure the rpc module per transport @@ -99,6 +100,7 @@ //! //! //! ``` +//! use reth_consensus::{ConsensusError, FullConsensus}; //! use reth_engine_primitives::{EngineTypes, PayloadValidator}; //! use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; //! use reth_network_api::{NetworkInfo, Peers}; @@ -159,7 +161,7 @@ //! EngineT: EngineTypes, //! EvmConfig: ConfigureEvm
, //! BlockExecutor: BlockExecutorProvider, -//! Consensus: reth_consensus::FullConsensus + Clone + 'static, +//! Consensus: FullConsensus + Clone + 'static, //! Validator: PayloadValidator, //! { //! // configure the rpc module per transport @@ -226,7 +228,7 @@ use jsonrpsee::{ Methods, RpcModule, }; use reth_chainspec::EthereumHardforks; -use reth_consensus::FullConsensus; +use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_primitives::{EngineTypes, PayloadValidator}; use reth_evm::{execute::BlockExecutorProvider, ConfigureEvm}; use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; @@ -298,7 +300,7 @@ pub async fn launch, block_executor: BlockExecutor, - consensus: Arc>, + consensus: Arc>, payload_validator: Arc>, ) -> Result where @@ -684,7 +686,7 @@ where Transaction = ::SignedTx, >, BlockExecutor: BlockExecutorProvider, - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: FullConsensus + Clone + 'static, { /// Configures all [`RpcModule`]s specific to the given [`TransportRpcModuleConfig`] which can /// be used to start the transport server(s). @@ -1347,7 +1349,8 @@ where /// Instantiates `ValidationApi` pub fn validation_api(&self) -> ValidationApi where - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: + FullConsensus + Clone + 'static, Provider: BlockReader::Block>, { ValidationApi::new( @@ -1379,7 +1382,7 @@ where >, >, BlockExecutor: BlockExecutorProvider, - Consensus: reth_consensus::FullConsensus + Clone + 'static, + Consensus: FullConsensus + Clone + 'static, { /// Configures the auth module that includes the /// * `engine_` namespace diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index cb3ab4f296cfb..3e65db5a2c963 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -43,7 +43,7 @@ where /// Create a new instance of the [`ValidationApi`] pub fn new( provider: Provider, - consensus: Arc>, + consensus: Arc>, executor_provider: E, config: ValidationApiConfig, task_spawner: Box, @@ -461,7 +461,7 @@ pub struct ValidationApiInner { /// The provider that can interact with the chain. provider: Provider, /// Consensus implementation. - consensus: Arc>, + consensus: Arc>, /// Execution payload validator. payload_validator: Arc::Block>>, /// Block executor factory. diff --git a/crates/stages/stages/src/lib.rs b/crates/stages/stages/src/lib.rs index ce6a96cf3496a..20c780e24c613 100644 --- a/crates/stages/stages/src/lib.rs +++ b/crates/stages/stages/src/lib.rs @@ -30,11 +30,11 @@ //! # use reth_provider::test_utils::{create_test_provider_factory, MockNodeTypesWithDB}; //! # use reth_static_file::StaticFileProducer; //! # use reth_config::config::StageConfig; -//! # use reth_consensus::Consensus; +//! # use reth_consensus::{Consensus, ConsensusError}; //! # use reth_consensus::test_utils::TestConsensus; //! # //! # let chain_spec = MAINNET.clone(); -//! # let consensus: Arc = Arc::new(TestConsensus::default()); +//! # let consensus: Arc> = Arc::new(TestConsensus::default()); //! # let headers_downloader = ReverseHeadersDownloaderBuilder::default().build( //! # Arc::new(TestHeadersClient::default()), //! 
# consensus.clone().as_header_validator() diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 53eb233796460..7b8205e25e177 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -44,7 +44,7 @@ use crate::{ }; use alloy_primitives::B256; use reth_config::config::StageConfig; -use reth_consensus::Consensus; +use reth_consensus::{Consensus, ConsensusError}; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}; use reth_provider::HeaderSyncGapProvider; @@ -102,7 +102,7 @@ where pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc>, + consensus: Arc>, header_downloader: H, body_downloader: B, executor_factory: E, @@ -185,7 +185,7 @@ where /// The tip for the headers stage. tip: watch::Receiver, /// The consensus engine used to validate incoming data. - consensus: Arc>, + consensus: Arc>, /// The block header downloader header_downloader: H, /// The block body downloader @@ -203,7 +203,7 @@ where pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc>, + consensus: Arc>, header_downloader: H, body_downloader: B, stages_config: StageConfig, @@ -236,7 +236,7 @@ where provider: P, tip: watch::Receiver, header_downloader: H, - consensus: Arc>, + consensus: Arc>, stages_config: StageConfig, ) -> StageSetBuilder where From 2b301aa102250c7f2990e852a0f9ac461eb88f8e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 Jan 2025 18:16:21 +0100 Subject: [PATCH 004/113] feat: use engine launcher as default (#13709) --- Cargo.lock | 3 +- crates/e2e-test-utils/src/lib.rs | 15 +- crates/engine/local/src/payload.rs | 16 + crates/exex/test-utils/src/lib.rs | 6 +- crates/node/builder/Cargo.toml | 3 - crates/node/builder/docs/mermaid/builder.mmd | 2 +- crates/node/builder/src/builder/mod.rs | 37 +- crates/node/builder/src/launch/mod.rs | 357 +------------------ examples/custom-engine-types/Cargo.toml | 1 + examples/custom-engine-types/src/main.rs | 4 + 10 files changed, 55 insertions(+), 389 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57eeea0741188..0b398215a84d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2933,6 +2933,7 @@ dependencies = [ "reth", "reth-basic-payload-builder", "reth-chainspec", + "reth-engine-local", "reth-ethereum-payload-builder", "reth-node-api", "reth-node-core", @@ -8045,7 +8046,6 @@ dependencies = [ "jsonrpsee", "rayon", "reth-beacon-consensus", - "reth-blockchain-tree", "reth-chain-state", "reth-chainspec", "reth-cli-util", @@ -8072,7 +8072,6 @@ dependencies = [ "reth-node-events", "reth-node-metrics", "reth-payload-builder", - "reth-payload-validator", "reth-primitives", "reth-provider", "reth-prune", diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 44e518eec5c88..8378cbbd73108 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -13,9 +13,7 @@ use reth_node_builder::{ PayloadTypes, }; use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; -use reth_provider::providers::{ - BlockchainProvider, BlockchainProvider2, NodeTypesForProvider, NodeTypesForTree, -}; +use reth_provider::providers::{BlockchainProvider2, NodeTypesForProvider, NodeTypesForTree}; use reth_rpc_server_types::RpcModuleSelection; use reth_tasks::TaskManager; use std::sync::Arc; @@ -58,7 +56,10 @@ where TmpNodeAdapter, Components: NodeComponents, Network: PeersHandleProvider>, >, - N::AddOns: RethRpcAddOns>, + N::AddOns: 
RethRpcAddOns> + EngineValidatorAddOn>, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< + <::Engine as PayloadTypes>::PayloadAttributes, + >, { let tasks = TaskManager::current(); let exec = tasks.executor(); @@ -203,11 +204,11 @@ where /// Testing database pub type TmpDB = Arc>; -type TmpNodeAdapter>> = +type TmpNodeAdapter>> = FullNodeTypesAdapter; /// Type alias for a `NodeAdapter` -pub type Adapter>> = NodeAdapter< +pub type Adapter>> = NodeAdapter< TmpNodeAdapter, <>>::ComponentsBuilder as NodeComponentsBuilder< TmpNodeAdapter, @@ -215,5 +216,5 @@ pub type Adapter; /// Type alias for a type of `NodeHelper` -pub type NodeHelperType>> = +pub type NodeHelperType>> = NodeTestContext, >>::AddOns>; diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 045f6fea02e23..088a42fbf96be 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -59,3 +59,19 @@ where } } } + +/// A temporary workaround to support local payload engine launcher for arbitrary payload +/// attributes. +// TODO(mattsse): This should be reworked so that LocalPayloadAttributesBuilder can be implemented +// for any +pub trait UnsupportedLocalAttributes: Send + Sync + 'static {} + +impl PayloadAttributesBuilder for LocalPayloadAttributesBuilder +where + ChainSpec: Send + Sync + 'static, + T: UnsupportedLocalAttributes, +{ + fn build(&self, _: u64) -> T { + panic!("Unsupported payload attributes") + } +} diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 77289a73ca725..471b0c5b26fee 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -37,7 +37,7 @@ use reth_node_builder::{ Components, ComponentsBuilder, ConsensusBuilder, ExecutorBuilder, NodeComponentsBuilder, PoolBuilder, }, - BuilderContext, Node, NodeAdapter, RethFullAdapter2, + BuilderContext, Node, NodeAdapter, RethFullAdapter, }; use reth_node_core::node_config::NodeConfig; use reth_node_ethereum::{ @@ -169,14 +169,14 @@ pub type TmpDB = Arc>; /// The [`NodeAdapter`] for the [`TestExExContext`]. Contains type necessary to /// boot the testing environment pub type Adapter = NodeAdapter< - RethFullAdapter2, + RethFullAdapter, <>, >, - >>::ComponentsBuilder as NodeComponentsBuilder>>::Components, + >>::ComponentsBuilder as NodeComponentsBuilder>>::Components, >; /// An [`ExExContext`] using the [`Adapter`] type. 
pub type TestExExContext = ExExContext; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index ff07d5ee26e13..5218bc2d5e3db 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] ## reth reth-beacon-consensus.workspace = true -reth-blockchain-tree.workspace = true reth-chain-state.workspace = true reth-chainspec.workspace = true reth-cli-util.workspace = true @@ -41,7 +40,6 @@ reth-node-core.workspace = true reth-node-events.workspace = true reth-node-metrics.workspace = true reth-payload-builder.workspace = true -reth-payload-validator.workspace = true reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true @@ -99,7 +97,6 @@ default = [] js-tracer = ["reth-rpc/js-tracer"] test-utils = [ "reth-db/test-utils", - "reth-blockchain-tree/test-utils", "reth-chain-state/test-utils", "reth-chainspec/test-utils", "reth-consensus/test-utils", diff --git a/crates/node/builder/docs/mermaid/builder.mmd b/crates/node/builder/docs/mermaid/builder.mmd index aa56bfe736d29..96282d3fd9fd3 100644 --- a/crates/node/builder/docs/mermaid/builder.mmd +++ b/crates/node/builder/docs/mermaid/builder.mmd @@ -9,7 +9,7 @@ graph TD; end NodeBuilderC--"launch"-->launch subgraph launch - database("database init")-->tree("blockchain tree init") + database("database init")-->tree("blockchain provider init") tree--BuilderContext-->components{"build_components"} subgraph components ComponentsBuilder--"first creates"-->Pool diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 47d9a54572e94..a30797573011a 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -7,11 +7,10 @@ use crate::{ components::NodeComponentsBuilder, node::FullNode, rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, - BlockReaderFor, DefaultNodeLauncher, LaunchNode, Node, NodeHandle, + BlockReaderFor, EngineNodeLauncher, LaunchNode, Node, }; use alloy_eips::eip4844::env_settings::EnvKzgSettings; use futures::Future; -use reth_blockchain_tree::externals::NodeTypesForTree; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli_util::get_secret_key; use reth_db_api::{ @@ -34,7 +33,7 @@ use reth_node_core::{ primitives::Head, }; use reth_provider::{ - providers::{BlockchainProvider, BlockchainProvider2, NodeTypesForProvider}, + providers::{BlockchainProvider2, NodeTypesForProvider, NodeTypesForTree}, ChainSpecProvider, FullProvider, }; use reth_tasks::TaskExecutor; @@ -51,11 +50,6 @@ pub use states::*; /// The adapter type for a reth node with the builtin provider type // Note: we need to hardcode this because custom components might depend on it in associated types. pub type RethFullAdapter = - FullNodeTypesAdapter>>; - -/// The adapter type for a reth node with the builtin provider type -// Note: we need to hardcode this because custom components might depend on it in associated types. -pub type RethFullAdapter2 = FullNodeTypesAdapter>>; #[allow(clippy::doc_markdown)] @@ -346,18 +340,14 @@ where /// /// This bootstraps the node internals, creates all the components with the given [Node] /// - /// Returns a [`NodeHandle`] that can be used to interact with the node. + /// Returns a [`NodeHandle`](crate::NodeHandle) that can be used to interact with the node. 
pub async fn launch_node( self, node: N, ) -> eyre::Result< - NodeHandle< - NodeAdapter< - RethFullAdapter, - >>::Components, - >, - N::AddOns, - >, + , N::ComponentsBuilder, N::AddOns>, + >>::Node, > where N: Node, ChainSpec = ChainSpec> + NodeTypesForTree, @@ -368,6 +358,9 @@ where >, >, N::Primitives: FullNodePrimitives, + EngineNodeLauncher: LaunchNode< + NodeBuilderWithComponents, N::ComponentsBuilder, N::AddOns>, + >, { self.node(node).launch().await } @@ -558,14 +551,20 @@ where T: NodeTypesWithEngine + NodeTypesForTree, CB: NodeComponentsBuilder>, AO: RethRpcAddOns, CB::Components>>, + EngineNodeLauncher: LaunchNode, CB, AO>>, { - /// Launches the node with the [`DefaultNodeLauncher`] that sets up engine API consensus and rpc + /// Launches the node with the [`EngineNodeLauncher`] that sets up engine API consensus and rpc pub async fn launch( self, - ) -> eyre::Result, CB::Components>, AO>> { + ) -> eyre::Result< + , CB, AO>, + >>::Node, + > { let Self { builder, task_executor } = self; - let launcher = DefaultNodeLauncher::new(task_executor, builder.config.datadir()); + let launcher = + EngineNodeLauncher::new(task_executor, builder.config.datadir(), Default::default()); builder.launch_with(launcher).await } } diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index c6a00a6eec8cc..33e37c329ae12 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -6,50 +6,12 @@ mod exex; pub(crate) mod engine; pub use common::LaunchContext; -use common::{Attached, LaunchContextWith, WithConfigs}; pub use exex::ExExLauncher; -use reth_db_api::{ - database_metrics::{DatabaseMetadata, DatabaseMetrics}, - Database, -}; -use std::{future::Future, sync::Arc}; +use std::future::Future; -use futures::{future::Either, stream, stream_select, StreamExt}; -use reth_beacon_consensus::{ - hooks::{EngineHooks, PruneHook, StaticFileHook}, - BeaconConsensusEngine, -}; -use reth_blockchain_tree::{ - noop::NoopBlockchainTree, BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, - TreeExternals, -}; -use reth_chainspec::EthChainSpec; -use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; -use reth_engine_util::EngineMessageStreamExt; -use reth_exex::ExExManagerHandle; -use reth_network::BlockDownloaderProvider; -use reth_node_api::{AddOnsContext, FullNodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine}; -use reth_node_core::{ - dirs::{ChainPath, DataDirPath}, - exit::NodeExitFuture, -}; -use reth_node_events::{cl::ConsensusLayerHealthEvents, node, node::NodeEvent}; -use reth_provider::providers::{BlockchainProvider, NodeTypesForTree}; use reth_rpc::eth::RpcNodeCore; use reth_tasks::TaskExecutor; -use reth_tracing::tracing::{debug, info}; -use tokio::sync::{mpsc::unbounded_channel, oneshot}; -use tokio_stream::wrappers::UnboundedReceiverStream; - -use crate::{ - builder::{NodeAdapter, NodeTypesAdapter}, - components::{NodeComponents, NodeComponentsBuilder}, - hooks::NodeHooks, - node::FullNode, - rpc::{RethRpcAddOns, RpcHandle}, - AddOns, NodeBuilderWithComponents, NodeHandle, -}; /// Alias for [`reth_rpc_eth_types::EthApiBuilderCtx`], adapter for [`RpcNodeCore`]. pub type EthApiBuilderCtx = reth_rpc_eth_types::EthApiBuilderCtx< @@ -68,7 +30,8 @@ pub type EthApiBuilderCtx = reth_rpc_eth_types::EthApiBuilderCtx< /// /// This is essentially the launch logic for a node. 
/// -/// See also [`DefaultNodeLauncher`] and [`NodeBuilderWithComponents::launch_with`] +/// See also [`EngineNodeLauncher`](crate::EngineNodeLauncher) and +/// [`NodeBuilderWithComponents::launch_with`](crate::NodeBuilderWithComponents) pub trait LaunchNode { /// The node type that is created. type Node; @@ -88,317 +51,3 @@ where self(target) } } - -/// The default launcher for a node. -#[derive(Debug)] -pub struct DefaultNodeLauncher { - /// The task executor for the node. - pub ctx: LaunchContext, -} - -impl DefaultNodeLauncher { - /// Create a new instance of the default node launcher. - pub const fn new(task_executor: TaskExecutor, data_dir: ChainPath) -> Self { - Self { ctx: LaunchContext::new(task_executor, data_dir) } - } -} - -impl LaunchNode> for DefaultNodeLauncher -where - Types: NodeTypesWithEngine + NodeTypesForTree, - DB: Database + DatabaseMetrics + DatabaseMetadata + Clone + Unpin + 'static, - T: FullNodeTypes< - Provider = BlockchainProvider>, - Types = Types, - DB = DB, - >, - CB: NodeComponentsBuilder, - AO: RethRpcAddOns>, -{ - type Node = NodeHandle, AO>; - - async fn launch_node( - self, - target: NodeBuilderWithComponents, - ) -> eyre::Result { - let Self { ctx } = self; - let NodeBuilderWithComponents { - adapter: NodeTypesAdapter { database }, - components_builder, - add_ons: AddOns { hooks, exexs: installed_exex, add_ons }, - config, - } = target; - let NodeHooks { on_component_initialized, on_node_started, .. } = hooks; - - // TODO: remove tree and move tree_config and canon_state_notification_sender - // initialization to with_blockchain_db once the engine revamp is done - // https://github.com/paradigmxyz/reth/issues/8742 - let tree_config = BlockchainTreeConfig::default(); - - // NOTE: This is a temporary workaround to provide the canon state notification sender to the components builder because there's a cyclic dependency between the blockchain provider and the tree component. This will be removed once the Blockchain provider no longer depends on an instance of the tree: - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(tree_config.max_reorg_depth() as usize * 2); - - let tree = Arc::new(NoopBlockchainTree::with_canon_state_notifications( - canon_state_notification_sender.clone(), - )); - - // setup the launch context - let mut ctx = ctx - .with_configured_globals() - // load the toml config - .with_loaded_toml_config(config)? - // add resolved peers - .with_resolved_peers().await? - // attach the database - .attach(database.clone()) - // ensure certain settings take effect - .with_adjusted_configs() - // Create the provider factory - .with_provider_factory().await? - .inspect(|_| { - info!(target: "reth::cli", "Database opened"); - }) - .with_prometheus_server().await? - .inspect(|this| { - debug!(target: "reth::cli", chain=%this.chain_id(), genesis=?this.genesis_hash(), "Initializing genesis"); - }) - .with_genesis()? - .inspect(|this: &LaunchContextWith, _>>| { - info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks()); - }) - .with_metrics_task() - // passing FullNodeTypes as type parameter here so that we can build - // later the components. - .with_blockchain_db::(move |provider_factory| { - Ok(BlockchainProvider::new(provider_factory, tree)?) - })? 
- .with_components(components_builder, on_component_initialized).await?; - - let consensus = Arc::new(ctx.components().consensus().clone()); - - let tree_externals = TreeExternals::new( - ctx.provider_factory().clone(), - consensus.clone(), - ctx.components().block_executor().clone(), - ); - let tree = BlockchainTree::new(tree_externals, tree_config)? - .with_sync_metrics_tx(ctx.sync_metrics_tx()) - // Note: This is required because we need to ensure that both the components and the - // tree are using the same channel for canon state notifications. This will be removed - // once the Blockchain provider no longer depends on an instance of the tree - .with_canon_state_notification_sender(canon_state_notification_sender); - - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - - ctx.node_adapter_mut().provider = ctx.blockchain_db().clone().with_tree(blockchain_tree); - - debug!(target: "reth::cli", "configured blockchain tree"); - - // spawn exexs - let exex_manager_handle = ExExLauncher::new( - ctx.head(), - ctx.node_adapter().clone(), - installed_exex, - ctx.configs().clone(), - ) - .launch() - .await?; - - // create pipeline - let network_client = ctx.components().network().fetch_client().await?; - let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel(); - - let node_config = ctx.node_config(); - let consensus_engine_stream = UnboundedReceiverStream::from(consensus_engine_rx) - .maybe_skip_fcu(node_config.debug.skip_fcu) - .maybe_skip_new_payload(node_config.debug.skip_new_payload) - .maybe_reorg( - ctx.blockchain_db().clone(), - ctx.components().evm_config().clone(), - reth_payload_validator::ExecutionPayloadValidator::new(ctx.chain_spec()), - node_config.debug.reorg_frequency, - node_config.debug.reorg_depth, - ) - // Store messages _after_ skipping so that `replay-engine` command - // would replay only the messages that were observed by the engine - // during this run. 
- .maybe_store_messages(node_config.debug.engine_api_store.clone()); - - let max_block = ctx.max_block(network_client.clone()).await?; - let mut hooks = EngineHooks::new(); - - let static_file_producer = ctx.static_file_producer(); - let static_file_producer_events = static_file_producer.lock().events(); - hooks.add(StaticFileHook::new( - static_file_producer.clone(), - Box::new(ctx.task_executor().clone()), - )); - info!(target: "reth::cli", "StaticFileProducer initialized"); - - // Configure the pipeline - let pipeline_exex_handle = - exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); - let (pipeline, client) = if ctx.is_dev() { - eyre::bail!("Dev mode is not supported for legacy engine") - } else { - let pipeline = crate::setup::build_networked_pipeline( - &ctx.toml_config().stages, - network_client.clone(), - consensus.clone(), - ctx.provider_factory().clone(), - ctx.task_executor(), - ctx.sync_metrics_tx(), - ctx.prune_config(), - max_block, - static_file_producer, - ctx.components().block_executor().clone(), - pipeline_exex_handle, - )?; - - (pipeline, network_client.clone()) - }; - - let pipeline_events = pipeline.events(); - - let initial_target = ctx.node_config().debug.tip; - - let mut pruner_builder = ctx.pruner_builder(); - if let Some(exex_manager_handle) = &exex_manager_handle { - pruner_builder = - pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); - } - let pruner = pruner_builder.build_with_provider_factory(ctx.provider_factory().clone()); - - let pruner_events = pruner.events(); - info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); - hooks.add(PruneHook::new(pruner, Box::new(ctx.task_executor().clone()))); - - // Configure the consensus engine - let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::with_channel( - client, - pipeline, - ctx.blockchain_db().clone(), - Box::new(ctx.task_executor().clone()), - Box::new(ctx.components().network().clone()), - max_block, - ctx.components().payload_builder().clone(), - initial_target, - reth_beacon_consensus::MIN_BLOCKS_FOR_PIPELINE_RUN, - consensus_engine_tx, - Box::pin(consensus_engine_stream), - hooks, - )?; - info!(target: "reth::cli", "Consensus engine initialized"); - - let events = stream_select!( - pipeline_events.map(Into::>::into), - if ctx.node_config().debug.tip.is_none() && !ctx.is_dev() { - Either::Left( - ConsensusLayerHealthEvents::new(Box::new(ctx.blockchain_db().clone())) - .map(Into::into), - ) - } else { - Either::Right(stream::empty()) - }, - pruner_events.map(Into::into), - static_file_producer_events.map(Into::into), - ); - ctx.task_executor().spawn_critical( - "events task", - node::handle_events( - Some(Box::new(ctx.components().network().clone())), - Some(ctx.head().number), - events, - ), - ); - - // extract the jwt secret from the args if possible - let jwt_secret = ctx.auth_jwt_secret()?; - - let add_ons_ctx = AddOnsContext { - node: ctx.node_adapter().clone(), - config: ctx.node_config(), - beacon_engine_handle, - jwt_secret, - }; - - let RpcHandle { rpc_server_handles, rpc_registry } = - add_ons.launch_add_ons(add_ons_ctx).await?; - - // Run consensus engine to completion - let (tx, rx) = oneshot::channel(); - info!(target: "reth::cli", "Starting consensus engine"); - ctx.task_executor().spawn_critical_blocking("consensus engine", async move { - let res = beacon_consensus_engine.await; - let _ = tx.send(res); - }); - - if let Some(maybe_custom_etherscan_url) = 
ctx.node_config().debug.etherscan.clone() { - info!(target: "reth::cli", "Using etherscan as consensus client"); - - let chain = ctx.node_config().chain.chain(); - let etherscan_url = maybe_custom_etherscan_url.map(Ok).unwrap_or_else(|| { - // If URL isn't provided, use default Etherscan URL for the chain if it is known - chain - .etherscan_urls() - .map(|urls| urls.0.to_string()) - .ok_or_else(|| eyre::eyre!("failed to get etherscan url for chain: {chain}")) - })?; - - let block_provider = EtherscanBlockProvider::new( - etherscan_url, - chain.etherscan_api_key().ok_or_else(|| { - eyre::eyre!( - "etherscan api key not found for rpc consensus client for chain: {chain}" - ) - })?, - ); - let rpc_consensus_client = DebugConsensusClient::new( - rpc_server_handles.auth.clone(), - Arc::new(block_provider), - ); - ctx.task_executor().spawn_critical("etherscan consensus client", async move { - rpc_consensus_client.run::().await - }); - } - - if let Some(rpc_ws_url) = ctx.node_config().debug.rpc_consensus_ws.clone() { - info!(target: "reth::cli", "Using rpc provider as consensus client"); - - let block_provider = RpcBlockProvider::new(rpc_ws_url); - let rpc_consensus_client = DebugConsensusClient::new( - rpc_server_handles.auth.clone(), - Arc::new(block_provider), - ); - ctx.task_executor().spawn_critical("rpc consensus client", async move { - rpc_consensus_client.run::().await - }); - } - - let full_node = FullNode { - evm_config: ctx.components().evm_config().clone(), - block_executor: ctx.components().block_executor().clone(), - pool: ctx.components().pool().clone(), - network: ctx.components().network().clone(), - provider: ctx.node_adapter().provider.clone(), - payload_builder: ctx.components().payload_builder().clone(), - task_executor: ctx.task_executor().clone(), - config: ctx.node_config().clone(), - data_dir: ctx.data_dir().clone(), - add_ons_handle: RpcHandle { rpc_server_handles, rpc_registry }, - }; - // Notify on node started - on_node_started.on_event(FullNode::clone(&full_node))?; - - let handle = NodeHandle { - node_exit_future: NodeExitFuture::new( - async { Ok(rx.await??) 
}, - full_node.config.debug.terminate, - ), - node: full_node, - }; - - Ok(handle) - } -} diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index d6642a8edfe5e..536ff1a944721 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -13,6 +13,7 @@ reth-node-core.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true +reth-engine-local.workspace = true reth-node-ethereum = { workspace = true, features = ["test-utils"] } reth-tracing.workspace = true reth-trie-db.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index c64cd0495306c..ce25eedaacca8 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -54,6 +54,7 @@ use reth_basic_payload_builder::{ PayloadBuilder, PayloadConfig, }; use reth_chainspec::{Chain, ChainSpec, ChainSpecProvider}; +use reth_engine_local::payload::UnsupportedLocalAttributes; use reth_ethereum_payload_builder::EthereumBuilderConfig; use reth_node_api::{ payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes}, @@ -88,6 +89,9 @@ pub struct CustomPayloadAttributes { pub custom: u64, } +// TODO(mattsse): remove this tmp workaround +impl UnsupportedLocalAttributes for CustomPayloadAttributes {} + /// Custom error type used in payload attributes validation #[derive(Debug, Error)] pub enum CustomError { From 3e980e61d896fbe754280895268f0bd2ad221e4e Mon Sep 17 00:00:00 2001 From: James Prestwich Date: Tue, 7 Jan 2025 12:38:09 -0500 Subject: [PATCH 005/113] Discussion draft: change DB Writer to take value references (#13672) --- .../src/commands/debug_cmd/build_block.rs | 2 +- .../commands/debug_cmd/in_memory_merkle.rs | 2 +- bin/reth/src/commands/debug_cmd/merkle.rs | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/exex/exex/src/backfill/test_utils.rs | 4 +- .../cli/src/commands/import_receipts.rs | 2 +- .../prune/prune/src/segments/user/history.rs | 4 +- crates/stages/stages/src/stages/execution.rs | 2 +- .../stages/src/stages/hashing_account.rs | 6 +-- .../stages/src/stages/hashing_storage.rs | 2 +- crates/stages/stages/src/stages/headers.rs | 6 +-- crates/stages/stages/src/stages/merkle.rs | 2 +- crates/stages/stages/src/stages/mod.rs | 2 +- .../stages/src/stages/sender_recovery.rs | 2 +- crates/stages/stages/src/stages/tx_lookup.rs | 4 +- crates/stages/stages/src/stages/utils.rs | 4 +- .../stages/stages/src/test_utils/test_db.rs | 4 +- crates/storage/db-api/src/cursor.rs | 6 +-- crates/storage/db-api/src/mock.rs | 6 +-- .../storage/db-api/src/models/integer_list.rs | 2 +- crates/storage/db-api/src/models/mod.rs | 8 +-- crates/storage/db-api/src/scale.rs | 2 +- crates/storage/db-api/src/table.rs | 6 +-- crates/storage/db-common/src/init.rs | 6 ++- crates/storage/db/benches/criterion.rs | 4 +- crates/storage/db/benches/hash_keys.rs | 4 +- .../db/src/implementation/mdbx/cursor.rs | 6 +-- .../storage/db/src/implementation/mdbx/mod.rs | 42 ++++++++-------- crates/storage/db/src/tables/raw.rs | 2 +- .../src/providers/blockchain_provider.rs | 2 +- .../provider/src/providers/consistent.rs | 2 +- .../src/providers/database/provider.rs | 50 +++++++++---------- crates/storage/provider/src/traits/block.rs | 2 +- crates/storage/provider/src/traits/state.rs | 2 +- crates/storage/provider/src/writer/mod.rs | 21 ++++---- 
crates/storage/storage-api/src/chain.rs | 4 +- crates/trie/db/src/trie_cursor.rs | 6 +-- crates/trie/db/tests/fuzz_in_memory_nodes.rs | 8 +-- crates/trie/db/tests/trie.rs | 30 +++++------ crates/trie/db/tests/walker.rs | 9 ++-- crates/trie/db/tests/witness.rs | 4 +- 41 files changed, 149 insertions(+), 137 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 1e95c1e33c708..55082c1c37996 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -287,7 +287,7 @@ impl> Command { let provider_rw = provider_factory.provider_rw()?; provider_rw.append_blocks_with_state( Vec::from([block_with_senders]), - execution_outcome, + &execution_outcome, hashed_post_state.into_sorted(), trie_updates, )?; diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index bd8c8d1cdcc1f..b0ac35ee577bb 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -177,7 +177,7 @@ impl> Command { .map_err(|_| BlockValidationError::SenderRecoveryError)?, )?; provider_rw.write_state( - execution_outcome, + &execution_outcome, OriginalValuesKnown::No, StorageLocation::Database, )?; diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index bb79068bd5479..98d0889c89c0c 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -169,7 +169,7 @@ impl> Command { let execution_outcome = executor.finalize(); provider_rw.write_state( - execution_outcome, + &execution_outcome, OriginalValuesKnown::Yes, StorageLocation::Database, )?; diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 3964ea53b7e2e..de7169b1d139c 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1260,7 +1260,7 @@ where provider_rw .append_blocks_with_state( blocks.into_blocks().collect(), - state, + &state, hashed_state_sorted, trie_updates, ) diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index eb7598377f2c9..721071f081e1a 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ b/crates/exex/exex/src/backfill/test_utils.rs @@ -80,7 +80,7 @@ where let block = block.clone().seal_slow(); provider_rw.append_blocks_with_state( vec![block], - execution_outcome, + &execution_outcome, Default::default(), Default::default(), )?; @@ -214,7 +214,7 @@ where let provider_rw = provider_factory.provider_rw()?; provider_rw.append_blocks_with_state( vec![block1.clone(), block2.clone()], - execution_outcome.clone(), + &execution_outcome, Default::default(), Default::default(), )?; diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index 040ecdc003570..e564982cfd574 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -223,7 +223,7 @@ where // finally, write the receipts provider.write_state( - execution_outcome, + &execution_outcome, OriginalValuesKnown::Yes, StorageLocation::StaticFiles, )?; diff --git a/crates/prune/prune/src/segments/user/history.rs b/crates/prune/prune/src/segments/user/history.rs index e27884a927807..4e2218af23fbc 100644 --- a/crates/prune/prune/src/segments/user/history.rs +++ 
b/crates/prune/prune/src/segments/user/history.rs @@ -125,7 +125,7 @@ where cursor.delete_current()?; // Upsert will replace the last shard for this sharded key with // the previous value. - cursor.upsert(RawKey::new(key), prev_value)?; + cursor.upsert(RawKey::new(key), &prev_value)?; Ok(PruneShardOutcome::Updated) } // If there's no previous shard for this sharded key, @@ -151,7 +151,7 @@ where } else { cursor.upsert( RawKey::new(key), - RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)), + &RawValue::new(BlockNumberList::new_pre_sorted(higher_blocks)), )?; Ok(PruneShardOutcome::Updated) } diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index efafc904180db..77b8a78df1021 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -442,7 +442,7 @@ where let time = Instant::now(); // write output - provider.write_state(state, OriginalValuesKnown::Yes, StorageLocation::StaticFiles)?; + provider.write_state(&state, OriginalValuesKnown::Yes, StorageLocation::StaticFiles)?; let db_write_duration = time.elapsed(); debug!( diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 551c10d7711f2..976c775d1ab11 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -100,7 +100,7 @@ impl AccountHashingStage { provider.tx_ref().cursor_write::()?; accounts.sort_by(|a, b| a.0.cmp(&b.0)); for (addr, acc) in &accounts { - account_cursor.append(*addr, *acc)?; + account_cursor.append(*addr, acc)?; } let mut acc_changeset_cursor = @@ -113,7 +113,7 @@ impl AccountHashingStage { bytecode_hash: None, }; let acc_before_tx = AccountBeforeTx { address: *addr, info: Some(prev_acc) }; - acc_changeset_cursor.append(t, acc_before_tx)?; + acc_changeset_cursor.append(t, &acc_before_tx)?; } } @@ -202,7 +202,7 @@ where let (key, value) = item?; hashed_account_cursor - .append(RawKey::::from_vec(key), RawValue::::from_vec(value))?; + .append(RawKey::::from_vec(key), &RawValue::::from_vec(value))?; } } else { // Aggregate all transition changesets and make a list of accounts that have been diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index 6075e62158fdb..4c9788d42e036 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -533,7 +533,7 @@ mod tests { } if !entry.value.is_zero() { - storage_cursor.upsert(bn_address.address(), entry)?; + storage_cursor.upsert(bn_address.address(), &entry)?; } } Ok(()) diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index bf6611d9ed884..f411060bcca31 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -184,12 +184,12 @@ where if first_sync { cursor_header_numbers.append( RawKey::::from_vec(hash), - RawValue::::from_vec(number), + &RawValue::::from_vec(number), )?; } else { cursor_header_numbers.insert( RawKey::::from_vec(hash), - RawValue::::from_vec(number), + &RawValue::::from_vec(number), )?; } } @@ -660,7 +660,7 @@ mod tests { provider .append_blocks_with_state( sealed_blocks, - ExecutionOutcome::default(), + &ExecutionOutcome::default(), HashedPostStateSorted::default(), TrieUpdates::default(), ) diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs 
index f697ced2dc81b..4c163d8042acf 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -648,7 +648,7 @@ mod tests { if !value.is_zero() { let storage_entry = StorageEntry { key: hashed_slot, value }; - storage_cursor.upsert(hashed_address, storage_entry).unwrap(); + storage_cursor.upsert(hashed_address, &storage_entry).unwrap(); } } } diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index e5cf6d525c28f..33a4d76a11f97 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -357,7 +357,7 @@ mod tests { { let provider_rw = db.factory.provider_rw().unwrap(); let mut cursor = provider_rw.tx_ref().cursor_write::().unwrap(); - cursor.insert(key, Default::default()).unwrap(); + cursor.insert(key, &Default::default()).unwrap(); provider_rw.commit().unwrap(); assert!(matches!( diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs index 8d768265465f4..34598714a18bb 100644 --- a/crates/stages/stages/src/stages/sender_recovery.rs +++ b/crates/stages/stages/src/stages/sender_recovery.rs @@ -207,7 +207,7 @@ where } } }; - senders_cursor.append(tx_id, sender)?; + senders_cursor.append(tx_id, &sender)?; processed_transactions += 1; } } diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs index dd15c4f43fca6..4e3f4a8776ed7 100644 --- a/crates/stages/stages/src/stages/tx_lookup.rs +++ b/crates/stages/stages/src/stages/tx_lookup.rs @@ -164,9 +164,9 @@ where let key = RawKey::::from_vec(hash); if append_only { - txhash_cursor.append(key, RawValue::::from_vec(number))? + txhash_cursor.append(key, &RawValue::::from_vec(number))? } else { - txhash_cursor.insert(key, RawValue::::from_vec(number))? + txhash_cursor.insert(key, &RawValue::::from_vec(number))? 
} } diff --git a/crates/stages/stages/src/stages/utils.rs b/crates/stages/stages/src/stages/utils.rs index 169d556348b21..add013d40710d 100644 --- a/crates/stages/stages/src/stages/utils.rs +++ b/crates/stages/stages/src/stages/utils.rs @@ -224,9 +224,9 @@ where let value = BlockNumberList::new_pre_sorted(chunk); if append_only { - cursor.append(key, value)?; + cursor.append(key, &value)?; } else { - cursor.upsert(key, value)?; + cursor.upsert(key, &value)?; } } } diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 5e4c61b6fd36a..c46757adfb246 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -396,7 +396,7 @@ impl TestStageDB { { cursor.delete_current()?; } - cursor.upsert(address, entry)?; + cursor.upsert(address, &entry)?; let mut cursor = tx.cursor_dup_write::()?; if cursor @@ -406,7 +406,7 @@ impl TestStageDB { { cursor.delete_current()?; } - cursor.upsert(hashed_address, hashed_entry)?; + cursor.upsert(hashed_address, &hashed_entry)?; Ok(()) }) diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index 9297f738ab5ab..4a7fccc1280a7 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -104,17 +104,17 @@ pub trait DbDupCursorRO { pub trait DbCursorRW { /// Database operation that will update an existing row if a specified value already /// exists in a table, and insert a new row if the specified value doesn't already exist - fn upsert(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>; + fn upsert(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError>; /// Database operation that will insert a row at a given key. If the key is already /// present, the operation will result in an error. - fn insert(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>; + fn insert(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError>; /// Append value to next cursor item. /// /// This is efficient for pre-sorted data. If the data is not pre-sorted, use /// [`DbCursorRW::insert`]. 
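The signature change runs through every write method: `upsert`, `insert`, and `append` now borrow the value, which is what the `&self` form of `Compress::compress_to_buf` further down in this patch makes possible. A condensed sketch of a writer loop under the new convention (the table choice is illustrative, and `tx` is assumed to be an open read-write transaction):

    // Hedged sketch: one value can be written many times without cloning,
    // since the cursor only borrows it to compress into its write buffer.
    let mut cursor = tx.cursor_write::<tables::CanonicalHeaders>()?;
    let hash = B256::ZERO;
    for number in 0u64..100 {
        // Keys are pre-sorted here (monotonically increasing), so `append`
        // applies; unsorted input would have to use `insert` or `upsert`.
        cursor.append(number, &hash)?;
    }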
- fn append(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError>; + fn append(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError>; /// Delete current value that cursor points to fn delete_current(&mut self) -> Result<(), DatabaseError>; diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs index 5580727fdbed4..ece47f81ee5ae 100644 --- a/crates/storage/db-api/src/mock.rs +++ b/crates/storage/db-api/src/mock.rs @@ -220,7 +220,7 @@ impl DbCursorRW for CursorMock { fn upsert( &mut self, _key: ::Key, - _value: ::Value, + _value: &::Value, ) -> Result<(), DatabaseError> { Ok(()) } @@ -228,7 +228,7 @@ impl DbCursorRW for CursorMock { fn insert( &mut self, _key: ::Key, - _value: ::Value, + _value: &::Value, ) -> Result<(), DatabaseError> { Ok(()) } @@ -236,7 +236,7 @@ impl DbCursorRW for CursorMock { fn append( &mut self, _key: ::Key, - _value: ::Value, + _value: &::Value, ) -> Result<(), DatabaseError> { Ok(()) } diff --git a/crates/storage/db-api/src/models/integer_list.rs b/crates/storage/db-api/src/models/integer_list.rs index 5301ec303e50d..c252d5ee0c874 100644 --- a/crates/storage/db-api/src/models/integer_list.rs +++ b/crates/storage/db-api/src/models/integer_list.rs @@ -165,7 +165,7 @@ impl Compress for IntegerList { self.to_bytes() } - fn compress_to_buf>(self, buf: &mut B) { + fn compress_to_buf>(&self, buf: &mut B) { self.to_mut_bytes(buf) } } diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index e818a1a478d05..232e257a1dc85 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -194,8 +194,8 @@ macro_rules! impl_compression_for_compact { impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? Compress for $name$(<$($generic),*>)? { type Compressed = Vec; - fn compress_to_buf>(self, buf: &mut B) { - let _ = Compact::to_compact(&self, buf); + fn compress_to_buf>(&self, buf: &mut B) { + let _ = Compact::to_compact(self, buf); } } @@ -253,8 +253,8 @@ macro_rules! impl_compression_fixed_compact { Some(self.as_ref()) } - fn compress_to_buf>(self, buf: &mut B) { - let _ = Compact::to_compact(&self, buf); + fn compress_to_buf>(&self, buf: &mut B) { + let _ = Compact::to_compact(self, buf); } } diff --git a/crates/storage/db-api/src/scale.rs b/crates/storage/db-api/src/scale.rs index 591635be054e6..2ab1c3b5e819c 100644 --- a/crates/storage/db-api/src/scale.rs +++ b/crates/storage/db-api/src/scale.rs @@ -21,7 +21,7 @@ where parity_scale_codec::Encode::encode(&self) } - fn compress_to_buf>(self, buf: &mut B) { + fn compress_to_buf>(&self, buf: &mut B) { parity_scale_codec::Encode::encode_to(&self, OutputCompat::wrap_mut(buf)); } } diff --git a/crates/storage/db-api/src/table.rs b/crates/storage/db-api/src/table.rs index a4d3f87b40b56..5715852a5ddd9 100644 --- a/crates/storage/db-api/src/table.rs +++ b/crates/storage/db-api/src/table.rs @@ -32,7 +32,7 @@ pub trait Compress: Send + Sync + Sized + Debug { } /// Compresses data to a given buffer. - fn compress_to_buf>(self, buf: &mut B); + fn compress_to_buf>(&self, buf: &mut B); } /// Trait that will transform the data to be read from the DB. @@ -132,7 +132,7 @@ pub trait TableImporter: DbTxMut { for kv in source_tx.cursor_read::()?.walk(None)? { let (k, v) = kv?; - destination_cursor.append(k, v)?; + destination_cursor.append(k, &v)?; } Ok(()) @@ -157,7 +157,7 @@ pub trait TableImporter: DbTxMut { }; for row in source_range? 
{ let (key, value) = row?; - destination_cursor.append(key, value)?; + destination_cursor.append(key, &value)?; } Ok(()) diff --git a/crates/storage/db-common/src/init.rs b/crates/storage/db-common/src/init.rs index 30b5bd2c885c7..9cc1e8d2c05d3 100644 --- a/crates/storage/db-common/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -252,7 +252,11 @@ where Vec::new(), ); - provider.write_state(execution_outcome, OriginalValuesKnown::Yes, StorageLocation::Database)?; + provider.write_state( + &execution_outcome, + OriginalValuesKnown::Yes, + StorageLocation::Database, + )?; trace!(target: "reth::cli", "Inserted state"); diff --git a/crates/storage/db/benches/criterion.rs b/crates/storage/db/benches/criterion.rs index b8102326d0a23..abfc8be33daa0 100644 --- a/crates/storage/db/benches/criterion.rs +++ b/crates/storage/db/benches/criterion.rs @@ -137,7 +137,7 @@ where let tx = db.tx_mut().expect("tx"); let mut crsr = tx.cursor_write::().expect("cursor"); for (k, _, v, _) in input { - crsr.append(k, v).expect("submit"); + crsr.append(k, &v).expect("submit"); } tx.inner.commit().unwrap() }, @@ -157,7 +157,7 @@ where let mut crsr = tx.cursor_write::().expect("cursor"); for index in RANDOM_INDEXES { let (k, _, v, _) = input.get(index).unwrap().clone(); - crsr.insert(k, v).expect("submit"); + crsr.insert(k, &v).expect("submit"); } tx.inner.commit().unwrap() diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index e4e87014eb88d..cb145789de9d6 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -184,7 +184,7 @@ where let mut crsr = tx.cursor_write::().expect("cursor"); black_box({ for (k, v) in input { - crsr.append(k, v).expect("submit"); + crsr.append(k, &v).expect("submit"); } tx.inner.commit().unwrap() @@ -202,7 +202,7 @@ where let mut crsr = tx.cursor_write::().expect("cursor"); black_box({ for (k, v) in input { - crsr.insert(k, v).expect("submit"); + crsr.insert(k, &v).expect("submit"); } tx.inner.commit().unwrap() diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 756a622bcb035..ec5f3b7c28248 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -241,7 +241,7 @@ impl DbCursorRW for Cursor { /// it will append the value to the subkey, even if the subkeys are the same. So if you want /// to properly upsert, you'll need to `seek_exact` & `delete_current` if the key+subkey was /// found, before calling `upsert`. - fn upsert(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { + fn upsert(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError> { let key = key.encode(); let value = compress_to_buf_or_ref!(self, value); self.execute_with_operation_metric( @@ -263,7 +263,7 @@ impl DbCursorRW for Cursor { ) } - fn insert(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { + fn insert(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError> { let key = key.encode(); let value = compress_to_buf_or_ref!(self, value); self.execute_with_operation_metric( @@ -287,7 +287,7 @@ impl DbCursorRW for Cursor { /// Appends the data to the end of the table. 
Consequently, the append operation /// will fail if the inserted key is less than the last table key - fn append(&mut self, key: T::Key, value: T::Value) -> Result<(), DatabaseError> { + fn append(&mut self, key: T::Key, value: &T::Value) -> Result<(), DatabaseError> { let key = key.encode(); let value = compress_to_buf_or_ref!(self, value); self.execute_with_operation_metric( diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 8c3d36308892f..d2e0d91b1d23c 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -479,7 +479,7 @@ impl DatabaseEnv { if Some(&version) != last_version.as_ref() { version_cursor.upsert( SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(), - version, + &version, )?; tx.commit()?; } @@ -580,8 +580,8 @@ mod tests { let entry_0 = StorageEntry { key: B256::with_last_byte(1), value: U256::from(0) }; let entry_1 = StorageEntry { key: B256::with_last_byte(1), value: U256::from(1) }; - dup_cursor.upsert(Address::with_last_byte(1), entry_0).expect(ERROR_UPSERT); - dup_cursor.upsert(Address::with_last_byte(1), entry_1).expect(ERROR_UPSERT); + dup_cursor.upsert(Address::with_last_byte(1), &entry_0).expect(ERROR_UPSERT); + dup_cursor.upsert(Address::with_last_byte(1), &entry_1).expect(ERROR_UPSERT); assert_eq!( dup_cursor.walk(None).unwrap().collect::, _>>(), @@ -910,12 +910,12 @@ mod tests { let mut cursor = tx.cursor_write::().unwrap(); // INSERT - assert_eq!(cursor.insert(key_to_insert, B256::ZERO), Ok(())); + assert_eq!(cursor.insert(key_to_insert, &B256::ZERO), Ok(())); assert_eq!(cursor.current(), Ok(Some((key_to_insert, B256::ZERO)))); // INSERT (failure) assert_eq!( - cursor.insert(key_to_insert, B256::ZERO), + cursor.insert(key_to_insert, &B256::ZERO), Err(DatabaseWriteError { info: Error::KeyExist.into(), operation: DatabaseWriteOperation::CursorInsert, @@ -947,11 +947,11 @@ mod tests { let subkey2 = B256::random(); let entry1 = StorageEntry { key: subkey1, value: U256::ZERO }; - assert!(dup_cursor.insert(key, entry1).is_ok()); + assert!(dup_cursor.insert(key, &entry1).is_ok()); // Can't insert let entry2 = StorageEntry { key: subkey2, value: U256::ZERO }; - assert!(dup_cursor.insert(key, entry2).is_err()); + assert!(dup_cursor.insert(key, &entry2).is_err()); } #[test] @@ -964,9 +964,9 @@ mod tests { let key3 = Address::with_last_byte(3); let mut cursor = tx.cursor_write::().unwrap(); - assert!(cursor.insert(key1, Account::default()).is_ok()); - assert!(cursor.insert(key2, Account::default()).is_ok()); - assert!(cursor.insert(key3, Account::default()).is_ok()); + assert!(cursor.insert(key1, &Account::default()).is_ok()); + assert!(cursor.insert(key2, &Account::default()).is_ok()); + assert!(cursor.insert(key3, &Account::default()).is_ok()); // Seek & delete key2 cursor.seek_exact(key2).unwrap(); @@ -1002,7 +1002,7 @@ mod tests { assert_eq!(cursor.current(), Ok(Some((9, B256::ZERO)))); for pos in (2..=8).step_by(2) { - assert_eq!(cursor.insert(pos, B256::ZERO), Ok(())); + assert_eq!(cursor.insert(pos, &B256::ZERO), Ok(())); assert_eq!(cursor.current(), Ok(Some((pos, B256::ZERO)))); } tx.commit().expect(ERROR_COMMIT); @@ -1031,7 +1031,7 @@ mod tests { let key_to_append = 5; let tx = db.tx_mut().expect(ERROR_INIT_TX); let mut cursor = tx.cursor_write::().unwrap(); - assert_eq!(cursor.append(key_to_append, B256::ZERO), Ok(())); + assert_eq!(cursor.append(key_to_append, &B256::ZERO), Ok(())); 
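The DUPSORT caveat documented in `cursor.rs` above is why call sites in this patch keep pairing `seek_by_key_subkey` plus `delete_current` with `upsert`. A condensed sketch of that pattern under the new by-reference signatures, following the shapes visible in these hunks:

    // Hedged sketch: for a dup-sorted table, replace an existing
    // (key, subkey) entry instead of accumulating duplicates under it.
    let mut cursor = tx.cursor_dup_write::<tables::PlainStorageState>()?;
    if cursor
        .seek_by_key_subkey(address, entry.key)?
        .filter(|e| e.key == entry.key)
        .is_some()
    {
        cursor.delete_current()?;
    }
    cursor.upsert(address, &entry)?;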
tx.commit().expect(ERROR_COMMIT); // Confirm the result @@ -1059,7 +1059,7 @@ mod tests { let tx = db.tx_mut().expect(ERROR_INIT_TX); let mut cursor = tx.cursor_write::().unwrap(); assert_eq!( - cursor.append(key_to_append, B256::ZERO), + cursor.append(key_to_append, &B256::ZERO), Err(DatabaseWriteError { info: Error::KeyMismatch.into(), operation: DatabaseWriteOperation::CursorAppend, @@ -1088,15 +1088,15 @@ mod tests { let key = Address::random(); let account = Account::default(); - cursor.upsert(key, account).expect(ERROR_UPSERT); + cursor.upsert(key, &account).expect(ERROR_UPSERT); assert_eq!(cursor.seek_exact(key), Ok(Some((key, account)))); let account = Account { nonce: 1, ..Default::default() }; - cursor.upsert(key, account).expect(ERROR_UPSERT); + cursor.upsert(key, &account).expect(ERROR_UPSERT); assert_eq!(cursor.seek_exact(key), Ok(Some((key, account)))); let account = Account { nonce: 2, ..Default::default() }; - cursor.upsert(key, account).expect(ERROR_UPSERT); + cursor.upsert(key, &account).expect(ERROR_UPSERT); assert_eq!(cursor.seek_exact(key), Ok(Some((key, account)))); let mut dup_cursor = tx.cursor_dup_write::().unwrap(); @@ -1104,12 +1104,12 @@ mod tests { let value = U256::from(1); let entry1 = StorageEntry { key: subkey, value }; - dup_cursor.upsert(key, entry1).expect(ERROR_UPSERT); + dup_cursor.upsert(key, &entry1).expect(ERROR_UPSERT); assert_eq!(dup_cursor.seek_by_key_subkey(key, subkey), Ok(Some(entry1))); let value = U256::from(2); let entry2 = StorageEntry { key: subkey, value }; - dup_cursor.upsert(key, entry2).expect(ERROR_UPSERT); + dup_cursor.upsert(key, &entry2).expect(ERROR_UPSERT); assert_eq!(dup_cursor.seek_by_key_subkey(key, subkey), Ok(Some(entry1))); assert_eq!(dup_cursor.next_dup_val(), Ok(Some(entry2))); } @@ -1127,7 +1127,7 @@ mod tests { .try_for_each(|val| { cursor.append( transition_id, - AccountBeforeTx { address: Address::with_last_byte(val), info: None }, + &AccountBeforeTx { address: Address::with_last_byte(val), info: None }, ) }) .expect(ERROR_APPEND); @@ -1153,7 +1153,7 @@ mod tests { assert_eq!( cursor.append( transition_id - 1, - AccountBeforeTx { address: Address::with_last_byte(subkey_to_append), info: None } + &AccountBeforeTx { address: Address::with_last_byte(subkey_to_append), info: None } ), Err(DatabaseWriteError { info: Error::KeyMismatch.into(), @@ -1166,7 +1166,7 @@ mod tests { assert_eq!( cursor.append( transition_id, - AccountBeforeTx { address: Address::with_last_byte(subkey_to_append), info: None } + &AccountBeforeTx { address: Address::with_last_byte(subkey_to_append), info: None } ), Ok(()) ); diff --git a/crates/storage/db/src/tables/raw.rs b/crates/storage/db/src/tables/raw.rs index 453116ee5e358..18fe0da23cd80 100644 --- a/crates/storage/db/src/tables/raw.rs +++ b/crates/storage/db/src/tables/raw.rs @@ -168,7 +168,7 @@ impl Compress for RawValue { self.value } - fn compress_to_buf>(self, buf: &mut B) { + fn compress_to_buf>(&self, buf: &mut B) { buf.put_slice(self.value.as_slice()) } } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 20c371cdf1c58..a868afb515140 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1810,7 +1810,7 @@ mod tests { .into_iter() .map(|b| b.seal_with_senders().expect("failed to seal block with senders")) .collect(), - ExecutionOutcome { + &ExecutionOutcome { bundle: BundleState::new( 
database_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index f2872352b8f3f..049fd1b4b1f80 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1730,7 +1730,7 @@ mod tests { .into_iter() .map(|b| b.seal_with_senders().expect("failed to seal block with senders")) .collect(), - ExecutionOutcome { + &ExecutionOutcome { bundle: BundleState::new( database_state.into_iter().map(|(address, (account, _))| { (address, None, Some(account.into()), Default::default()) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 89b2ae5b60012..c0a98f95e9731 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -1670,7 +1670,7 @@ impl StageCheckpointWriter for DatabaseProvider StateWriter fn write_state( &self, - execution_outcome: ExecutionOutcome, + execution_outcome: &ExecutionOutcome, is_value_known: OriginalValuesKnown, write_receipts_to: StorageLocation, ) -> ProviderResult<()> { @@ -1785,7 +1785,7 @@ impl StateWriter }) .transpose()?; - for (idx, receipts) in execution_outcome.receipts.into_iter().enumerate() { + for (idx, receipts) in execution_outcome.receipts.iter().enumerate() { let block_number = execution_outcome.first_block + idx as u64; // Increment block number for receipts static file writer @@ -1798,11 +1798,11 @@ impl StateWriter .map(|(_, indices)| indices.first_tx_num()) .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; - for (idx, receipt) in receipts.into_iter().enumerate() { + for (idx, receipt) in receipts.iter().enumerate() { let receipt_idx = first_tx_index + idx as u64; if let Some(receipt) = receipt { if let Some(writer) = &mut receipts_static_writer { - writer.append_receipt(receipt_idx, &receipt)?; + writer.append_receipt(receipt_idx, receipt)?; } if let Some(cursor) = &mut receipts_cursor { @@ -1897,7 +1897,7 @@ impl StateWriter for (address, account) in changes.accounts { if let Some(account) = account { tracing::trace!(?address, "Updating plain state account"); - accounts_cursor.upsert(address, account.into())?; + accounts_cursor.upsert(address, &account.into())?; } else if accounts_cursor.seek_exact(address)?.is_some() { tracing::trace!(?address, "Deleting plain state account"); accounts_cursor.delete_current()?; @@ -1908,7 +1908,7 @@ impl StateWriter tracing::trace!(len = changes.contracts.len(), "Writing bytecodes"); let mut bytecodes_cursor = self.tx_ref().cursor_write::()?; for (hash, bytecode) in changes.contracts { - bytecodes_cursor.upsert(hash, Bytecode(bytecode))?; + bytecodes_cursor.upsert(hash, &Bytecode(bytecode))?; } // Write new storage state and wipe storage if needed. 
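Because `write_state` now borrows the outcome, the caller retains ownership after the write, which is also what lets the receipts loop above switch from `into_iter()` to `iter()`. A minimal caller-side sketch (import paths are approximate):

    // Hedged sketch: the outcome stays usable after being written.
    provider_rw.write_state(&execution_outcome, OriginalValuesKnown::Yes, StorageLocation::Database)?;
    let receipt_count: usize = execution_outcome.receipts.iter().map(|block| block.len()).sum();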
@@ -1936,7 +1936,7 @@ impl StateWriter } if !entry.value.is_zero() { - storages_cursor.upsert(address, entry)?; + storages_cursor.upsert(address, &entry)?; } } } @@ -1949,7 +1949,7 @@ impl StateWriter let mut hashed_accounts_cursor = self.tx_ref().cursor_write::()?; for (hashed_address, account) in hashed_state.accounts().accounts_sorted() { if let Some(account) = account { - hashed_accounts_cursor.upsert(hashed_address, account)?; + hashed_accounts_cursor.upsert(hashed_address, &account)?; } else if hashed_accounts_cursor.seek_exact(hashed_address)?.is_some() { hashed_accounts_cursor.delete_current()?; } @@ -1975,7 +1975,7 @@ impl StateWriter } if !entry.value.is_zero() { - hashed_storage_cursor.upsert(*hashed_address, entry)?; + hashed_storage_cursor.upsert(*hashed_address, &entry)?; } } } @@ -2047,7 +2047,7 @@ impl StateWriter if old_account != new_account { let existing_entry = plain_accounts_cursor.seek_exact(*address)?; if let Some(account) = old_account { - plain_accounts_cursor.upsert(*address, *account)?; + plain_accounts_cursor.upsert(*address, account)?; } else if existing_entry.is_some() { plain_accounts_cursor.delete_current()?; } @@ -2068,7 +2068,7 @@ impl StateWriter // insert value if needed if !old_storage_value.is_zero() { - plain_storage_cursor.upsert(*address, storage_entry)?; + plain_storage_cursor.upsert(*address, &storage_entry)?; } } } @@ -2147,7 +2147,7 @@ impl StateWriter if old_account != new_account { let existing_entry = plain_accounts_cursor.seek_exact(*address)?; if let Some(account) = old_account { - plain_accounts_cursor.upsert(*address, *account)?; + plain_accounts_cursor.upsert(*address, account)?; } else if existing_entry.is_some() { plain_accounts_cursor.delete_current()?; } @@ -2168,7 +2168,7 @@ impl StateWriter // insert value if needed if !old_storage_value.is_zero() { - plain_storage_cursor.upsert(*address, storage_entry)?; + plain_storage_cursor.upsert(*address, &storage_entry)?; } } } @@ -2255,7 +2255,7 @@ impl TrieWriter for DatabaseProvider Some(node) => { if !nibbles.0.is_empty() { num_entries += 1; - account_trie_cursor.upsert(nibbles, node.clone())?; + account_trie_cursor.upsert(nibbles, node)?; } } None => { @@ -2330,7 +2330,7 @@ impl HashingWriter for DatabaseProvi let mut hashed_accounts_cursor = self.tx.cursor_write::()?; for (hashed_address, account) in &hashed_accounts { if let Some(account) = account { - hashed_accounts_cursor.upsert(*hashed_address, *account)?; + hashed_accounts_cursor.upsert(*hashed_address, account)?; } else if hashed_accounts_cursor.seek_exact(*hashed_address)?.is_some() { hashed_accounts_cursor.delete_current()?; } @@ -2360,7 +2360,7 @@ impl HashingWriter for DatabaseProvi changesets.into_iter().map(|(ad, ac)| (keccak256(ad), ac)).collect::>(); for (hashed_address, account) in &hashed_accounts { if let Some(account) = account { - hashed_accounts_cursor.upsert(*hashed_address, *account)?; + hashed_accounts_cursor.upsert(*hashed_address, account)?; } else if hashed_accounts_cursor.seek_exact(*hashed_address)?.is_some() { hashed_accounts_cursor.delete_current()?; } @@ -2397,7 +2397,7 @@ impl HashingWriter for DatabaseProvi } if !value.is_zero() { - hashed_storage.upsert(hashed_address, StorageEntry { key, value })?; + hashed_storage.upsert(hashed_address, &StorageEntry { key, value })?; } } Ok(hashed_storage_keys) @@ -2449,7 +2449,7 @@ impl HashingWriter for DatabaseProvi } if !value.is_zero() { - hashed_storage_cursor.upsert(hashed_address, StorageEntry { key, value })?; + 
hashed_storage_cursor.upsert(hashed_address, &StorageEntry { key, value })?; } Ok(()) }) @@ -2561,7 +2561,7 @@ impl HistoryWriter for DatabaseProvi if !partial_shard.is_empty() { cursor.insert( ShardedKey::last(address), - BlockNumberList::new_pre_sorted(partial_shard), + &BlockNumberList::new_pre_sorted(partial_shard), )?; } } @@ -2619,7 +2619,7 @@ impl HistoryWriter for DatabaseProvi if !partial_shard.is_empty() { cursor.insert( StorageShardedKey::last(address, storage_key), - BlockNumberList::new_pre_sorted(partial_shard), + &BlockNumberList::new_pre_sorted(partial_shard), )?; } } @@ -2864,7 +2864,7 @@ impl BlockWrite let mut durations_recorder = metrics::DurationsRecorder::default(); // insert block meta - block_indices_cursor.append(*block_number, block_indices)?; + block_indices_cursor.append(*block_number, &block_indices)?; durations_recorder.record_relative(metrics::Action::InsertBlockBodyIndices); @@ -2872,7 +2872,7 @@ impl BlockWrite // write transaction block index if !body.transactions().is_empty() { - tx_block_cursor.append(block_indices.last_tx_num(), *block_number)?; + tx_block_cursor.append(block_indices.last_tx_num(), block_number)?; durations_recorder.record_relative(metrics::Action::InsertTransactionBlocks); } @@ -2882,7 +2882,7 @@ impl BlockWrite writer.append_transaction(next_tx_num, transaction)?; } if let Some(cursor) = tx_cursor.as_mut() { - cursor.append(next_tx_num, transaction.clone())?; + cursor.append(next_tx_num, transaction)?; } // Increment transaction id for each transaction. @@ -2992,7 +2992,7 @@ impl BlockWrite fn append_blocks_with_state( &self, blocks: Vec>, - execution_outcome: ExecutionOutcome, + execution_outcome: &ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()> { diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 9c5821057fc85..a0b9657e4032c 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -133,7 +133,7 @@ pub trait BlockWriter: Send + Sync { fn append_blocks_with_state( &self, blocks: Vec>, - execution_outcome: ExecutionOutcome, + execution_outcome: &ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, ) -> ProviderResult<()>; diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index 2c4ee2cfa8d33..b49e05db2f77d 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -18,7 +18,7 @@ pub trait StateWriter { /// `Some`. It should be `None` if there is any kind of pruning/filtering over the receipts. fn write_state( &self, - execution_outcome: ExecutionOutcome, + execution_outcome: &ExecutionOutcome, is_value_known: OriginalValuesKnown, write_receipts_to: StorageLocation, ) -> ProviderResult<()>; diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index c1ce33978fd01..022c71f81c447 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -169,7 +169,7 @@ where // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. 
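The `writer/mod.rs` hunk just below makes the same point at the one call site that previously had to clone: the outcome arrives shared behind an `Arc`, and `Arc::unwrap_or_clone` deep-cloned it whenever the `Arc` was still shared. With the borrowed API, deref coercion from `&Arc<ExecutionOutcome>` to `&ExecutionOutcome` suffices. A self-contained sketch under those assumptions (import paths for `OriginalValuesKnown` and `StorageLocation` are approximate):

    use std::sync::Arc;

    // Hedged sketch against the StateWriter trait as changed in this patch.
    fn write_shared_state<W: StateWriter>(
        writer: &W,
        outcome: Arc<ExecutionOutcome>,
    ) -> ProviderResult<()> {
        // &outcome coerces to &ExecutionOutcome; no unwrap_or_clone needed.
        writer.write_state(&outcome, OriginalValuesKnown::No, StorageLocation::StaticFiles)
    }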
self.database().write_state( - Arc::unwrap_or_clone(execution_output), + &execution_output, OriginalValuesKnown::No, StorageLocation::StaticFiles, )?; @@ -273,10 +273,13 @@ mod tests { for address in addresses { let hashed_address = keccak256(address); accounts_cursor - .insert(hashed_address, Account { nonce: 1, ..Default::default() }) + .insert(hashed_address, &Account { nonce: 1, ..Default::default() }) .unwrap(); storage_cursor - .insert(hashed_address, StorageEntry { key: hashed_slot, value: U256::from(1) }) + .insert( + hashed_address, + &StorageEntry { key: hashed_slot, value: U256::from(1) }, + ) .unwrap(); } provider_rw.commit().unwrap(); @@ -496,7 +499,7 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); provider - .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); // Check plain storage state @@ -596,7 +599,7 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 2, Vec::new()); provider - .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); assert_eq!( @@ -663,7 +666,7 @@ mod tests { let outcome = ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); provider - .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -811,7 +814,7 @@ mod tests { let outcome: ExecutionOutcome = ExecutionOutcome::new(bundle, Receipts::default(), 1, Vec::new()); provider - .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider @@ -976,7 +979,7 @@ mod tests { let outcome = ExecutionOutcome::new(init_state.take_bundle(), Receipts::default(), 0, Vec::new()); provider - .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut state = State::builder().with_bundle_update().build(); @@ -1023,7 +1026,7 @@ mod tests { let outcome = ExecutionOutcome::new(state.take_bundle(), Receipts::default(), 1, Vec::new()); provider - .write_state(outcome, OriginalValuesKnown::Yes, StorageLocation::Database) + .write_state(&outcome, OriginalValuesKnown::Yes, StorageLocation::Database) .expect("Could not write bundle state to DB"); let mut storage_changeset_cursor = provider diff --git a/crates/storage/storage-api/src/chain.rs b/crates/storage/storage-api/src/chain.rs index 6e26e2666d44d..6306f418fee0e 100644 --- a/crates/storage/storage-api/src/chain.rs +++ b/crates/storage/storage-api/src/chain.rs @@ -108,14 +108,14 @@ where // Write ommers if any if !body.ommers.is_empty() { - ommers_cursor.append(block_number, StoredBlockOmmers { ommers: body.ommers })?; + ommers_cursor.append(block_number, &StoredBlockOmmers { ommers: body.ommers })?; } // Write withdrawals if any if let Some(withdrawals) = body.withdrawals { if !withdrawals.is_empty() { withdrawals_cursor - 
.append(block_number, StoredBlockWithdrawals { withdrawals })?; + .append(block_number, &StoredBlockWithdrawals { withdrawals })?; } } } diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index b364e9a86f14b..4dddc5c4da61c 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -158,7 +158,7 @@ where if let Some(node) = maybe_updated { self.cursor.upsert( self.hashed_address, - StorageTrieEntry { nibbles, node: node.clone() }, + &StorageTrieEntry { nibbles, node: node.clone() }, )?; } } @@ -229,7 +229,7 @@ mod tests { cursor .upsert( key.into(), - BranchNodeCompact::new( + &BranchNodeCompact::new( 0b0000_0010_0000_0001, 0b0000_0010_0000_0001, 0, @@ -264,7 +264,7 @@ mod tests { let value = BranchNodeCompact::new(1, 1, 1, vec![B256::random()], None); cursor - .upsert(hashed_address, StorageTrieEntry { nibbles: key.clone(), node: value.clone() }) + .upsert(hashed_address, &StorageTrieEntry { nibbles: key.clone(), node: value.clone() }) .unwrap(); let mut cursor = DatabaseStorageTrieCursor::new(cursor, hashed_address); diff --git a/crates/trie/db/tests/fuzz_in_memory_nodes.rs b/crates/trie/db/tests/fuzz_in_memory_nodes.rs index e293b0caaf712..874f71bfc40b3 100644 --- a/crates/trie/db/tests/fuzz_in_memory_nodes.rs +++ b/crates/trie/db/tests/fuzz_in_memory_nodes.rs @@ -31,7 +31,7 @@ proptest! { // Insert init state into database for (hashed_address, balance) in init_state.clone() { - hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); + hashed_account_cursor.upsert(hashed_address, &Account { balance, ..Default::default() }).unwrap(); } // Compute initial root and updates @@ -46,7 +46,7 @@ proptest! { for (hashed_address, balance) in state_update { if let Some(balance) = balance { let account = Account { balance, ..Default::default() }; - hashed_account_cursor.upsert(hashed_address, account).unwrap(); + hashed_account_cursor.upsert(hashed_address, &account).unwrap(); hashed_state.accounts.insert(hashed_address, Some(account)); state.insert(hashed_address, balance); } else { @@ -85,7 +85,7 @@ proptest! { // Insert init state into database for (hashed_slot, value) in init_storage.clone() { hashed_storage_cursor - .upsert(hashed_address, StorageEntry { key: hashed_slot, value }) + .upsert(hashed_address, &StorageEntry { key: hashed_slot, value }) .unwrap(); } @@ -102,7 +102,7 @@ proptest! 
{ let mut hashed_storage = HashedStorage::new(is_deleted); for (hashed_slot, value) in storage_update.clone() { hashed_storage_cursor - .upsert(hashed_address, StorageEntry { key: hashed_slot, value }) + .upsert(hashed_address, &StorageEntry { key: hashed_slot, value }) .unwrap(); hashed_storage.storage.insert(hashed_slot, value); } diff --git a/crates/trie/db/tests/trie.rs b/crates/trie/db/tests/trie.rs index 45c72ffd51d67..a768bcad4205e 100644 --- a/crates/trie/db/tests/trie.rs +++ b/crates/trie/db/tests/trie.rs @@ -57,7 +57,7 @@ fn incremental_vs_full_root(inputs: &[&str], modified: &str) { let data = inputs.iter().map(|x| B256::from_str(x).unwrap()); let value = U256::from(0); for key in data { - hashed_storage_cursor.upsert(hashed_address, StorageEntry { key, value }).unwrap(); + hashed_storage_cursor.upsert(hashed_address, &StorageEntry { key, value }).unwrap(); } // Generate the intermediate nodes on the receiving end of the channel @@ -71,7 +71,7 @@ fn incremental_vs_full_root(inputs: &[&str], modified: &str) { hashed_storage_cursor.delete_current().unwrap(); } hashed_storage_cursor - .upsert(hashed_address, StorageEntry { key: modified_key, value }) + .upsert(hashed_address, &StorageEntry { key: modified_key, value }) .unwrap(); // 2. Calculate full merkle root @@ -313,7 +313,7 @@ fn storage_root_regression() { let mut hashed_storage_cursor = tx.tx_ref().cursor_dup_write::().unwrap(); for (hashed_slot, value) in storage.clone() { - hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap(); + hashed_storage_cursor.upsert(key3, &StorageEntry { key: hashed_slot, value }).unwrap(); } tx.commit().unwrap(); let tx = factory.provider_rw().unwrap(); @@ -349,7 +349,7 @@ fn account_and_storage_trie() { let key1 = B256::from_str("b000000000000000000000000000000000000000000000000000000000000000").unwrap(); let account1 = Account { nonce: 0, balance: U256::from(3).mul(ether), bytecode_hash: None }; - hashed_account_cursor.upsert(key1, account1).unwrap(); + hashed_account_cursor.upsert(key1, &account1).unwrap(); hash_builder.add_leaf(Nibbles::unpack(key1), &encode_account(account1, None)); // Some address whose hash starts with 0xB040 @@ -358,7 +358,7 @@ fn account_and_storage_trie() { assert_eq!(key2[0], 0xB0); assert_eq!(key2[1], 0x40); let account2 = Account { nonce: 0, balance: ether, ..Default::default() }; - hashed_account_cursor.upsert(key2, account2).unwrap(); + hashed_account_cursor.upsert(key2, &account2).unwrap(); hash_builder.add_leaf(Nibbles::unpack(key2), &encode_account(account2, None)); // Some address whose hash starts with 0xB041 @@ -370,7 +370,7 @@ fn account_and_storage_trie() { B256::from_str("5be74cad16203c4905c068b012a2e9fb6d19d036c410f16fd177f337541440dd").unwrap(); let account3 = Account { nonce: 0, balance: U256::from(2).mul(ether), bytecode_hash: Some(code_hash) }; - hashed_account_cursor.upsert(key3, account3).unwrap(); + hashed_account_cursor.upsert(key3, &account3).unwrap(); for (hashed_slot, value) in storage { if hashed_storage_cursor .seek_by_key_subkey(key3, hashed_slot) @@ -380,7 +380,7 @@ fn account_and_storage_trie() { { hashed_storage_cursor.delete_current().unwrap(); } - hashed_storage_cursor.upsert(key3, StorageEntry { key: hashed_slot, value }).unwrap(); + hashed_storage_cursor.upsert(key3, &StorageEntry { key: hashed_slot, value }).unwrap(); } let account3_storage_root = StorageRoot::from_tx(tx.tx_ref(), address3).root().unwrap(); hash_builder @@ -389,19 +389,19 @@ fn account_and_storage_trie() { let key4a = 
B256::from_str("B1A0000000000000000000000000000000000000000000000000000000000000").unwrap(); let account4a = Account { nonce: 0, balance: U256::from(4).mul(ether), ..Default::default() }; - hashed_account_cursor.upsert(key4a, account4a).unwrap(); + hashed_account_cursor.upsert(key4a, &account4a).unwrap(); hash_builder.add_leaf(Nibbles::unpack(key4a), &encode_account(account4a, None)); let key5 = B256::from_str("B310000000000000000000000000000000000000000000000000000000000000").unwrap(); let account5 = Account { nonce: 0, balance: U256::from(8).mul(ether), ..Default::default() }; - hashed_account_cursor.upsert(key5, account5).unwrap(); + hashed_account_cursor.upsert(key5, &account5).unwrap(); hash_builder.add_leaf(Nibbles::unpack(key5), &encode_account(account5, None)); let key6 = B256::from_str("B340000000000000000000000000000000000000000000000000000000000000").unwrap(); let account6 = Account { nonce: 0, balance: U256::from(1).mul(ether), ..Default::default() }; - hashed_account_cursor.upsert(key6, account6).unwrap(); + hashed_account_cursor.upsert(key6, &account6).unwrap(); hash_builder.add_leaf(Nibbles::unpack(key6), &encode_account(account6, None)); // Populate account & storage trie DB tables @@ -452,7 +452,7 @@ fn account_and_storage_trie() { let key4b = keccak256(address4b); assert_eq!(key4b.0[0], key4a.0[0]); let account4b = Account { nonce: 0, balance: U256::from(5).mul(ether), bytecode_hash: None }; - hashed_account_cursor.upsert(key4b, account4b).unwrap(); + hashed_account_cursor.upsert(key4b, &account4b).unwrap(); let mut prefix_set = PrefixSetMut::default(); prefix_set.insert(Nibbles::unpack(key4b)); @@ -649,7 +649,7 @@ proptest! { let should_generate_changeset = !state.is_empty(); let mut changes = PrefixSetMut::default(); for (hashed_address, balance) in accounts.clone() { - hashed_account_cursor.upsert(hashed_address, Account { balance, ..Default::default() }).unwrap(); + hashed_account_cursor.upsert(hashed_address, &Account { balance, ..Default::default() }).unwrap(); if should_generate_changeset { changes.insert(Nibbles::unpack(hashed_address)); } @@ -703,7 +703,9 @@ fn extension_node_storage_trie( hex!("30af8f0000000000000000000000000000000000000000000000000000000000"), hex!("3100000000000000000000000000000000000000000000000000000000000000"), ] { - hashed_storage.upsert(hashed_address, StorageEntry { key: B256::new(key), value }).unwrap(); + hashed_storage + .upsert(hashed_address, &StorageEntry { key: B256::new(key), value }) + .unwrap(); hb.add_leaf(Nibbles::unpack(key), &alloy_rlp::encode_fixed_size(&value)); } @@ -730,7 +732,7 @@ fn extension_node_trie( hex!("30af8f0000000000000000000000000000000000000000000000000000000000"), hex!("3100000000000000000000000000000000000000000000000000000000000000"), ] { - hashed_accounts.upsert(B256::new(key), a).unwrap(); + hashed_accounts.upsert(B256::new(key), &a).unwrap(); hb.add_leaf(Nibbles::unpack(key), &val); } diff --git a/crates/trie/db/tests/walker.rs b/crates/trie/db/tests/walker.rs index 0e0b094920b1c..2194a2fadf606 100644 --- a/crates/trie/db/tests/walker.rs +++ b/crates/trie/db/tests/walker.rs @@ -38,7 +38,7 @@ fn walk_nodes_with_common_prefix() { let mut account_cursor = tx.tx_ref().cursor_write::().unwrap(); for (k, v) in &inputs { - account_cursor.upsert(k.clone().into(), v.clone()).unwrap(); + account_cursor.upsert(k.clone().into(), &v.clone()).unwrap(); } let account_trie = DatabaseAccountTrieCursor::new(account_cursor); test_cursor(account_trie, &expected); @@ -47,7 +47,10 @@ fn walk_nodes_with_common_prefix() { 
let mut storage_cursor = tx.tx_ref().cursor_dup_write::().unwrap(); for (k, v) in &inputs { storage_cursor - .upsert(hashed_address, StorageTrieEntry { nibbles: k.clone().into(), node: v.clone() }) + .upsert( + hashed_address, + &StorageTrieEntry { nibbles: k.clone().into(), node: v.clone() }, + ) .unwrap(); } let storage_trie = DatabaseStorageTrieCursor::new(storage_cursor, hashed_address); @@ -106,7 +109,7 @@ fn cursor_rootnode_with_changesets() { let hashed_address = B256::random(); for (k, v) in nodes { - cursor.upsert(hashed_address, StorageTrieEntry { nibbles: k.into(), node: v }).unwrap(); + cursor.upsert(hashed_address, &StorageTrieEntry { nibbles: k.into(), node: v }).unwrap(); } let mut trie = DatabaseStorageTrieCursor::new(cursor, hashed_address); diff --git a/crates/trie/db/tests/witness.rs b/crates/trie/db/tests/witness.rs index 385f6269f3945..1b760ba2d912f 100644 --- a/crates/trie/db/tests/witness.rs +++ b/crates/trie/db/tests/witness.rs @@ -114,10 +114,10 @@ fn correctly_decodes_branch_node_values() { let mut hashed_storage_cursor = provider.tx_ref().cursor_dup_write::().unwrap(); hashed_storage_cursor - .upsert(hashed_address, StorageEntry { key: hashed_slot1, value: U256::from(1) }) + .upsert(hashed_address, &StorageEntry { key: hashed_slot1, value: U256::from(1) }) .unwrap(); hashed_storage_cursor - .upsert(hashed_address, StorageEntry { key: hashed_slot2, value: U256::from(1) }) + .upsert(hashed_address, &StorageEntry { key: hashed_slot2, value: U256::from(1) }) .unwrap(); let state_root = StateRoot::from_tx(provider.tx_ref()).root().unwrap(); From 35392bd8e931e408ffeb1e58e03f25c045bef200 Mon Sep 17 00:00:00 2001 From: DevOrbitlabs Date: Wed, 8 Jan 2025 02:44:39 +0700 Subject: [PATCH 006/113] chore: make SealedBlock.header field private (#13646) Co-authored-by: Matthias Seitz --- bin/reth-bench/src/bench/new_payload_fcu.rs | 2 +- crates/blockchain-tree/src/blockchain_tree.rs | 27 ++++---- crates/blockchain-tree/src/shareable.rs | 4 +- crates/blockchain-tree/src/state.rs | 34 +++++----- crates/chain-state/src/in_memory.rs | 14 ++--- crates/consensus/beacon/src/engine/mod.rs | 12 ++-- crates/engine/tree/src/tree/mod.rs | 17 ++--- crates/ethereum/payload/src/lib.rs | 2 +- crates/evm/execution-types/src/chain.rs | 6 +- crates/exex/exex/src/manager.rs | 32 +++++----- crates/net/downloaders/src/bodies/bodies.rs | 2 +- crates/net/downloaders/src/bodies/request.rs | 2 +- crates/net/downloaders/src/test_utils/mod.rs | 2 +- crates/net/p2p/src/bodies/response.rs | 3 +- crates/node/events/src/node.rs | 12 ++-- crates/optimism/payload/src/builder.rs | 2 +- crates/primitives/src/block.rs | 12 +++- crates/rpc/rpc-eth-api/src/helpers/call.rs | 5 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 4 +- crates/rpc/rpc/src/trace.rs | 10 ++- crates/rpc/rpc/src/validation.rs | 15 +++-- crates/stages/stages/benches/setup/mod.rs | 4 +- crates/stages/stages/src/stages/bodies.rs | 2 +- .../stages/src/stages/hashing_storage.rs | 6 +- .../stages/stages/src/test_utils/test_db.rs | 2 +- .../static-file/src/static_file_producer.rs | 2 +- .../src/providers/blockchain_provider.rs | 62 ++++++++++--------- .../provider/src/providers/consistent.rs | 12 ++-- .../src/providers/database/provider.rs | 19 +++--- .../storage/provider/src/test_utils/blocks.rs | 2 +- 30 files changed, 169 insertions(+), 161 deletions(-) diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index e3d388b37ecee..2866cf8fb45c0 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ 
b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -75,7 +75,7 @@ impl Command { while let Some((block, head, safe, finalized)) = receiver.recv().await { // just put gas used here let gas_used = block.gas_used; - let block_number = block.header.number; + let block_number = block.number; let versioned_hashes: Vec = block.body().blob_versioned_hashes_iter().copied().collect(); diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index de7169b1d139c..465f779e60bc6 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1199,7 +1199,8 @@ where durations_recorder.record_relative(MakeCanonicalAction::ClearTrieUpdatesForOtherChildren); // Send notification about new canonical chain and return outcome of canonicalization. - let outcome = CanonicalOutcome::Committed { head: chain_notification.tip().header.clone() }; + let outcome = + CanonicalOutcome::Committed { head: chain_notification.tip().sealed_header().clone() }; let _ = self.canon_state_notification_sender.send(chain_notification); Ok(outcome) } @@ -1434,8 +1435,8 @@ mod tests { ) { // insert genesis to db. - genesis.header.set_block_number(10); - genesis.header.set_state_root(EMPTY_ROOT_HASH); + genesis.set_block_number(10); + genesis.set_state_root(EMPTY_ROOT_HASH); let provider = factory.provider_rw().unwrap(); provider @@ -1669,7 +1670,7 @@ mod tests { assert_eq!( tree.make_canonical(fork_block.hash()).unwrap(), - CanonicalOutcome::Committed { head: fork_block.header.clone() } + CanonicalOutcome::Committed { head: fork_block.sealed_header().clone() } ); assert_eq!( @@ -1679,7 +1680,7 @@ mod tests { assert_eq!( tree.make_canonical(canonical_block_1.hash()).unwrap(), - CanonicalOutcome::Committed { head: canonical_block_1.header.clone() } + CanonicalOutcome::Committed { head: canonical_block_1.sealed_header().clone() } ); assert_eq!( @@ -1694,12 +1695,12 @@ mod tests { assert_eq!( tree.make_canonical(sidechain_block_1.hash()).unwrap(), - CanonicalOutcome::Committed { head: sidechain_block_1.header.clone() } + CanonicalOutcome::Committed { head: sidechain_block_1.sealed_header().clone() } ); assert_eq!( tree.make_canonical(canonical_block_1.hash()).unwrap(), - CanonicalOutcome::Committed { head: canonical_block_1.header.clone() } + CanonicalOutcome::Committed { head: canonical_block_1.sealed_header().clone() } ); assert_eq!( @@ -1709,7 +1710,7 @@ mod tests { assert_eq!( tree.make_canonical(sidechain_block_2.hash()).unwrap(), - CanonicalOutcome::Committed { head: sidechain_block_2.header.clone() } + CanonicalOutcome::Committed { head: sidechain_block_2.sealed_header().clone() } ); assert_eq!( @@ -1719,7 +1720,7 @@ mod tests { assert_eq!( tree.make_canonical(canonical_block_3.hash()).unwrap(), - CanonicalOutcome::Committed { head: canonical_block_3.header.clone() } + CanonicalOutcome::Committed { head: canonical_block_3.sealed_header().clone() } ); } @@ -1841,7 +1842,7 @@ mod tests { assert_eq!( tree.make_canonical(block2.hash()).unwrap(), - CanonicalOutcome::Committed { head: block2.header.clone() } + CanonicalOutcome::Committed { head: block2.sealed_header().clone() } ); assert_eq!( @@ -1854,7 +1855,7 @@ mod tests { assert_eq!( tree.make_canonical(block3.hash()).unwrap(), - CanonicalOutcome::Committed { head: block3.header.clone() } + CanonicalOutcome::Committed { head: block3.sealed_header().clone() } ); assert_eq!( @@ -1876,7 +1877,7 @@ mod tests { assert_eq!( tree.make_canonical(block5.hash()).unwrap(), - 
CanonicalOutcome::Committed { head: block5.header.clone() } + CanonicalOutcome::Committed { head: block5.sealed_header().clone() } ); let provider = tree.externals.provider_factory.provider().unwrap(); @@ -2337,7 +2338,7 @@ mod tests { assert_eq!( tree.insert_block(block2b.clone(), BlockValidationKind::Exhaustive).unwrap(), InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head: block2.header.num_hash(), + head: block2.num_hash(), missing_ancestor: block2b.parent_num_hash() }) ); diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index e668f4e2dac02..6cb36cfab7cc2 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -114,7 +114,7 @@ where { fn header_by_hash(&self, hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); - self.tree.read().sidechain_block_by_hash(hash).map(|b| b.header.clone()) + self.tree.read().sidechain_block_by_hash(hash).map(|b| b.sealed_header().clone()) } fn block_by_hash(&self, block_hash: BlockHash) -> Option { @@ -128,7 +128,7 @@ where } fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option { - self.tree.read().get_buffered_block(&block_hash).map(|b| b.header.clone()) + self.tree.read().get_buffered_block(&block_hash).map(|b| b.sealed_header().clone()) } fn is_canonical(&self, hash: BlockHash) -> Result { diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs index 2d01293e20f89..762ced6bf486b 100644 --- a/crates/blockchain-tree/src/state.rs +++ b/crates/blockchain-tree/src/state.rs @@ -191,10 +191,10 @@ mod tests { let mut block1 = block.clone(); let mut block2 = block; - block1.block.header.set_hash(block1_hash); - block1.block.header.set_block_number(9); - block2.block.header.set_hash(block2_hash); - block2.block.header.set_block_number(10); + block1.block.set_hash(block1_hash); + block1.block.set_block_number(9); + block2.block.set_hash(block2_hash); + block2.block.set_block_number(10); let chain = AppendableChain::new(Chain::new( [block1, block2], @@ -257,10 +257,10 @@ mod tests { let mut block1: SealedBlockWithSenders = Default::default(); let mut block2: SealedBlockWithSenders = Default::default(); - block1.block.header.set_hash(block1_hash); - block1.block.header.set_block_number(9); - block2.block.header.set_hash(block2_hash); - block2.block.header.set_block_number(10); + block1.block.set_hash(block1_hash); + block1.block.set_block_number(9); + block2.block.set_hash(block2_hash); + block2.block.set_block_number(10); // Create an chain with these blocks let chain = AppendableChain::new(Chain::new( @@ -299,10 +299,10 @@ mod tests { let mut block1: SealedBlockWithSenders = Default::default(); let mut block2: SealedBlockWithSenders = Default::default(); - block1.block.header.set_hash(block1_hash); - block1.block.header.set_block_number(9); - block2.block.header.set_hash(block2_hash); - block2.block.header.set_block_number(10); + block1.block.set_hash(block1_hash); + block1.block.set_block_number(9); + block2.block.set_hash(block2_hash); + block2.block.set_block_number(10); // Create a chain with these blocks let chain = AppendableChain::new(Chain::new( @@ -337,7 +337,7 @@ mod tests { // Create a block with a random hash and add it to the buffer let block_hash = B256::random(); let mut block: SealedBlockWithSenders = Default::default(); - block.block.header.set_hash(block_hash); + block.block.set_hash(block_hash); // Add the block to the buffered blocks in the TreeState 
tree_state.buffered_blocks.insert_block(block.clone()); @@ -366,9 +366,9 @@ mod tests { let mut ancestor_block: SealedBlockWithSenders = Default::default(); let mut descendant_block: SealedBlockWithSenders = Default::default(); - ancestor_block.block.header.set_hash(ancestor_hash); - descendant_block.block.header.set_hash(descendant_hash); - descendant_block.block.header.set_parent_hash(ancestor_hash); + ancestor_block.block.set_hash(ancestor_hash); + descendant_block.block.set_hash(descendant_hash); + descendant_block.block.set_parent_hash(ancestor_hash); // Insert the blocks into the buffer tree_state.buffered_blocks.insert_block(ancestor_block.clone()); @@ -398,7 +398,7 @@ mod tests { let receipt2 = Receipt::default(); let mut block: SealedBlockWithSenders = Default::default(); - block.block.header.set_hash(block_hash); + block.block.set_hash(block_hash); let receipts = vec![receipt1, receipt2]; diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 06d228a8f82ee..49c20bacdc2ba 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -18,7 +18,7 @@ use reth_primitives::{ use reth_primitives_traits::{Block, BlockBody as _, SignedTransaction}; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdates, HashedPostState}; -use std::{collections::BTreeMap, ops::Deref, sync::Arc, time::Instant}; +use std::{collections::BTreeMap, sync::Arc, time::Instant}; use tokio::sync::{broadcast, watch}; /// Size of the broadcast channel used to notify canonical state events. @@ -181,9 +181,9 @@ impl CanonicalInMemoryState { safe: Option>, ) -> Self { let in_memory_state = InMemoryState::new(blocks, numbers, pending); - let header = in_memory_state - .head_state() - .map_or_else(SealedHeader::default, |state| state.block_ref().block().deref().clone()); + let header = in_memory_state.head_state().map_or_else(SealedHeader::default, |state| { + state.block_ref().block().sealed_header().clone() + }); let chain_info_tracker = ChainInfoTracker::new(header, finalized, safe); let (canon_state_notification_sender, _) = broadcast::channel(CANON_STATE_NOTIFICATION_CHANNEL_SIZE); @@ -229,7 +229,7 @@ impl CanonicalInMemoryState { /// Returns the header corresponding to the given hash. pub fn header_by_hash(&self, hash: B256) -> Option> { - self.state_by_hash(hash).map(|block| block.block_ref().block.header.clone()) + self.state_by_hash(hash).map(|block| block.block_ref().block.sealed_header().clone()) } /// Clears all entries in the in memory state. @@ -462,7 +462,7 @@ impl CanonicalInMemoryState { /// Returns the `SealedHeader` corresponding to the pending state. pub fn pending_sealed_header(&self) -> Option> { - self.pending_state().map(|h| h.block_ref().block().deref().clone()) + self.pending_state().map(|h| h.block_ref().block().sealed_header().clone()) } /// Returns the `Header` corresponding to the pending state. 
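The test updates above stop reaching through `block.header` and instead call `set_hash`/`set_block_number` on the block itself; the `#[deref]`/`#[deref_mut]` attributes on the now-private header field (visible in the block.rs hunk later in this patch) make that delegation work. A hand-rolled sketch of the same mechanism, using simplified stand-in types:

use std::ops::{Deref, DerefMut};

struct SealedHeader {
    hash: [u8; 32],
    number: u64,
}

impl SealedHeader {
    fn set_hash(&mut self, hash: [u8; 32]) { self.hash = hash; }
    fn set_block_number(&mut self, number: u64) { self.number = number; }
}

struct SealedBlock {
    header: SealedHeader, // private; reached through Deref/DerefMut below
}

impl Deref for SealedBlock {
    type Target = SealedHeader;
    fn deref(&self) -> &SealedHeader { &self.header }
}

impl DerefMut for SealedBlock {
    fn deref_mut(&mut self) -> &mut SealedHeader { &mut self.header }
}

fn example(block: &mut SealedBlock) {
    // Thanks to DerefMut, the header setters are callable on the block itself,
    // so tests no longer need to touch the private `header` field.
    block.set_hash([0x01; 32]);
    block.set_block_number(10);
}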
@@ -1321,7 +1321,7 @@ mod tests { assert_eq!(state.pending_header().unwrap(), block2.block().header().clone()); // Check the pending sealed header - assert_eq!(state.pending_sealed_header().unwrap(), block2.block().header.clone()); + assert_eq!(state.pending_sealed_header().unwrap(), block2.block().sealed_header().clone()); // Check the pending block with senders assert_eq!( diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 2dc139acedb40..9c1adb2bd7838 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -1670,7 +1670,7 @@ where self.latest_valid_hash_for_invalid_payload(block.parent_hash)? }; // keep track of the invalid header - self.invalid_headers.insert(block.header.block_with_parent()); + self.invalid_headers.insert(block.sealed_header().block_with_parent()); PayloadStatus::new( PayloadStatusEnum::Invalid { validation_error: error.to_string() }, latest_valid_hash, @@ -1779,7 +1779,7 @@ where let (block, err) = err.split(); warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash(), %err, "Marking block as invalid"); - self.invalid_headers.insert(block.header.block_with_parent()); + self.invalid_headers.insert(block.sealed_header().block_with_parent()); } } } @@ -2485,7 +2485,7 @@ mod tests { ..Default::default() }, ); - block1.header.set_difficulty(U256::from(1)); + block1.set_difficulty(U256::from(1)); // a second pre-merge block let mut block2 = random_block( @@ -2497,7 +2497,7 @@ mod tests { ..Default::default() }, ); - block2.header.set_difficulty(U256::from(1)); + block2.set_difficulty(U256::from(1)); // a transition block let mut block3 = random_block( @@ -2509,7 +2509,7 @@ mod tests { ..Default::default() }, ); - block3.header.set_difficulty(U256::from(1)); + block3.set_difficulty(U256::from(1)); let (_static_dir, static_dir_path) = create_test_static_files_dir(); insert_blocks( @@ -2883,7 +2883,7 @@ mod tests { async fn payload_pre_merge() { let data = BlockchainTestData::default(); let mut block1 = data.blocks[0].0.block.clone(); - block1.header.set_difficulty( + block1.set_difficulty( MAINNET.fork(EthereumHardfork::Paris).ttd().unwrap() - U256::from(1), ); block1 = block1.unseal::().seal_slow(); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 27e402bb44a93..00531da7f7d21 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -936,7 +936,7 @@ where while current_canonical_number > current_number { if let Some(block) = self.executed_block_by_hash(old_hash)? { old_chain.push(block.clone()); - old_hash = block.block.header.parent_hash(); + old_hash = block.block.parent_hash(); current_canonical_number -= 1; } else { // This shouldn't happen as we're walking back the canonical chain @@ -952,7 +952,7 @@ where // a common ancestor (fork block) is reached. while old_hash != current_hash { if let Some(block) = self.executed_block_by_hash(old_hash)? { - old_hash = block.block.header.parent_hash(); + old_hash = block.block.parent_hash(); old_chain.push(block); } else { // This shouldn't happen as we're walking back the canonical chain @@ -1082,7 +1082,7 @@ where // 2. ensure we can apply a new chain update for the head block if let Some(chain_update) = self.on_new_head(state.head_block_hash)? 
{ - let tip = chain_update.tip().header.clone(); + let tip = chain_update.tip().sealed_header().clone(); self.on_canonical_chain_update(chain_update); // update the safe and finalized blocks and ensure their values are valid @@ -1617,8 +1617,11 @@ where hash: B256, ) -> ProviderResult>> { // check memory first - let block = - self.state.tree_state.block_by_hash(hash).map(|block| block.as_ref().clone().header); + let block = self + .state + .tree_state + .block_by_hash(hash) + .map(|block| block.as_ref().sealed_header().clone()); if block.is_some() { Ok(block) @@ -2031,7 +2034,7 @@ where // update the tracked canonical head self.state.tree_state.set_canonical_head(chain_update.tip().num_hash()); - let tip = chain_update.tip().header.clone(); + let tip = chain_update.tip().sealed_header().clone(); let notification = chain_update.to_chain_notification(); // reinsert any missing reorged blocks @@ -2543,7 +2546,7 @@ where }; // keep track of the invalid header - self.state.invalid_headers.insert(block.header.block_with_parent()); + self.state.invalid_headers.insert(block.block_with_parent()); Ok(PayloadStatus::new( PayloadStatusEnum::Invalid { validation_error: validation_err.to_string() }, latest_valid_hash, diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 66ca1662b97e3..a4a02c3ef768f 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -490,7 +490,7 @@ where }; let sealed_block = Arc::new(block.seal_slow()); - debug!(target: "payload_builder", id=%attributes.id, sealed_block_header = ?sealed_block.header, "sealed built block"); + debug!(target: "payload_builder", id=%attributes.id, sealed_block_header = ?sealed_block.sealed_header(), "sealed built block"); // create the executed block data let executed = ExecutedBlock { diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 91badacc187c0..7e6ba2046043a 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -91,7 +91,7 @@ impl Chain { /// Returns an iterator over all headers in the block with increasing block numbers. pub fn headers(&self) -> impl Iterator> + '_ { - self.blocks.values().map(|block| block.header.clone()) + self.blocks.values().map(|block| block.sealed_header().clone()) } /// Get cached trie updates for this chain. 
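With the `header` field private, read paths go through accessors instead. A sketch of the resulting call shapes; `SealedBlock` and `SealedHeader` here are simplified stand-ins, and only the accessor names mirror the patch:

#[derive(Clone, Debug)]
struct SealedHeader {
    number: u64,
    hash: [u8; 32],
}

struct SealedBlock {
    header: SealedHeader,
    body: Vec<u8>,
}

impl SealedBlock {
    /// Borrow the sealed header (replaces reading the public field).
    fn sealed_header(&self) -> &SealedHeader { &self.header }
    /// Consume the block, keeping only the sealed header (replaces moving the field out).
    fn into_sealed_header(self) -> SealedHeader { self.header }
}

fn collect_headers(blocks: &[SealedBlock]) -> Vec<SealedHeader> {
    // `Chain::headers`-style iteration: clone through the accessor
    // instead of `block.header.clone()`.
    blocks.iter().map(|b| b.sealed_header().clone()).collect()
}

This is the shape behind the repeated `block.header.clone()` to `block.sealed_header().clone()` rewrites throughout this patch.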
@@ -858,8 +858,8 @@ mod tests { let mut block2 = block; // Set the hashes of block1 and block2 - block1.block.header.set_hash(block1_hash); - block2.block.header.set_hash(block2_hash); + block1.block.set_hash(block1_hash); + block2.block.set_hash(block2_hash); // Create a random receipt object, receipt1 let receipt1 = Receipt { diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index 16a9305261411..32944bd2805f2 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -767,8 +767,8 @@ mod tests { // Define the notification for testing let mut block1: SealedBlockWithSenders = Default::default(); - block1.block.header.set_hash(B256::new([0x01; 32])); - block1.block.header.set_block_number(10); + block1.block.set_hash(B256::new([0x01; 32])); + block1.block.set_block_number(10); let notification1 = ExExNotification::ChainCommitted { new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())), @@ -785,8 +785,8 @@ mod tests { // Push another notification let mut block2: SealedBlockWithSenders = Default::default(); - block2.block.header.set_hash(B256::new([0x02; 32])); - block2.block.header.set_block_number(20); + block2.block.set_hash(B256::new([0x02; 32])); + block2.block.set_block_number(20); let notification2 = ExExNotification::ChainCommitted { new: Arc::new(Chain::new(vec![block2.clone()], Default::default(), Default::default())), @@ -828,8 +828,8 @@ mod tests { // Push some notifications to fill part of the buffer let mut block1: SealedBlockWithSenders = Default::default(); - block1.block.header.set_hash(B256::new([0x01; 32])); - block1.block.header.set_block_number(10); + block1.set_hash(B256::new([0x01; 32])); + block1.set_block_number(10); let notification1 = ExExNotification::ChainCommitted { new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())), @@ -1117,12 +1117,12 @@ mod tests { // Setup two blocks for the chain commit notification let mut block1: SealedBlockWithSenders = Default::default(); - block1.block.header.set_hash(B256::new([0x01; 32])); - block1.block.header.set_block_number(10); + block1.block.set_hash(B256::new([0x01; 32])); + block1.block.set_block_number(10); let mut block2: SealedBlockWithSenders = Default::default(); - block2.block.header.set_hash(B256::new([0x02; 32])); - block2.block.header.set_block_number(11); + block2.block.set_hash(B256::new([0x02; 32])); + block2.block.set_block_number(11); // Setup a notification let notification = ExExNotification::ChainCommitted { @@ -1170,8 +1170,8 @@ mod tests { exex_handle.finished_height = Some(BlockNumHash::new(15, B256::random())); let mut block1: SealedBlockWithSenders = Default::default(); - block1.block.header.set_hash(B256::new([0x01; 32])); - block1.block.header.set_block_number(10); + block1.block.set_hash(B256::new([0x01; 32])); + block1.block.set_block_number(10); let notification = ExExNotification::ChainCommitted { new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())), @@ -1327,7 +1327,7 @@ mod tests { }; let (finalized_headers_tx, rx) = watch::channel(None); - finalized_headers_tx.send(Some(genesis_block.header.clone()))?; + finalized_headers_tx.send(Some(genesis_block.sealed_header().clone()))?; let finalized_header_stream = ForkChoiceStream::new(rx); let mut exex_manager = std::pin::pin!(ExExManager::new( @@ -1361,7 +1361,7 @@ mod tests { [notification.clone()] ); - finalized_headers_tx.send(Some(block.header.clone()))?; + 
finalized_headers_tx.send(Some(block.sealed_header().clone()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx didn't emit the `FinishedHeight` event assert_eq!( @@ -1374,7 +1374,7 @@ mod tests { .send(ExExEvent::FinishedHeight((rng.gen::(), rng.gen::()).into())) .unwrap(); - finalized_headers_tx.send(Some(block.header.clone()))?; + finalized_headers_tx.send(Some(block.sealed_header().clone()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx emitted a `FinishedHeight` event with a // non-canonical block @@ -1386,7 +1386,7 @@ mod tests { // Send a `FinishedHeight` event with a canonical block events_tx.send(ExExEvent::FinishedHeight(block.num_hash())).unwrap(); - finalized_headers_tx.send(Some(block.header.clone()))?; + finalized_headers_tx.send(Some(block.sealed_header().clone()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL is finalized assert_eq!(exex_manager.wal.iter_notifications()?.next().transpose()?, None); diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 454f6bffc562e..a7be903f23687 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -677,7 +677,7 @@ mod tests { BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..2, ..Default::default() }, ); - let headers = blocks.iter().map(|block| block.header.clone()).collect::>(); + let headers = blocks.iter().map(|block| block.sealed_header().clone()).collect::>(); let bodies = blocks .into_iter() .map(|block| (block.hash(), block.into_body())) diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index 79b76f2dbf58d..f8c93a2a78e33 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -194,7 +194,7 @@ where // Body is invalid, put the header back and return an error let hash = block.hash(); let number = block.number(); - self.pending_headers.push_front(block.header); + self.pending_headers.push_front(block.into_sealed_header()); return Err(DownloadError::BodyValidation { hash, number, diff --git a/crates/net/downloaders/src/test_utils/mod.rs b/crates/net/downloaders/src/test_utils/mod.rs index 0529b78a2b20e..2e8d5365c0dbe 100644 --- a/crates/net/downloaders/src/test_utils/mod.rs +++ b/crates/net/downloaders/src/test_utils/mod.rs @@ -28,7 +28,7 @@ pub(crate) fn generate_bodies( BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..2, ..Default::default() }, ); - let headers = blocks.iter().map(|block| block.header.clone()).collect(); + let headers = blocks.iter().map(|block| block.sealed_header().clone()).collect(); let bodies = blocks.into_iter().map(|block| (block.hash(), block.into_body())).collect(); (headers, bodies) diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs index 956057d98bffd..517c5b879835a 100644 --- a/crates/net/p2p/src/bodies/response.rs +++ b/crates/net/p2p/src/bodies/response.rs @@ -2,7 +2,6 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, U256}; use reth_primitives::{BlockBody, SealedBlock, SealedHeader}; use reth_primitives_traits::InMemorySize; - /// The block response #[derive(PartialEq, Eq, Debug, Clone)] pub enum BlockResponse { @@ -19,7 +18,7 @@ where /// Return the reference to the response header pub const fn header(&self) -> &SealedHeader { match self { - Self::Full(block) => &block.header, 
+            Self::Full(block) => block.sealed_header(),
             Self::Empty(header) => header,
         }
     }
diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs
index 129fe20ea7852..4b38b4050bb40 100644
--- a/crates/node/events/src/node.rs
+++ b/crates/node/events/src/node.rs
@@ -255,12 +255,12 @@ impl NodeState {
                     hash=?block.hash(),
                     peers=self.num_connected_peers(),
                     txs=block.body().transactions().len(),
-                    gas=%format_gas(block.header.gas_used()),
-                    gas_throughput=%format_gas_throughput(block.header.gas_used(), elapsed),
-                    full=%format!("{:.1}%", block.header.gas_used() as f64 * 100.0 / block.header.gas_limit() as f64),
-                    base_fee=%format!("{:.2}gwei", block.header.base_fee_per_gas().unwrap_or(0) as f64 / GWEI_TO_WEI as f64),
-                    blobs=block.header.blob_gas_used().unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB,
-                    excess_blobs=block.header.excess_blob_gas().unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB,
+                    gas=%format_gas(block.gas_used()),
+                    gas_throughput=%format_gas_throughput(block.gas_used(), elapsed),
+                    full=%format!("{:.1}%", block.gas_used() as f64 * 100.0 / block.gas_limit() as f64),
+                    base_fee=%format!("{:.2}gwei", block.base_fee_per_gas().unwrap_or(0) as f64 / GWEI_TO_WEI as f64),
+                    blobs=block.blob_gas_used().unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB,
+                    excess_blobs=block.excess_blob_gas().unwrap_or(0) / alloy_eips::eip4844::DATA_GAS_PER_BLOB,
                     ?elapsed,
                     "Block added to canonical chain"
                 );
diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs
index b87b01b37b2b2..7bf3f8015b711 100644
--- a/crates/optimism/payload/src/builder.rs
+++ b/crates/optimism/payload/src/builder.rs
@@ -434,7 +434,7 @@ where
         };

         let sealed_block = Arc::new(block.seal_slow());
-        debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header, "sealed built block");
+        debug!(target: "payload_builder", id=%ctx.attributes().payload_id(), sealed_block_header = ?sealed_block.header(), "sealed built block");

         // create the executed block data
         let executed: ExecutedBlock = ExecutedBlock {
diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs
index 3cff1646e4358..bcf5ad71abed5 100644
--- a/crates/primitives/src/block.rs
+++ b/crates/primitives/src/block.rs
@@ -167,7 +167,7 @@ pub struct SealedBlock<H, B> {
     /// Locked block header.
     #[deref]
     #[deref_mut]
-    pub header: SealedHeader<H>,
+    header: SealedHeader<H>,
     /// Block body.
     body: B,
 }
@@ -190,6 +190,16 @@ impl<H, B> SealedBlock<H, B> {
         &self.body
     }

+    /// Returns the sealed header.
+    pub const fn sealed_header(&self) -> &SealedHeader<H> {
+        &self.header
+    }
+
+    /// Consumes the block and returns the sealed header.
+    pub fn into_sealed_header(self) -> SealedHeader<H> {
+        self.header
+    }
+
     /// Consumes the block and returns the header.
pub fn into_header(self) -> H { self.header.unseal() diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 2aecb500dd5e9..91bedbeb532ea 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -126,10 +126,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA base_fee_params, ) } else { - base_block - .header - .next_block_base_fee(base_fee_params) - .unwrap_or_default() + base_block.next_block_base_fee(base_fee_params).unwrap_or_default() }; block_env.basefee = U256::from(base_fee); } else { diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 66c6b5a27a92f..bb4c9c5ebf58d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -496,12 +496,12 @@ pub trait Trace: db, cfg, block_env, - block.header.parent_beacon_block_root(), + block.parent_beacon_block_root(), ) .map_err(|_| EthApiError::EvmCustom("failed to apply 4788 system call".to_string()))?; system_caller - .pre_block_blockhashes_contract_call(db, cfg, block_env, block.header.parent_hash()) + .pre_block_blockhashes_contract_call(db, cfg, block_env, block.parent_hash()) .map_err(|_| { EthApiError::EvmCustom("failed to apply blockhashes system call".to_string()) })?; diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 33e32aec02815..d906419021b90 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -314,7 +314,7 @@ where if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? { all_traces.extend( self.extract_reward_traces( - block.header.header(), + block.header(), block.body().ommers(), base_block_reward, ) @@ -391,11 +391,9 @@ where maybe_traces.map(|traces| traces.into_iter().flatten().collect::>()); if let (Some(block), Some(traces)) = (maybe_block, maybe_traces.as_mut()) { - if let Some(base_block_reward) = - self.calculate_base_block_reward(block.header.header())? - { + if let Some(base_block_reward) = self.calculate_base_block_reward(block.header())? 
{ traces.extend(self.extract_reward_traces( - block.block.header(), + block.header(), block.body().ommers(), base_block_reward, )); @@ -490,7 +488,7 @@ where Ok(Some(BlockOpcodeGas { block_hash: block.hash(), - block_number: block.header.number(), + block_number: block.number(), transactions, })) } diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 3e65db5a2c963..1c40004f8bf39 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -102,10 +102,10 @@ where message: BidTrace, registered_gas_limit: u64, ) -> Result<(), ValidationApiError> { - self.validate_message_against_header(&block.header, &message)?; + self.validate_message_against_header(block.sealed_header(), &message)?; - self.consensus.validate_header_with_total_difficulty(&block.header, U256::MAX)?; - self.consensus.validate_header(&block.header)?; + self.consensus.validate_header_with_total_difficulty(block.sealed_header(), U256::MAX)?; + self.consensus.validate_header(block.sealed_header())?; self.consensus.validate_block_pre_execution(&block)?; if !self.disallow.is_empty() { @@ -130,15 +130,14 @@ where let latest_header = self.provider.latest_header()?.ok_or_else(|| ValidationApiError::MissingLatestBlock)?; - if latest_header.hash() != block.header.parent_hash() { + if latest_header.hash() != block.parent_hash() { return Err(ConsensusError::ParentHashMismatch( - GotExpected { got: block.header.parent_hash(), expected: latest_header.hash() } - .into(), + GotExpected { got: block.parent_hash(), expected: latest_header.hash() }.into(), ) .into()) } - self.consensus.validate_header_against_parent(&block.header, &latest_header)?; - self.validate_gas_limit(registered_gas_limit, &latest_header, &block.header)?; + self.consensus.validate_header_against_parent(block.sealed_header(), &latest_header)?; + self.validate_gas_limit(registered_gas_limit, &latest_header, block.sealed_header())?; let latest_header_hash = latest_header.hash(); let state_provider = self.provider.state_by_block_hash(latest_header_hash)?; diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index 2c1174d632926..d6bf4414450f9 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -151,7 +151,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { .unwrap(); let second_block = blocks.get_mut(1).unwrap(); let cloned_second = second_block.clone(); - let mut updated_header = cloned_second.header.clone().unseal(); + let mut updated_header = cloned_second.header().clone(); updated_header.state_root = root; *second_block = SealedBlock::new(SealedHeader::seal(updated_header), cloned_second.into_body()); @@ -185,7 +185,7 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let last_block = blocks.last_mut().unwrap(); let cloned_last = last_block.clone(); - let mut updated_header = cloned_last.header.clone().unseal(); + let mut updated_header = cloned_last.header().clone(); updated_header.state_root = root; *last_block = SealedBlock::new(SealedHeader::seal(updated_header), cloned_last.into_body()); diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index b17ad3562a09c..51941183953e8 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -582,7 +582,7 @@ mod tests { ..Default::default() }, ); - self.db.insert_headers_with_td(blocks.iter().map(|block| &block.header))?; + 
self.db.insert_headers_with_td(blocks.iter().map(|block| block.sealed_header()))?; if let Some(progress) = blocks.get(start as usize) { // Insert last progress data { diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs index 4c9788d42e036..c9b959e2595d3 100644 --- a/crates/stages/stages/src/stages/hashing_storage.rs +++ b/crates/stages/stages/src/stages/hashing_storage.rs @@ -344,7 +344,7 @@ mod tests { BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..3, ..Default::default() }, ); - self.db.insert_headers(blocks.iter().map(|block| &block.header))?; + self.db.insert_headers(blocks.iter().map(|block| block.sealed_header()))?; let iter = blocks.iter(); let mut next_tx_num = 0; @@ -373,7 +373,7 @@ mod tests { tx, (block_number, *addr).into(), new_entry, - progress.header.number == stage_progress, + progress.number == stage_progress, )?; } @@ -392,7 +392,7 @@ mod tests { key: keccak256("mining"), value: U256::from(rng.gen::()), }, - progress.header.number == stage_progress, + progress.number == stage_progress, )?; } diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index c46757adfb246..59ba08df8aa0a 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -235,7 +235,7 @@ impl TestStageDB { .then(|| provider.latest_writer(StaticFileSegment::Headers).unwrap()); blocks.iter().try_for_each(|block| { - Self::insert_header(headers_writer.as_mut(), &tx, &block.header, U256::ZERO) + Self::insert_header(headers_writer.as_mut(), &tx, block.sealed_header(), U256::ZERO) })?; if let Some(mut writer) = headers_writer { diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index 7653d0d5af3d5..fcbbb9e3b0a39 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -292,7 +292,7 @@ mod tests { let tx = db.factory.db_ref().tx_mut().expect("init tx"); for block in &blocks { - TestStageDB::insert_header(None, &tx, &block.header, U256::ZERO) + TestStageDB::insert_header(None, &tx, block.sealed_header(), U256::ZERO) .expect("insert block header"); } tx.commit().expect("commit tx"); diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index a868afb515140..818e224d6f870 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -810,7 +810,7 @@ mod tests { }; use revm::db::BundleState; use std::{ - ops::{Bound, Range, RangeBounds}, + ops::{Bound, Deref, Range, RangeBounds}, sync::Arc, time::Instant, }; @@ -948,9 +948,9 @@ mod tests { let finalized_block = blocks.get(block_count - 3).unwrap(); // Set the canonical head, safe, and finalized blocks - provider.set_canonical_head(canonical_block.header.clone()); - provider.set_safe(safe_block.header.clone()); - provider.set_finalized(finalized_block.header.clone()); + provider.set_canonical_head(canonical_block.sealed_header().clone()); + provider.set_safe(safe_block.sealed_header().clone()); + provider.set_finalized(finalized_block.sealed_header().clone()); Ok((provider, database_blocks.clone(), in_memory_blocks.clone(), receipts)) } @@ -1357,7 +1357,7 @@ mod tests { let in_memory_block = in_memory_blocks.last().unwrap().clone(); // make sure that the 
finalized block is on db let finalized_block = database_blocks.get(database_blocks.len() - 3).unwrap(); - provider.set_finalized(finalized_block.header.clone()); + provider.set_finalized(finalized_block.sealed_header().clone()); let blocks = [database_blocks, in_memory_blocks].concat(); @@ -1376,7 +1376,7 @@ mod tests { blocks .iter() .take_while(|header| header.number <= 8) - .map(|b| b.header.clone()) + .map(|b| b.sealed_header().clone()) .collect::>() ); @@ -1548,38 +1548,38 @@ mod tests { let block_number = database_block.number; assert_eq!( provider.header_by_number_or_tag(block_number.into()).unwrap(), - Some(database_block.header.clone().unseal()) + Some(database_block.header().clone()) ); assert_eq!( - provider.sealed_header_by_number_or_tag(block_number.into()).unwrap(), - Some(database_block.header) + provider.sealed_header_by_number_or_tag(block_number.into())?, + Some(database_block.sealed_header().clone()) ); assert_eq!( provider.header_by_number_or_tag(BlockNumberOrTag::Latest).unwrap(), - Some(canonical_block.header.clone().unseal()) + Some(canonical_block.header().clone()) ); assert_eq!( provider.sealed_header_by_number_or_tag(BlockNumberOrTag::Latest).unwrap(), - Some(canonical_block.header) + Some(canonical_block.sealed_header().clone()) ); assert_eq!( provider.header_by_number_or_tag(BlockNumberOrTag::Safe).unwrap(), - Some(safe_block.header.clone().unseal()) + Some(safe_block.header().clone()) ); assert_eq!( provider.sealed_header_by_number_or_tag(BlockNumberOrTag::Safe).unwrap(), - Some(safe_block.header) + Some(safe_block.sealed_header().clone()) ); assert_eq!( provider.header_by_number_or_tag(BlockNumberOrTag::Finalized).unwrap(), - Some(finalized_block.header.clone().unseal()) + Some(finalized_block.header().clone()) ); assert_eq!( provider.sealed_header_by_number_or_tag(BlockNumberOrTag::Finalized).unwrap(), - Some(finalized_block.header) + Some(finalized_block.sealed_header().clone()) ); Ok(()) @@ -1603,20 +1603,20 @@ mod tests { assert_eq!( provider.header_by_id(block_number.into()).unwrap(), - Some(database_block.header.clone().unseal()) + Some(database_block.header().clone()) ); assert_eq!( provider.sealed_header_by_id(block_number.into()).unwrap(), - Some(database_block.header.clone()) + Some(database_block.sealed_header().clone()) ); assert_eq!( provider.header_by_id(block_hash.into()).unwrap(), - Some(database_block.header.clone().unseal()) + Some(database_block.header().clone()) ); assert_eq!( provider.sealed_header_by_id(block_hash.into()).unwrap(), - Some(database_block.header) + Some(database_block.sealed_header().clone()) ); let block_number = in_memory_block.number; @@ -1624,20 +1624,20 @@ mod tests { assert_eq!( provider.header_by_id(block_number.into()).unwrap(), - Some(in_memory_block.header.clone().unseal()) + Some(in_memory_block.header().clone()) ); assert_eq!( provider.sealed_header_by_id(block_number.into()).unwrap(), - Some(in_memory_block.header.clone()) + Some(in_memory_block.sealed_header().clone()) ); assert_eq!( provider.header_by_id(block_hash.into()).unwrap(), - Some(in_memory_block.header.clone().unseal()) + Some(in_memory_block.header().clone()) ); assert_eq!( provider.sealed_header_by_id(block_hash.into()).unwrap(), - Some(in_memory_block.header) + Some(in_memory_block.sealed_header().clone()) ); Ok(()) @@ -2023,7 +2023,7 @@ mod tests { ); // test state by block tag for safe block let safe_block = in_memory_blocks[in_memory_blocks.len() - 2].clone(); - in_memory_provider.canonical_in_memory_state.set_safe(safe_block.header.clone()); 
+ in_memory_provider.canonical_in_memory_state.set_safe(safe_block.sealed_header().clone()); assert_eq!( safe_block.hash(), in_memory_provider @@ -2033,7 +2033,9 @@ mod tests { ); // test state by block tag for finalized block let finalized_block = in_memory_blocks[in_memory_blocks.len() - 3].clone(); - in_memory_provider.canonical_in_memory_state.set_finalized(finalized_block.header.clone()); + in_memory_provider + .canonical_in_memory_state + .set_finalized(finalized_block.sealed_header().clone()); assert_eq!( finalized_block.hash(), in_memory_provider @@ -2106,11 +2108,11 @@ mod tests { // Set the safe block in memory let safe_block = in_memory_blocks[in_memory_blocks.len() - 2].clone(); - provider.canonical_in_memory_state.set_safe(safe_block.header.clone()); + provider.canonical_in_memory_state.set_safe(safe_block.sealed_header().clone()); // Set the finalized block in memory let finalized_block = in_memory_blocks[in_memory_blocks.len() - 3].clone(); - provider.canonical_in_memory_state.set_finalized(finalized_block.header.clone()); + provider.canonical_in_memory_state.set_finalized(finalized_block.sealed_header().clone()); // Verify the pending block number and hash assert_eq!( @@ -2325,7 +2327,7 @@ mod tests { // instead start end test_by_block_range!([ (headers_range, |block: &SealedBlock| block.header().clone()), - (sealed_headers_range, |block: &SealedBlock| block.header.clone()), + (sealed_headers_range, |block: &SealedBlock| block.sealed_header().clone()), (block_range, |block: &SealedBlock| block.clone().unseal()), (block_with_senders_range, |block: &SealedBlock| block .clone() @@ -2458,7 +2460,7 @@ mod tests { header_by_number, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( block.number, - Some(block.header.header().clone()) + Some(block.header().clone()) ), u64::MAX ), @@ -2467,7 +2469,7 @@ mod tests { sealed_header, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( block.number, - Some(block.header.clone()) + Some(block.sealed_header().clone()) ), u64::MAX ), diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 049fd1b4b1f80..e0022a61d2f04 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -631,7 +631,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( (*block_hash).into(), |db_provider| db_provider.header(block_hash), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + |block_state| Ok(Some(block_state.block_ref().block().header().clone())), ) } @@ -639,7 +639,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( num.into(), |db_provider| db_provider.header_by_number(num), - |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), + |block_state| Ok(Some(block_state.block_ref().block().header().clone())), ) } @@ -681,7 +681,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.header().clone()), + |block_state, _| Some(block_state.block_ref().block().header().clone()), |_| true, ) } @@ -693,7 +693,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( number.into(), |db_provider| db_provider.sealed_header(number), - |block_state| 
Ok(Some(block_state.block_ref().block().header.clone())), + |block_state| Ok(Some(block_state.block_ref().block().sealed_header().clone())), ) } @@ -704,7 +704,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header.clone()), + |block_state, _| Some(block_state.block_ref().block().sealed_header().clone()), |_| true, ) } @@ -718,7 +718,7 @@ impl HeaderProvider for ConsistentProvider { range, |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), |block_state, predicate| { - let header = &block_state.block_ref().block().header; + let header = block_state.block_ref().block().sealed_header(); predicate(header).then(|| header.clone()) }, predicate, diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index c0a98f95e9731..faf35d6416bb9 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -426,12 +426,12 @@ impl< &self, block: SealedBlockWithSenders<::Block>, ) -> ProviderResult { - let ttd = if block.number == 0 { - block.difficulty + let ttd = if block.number() == 0 { + block.difficulty() } else { - let parent_block_number = block.number - 1; + let parent_block_number = block.number() - 1; let parent_ttd = self.header_td_by_number(parent_block_number)?.unwrap_or_default(); - parent_ttd + block.difficulty + parent_ttd + block.difficulty() }; let mut writer = self.static_file_provider.latest_writer(StaticFileSegment::Headers)?; @@ -439,14 +439,14 @@ impl< // Backfill: some tests start at a forward block number, but static files require no gaps. let segment_header = writer.user_header(); if segment_header.block_end().is_none() && segment_header.expected_block_start() == 0 { - for block_number in 0..block.number { - let mut prev = block.header.clone().unseal(); + for block_number in 0..block.number() { + let mut prev = block.sealed_header().clone().unseal(); prev.number = block_number; writer.append_header(&prev, U256::ZERO, &B256::ZERO)?; } } - writer.append_header(block.header.as_ref(), ttd, &block.hash())?; + writer.append_header(block.header(), ttd, &block.hash())?; self.insert_block(block, StorageLocation::Database) } @@ -2769,8 +2769,7 @@ impl BlockWrite durations_recorder.record_relative(metrics::Action::InsertCanonicalHeaders); // Put header with canonical hashes. 
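The backfill hunk above keeps the total-difficulty rule intact behind accessors: a genesis block's TTD is its own difficulty, and every later block adds its difficulty to its parent's TTD. A small runnable sketch of that accumulation, with `u128` standing in for `U256`:

/// Fold per-block difficulties into total difficulties, mirroring the rule in
/// `insert_historical_block`: ttd(0) = d(0), ttd(n) = ttd(n - 1) + d(n).
fn total_difficulties(difficulties: &[u128]) -> Vec<u128> {
    let mut out = Vec::with_capacity(difficulties.len());
    let mut running = 0u128;
    for d in difficulties {
        running += d;
        out.push(running);
    }
    out
}

fn main() {
    // e.g. per-block difficulties [2, 3, 5] yield TTDs [2, 5, 10]
    assert_eq!(total_difficulties(&[2, 3, 5]), vec![2, 5, 10]);
}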
- self.tx - .put::>>(block_number, block.header.as_ref().clone())?; + self.tx.put::>>(block_number, block.header().clone())?; durations_recorder.record_relative(metrics::Action::InsertHeaders); self.tx.put::(block_number, ttd.into())?; @@ -2780,7 +2779,7 @@ impl BlockWrite if write_to.static_files() { let mut writer = self.static_file_provider.get_writer(block_number, StaticFileSegment::Headers)?; - writer.append_header(&block.header, ttd, &block.hash())?; + writer.append_header(block.header(), ttd, &block.hash())?; } self.tx.put::(block.hash(), block_number)?; diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 5ed8b09ee0b84..2f46ef5c1e70b 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -29,7 +29,7 @@ pub fn assert_genesis_block( let tx = provider; // check if all tables are empty - assert_eq!(tx.table::().unwrap(), vec![(g.number, g.header.clone().unseal())]); + assert_eq!(tx.table::().unwrap(), vec![(g.number, g.header().clone())]); assert_eq!(tx.table::().unwrap(), vec![(h, n)]); assert_eq!(tx.table::().unwrap(), vec![(n, h)]); From 7fca8ceb3fe0809fd5583f8e203001332e7d4f24 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 Jan 2025 21:21:05 +0100 Subject: [PATCH 007/113] feat: add standalone rayon recovery functions (#13710) --- crates/primitives-traits/src/block/body.rs | 21 +----- crates/primitives-traits/src/lib.rs | 1 + .../primitives-traits/src/transaction/mod.rs | 1 + .../src/transaction/recover.rs | 68 +++++++++++++++++++ 4 files changed, 72 insertions(+), 19 deletions(-) create mode 100644 crates/primitives-traits/src/transaction/recover.rs diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 58fe3c4b43e97..279e7d45cc8d1 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -13,9 +13,6 @@ pub trait FullBlockBody: BlockBody + MaybeSerdeBincod impl FullBlockBody for T where T: BlockBody + MaybeSerdeBincodeCompat {} -#[cfg(feature = "rayon")] -use rayon::prelude::*; - /// Abstraction for block's body. pub trait BlockBody: Send @@ -115,14 +112,7 @@ pub trait BlockBody: where Self::Transaction: SignedTransaction, { - #[cfg(feature = "rayon")] - { - self.transactions().into_par_iter().map(|tx| tx.recover_signer()).collect() - } - #[cfg(not(feature = "rayon"))] - { - self.transactions().iter().map(|tx| tx.recover_signer()).collect() - } + crate::transaction::recover::recover_signers(self.transactions()) } /// Recover signer addresses for all transactions in the block body _without ensuring that the @@ -133,14 +123,7 @@ pub trait BlockBody: where Self::Transaction: SignedTransaction, { - #[cfg(feature = "rayon")] - { - self.transactions().into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect() - } - #[cfg(not(feature = "rayon"))] - { - self.transactions().iter().map(|tx| tx.recover_signer_unchecked()).collect() - } + crate::transaction::recover::recover_signers_unchecked(self.transactions()) } } diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index 322ed33edae36..c5d9b710c1f0d 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -16,6 +16,7 @@ //! - `serde`: Adds serde support for all types. //! - `secp256k1`: Adds secp256k1 support for transaction signing/recovery. (By default the no-std //! friendly `k256` is used) +//! 
- `rayon`: Uses `rayon` for parallel transaction sender recovery in [`BlockBody`] by default. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", diff --git a/crates/primitives-traits/src/transaction/mod.rs b/crates/primitives-traits/src/transaction/mod.rs index 15b3df7fdb8b7..43fe7899d99b3 100644 --- a/crates/primitives-traits/src/transaction/mod.rs +++ b/crates/primitives-traits/src/transaction/mod.rs @@ -5,6 +5,7 @@ pub mod signature; pub mod signed; pub mod error; +pub mod recover; pub use alloy_consensus::transaction::{TransactionInfo, TransactionMeta}; diff --git a/crates/primitives-traits/src/transaction/recover.rs b/crates/primitives-traits/src/transaction/recover.rs new file mode 100644 index 0000000000000..cad57bc266079 --- /dev/null +++ b/crates/primitives-traits/src/transaction/recover.rs @@ -0,0 +1,68 @@ +//! Helpers for recovering signers from a set of transactions + +#[cfg(feature = "rayon")] +pub use rayon::*; + +#[cfg(not(feature = "rayon"))] +pub use iter::*; + +#[cfg(feature = "rayon")] +mod rayon { + use crate::SignedTransaction; + use alloc::vec::Vec; + use alloy_primitives::Address; + use rayon::prelude::{IntoParallelIterator, ParallelIterator}; + + /// Recovers a list of signers from a transaction list iterator. + /// + /// Returns `None`, if some transaction's signature is invalid + pub fn recover_signers<'a, I, T>(txes: I) -> Option> + where + T: SignedTransaction, + I: IntoParallelIterator + IntoIterator + Send, + { + txes.into_par_iter().map(|tx| tx.recover_signer()).collect() + } + + /// Recovers a list of signers from a transaction list iterator _without ensuring that the + /// signature has a low `s` value_. + /// + /// Returns `None`, if some transaction's signature is invalid. + pub fn recover_signers_unchecked<'a, I, T>(txes: I) -> Option> + where + T: SignedTransaction, + I: IntoParallelIterator + IntoIterator + Send, + { + txes.into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect() + } +} + +#[cfg(not(feature = "rayon"))] +mod iter { + use crate::SignedTransaction; + use alloc::vec::Vec; + use alloy_primitives::Address; + + /// Recovers a list of signers from a transaction list iterator. + /// + /// Returns `None`, if some transaction's signature is invalid + pub fn recover_signers<'a, I, T>(txes: I) -> Option> + where + T: SignedTransaction, + I: IntoIterator + IntoIterator, + { + txes.into_iter().map(|tx| tx.recover_signer()).collect() + } + + /// Recovers a list of signers from a transaction list iterator _without ensuring that the + /// signature has a low `s` value_. + /// + /// Returns `None`, if some transaction's signature is invalid. 
+ pub fn recover_signers_unchecked<'a, I, T>(txes: I) -> Option> + where + T: SignedTransaction, + I: IntoIterator + IntoIterator, + { + txes.into_iter().map(|tx| tx.recover_signer_unchecked()).collect() + } +} From 760062288e11ca0394df0c212286e7c550f6d667 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 Jan 2025 22:00:48 +0100 Subject: [PATCH 008/113] chore: remove rayon from reth primitives (#13711) --- Cargo.lock | 1 - crates/primitives/Cargo.toml | 1 - crates/primitives/src/transaction/mod.rs | 41 ------------------- .../provider/src/providers/static_file/jar.rs | 6 +-- .../src/providers/static_file/manager.rs | 4 +- 5 files changed, 5 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0b398215a84d0..784f629e527e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8660,7 +8660,6 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rayon", "reth-chainspec", "reth-codecs", "reth-ethereum-forks", diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index e7036f1752bc1..6937ec2859abb 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -53,7 +53,6 @@ derive_more.workspace = true modular-bitfield = { workspace = true, optional = true } once_cell.workspace = true rand = { workspace = true, optional = true } -rayon.workspace = true serde.workspace = true serde_with = { workspace = true, optional = true } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 64531d29ad395..6189eb10c20c0 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -26,7 +26,6 @@ use op_alloy_consensus::DepositTransaction; #[cfg(feature = "optimism")] use op_alloy_consensus::TxDeposit; pub use pooled::PooledTransactionsElementEcRecovered; -use rayon::prelude::{IntoParallelIterator, ParallelIterator}; pub use reth_primitives_traits::{ sync::{LazyLock, OnceLock}, transaction::{ @@ -51,15 +50,6 @@ pub mod util; mod pooled; mod tx_type; -/// Expected number of transactions where we can expect a speed-up by recovering the senders in -/// parallel. -pub static PARALLEL_SENDER_RECOVERY_THRESHOLD: LazyLock = - LazyLock::new(|| match rayon::current_num_threads() { - 0..=1 => usize::MAX, - 2..=8 => 10, - _ => 5, - }); - /// A raw transaction. /// /// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). @@ -1724,37 +1714,6 @@ pub mod serde_bincode_compat { } } -/// Recovers a list of signers from a transaction list iterator. -/// -/// Returns `None`, if some transaction's signature is invalid -pub fn recover_signers<'a, I, T>(txes: I, num_txes: usize) -> Option> -where - T: SignedTransaction, - I: IntoParallelIterator + IntoIterator + Send, -{ - if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { - txes.into_iter().map(|tx| tx.recover_signer()).collect() - } else { - txes.into_par_iter().map(|tx| tx.recover_signer()).collect() - } -} - -/// Recovers a list of signers from a transaction list iterator _without ensuring that the -/// signature has a low `s` value_. -/// -/// Returns `None`, if some transaction's signature is invalid. 
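Taken together, patches 007 and 008 replace the runtime `PARALLEL_SENDER_RECOVERY_THRESHOLD` heuristic (removed in this patch) with a compile-time choice: the `rayon` cargo feature selects the parallel backend, and callers see one signature either way. A sketch of that dispatch, using a stand-in signer check rather than real secp256k1 recovery:

// Stand-in for a signed transaction; real recovery is secp256k1-based.
struct Tx {
    signer: Option<[u8; 20]>,
}

impl Tx {
    fn recover_signer(&self) -> Option<[u8; 20]> {
        self.signer
    }
}

#[cfg(feature = "rayon")]
fn recover_signers(txs: &[Tx]) -> Option<Vec<[u8; 20]>> {
    use rayon::prelude::*;
    // Parallel backend: collecting into Option<Vec<_>> short-circuits on the
    // first transaction whose signature fails to recover.
    txs.par_iter().map(|tx| tx.recover_signer()).collect()
}

#[cfg(not(feature = "rayon"))]
fn recover_signers(txs: &[Tx]) -> Option<Vec<[u8; 20]>> {
    // Sequential fallback with the same signature and semantics.
    txs.iter().map(|tx| tx.recover_signer()).collect()
}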
-pub fn recover_signers_unchecked<'a, I, T>(txes: I, num_txes: usize) -> Option> -where - T: SignedTransaction, - I: IntoParallelIterator + IntoIterator + Send, -{ - if num_txes < *PARALLEL_SENDER_RECOVERY_THRESHOLD { - txes.into_iter().map(|tx| tx.recover_signer_unchecked()).collect() - } else { - txes.into_par_iter().map(|tx| tx.recover_signer_unchecked()).collect() - } -} - #[cfg(test)] mod tests { use crate::{ diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 598e726ab08e2..d4a7bbf345400 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -18,8 +18,7 @@ use reth_db::{ table::{Decompress, Value}, }; use reth_node_types::NodePrimitives; -use reth_primitives::{transaction::recover_signers, SealedHeader}; -use reth_primitives_traits::SignedTransaction; +use reth_primitives_traits::{SealedHeader, SignedTransaction}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ fmt::Debug, @@ -297,7 +296,8 @@ impl> TransactionsPr range: impl RangeBounds, ) -> ProviderResult> { let txs = self.transactions_by_tx_range(range)?; - recover_signers(&txs, txs.len()).ok_or(ProviderError::SenderRecoveryError) + reth_primitives_traits::transaction::recover::recover_signers(&txs) + .ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, num: TxNumber) -> ProviderResult> { diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index cb8be0f922fc1..e81c42284d412 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -33,7 +33,6 @@ use reth_primitives::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }, - transaction::recover_signers, BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, TransactionSigned, }; @@ -1554,7 +1553,8 @@ impl> TransactionsPr range: impl RangeBounds, ) -> ProviderResult> { let txes = self.transactions_by_tx_range(range)?; - recover_signers(&txes, txes.len()).ok_or(ProviderError::SenderRecoveryError) + reth_primitives_traits::transaction::recover::recover_signers(&txes) + .ok_or(ProviderError::SenderRecoveryError) } fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { From 027f80ebb77eaf0f5800aeca36922d5214ca2caa Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 Jan 2025 22:28:40 +0100 Subject: [PATCH 009/113] chore: remove blockchaintree dep from reth bin (#13712) --- Cargo.lock | 2 - bin/reth/Cargo.toml | 2 - .../src/commands/debug_cmd/build_block.rs | 16 +- bin/reth/src/commands/debug_cmd/mod.rs | 4 - .../src/commands/debug_cmd/replay_engine.rs | 212 ------------------ bin/reth/src/lib.rs | 4 - book/SUMMARY.md | 1 - book/cli/SUMMARY.md | 1 - book/cli/reth/debug.md | 1 - examples/rpc-db/src/main.rs | 6 +- 10 files changed, 5 insertions(+), 244 deletions(-) delete mode 100644 bin/reth/src/commands/debug_cmd/replay_engine.rs diff --git a/Cargo.lock b/Cargo.lock index 784f629e527e9..33c431f6dd946 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6401,7 +6401,6 @@ dependencies = [ "futures", "reth-basic-payload-builder", "reth-beacon-consensus", - "reth-blockchain-tree", "reth-chainspec", "reth-cli", "reth-cli-commands", @@ -6413,7 +6412,6 @@ dependencies = [ "reth-db", "reth-db-api", 
"reth-downloaders", - "reth-engine-util", "reth-errors", "reth-ethereum-cli", "reth-ethereum-payload-builder", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index fb86a8ced2b33..9730c18044b07 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -36,7 +36,6 @@ reth-cli-runner.workspace = true reth-cli-commands.workspace = true reth-cli-util.workspace = true reth-consensus-common.workspace = true -reth-blockchain-tree.workspace = true reth-rpc-builder.workspace = true reth-rpc.workspace = true reth-rpc-types-compat.workspace = true @@ -64,7 +63,6 @@ reth-node-builder.workspace = true reth-node-events.workspace = true reth-node-metrics.workspace = true reth-consensus.workspace = true -reth-engine-util.workspace = true reth-prune.workspace = true # crypto diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 55082c1c37996..1fd437697c995 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -13,9 +13,6 @@ use reth_basic_payload_builder::{ BuildArguments, BuildOutcome, Cancelled, PayloadBuilder, PayloadConfig, }; use reth_beacon_consensus::EthBeaconConsensus; -use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, -}; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; @@ -33,7 +30,7 @@ use reth_primitives::{ TransactionSigned, }; use reth_provider::{ - providers::{BlockchainProvider, ProviderNodeTypes}, + providers::{BlockchainProvider2, ProviderNodeTypes}, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; @@ -131,21 +128,12 @@ impl> Command { let consensus: Arc> = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); - - // configure blockchain tree - let tree_externals = - TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); - let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default())?; - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - // fetch the best block from the database let best_block = self .lookup_best_block(provider_factory.clone()) .wrap_err("the head block is missing")?; - let blockchain_db = - BlockchainProvider::new(provider_factory.clone(), blockchain_tree.clone())?; + let blockchain_db = BlockchainProvider2::new(provider_factory.clone())?; let blob_store = InMemoryBlobStore::default(); let validator = diff --git a/bin/reth/src/commands/debug_cmd/mod.rs b/bin/reth/src/commands/debug_cmd/mod.rs index 4aaa1b1c82ec2..26077a1274fbe 100644 --- a/bin/reth/src/commands/debug_cmd/mod.rs +++ b/bin/reth/src/commands/debug_cmd/mod.rs @@ -12,7 +12,6 @@ mod build_block; mod execution; mod in_memory_merkle; mod merkle; -mod replay_engine; /// `reth debug` command #[derive(Debug, Parser)] @@ -32,8 +31,6 @@ pub enum Subcommands { InMemoryMerkle(in_memory_merkle::Command), /// Debug block building. BuildBlock(build_block::Command), - /// Debug engine API by replaying stored messages. 
- ReplayEngine(replay_engine::Command), } impl> Command { @@ -49,7 +46,6 @@ impl> Command { Subcommands::Merkle(command) => command.execute::(ctx).await, Subcommands::InMemoryMerkle(command) => command.execute::(ctx).await, Subcommands::BuildBlock(command) => command.execute::(ctx).await, - Subcommands::ReplayEngine(command) => command.execute::(ctx).await, } } } diff --git a/bin/reth/src/commands/debug_cmd/replay_engine.rs b/bin/reth/src/commands/debug_cmd/replay_engine.rs deleted file mode 100644 index 80d60cfb39bea..0000000000000 --- a/bin/reth/src/commands/debug_cmd/replay_engine.rs +++ /dev/null @@ -1,212 +0,0 @@ -use crate::args::NetworkArgs; -use clap::Parser; -use eyre::Context; -use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; -use reth_beacon_consensus::{hooks::EngineHooks, BeaconConsensusEngine, EthBeaconConsensus}; -use reth_blockchain_tree::{ - BlockchainTree, BlockchainTreeConfig, ShareableBlockchainTree, TreeExternals, -}; -use reth_chainspec::ChainSpec; -use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use reth_cli_runner::CliContext; -use reth_cli_util::get_secret_key; -use reth_config::Config; -use reth_consensus::{ConsensusError, FullConsensus}; -use reth_db::DatabaseEnv; -use reth_engine_util::engine_store::{EngineMessageStore, StoredEngineApiMessage}; -use reth_ethereum_payload_builder::EthereumBuilderConfig; -use reth_fs_util as fs; -use reth_network::{BlockDownloaderProvider, NetworkHandle}; -use reth_network_api::NetworkInfo; -use reth_node_api::{EngineApiMessageVersion, NodePrimitives, NodeTypesWithDBAdapter}; -use reth_node_ethereum::{EthEngineTypes, EthEvmConfig, EthExecutorProvider}; -use reth_payload_builder::{PayloadBuilderHandle, PayloadBuilderService}; -use reth_primitives::EthPrimitives; -use reth_provider::{ - providers::{BlockchainProvider, ProviderNodeTypes}, - CanonStateSubscriptions, ChainSpecProvider, ProviderFactory, -}; -use reth_prune::PruneModes; -use reth_stages::Pipeline; -use reth_static_file::StaticFileProducer; -use reth_tasks::TaskExecutor; -use reth_transaction_pool::noop::NoopTransactionPool; -use std::{path::PathBuf, sync::Arc, time::Duration}; -use tokio::sync::oneshot; -use tracing::*; - -/// `reth debug replay-engine` command -/// This script will read stored engine API messages and replay them by the timestamp. -/// It does not require -#[derive(Debug, Parser)] -pub struct Command { - #[command(flatten)] - env: EnvironmentArgs, - - #[command(flatten)] - network: NetworkArgs, - - /// The path to read engine API messages from. - #[arg(long = "engine-api-store", value_name = "PATH")] - engine_api_store: PathBuf, - - /// The number of milliseconds between Engine API messages. 
- #[arg(long = "interval", default_value_t = 1_000)] - interval: u64, -} - -impl> Command { - async fn build_network< - N: ProviderNodeTypes< - ChainSpec = C::ChainSpec, - Primitives: NodePrimitives< - Block = reth_primitives::Block, - Receipt = reth_primitives::Receipt, - BlockHeader = reth_primitives::Header, - >, - >, - >( - &self, - config: &Config, - task_executor: TaskExecutor, - provider_factory: ProviderFactory, - network_secret_path: PathBuf, - default_peers_path: PathBuf, - ) -> eyre::Result { - let secret_key = get_secret_key(&network_secret_path)?; - let network = self - .network - .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) - .with_task_executor(Box::new(task_executor)) - .build(provider_factory) - .start_network() - .await?; - info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network"); - debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); - Ok(network) - } - - /// Execute `debug replay-engine` command - pub async fn execute< - N: CliNodeTypes, - >( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = - self.env.init::(AccessRights::RW)?; - - let consensus: Arc> = - Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - - let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()); - - // Configure blockchain tree - let tree_externals = - TreeExternals::new(provider_factory.clone(), Arc::clone(&consensus), executor); - let tree = BlockchainTree::new(tree_externals, BlockchainTreeConfig::default())?; - let blockchain_tree = Arc::new(ShareableBlockchainTree::new(tree)); - - // Set up the blockchain provider - let blockchain_db = BlockchainProvider::new(provider_factory.clone(), blockchain_tree)?; - - // Set up network - let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); - let network = self - .build_network( - &config, - ctx.task_executor.clone(), - provider_factory.clone(), - network_secret_path, - data_dir.known_peers(), - ) - .await?; - - // Set up payload builder - let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new( - EthEvmConfig::new(provider_factory.chain_spec()), - EthereumBuilderConfig::new(Default::default()), - ); - - let payload_generator = BasicPayloadJobGenerator::with_builder( - blockchain_db.clone(), - NoopTransactionPool::default(), - ctx.task_executor.clone(), - BasicPayloadJobGeneratorConfig::default(), - payload_builder, - ); - - let (payload_service, payload_builder): (_, PayloadBuilderHandle) = - PayloadBuilderService::new(payload_generator, blockchain_db.canonical_state_stream()); - - ctx.task_executor.spawn_critical("payload builder service", payload_service); - - // Configure the consensus engine - let network_client = network.fetch_client().await?; - let (beacon_consensus_engine, beacon_engine_handle) = BeaconConsensusEngine::new( - network_client, - Pipeline::>>::builder().build( - provider_factory.clone(), - StaticFileProducer::new(provider_factory.clone(), PruneModes::none()), - ), - blockchain_db.clone(), - Box::new(ctx.task_executor.clone()), - Box::new(network), - None, - payload_builder, - None, - u64::MAX, - EngineHooks::new(), - )?; - info!(target: "reth::cli", "Consensus engine initialized"); - - // Run consensus engine to completion - let (tx, rx) = oneshot::channel(); - info!(target: "reth::cli", "Starting consensus engine"); - 
ctx.task_executor.spawn_critical_blocking("consensus engine", async move { - let res = beacon_consensus_engine.await; - let _ = tx.send(res); - }); - - let engine_api_store = EngineMessageStore::new(self.engine_api_store.clone()); - for filepath in engine_api_store.engine_messages_iter()? { - let contents = - fs::read(&filepath).wrap_err(format!("failed to read: {}", filepath.display()))?; - let message = serde_json::from_slice(&contents) - .wrap_err(format!("failed to parse: {}", filepath.display()))?; - debug!(target: "reth::cli", filepath = %filepath.display(), ?message, "Forwarding Engine API message"); - match message { - StoredEngineApiMessage::ForkchoiceUpdated { state, payload_attrs } => { - let response = beacon_engine_handle - .fork_choice_updated( - state, - payload_attrs, - EngineApiMessageVersion::default(), - ) - .await?; - debug!(target: "reth::cli", ?response, "Received for forkchoice updated"); - } - StoredEngineApiMessage::NewPayload { payload, sidecar } => { - let response = beacon_engine_handle.new_payload(payload, sidecar).await?; - debug!(target: "reth::cli", ?response, "Received for new payload"); - } - }; - - // Pause before next message - tokio::time::sleep(Duration::from_millis(self.interval)).await; - } - - info!(target: "reth::cli", "Finished replaying engine API messages"); - - match rx.await? { - Ok(()) => info!("Beacon consensus engine exited successfully"), - Err(error) => { - error!(target: "reth::cli", %error, "Beacon consensus engine exited with an error") - } - }; - - Ok(()) - } -} diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 53c592063eca6..1c55669bec919 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -107,10 +107,6 @@ pub mod primitives { pub mod beacon_consensus { pub use reth_beacon_consensus::*; } -/// Re-exported from `reth_blockchain_tree`. -pub mod blockchain_tree { - pub use reth_blockchain_tree::*; -} /// Re-exported from `reth_consensus`. 
pub mod consensus { diff --git a/book/SUMMARY.md b/book/SUMMARY.md index f93daeaba3979..666f4e4ca5593 100644 --- a/book/SUMMARY.md +++ b/book/SUMMARY.md @@ -70,7 +70,6 @@ - [`reth debug merkle`](./cli/reth/debug/merkle.md) - [`reth debug in-memory-merkle`](./cli/reth/debug/in-memory-merkle.md) - [`reth debug build-block`](./cli/reth/debug/build-block.md) - - [`reth debug replay-engine`](./cli/reth/debug/replay-engine.md) - [`reth recover`](./cli/reth/recover.md) - [`reth recover storage-tries`](./cli/reth/recover/storage-tries.md) - [`reth prune`](./cli/reth/prune.md) diff --git a/book/cli/SUMMARY.md b/book/cli/SUMMARY.md index 5f338a0d1ec72..6a18ff0cdfe21 100644 --- a/book/cli/SUMMARY.md +++ b/book/cli/SUMMARY.md @@ -40,7 +40,6 @@ - [`reth debug merkle`](./reth/debug/merkle.md) - [`reth debug in-memory-merkle`](./reth/debug/in-memory-merkle.md) - [`reth debug build-block`](./reth/debug/build-block.md) - - [`reth debug replay-engine`](./reth/debug/replay-engine.md) - [`reth recover`](./reth/recover.md) - [`reth recover storage-tries`](./reth/recover/storage-tries.md) - [`reth prune`](./reth/prune.md) diff --git a/book/cli/reth/debug.md b/book/cli/reth/debug.md index ab016d631d61d..c3f98e1452edc 100644 --- a/book/cli/reth/debug.md +++ b/book/cli/reth/debug.md @@ -13,7 +13,6 @@ Commands: merkle Debug the clean & incremental state root calculations in-memory-merkle Debug in-memory state root calculation build-block Debug block building - replay-engine Debug engine API by replaying stored messages help Print this message or the help of the given subcommand(s) Options: diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index cde891036e6a1..1af1d3e5e5ffb 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -18,7 +18,7 @@ use reth::{ api::NodeTypesWithDBAdapter, beacon_consensus::EthBeaconConsensus, providers::{ - providers::{BlockchainProvider, StaticFileProvider}, + providers::{BlockchainProvider2, StaticFileProvider}, ProviderFactory, }, rpc::eth::EthApi, @@ -33,7 +33,7 @@ use reth::rpc::builder::{ }; // Configuring the network parts, ideally also wouldn't need to think about this. use myrpc_ext::{MyRpcExt, MyRpcExtApiServer}; -use reth::{blockchain_tree::noop::NoopBlockchainTree, tasks::TokioTaskExecutor}; +use reth::tasks::TokioTaskExecutor; use reth_node_ethereum::{ node::EthereumEngineValidator, EthEvmConfig, EthExecutorProvider, EthereumNode, }; @@ -61,7 +61,7 @@ async fn main() -> eyre::Result<()> { // 2. Setup the blockchain provider using only the database provider and a noop for the tree to // satisfy trait bounds. Tree is not used in this example since we are only operating on the // disk and don't handle new blocks/live sync etc, which is done by the blockchain tree. 
- let provider = BlockchainProvider::new(factory, Arc::new(NoopBlockchainTree::default()))?; + let provider = BlockchainProvider2::new(factory)?; let rpc_builder = RpcModuleBuilder::default() .with_provider(provider.clone()) From 818eb7d4085ecf2dc193639a48b2e5ee5995e275 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 Jan 2025 22:36:33 +0100 Subject: [PATCH 010/113] chore: move beacon handle type (#13714) --- crates/consensus/beacon/src/engine/error.rs | 37 +------- crates/consensus/beacon/src/engine/handle.rs | 93 +------------------- crates/engine/primitives/src/error.rs | 26 ++++++ crates/engine/primitives/src/lib.rs | 4 +- crates/engine/primitives/src/message.rs | 88 +++++++++++++++++- 5 files changed, 115 insertions(+), 133 deletions(-) diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs index 0eef90ea7e975..6fabfbf031b13 100644 --- a/crates/consensus/beacon/src/engine/error.rs +++ b/crates/consensus/beacon/src/engine/error.rs @@ -1,5 +1,5 @@ use crate::engine::hooks::EngineHookError; -use alloy_rpc_types_engine::ForkchoiceUpdateError; +pub use reth_engine_primitives::BeaconForkChoiceUpdateError; use reth_errors::{DatabaseError, RethError}; use reth_stages_api::PipelineError; @@ -42,38 +42,3 @@ impl From for BeaconConsensusEngineError { Self::Common(e.into()) } } - -/// Represents error cases for an applied forkchoice update. -/// -/// This represents all possible error cases, that must be returned as JSON RPC errors back to the -/// beacon node. -#[derive(Debug, thiserror::Error)] -pub enum BeaconForkChoiceUpdateError { - /// Thrown when a forkchoice update resulted in an error. - #[error("forkchoice update error: {0}")] - ForkchoiceUpdateError(#[from] ForkchoiceUpdateError), - /// Thrown when the engine task is unavailable/stopped. - #[error("beacon consensus engine task stopped")] - EngineUnavailable, - /// An internal error occurred, not necessarily related to the update. - #[error(transparent)] - Internal(Box), -} - -impl BeaconForkChoiceUpdateError { - /// Create a new internal error. - pub fn internal(e: E) -> Self { - Self::Internal(Box::new(e)) - } -} - -impl From for BeaconForkChoiceUpdateError { - fn from(e: RethError) -> Self { - Self::internal(e) - } -} -impl From for BeaconForkChoiceUpdateError { - fn from(e: DatabaseError) -> Self { - Self::internal(e) - } -} diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 7d6dd3cff3178..e4f291c06451a 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -1,94 +1,3 @@ //! `BeaconConsensusEngine` external API -use crate::BeaconForkChoiceUpdateError; -use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, -}; -use futures::TryFutureExt; -use reth_engine_primitives::{ - BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, - OnForkChoiceUpdated, -}; -use reth_errors::RethResult; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; - -/// A _shareable_ beacon consensus frontend type. Used to interact with the spawned beacon consensus -/// engine task. 
-/// -/// See also `BeaconConsensusEngine` -#[derive(Debug, Clone)] -pub struct BeaconConsensusEngineHandle -where - Engine: EngineTypes, -{ - pub(crate) to_engine: UnboundedSender>, -} - -// === impl BeaconConsensusEngineHandle === - -impl BeaconConsensusEngineHandle -where - Engine: EngineTypes, -{ - /// Creates a new beacon consensus engine handle. - pub const fn new(to_engine: UnboundedSender>) -> Self { - Self { to_engine } - } - - /// Sends a new payload message to the beacon consensus engine and waits for a response. - /// - /// See also - pub async fn new_payload( - &self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result { - let (tx, rx) = oneshot::channel(); - let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, sidecar, tx }); - rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)? - } - - /// Sends a forkchoice update message to the beacon consensus engine and waits for a response. - /// - /// See also - pub async fn fork_choice_updated( - &self, - state: ForkchoiceState, - payload_attrs: Option, - version: EngineApiMessageVersion, - ) -> Result { - Ok(self - .send_fork_choice_updated(state, payload_attrs, version) - .map_err(|_| BeaconForkChoiceUpdateError::EngineUnavailable) - .await?? - .await?) - } - - /// Sends a forkchoice update message to the beacon consensus engine and returns the receiver to - /// wait for a response. - fn send_fork_choice_updated( - &self, - state: ForkchoiceState, - payload_attrs: Option, - version: EngineApiMessageVersion, - ) -> oneshot::Receiver> { - let (tx, rx) = oneshot::channel(); - let _ = self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs, - tx, - version, - }); - rx - } - - /// Sends a transition configuration exchange message to the beacon consensus engine. - /// - /// See also - /// - /// This only notifies about the exchange. The actual exchange is done by the engine API impl - /// itself. - pub fn transition_configuration_exchanged(&self) { - let _ = self.to_engine.send(BeaconEngineMessage::TransitionConfigurationExchanged); - } -} +pub use reth_engine_primitives::BeaconConsensusEngineHandle; diff --git a/crates/engine/primitives/src/error.rs b/crates/engine/primitives/src/error.rs index b7deb607bcf9d..18e72fe83e72c 100644 --- a/crates/engine/primitives/src/error.rs +++ b/crates/engine/primitives/src/error.rs @@ -1,3 +1,5 @@ +use alloy_rpc_types_engine::ForkchoiceUpdateError; + /// Represents all error cases when handling a new payload. /// /// This represents all possible error cases that must be returned as JSON RCP errors back to the @@ -18,3 +20,27 @@ impl BeaconOnNewPayloadError { Self::Internal(Box::new(e)) } } + +/// Represents error cases for an applied forkchoice update. +/// +/// This represents all possible error cases, that must be returned as JSON RPC errors back to the +/// beacon node. +#[derive(Debug, thiserror::Error)] +pub enum BeaconForkChoiceUpdateError { + /// Thrown when a forkchoice update resulted in an error. + #[error("forkchoice update error: {0}")] + ForkchoiceUpdateError(#[from] ForkchoiceUpdateError), + /// Thrown when the engine task is unavailable/stopped. + #[error("beacon consensus engine task stopped")] + EngineUnavailable, + /// An internal error occurred, not necessarily related to the update. + #[error(transparent)] + Internal(Box), +} + +impl BeaconForkChoiceUpdateError { + /// Create a new internal error. 
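The moved error type relies on a boxed-internal-error pattern whose generic bounds were stripped in transit above; a condensed, standalone sketch of the same shape (generic names; assumes the `thiserror` crate):

    use std::error::Error;

    #[derive(Debug, thiserror::Error)]
    enum ForkchoiceError {
        /// The engine task is gone, so no update can be processed.
        #[error("engine task stopped")]
        EngineUnavailable,
        /// Any other failure, type-erased so callers see a single variant.
        #[error(transparent)]
        Internal(Box<dyn Error + Send + Sync>),
    }

    impl ForkchoiceError {
        /// Boxes an arbitrary error into the `Internal` variant.
        fn internal<E: Error + Send + Sync + 'static>(e: E) -> Self {
            Self::Internal(Box::new(e))
        }
    }

    fn main() {
        let io = std::io::Error::new(std::io::ErrorKind::Other, "db closed");
        println!("{}", ForkchoiceError::internal(io));
    }

This mirrors why the crate-specific `From<RethError>`/`From<DatabaseError>` impls can stay behind: any concrete error funnels through `internal`.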
+ pub fn internal(e: E) -> Self { + Self::Internal(Box::new(e)) + } +} diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index 9921023c4a1dc..f7877257c11bb 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -14,13 +14,13 @@ use core::fmt; use alloy_consensus::BlockHeader; use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; -pub use error::BeaconOnNewPayloadError; +pub use error::*; mod forkchoice; pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus}; mod message; -pub use message::{BeaconEngineMessage, OnForkChoiceUpdated}; +pub use message::*; mod invalid_block_hook; pub use invalid_block_hook::InvalidBlockHook; diff --git a/crates/engine/primitives/src/message.rs b/crates/engine/primitives/src/message.rs index d8a4c1322ad08..6e4f4629276b4 100644 --- a/crates/engine/primitives/src/message.rs +++ b/crates/engine/primitives/src/message.rs @@ -1,9 +1,12 @@ -use crate::{BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, ForkchoiceStatus}; +use crate::{ + error::BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, EngineApiMessageVersion, + EngineTypes, ForkchoiceStatus, +}; use alloy_rpc_types_engine::{ ExecutionPayload, ExecutionPayloadSidecar, ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, PayloadStatus, PayloadStatusEnum, }; -use futures::{future::Either, FutureExt}; +use futures::{future::Either, FutureExt, TryFutureExt}; use reth_errors::RethResult; use reth_payload_builder_primitives::PayloadBuilderError; use std::{ @@ -12,7 +15,7 @@ use std::{ pin::Pin, task::{ready, Context, Poll}, }; -use tokio::sync::oneshot; +use tokio::sync::{mpsc::UnboundedSender, oneshot}; /// Represents the outcome of forkchoice update. /// @@ -191,3 +194,82 @@ impl Display for BeaconEngineMessage { } } } + +/// A clonable sender type that can be used to send engine API messages. +/// +/// This type mirrors consensus related functions of the engine API. +#[derive(Debug, Clone)] +pub struct BeaconConsensusEngineHandle +where + Engine: EngineTypes, +{ + to_engine: UnboundedSender>, +} + +impl BeaconConsensusEngineHandle +where + Engine: EngineTypes, +{ + /// Creates a new beacon consensus engine handle. + pub const fn new(to_engine: UnboundedSender>) -> Self { + Self { to_engine } + } + + /// Sends a new payload message to the beacon consensus engine and waits for a response. + /// + /// See also + pub async fn new_payload( + &self, + payload: ExecutionPayload, + sidecar: ExecutionPayloadSidecar, + ) -> Result { + let (tx, rx) = oneshot::channel(); + let _ = self.to_engine.send(BeaconEngineMessage::NewPayload { payload, sidecar, tx }); + rx.await.map_err(|_| BeaconOnNewPayloadError::EngineUnavailable)? + } + + /// Sends a forkchoice update message to the beacon consensus engine and waits for a response. + /// + /// See also + pub async fn fork_choice_updated( + &self, + state: ForkchoiceState, + payload_attrs: Option, + version: EngineApiMessageVersion, + ) -> Result { + Ok(self + .send_fork_choice_updated(state, payload_attrs, version) + .map_err(|_| BeaconForkChoiceUpdateError::EngineUnavailable) + .await? + .map_err(BeaconForkChoiceUpdateError::internal)? + .await?) + } + + /// Sends a forkchoice update message to the beacon consensus engine and returns the receiver to + /// wait for a response. 
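Each handle method in this file follows the same request/response dance: push a message over the mpsc channel with an embedded oneshot sender, then await the oneshot receiver. A stripped-down, runnable sketch of that pattern (tokio only, no reth types; assumes tokio with the `rt` and `macros` features):

    use tokio::sync::{mpsc, oneshot};

    enum Request {
        /// A request carrying the channel on which the reply must be sent.
        Ping { tx: oneshot::Sender<&'static str> },
    }

    #[tokio::main]
    async fn main() {
        let (to_engine, mut from_handles) = mpsc::unbounded_channel();

        // Engine side: service each request over its embedded oneshot sender.
        tokio::spawn(async move {
            while let Some(Request::Ping { tx }) = from_handles.recv().await {
                let _ = tx.send("pong");
            }
        });

        // Handle side: send the request, then await the reply.
        let (tx, rx) = oneshot::channel();
        let _ = to_engine.send(Request::Ping { tx });
        assert_eq!(rx.await.unwrap(), "pong");
    }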
+ fn send_fork_choice_updated( + &self, + state: ForkchoiceState, + payload_attrs: Option, + version: EngineApiMessageVersion, + ) -> oneshot::Receiver> { + let (tx, rx) = oneshot::channel(); + let _ = self.to_engine.send(BeaconEngineMessage::ForkchoiceUpdated { + state, + payload_attrs, + tx, + version, + }); + rx + } + + /// Sends a transition configuration exchange message to the beacon consensus engine. + /// + /// See also + /// + /// This only notifies about the exchange. The actual exchange is done by the engine API impl + /// itself. + pub fn transition_configuration_exchanged(&self) { + let _ = self.to_engine.send(BeaconEngineMessage::TransitionConfigurationExchanged); + } +} From ccaf9da732512d26a113e39a23a30e3510627d92 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 Jan 2025 23:18:10 +0100 Subject: [PATCH 011/113] chore: relax arb for sealedwithsenders (#13715) --- crates/primitives/src/block.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index bcf5ad71abed5..2891d1285faa7 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -503,17 +503,22 @@ impl SealedBlockWithSenders { } #[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for SealedBlockWithSenders { +impl<'a, B> arbitrary::Arbitrary<'a> for SealedBlockWithSenders +where + B: reth_primitives_traits::Block + arbitrary::Arbitrary<'a>, +{ fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let block: SealedBlock = SealedBlock::arbitrary(u)?; + let block = B::arbitrary(u)?; let senders = block - .body - .transactions + .body() + .transactions() .iter() .map(|tx| tx.recover_signer().unwrap()) .collect::>(); + let (header, body) = block.split(); + let block = SealedBlock::new(SealedHeader::seal(header), body); Ok(Self { block, senders }) } } From fb6902880c42eaf6e1ff80a6ce04050a5244d514 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 7 Jan 2025 23:31:04 +0100 Subject: [PATCH 012/113] chore: rm beacon consensus deps (#13716) --- Cargo.lock | 8 +++----- crates/cli/commands/Cargo.toml | 2 +- crates/cli/commands/src/import.rs | 2 +- crates/cli/commands/src/stage/run.rs | 2 +- crates/engine/tree/Cargo.toml | 1 + crates/engine/tree/src/tree/mod.rs | 2 +- crates/ethereum/engine-primitives/src/lib.rs | 3 +-- crates/ethereum/node/Cargo.toml | 2 +- crates/ethereum/node/src/node.rs | 7 +++---- crates/node/api/Cargo.toml | 1 - crates/node/api/src/node.rs | 2 +- crates/rpc/rpc-builder/Cargo.toml | 1 - crates/rpc/rpc-builder/tests/it/utils.rs | 2 +- crates/rpc/rpc-engine-api/Cargo.toml | 1 - crates/rpc/rpc-engine-api/src/engine_api.rs | 3 +-- crates/rpc/rpc-engine-api/src/error.rs | 3 +-- 16 files changed, 17 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 33c431f6dd946..c6fd91d3892c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6710,7 +6710,6 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "ratatui", - "reth-beacon-consensus", "reth-chainspec", "reth-cli", "reth-cli-runner", @@ -6726,6 +6725,7 @@ dependencies = [ "reth-ecies", "reth-eth-wire", "reth-ethereum-cli", + "reth-ethereum-consensus", "reth-evm", "reth-exex", "reth-fs-util", @@ -7299,6 +7299,7 @@ dependencies = [ "reth-db", "reth-engine-primitives", "reth-errors", + "reth-ethereum-consensus", "reth-ethereum-engine-primitives", "reth-evm", "reth-exex-types", @@ -8014,7 +8015,6 @@ version = "1.1.5" dependencies = [ "alloy-rpc-types-engine", "eyre", - 
"reth-beacon-consensus", "reth-consensus", "reth-db-api", "reth-engine-primitives", @@ -8161,11 +8161,11 @@ dependencies = [ "futures", "rand 0.8.5", "reth-basic-payload-builder", - "reth-beacon-consensus", "reth-chainspec", "reth-consensus", "reth-db", "reth-e2e-test-utils", + "reth-ethereum-consensus", "reth-ethereum-engine-primitives", "reth-ethereum-payload-builder", "reth-evm", @@ -8956,7 +8956,6 @@ dependencies = [ "jsonrpsee", "metrics", "pin-project", - "reth-beacon-consensus", "reth-chainspec", "reth-consensus", "reth-engine-primitives", @@ -9006,7 +9005,6 @@ dependencies = [ "jsonrpsee-types", "metrics", "parking_lot", - "reth-beacon-consensus", "reth-chainspec", "reth-engine-primitives", "reth-ethereum-engine-primitives", diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 2220efda5c6eb..c5f6cef0d9085 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -11,7 +11,7 @@ repository.workspace = true [dependencies] # reth -reth-beacon-consensus.workspace = true +reth-ethereum-consensus.workspace = true reth-chainspec.workspace = true reth-cli.workspace = true reth-ethereum-cli.workspace = true diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index a73322a903f4d..7f5fd2d2f1b38 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -3,7 +3,6 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_primitives::B256; use clap::Parser; use futures::{Stream, StreamExt}; -use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_config::Config; @@ -15,6 +14,7 @@ use reth_downloaders::{ file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; +use reth_ethereum_consensus::EthBeaconConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::{ bodies::downloader::BodyDownloader, diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 1a4783b9d487d..c7655f0acb750 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -6,7 +6,6 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::Sealable; use clap::Parser; -use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; @@ -18,6 +17,7 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_eth_wire::NetPrimitivesFor; +use reth_ethereum_consensus::EthBeaconConsensus; use reth_evm::execute::BlockExecutorProvider; use reth_exex::ExExManagerHandle; use reth_network::BlockDownloaderProvider; diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 0d2284e39b3e6..572f0955045b9 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -73,6 +73,7 @@ reth-chain-state = { workspace = true, features = ["test-utils"] } reth-chainspec.workspace = true reth-db = { workspace = true, features = ["test-utils"] } reth-ethereum-engine-primitives.workspace = true +reth-ethereum-consensus.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-exex-types.workspace = true reth-network-p2p = { workspace = true, features 
= ["test-utils"] } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 00531da7f7d21..7726ba4897a5e 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -2785,10 +2785,10 @@ mod tests { use alloy_rlp::Decodable; use alloy_rpc_types_engine::{CancunPayloadFields, ExecutionPayloadSidecar}; use assert_matches::assert_matches; - use reth_beacon_consensus::EthBeaconConsensus; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; use reth_engine_primitives::ForkchoiceStatus; + use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::test_utils::MockExecutorProvider; use reth_primitives::{Block, BlockExt, EthPrimitives}; diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index d1301882c638c..350780d0bdad3 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -9,8 +9,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod payload; -use std::sync::Arc; - use alloy_rpc_types_engine::{ExecutionPayload, ExecutionPayloadSidecar, PayloadError}; pub use alloy_rpc_types_engine::{ ExecutionPayloadEnvelopeV2, ExecutionPayloadEnvelopeV3, ExecutionPayloadEnvelopeV4, @@ -26,6 +24,7 @@ use reth_payload_primitives::{ use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{Block, NodePrimitives, SealedBlock, SealedBlockFor}; use reth_rpc_types_compat::engine::payload::block_to_payload; +use std::sync::Arc; /// The types used in the default mainnet ethereum beacon consensus engine. #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index e532b844a529e..4f4f7b0f82e02 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -16,6 +16,7 @@ reth-payload-builder.workspace = true reth-ethereum-engine-primitives.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true +reth-ethereum-consensus.workspace = true reth-node-builder.workspace = true reth-tracing.workspace = true reth-provider.workspace = true @@ -24,7 +25,6 @@ reth-network.workspace = true reth-evm.workspace = true reth-evm-ethereum.workspace = true reth-consensus.workspace = true -reth-beacon-consensus.workspace = true reth-rpc.workspace = true reth-node-api.workspace = true reth-chainspec.workspace = true diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index f37e08ef553f1..5c5879983e56f 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -1,9 +1,11 @@ //! Ethereum Node types config. 
+pub use crate::payload::EthereumPayloadBuilder; use crate::{EthEngineTypes, EthEvmConfig}; -use reth_beacon_consensus::EthBeaconConsensus; use reth_chainspec::ChainSpec; use reth_consensus::{ConsensusError, FullConsensus}; +use reth_ethereum_consensus::EthBeaconConsensus; +pub use reth_ethereum_engine_primitives::EthereumEngineValidator; use reth_ethereum_engine_primitives::{ EthBuiltPayload, EthPayloadAttributes, EthPayloadBuilderAttributes, }; @@ -30,9 +32,6 @@ use reth_transaction_pool::{ use reth_trie_db::MerklePatriciaTrie; use std::sync::Arc; -pub use crate::payload::EthereumPayloadBuilder; -pub use reth_ethereum_engine_primitives::EthereumEngineValidator; - /// Type configuration for a regular Ethereum node. #[derive(Debug, Default, Clone, Copy)] #[non_exhaustive] diff --git a/crates/node/api/Cargo.toml b/crates/node/api/Cargo.toml index d952a8abfaae0..fa323108dc666 100644 --- a/crates/node/api/Cargo.toml +++ b/crates/node/api/Cargo.toml @@ -13,7 +13,6 @@ workspace = true [dependencies] # reth reth-db-api.workspace = true -reth-beacon-consensus.workspace = true reth-consensus.workspace = true reth-evm.workspace = true reth-provider.workspace = true diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 66c131581892c..498297c2db8b2 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -2,12 +2,12 @@ use crate::ConfigureEvm; use alloy_rpc_types_engine::JwtSecret; -use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_consensus::{ConsensusError, FullConsensus}; use reth_db_api::{ database_metrics::{DatabaseMetadata, DatabaseMetrics}, Database, }; +use reth_engine_primitives::BeaconConsensusEngineHandle; use reth_evm::execute::BlockExecutorProvider; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 7dbbe7608a72b..957e2e33cf1ed 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -51,7 +51,6 @@ tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } [dev-dependencies] reth-chainspec.workspace = true -reth-beacon-consensus.workspace = true reth-network-api.workspace = true reth-network-peers.workspace = true reth-evm-ethereum.workspace = true diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index d9d8a9e45a774..dab7715033c97 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -4,9 +4,9 @@ use std::{ }; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; -use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::MAINNET; use reth_consensus::noop::NoopConsensus; +use reth_engine_primitives::BeaconConsensusEngineHandle; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::execute::BasicBlockExecutorProvider; use reth_evm_ethereum::{execute::EthExecutionStrategyFactory, EthEvmConfig}; diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index ae3fc490d5c78..9bf9ae70710b4 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -17,7 +17,6 @@ reth-chainspec.workspace = true reth-primitives.workspace = true reth-rpc-api.workspace = true reth-storage-api.workspace = true -reth-beacon-consensus.workspace = true reth-payload-builder.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true 
diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index db27d8a1e35dd..9e1f68072849c 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -16,9 +16,8 @@ use alloy_rpc_types_engine::{ use async_trait::async_trait; use jsonrpsee_core::RpcResult; use parking_lot::Mutex; -use reth_beacon_consensus::BeaconConsensusEngineHandle; use reth_chainspec::{EthereumHardforks, Hardforks}; -use reth_engine_primitives::{EngineTypes, EngineValidator}; +use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes, EngineValidator}; use reth_payload_builder::PayloadStore; use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadBuilderAttributes, diff --git a/crates/rpc/rpc-engine-api/src/error.rs b/crates/rpc/rpc-engine-api/src/error.rs index 4210d415bfed2..f5c12f80053fc 100644 --- a/crates/rpc/rpc-engine-api/src/error.rs +++ b/crates/rpc/rpc-engine-api/src/error.rs @@ -2,8 +2,7 @@ use alloy_primitives::{B256, U256}; use jsonrpsee_types::error::{ INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE, INVALID_PARAMS_MSG, SERVER_ERROR_MSG, }; -use reth_beacon_consensus::BeaconForkChoiceUpdateError; -use reth_engine_primitives::BeaconOnNewPayloadError; +use reth_engine_primitives::{BeaconForkChoiceUpdateError, BeaconOnNewPayloadError}; use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::EngineObjectValidationError; use thiserror::Error; From 93667e082054c028264023c8250858cbeb2e4bd3 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 8 Jan 2025 00:51:58 +0100 Subject: [PATCH 013/113] feat(no-std): add `no_std` support for `reth-optimism-consensus` (#13692) --- crates/optimism/consensus/Cargo.toml | 15 ++++++ crates/optimism/consensus/src/lib.rs | 57 +++++++++------------ crates/optimism/consensus/src/proof.rs | 1 + crates/optimism/consensus/src/validation.rs | 1 + crates/optimism/evm/Cargo.toml | 3 +- 5 files changed, 44 insertions(+), 33 deletions(-) diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 23f5206c6c035..024bb957f8153 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -38,4 +38,19 @@ op-alloy-consensus.workspace = true reth-optimism-chainspec.workspace = true [features] +default = ["std"] +std = [ + "reth-chainspec/std", + "reth-consensus/std", + "reth-consensus-common/std", + "reth-primitives/std", + "reth-optimism-forks/std", + "reth-optimism-chainspec/std", + "reth-optimism-primitives/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-consensus/std", + "alloy-trie/std", + "op-alloy-consensus/std", +] optimism = ["reth-primitives/optimism", "reth-optimism-primitives/optimism"] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 7d54b8a049bb2..cedc8c462929f 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -6,9 +6,13 @@ issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![cfg_attr(not(feature = "std"), no_std)] // The `optimism` feature must be enabled to use this crate. 
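The `cfg_attr` line just added is one half of the standard no_std recipe; the other half (`extern crate alloc` plus `alloc` imports) appears a few lines below. The same skeleton in isolation (crate-level sketch with placeholder names):

    #![cfg_attr(not(feature = "std"), no_std)]

    extern crate alloc;

    use alloc::{sync::Arc, vec::Vec};

    /// Compiles identically with or without `std`, since only `alloc` is used.
    pub fn share_all<T>(items: Vec<T>) -> Arc<Vec<T>> {
        Arc::new(items)
    }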
#![cfg(feature = "optimism")] +extern crate alloc; + +use alloc::sync::Arc; use alloy_consensus::{BlockHeader, Header, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::eip7840::BlobParams; use alloy_primitives::{B64, U256}; @@ -26,7 +30,6 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{OpBlock, OpBlockBody, OpPrimitives, OpReceipt}; use reth_primitives::{BlockWithSenders, GotExpected, SealedBlockFor, SealedHeader}; -use std::{sync::Arc, time::SystemTime}; mod proof; pub use proof::calculate_receipt_root_no_memo_optimism; @@ -157,42 +160,32 @@ impl HeaderValidator for OpBeaconConsensus { _total_difficulty: U256, ) -> Result<(), ConsensusError> { // with OP-stack Bedrock activation number determines when TTD (eth Merge) has been reached. - let is_post_merge = self.chain_spec.is_bedrock_active_at_block(header.number); + debug_assert!( + self.chain_spec.is_bedrock_active_at_block(header.number), + "manually import OVM blocks" + ); - if is_post_merge { - if header.nonce != B64::ZERO { - return Err(ConsensusError::TheMergeNonceIsNotZero) - } + if header.nonce != B64::ZERO { + return Err(ConsensusError::TheMergeNonceIsNotZero) + } - if header.ommers_hash != EMPTY_OMMER_ROOT_HASH { - return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty) - } + if header.ommers_hash != EMPTY_OMMER_ROOT_HASH { + return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty) + } - // Post-merge, the consensus layer is expected to perform checks such that the block - // timestamp is a function of the slot. This is different from pre-merge, where blocks - // are only allowed to be in the future (compared to the system's clock) by a certain - // threshold. - // - // Block validation with respect to the parent should ensure that the block timestamp - // is greater than its parent timestamp. + // Post-merge, the consensus layer is expected to perform checks such that the block + // timestamp is a function of the slot. This is different from pre-merge, where blocks + // are only allowed to be in the future (compared to the system's clock) by a certain + // threshold. + // + // Block validation with respect to the parent should ensure that the block timestamp + // is greater than its parent timestamp. - // validate header extra data for all networks post merge - validate_header_extra_data(header)?; + // validate header extra data for all networks post merge + validate_header_extra_data(header)?; - // mixHash is used instead of difficulty inside EVM - // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty - } else { - // Check if timestamp is in the future. Clock can drift but this can be consensus issue. - let present_timestamp = - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(); - - if header.exceeds_allowed_future_timestamp(present_timestamp) { - return Err(ConsensusError::TimestampIsInFuture { - timestamp: header.timestamp, - present_timestamp, - }) - } - } + // mixHash is used instead of difficulty inside EVM + // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty Ok(()) } diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs index 6f86e70f9c332..d539f5739f9b6 100644 --- a/crates/optimism/consensus/src/proof.rs +++ b/crates/optimism/consensus/src/proof.rs @@ -1,5 +1,6 @@ //! Helper function for Receipt root calculation for Optimism hardforks. 
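Stepping back to the validation rewrite a few hunks up: the pre-Bedrock branch is replaced by a `debug_assert!`, which is checked only in debug builds and compiled out entirely in release; per its message, OVM-era blocks are expected to be imported manually rather than validated here. A two-line illustration of that semantic (stand-in condition):

    fn main() {
        let bedrock_active = true; // stand-in for `is_bedrock_active_at_block`
        // Checked only in debug profiles; vanishes in release builds.
        debug_assert!(bedrock_active, "manually import OVM blocks");
    }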
+use alloc::vec::Vec; use alloy_consensus::TxReceipt; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs index 9335917ddf9df..8aef0086375b8 100644 --- a/crates/optimism/consensus/src/validation.rs +++ b/crates/optimism/consensus/src/validation.rs @@ -1,4 +1,5 @@ use crate::proof::calculate_receipt_root_optimism; +use alloc::vec::Vec; use alloy_consensus::TxReceipt; use alloy_primitives::{Bloom, B256}; use reth_chainspec::{ChainSpec, EthereumHardforks}; diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index e2ec79401c82c..19b63d9fe0336 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -74,7 +74,8 @@ std = [ "thiserror/std", "op-alloy-consensus/std", "reth-chainspec/std", - "reth-consensus-common/std" + "reth-optimism-consensus/std", + "reth-consensus-common/std", ] optimism = [ "reth-primitives/optimism", From 7876a89191398846fcf76f97f8735389786a120b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 01:36:27 +0100 Subject: [PATCH 014/113] chore: move beacon consensus engine event (#13718) --- crates/consensus/beacon/src/engine/mod.rs | 3 +-- .../beacon/src/engine => engine/primitives/src}/event.rs | 9 ++++++--- crates/engine/primitives/src/lib.rs | 3 +++ 3 files changed, 10 insertions(+), 5 deletions(-) rename crates/{consensus/beacon/src/engine => engine/primitives/src}/event.rs (91%) diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 9c1adb2bd7838..0412e98776ab8 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -54,8 +54,7 @@ pub use error::{BeaconConsensusEngineError, BeaconEngineResult, BeaconForkChoice mod invalid_headers; pub use invalid_headers::InvalidHeaderCache; -mod event; -pub use event::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; +pub use reth_engine_primitives::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; mod handle; pub use handle::BeaconConsensusEngineHandle; diff --git a/crates/consensus/beacon/src/engine/event.rs b/crates/engine/primitives/src/event.rs similarity index 91% rename from crates/consensus/beacon/src/engine/event.rs rename to crates/engine/primitives/src/event.rs index acf056b3ff47f..fdf5b73f1ecaf 100644 --- a/crates/consensus/beacon/src/engine/event.rs +++ b/crates/engine/primitives/src/event.rs @@ -1,15 +1,18 @@ +//! Events emitted by the beacon consensus engine. + +use crate::ForkchoiceStatus; use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; -use reth_engine_primitives::ForkchoiceStatus; -use reth_primitives::{EthPrimitives, NodePrimitives, SealedBlockFor, SealedHeader}; +use reth_primitives::{EthPrimitives, SealedBlockFor}; +use reth_primitives_traits::{NodePrimitives, SealedHeader}; use std::{ fmt::{Display, Formatter, Result}, sync::Arc, time::Duration, }; -/// Events emitted by [`crate::BeaconConsensusEngine`]. +/// Events emitted by the consensus engine. 
#[derive(Clone, Debug)] pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index f7877257c11bb..e0b465e985968 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -22,6 +22,9 @@ pub use forkchoice::{ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStat mod message; pub use message::*; +mod event; +pub use event::*; + mod invalid_block_hook; pub use invalid_block_hook::InvalidBlockHook; From 91fef2904a6876d253e8cd749128d67f44c9eb47 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 8 Jan 2025 11:19:28 +0100 Subject: [PATCH 015/113] feat: `SparseStateTrie::reveal_witness` (#13719) --- crates/trie/sparse/src/state.rs | 102 +++++++++++++++++++++++++++++++- 1 file changed, 100 insertions(+), 2 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 07264cbc728ba..fecb3c5fb40e4 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -15,10 +15,10 @@ use reth_primitives_traits::Account; use reth_tracing::tracing::trace; use reth_trie_common::{ updates::{StorageTrieUpdates, TrieUpdates}, - MultiProof, MultiProofTargets, Nibbles, TrieAccount, TrieNode, EMPTY_ROOT_HASH, + MultiProof, MultiProofTargets, Nibbles, RlpNode, TrieAccount, TrieNode, EMPTY_ROOT_HASH, TRIE_ACCOUNT_RLP_MAX_SIZE, }; -use std::{fmt, iter::Peekable}; +use std::{collections::VecDeque, fmt, iter::Peekable}; /// Sparse state trie representing lazy-loaded Ethereum state trie. pub struct SparseStateTrie { @@ -271,6 +271,104 @@ impl SparseStateTrie { Ok(()) } + /// Reveal state witness with the given state root. + /// The state witness is expected to be a map of `keccak(rlp(node)): rlp(node).` + /// NOTE: This method does not extensively validate the witness. + pub fn reveal_witness( + &mut self, + state_root: B256, + witness: B256HashMap, + ) -> SparseStateTrieResult<()> { + // Create a `(hash, path, maybe_account)` queue for traversing witness trie nodes + // starting from the root node. + let mut queue = VecDeque::from([(state_root, Nibbles::default(), None)]); + + while let Some((hash, path, maybe_account)) = queue.pop_front() { + // Retrieve the trie node and decode it. + let Some(trie_node_bytes) = witness.get(&hash) else { continue }; + let trie_node = TrieNode::decode(&mut &trie_node_bytes[..])?; + + // Push children nodes into the queue. + match &trie_node { + TrieNode::Branch(branch) => { + for (idx, maybe_child) in branch.as_ref().children() { + if let Some(child_hash) = maybe_child.and_then(RlpNode::as_hash) { + let mut child_path = path.clone(); + child_path.push_unchecked(idx); + queue.push_back((child_hash, child_path, maybe_account)); + } + } + } + TrieNode::Extension(ext) => { + if let Some(child_hash) = ext.child.as_hash() { + let mut child_path = path.clone(); + child_path.extend_from_slice_unchecked(&ext.key); + queue.push_back((child_hash, child_path, maybe_account)); + } + } + TrieNode::Leaf(leaf) => { + let mut full_path = path.clone(); + full_path.extend_from_slice_unchecked(&leaf.key); + if let Some(hashed_address) = maybe_account { + // Record storage slot in revealed. 
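// Aside, not part of this hunk: the witness map `reveal_witness` consumes is
// keyed by `keccak256` of each RLP-encoded node, so a caller might assemble
// it as follows (hypothetical helper; assumes `Bytes` values and the
// `alloy_primitives` map alias):
//
//     use alloy_primitives::{keccak256, map::B256HashMap, Bytes};
//
//     fn to_witness(nodes: impl IntoIterator<Item = Bytes>) -> B256HashMap<Bytes> {
//         nodes.into_iter().map(|node| (keccak256(&node), node)).collect()
//     }
//
//     // trie.reveal_witness(state_root, to_witness(proof_nodes))?;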
+ let hashed_slot = B256::from_slice(&full_path.pack()); + self.revealed.entry(hashed_address).or_default().insert(hashed_slot); + } else { + let hashed_address = B256::from_slice(&full_path.pack()); + let account = TrieAccount::decode(&mut &leaf.value[..])?; + if account.storage_root != EMPTY_ROOT_HASH { + queue.push_back(( + account.storage_root, + Nibbles::default(), + Some(hashed_address), + )); + } + + // Record account in revealed. + self.revealed.entry(hashed_address).or_default(); + } + } + TrieNode::EmptyRoot => {} // nothing to do here + }; + + // Reveal the node itself. + if let Some(account) = maybe_account { + let storage_trie_entry = self.storages.entry(account).or_default(); + if path.is_empty() { + // Handle special storage state root node case. + storage_trie_entry.reveal_root_with_provider( + self.provider_factory.storage_node_provider(account), + trie_node, + None, + self.retain_updates, + )?; + } else { + // Reveal non-root storage trie node. + storage_trie_entry + .as_revealed_mut() + .ok_or(SparseTrieErrorKind::Blind)? + .reveal_node(path, trie_node, None)?; + } + } else if path.is_empty() { + // Handle special state root node case. + self.state.reveal_root_with_provider( + self.provider_factory.account_node_provider(), + trie_node, + None, + self.retain_updates, + )?; + } else { + // Reveal non-root state trie node. + self.state + .as_revealed_mut() + .ok_or(SparseTrieErrorKind::Blind)? + .reveal_node(path, trie_node, None)?; + } + } + + Ok(()) + } + /// Validates the root node of the proof and returns it if it exists and is valid. fn validate_root_node>( &self, From d9ab9ca4d4306b5d868c7e435e87d47c21a60066 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 11:46:03 +0100 Subject: [PATCH 016/113] chore: rm beacon consensus dep from engine-tree (#13720) --- Cargo.lock | 2 +- crates/engine/tree/Cargo.toml | 2 +- crates/engine/tree/src/download.rs | 2 +- crates/engine/tree/src/engine.rs | 3 +- crates/engine/tree/src/tree/config.rs | 9 ++ .../engine/tree/src/tree/invalid_headers.rs | 125 ++++++++++++++++++ crates/engine/tree/src/tree/mod.rs | 10 +- 7 files changed, 143 insertions(+), 10 deletions(-) create mode 100644 crates/engine/tree/src/tree/invalid_headers.rs diff --git a/Cargo.lock b/Cargo.lock index c6fd91d3892c9..2b68d5275d742 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7291,7 +7291,6 @@ dependencies = [ "proptest", "rand 0.8.5", "rayon", - "reth-beacon-consensus", "reth-blockchain-tree-api", "reth-chain-state", "reth-chainspec", @@ -7327,6 +7326,7 @@ dependencies = [ "reth-trie-parallel", "reth-trie-sparse", "revm-primitives", + "schnellru", "thiserror 2.0.9", "tokio", "tracing", diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 572f0955045b9..7376bf238f2f8 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -12,7 +12,6 @@ workspace = true [dependencies] # reth -reth-beacon-consensus.workspace = true reth-blockchain-tree-api.workspace = true reth-chain-state.workspace = true reth-chainspec = { workspace = true, optional = true } @@ -57,6 +56,7 @@ metrics.workspace = true reth-metrics = { workspace = true, features = ["common"] } # misc +schnellru.workspace = true rayon.workspace = true tracing.workspace = true derive_more.workspace = true diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index 262c642f0a87f..26c5b405de064 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -323,8 +323,8 @@ mod tests { use 
     use alloy_consensus::Header;
     use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT;
     use assert_matches::assert_matches;
-    use reth_beacon_consensus::EthBeaconConsensus;
     use reth_chainspec::{ChainSpecBuilder, MAINNET};
+    use reth_ethereum_consensus::EthBeaconConsensus;
     use reth_network_p2p::test_utils::TestFullBlockClient;
     use reth_primitives::SealedHeader;
     use std::{future::poll_fn, sync::Arc};
diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs
index dfc68fb73b399..fa92cba28f8d1 100644
--- a/crates/engine/tree/src/engine.rs
+++ b/crates/engine/tree/src/engine.rs
@@ -7,9 +7,8 @@ use crate::{
 };
 use alloy_primitives::B256;
 use futures::{Stream, StreamExt};
-use reth_beacon_consensus::BeaconConsensusEngineEvent;
 use reth_chain_state::ExecutedBlock;
-use reth_engine_primitives::{BeaconEngineMessage, EngineTypes};
+use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineTypes};
 use reth_primitives::{NodePrimitives, SealedBlockWithSenders};
 use reth_primitives_traits::Block;
 use std::{
diff --git a/crates/engine/tree/src/tree/config.rs b/crates/engine/tree/src/tree/config.rs
index 34a6e4d0095fb..c0c68799aee0a 100644
--- a/crates/engine/tree/src/tree/config.rs
+++ b/crates/engine/tree/src/tree/config.rs
@@ -1,5 +1,14 @@
 //! Engine tree configuration.
 
+use alloy_eips::merge::EPOCH_SLOTS;
+
+/// The largest gap for which the tree will be used for sync. See docs for `pipeline_run_threshold`
+/// for more information.
+///
+/// This is the default threshold, the distance to the head that the tree will be used for sync.
+/// If the distance exceeds this threshold, the pipeline will be used for sync.
+pub(crate) const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS;
+
 /// Triggers persistence when the number of canonical blocks in memory exceeds this threshold.
 pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2;
 
diff --git a/crates/engine/tree/src/tree/invalid_headers.rs b/crates/engine/tree/src/tree/invalid_headers.rs
new file mode 100644
index 0000000000000..8472d44a32387
--- /dev/null
+++ b/crates/engine/tree/src/tree/invalid_headers.rs
@@ -0,0 +1,125 @@
+use alloy_eips::eip1898::BlockWithParent;
+use alloy_primitives::B256;
+use reth_metrics::{
+    metrics::{Counter, Gauge},
+    Metrics,
+};
+use schnellru::{ByLength, LruMap};
+use std::fmt::Debug;
+use tracing::warn;
+
+/// The max hit counter for invalid headers in the cache before it is forcefully evicted.
+///
+/// In other words, if a header is referenced more than this number of times, it will be evicted to
+/// allow for reprocessing.
+const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128;
+
+/// Keeps track of invalid headers.
+#[derive(Debug)]
+pub(super) struct InvalidHeaderCache {
+    /// This maps a header hash to a reference to its invalid ancestor.
+    headers: LruMap<B256, HeaderEntry, ByLength>,
+    /// Metrics for the cache.
+    metrics: InvalidHeaderCacheMetrics,
+}
+
+impl InvalidHeaderCache {
+    /// Invalid header cache constructor.
+    pub(super) fn new(max_length: u32) -> Self {
+        Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() }
+    }
+
+    fn insert_entry(&mut self, hash: B256, header: BlockWithParent) {
+        self.headers.insert(hash, HeaderEntry { header, hit_count: 0 });
+    }
+
+    /// Returns the invalid ancestor's header if it exists in the cache.
+    ///
+    /// If this is called, the hit count for the entry is incremented.
+    /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned.
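+    /// Evicting on overuse lets a frequently referenced header be reprocessed instead of
+    /// staying cached as invalid indefinitely.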
+    pub(super) fn get(&mut self, hash: &B256) -> Option<BlockWithParent> {
+        {
+            let entry = self.headers.get(hash)?;
+            entry.hit_count += 1;
+            if entry.hit_count < INVALID_HEADER_HIT_EVICTION_THRESHOLD {
+                return Some(entry.header)
+            }
+        }
+        // if we get here, the entry has been hit too many times, so we evict it
+        self.headers.remove(hash);
+        self.metrics.hit_evictions.increment(1);
+        None
+    }
+
+    /// Inserts an invalid block into the cache, with a given invalid ancestor.
+    pub(super) fn insert_with_invalid_ancestor(
+        &mut self,
+        header_hash: B256,
+        invalid_ancestor: BlockWithParent,
+    ) {
+        if self.get(&header_hash).is_none() {
+            warn!(target: "consensus::engine", hash=?header_hash, ?invalid_ancestor, "Bad block with existing invalid ancestor");
+            self.insert_entry(header_hash, invalid_ancestor);
+
+            // update metrics
+            self.metrics.known_ancestor_inserts.increment(1);
+            self.metrics.count.set(self.headers.len() as f64);
+        }
+    }
+
+    /// Inserts an invalid ancestor into the map.
+    pub(super) fn insert(&mut self, invalid_ancestor: BlockWithParent) {
+        if self.get(&invalid_ancestor.block.hash).is_none() {
+            warn!(target: "consensus::engine", ?invalid_ancestor, "Bad block with hash");
+            self.insert_entry(invalid_ancestor.block.hash, invalid_ancestor);
+
+            // update metrics
+            self.metrics.unique_inserts.increment(1);
+            self.metrics.count.set(self.headers.len() as f64);
+        }
+    }
+}
+
+struct HeaderEntry {
+    /// Keeps track how many times this header has been hit.
+    hit_count: u8,
+    /// The actual header entry
+    header: BlockWithParent,
+}
+
+/// Metrics for the invalid headers cache.
+#[derive(Metrics)]
+#[metrics(scope = "consensus.engine.beacon.invalid_headers")]
+struct InvalidHeaderCacheMetrics {
+    /// The total number of invalid headers in the cache.
+    count: Gauge,
+    /// The number of inserts with a known ancestor.
+    known_ancestor_inserts: Counter,
+    /// The number of unique invalid header inserts (i.e. without a known ancestor).
+    unique_inserts: Counter,
+    /// The number of times a header was evicted from the cache because it was hit too many times.
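+    /// See [`INVALID_HEADER_HIT_EVICTION_THRESHOLD`].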
+    hit_evictions: Counter,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use alloy_consensus::Header;
+    use reth_primitives::SealedHeader;
+
+    #[test]
+    fn test_hit_eviction() {
+        let mut cache = InvalidHeaderCache::new(10);
+        let header = Header::default();
+        let header = SealedHeader::seal(header);
+        cache.insert(header.block_with_parent());
+        assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, 0);
+
+        for hit in 1..INVALID_HEADER_HIT_EVICTION_THRESHOLD {
+            assert!(cache.get(&header.hash()).is_some());
+            assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, hit);
+        }
+
+        assert!(cache.get(&header.hash()).is_none());
+    }
+}
diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs
index 7726ba4897a5e..1db3e4a70d980 100644
--- a/crates/engine/tree/src/tree/mod.rs
+++ b/crates/engine/tree/src/tree/mod.rs
@@ -16,9 +16,6 @@ use alloy_rpc_types_engine::{
     PayloadValidationError,
 };
 use block_buffer::BlockBuffer;
-use reth_beacon_consensus::{
-    BeaconConsensusEngineEvent, InvalidHeaderCache, MIN_BLOCKS_FOR_PIPELINE_RUN,
-};
 use reth_blockchain_tree_api::{
     error::{InsertBlockErrorKindTwo, InsertBlockErrorTwo, InsertBlockFatalError},
     BlockStatus2, InsertPayloadOk2,
@@ -29,8 +26,9 @@ use reth_chain_state::{
 use reth_consensus::{Consensus, FullConsensus, PostExecutionInput};
 pub use reth_engine_primitives::InvalidBlockHook;
 use reth_engine_primitives::{
-    BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes,
-    EngineValidator, ForkchoiceStateTracker, OnForkChoiceUpdated,
+    BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconOnNewPayloadError,
+    EngineApiMessageVersion, EngineTypes, EngineValidator, ForkchoiceStateTracker,
+    OnForkChoiceUpdated,
 };
 use reth_errors::{ConsensusError, ProviderResult};
 use reth_evm::{execute::BlockExecutorProvider, system_calls::OnStateHook};
@@ -81,11 +79,13 @@ use tracing::*;
 mod block_buffer;
 pub mod config;
 mod invalid_block_hook;
+mod invalid_headers;
 mod metrics;
 mod persistence_state;
 pub mod root;
 mod trie_updates;
 
+use crate::tree::{config::MIN_BLOCKS_FOR_PIPELINE_RUN, invalid_headers::InvalidHeaderCache};
 pub use config::TreeConfig;
 pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook};
 pub use persistence_state::PersistenceState;
From 856479bfcff09ab8244b3c39aa5c3e3d3d09710b Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 8 Jan 2025 12:21:47 +0100
Subject: [PATCH 017/113] chore: rm unused hooks (#13721)

---
 crates/node/builder/src/launch/engine.rs | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs
index e4e247e239d32..f1e5249be40de 100644
--- a/crates/node/builder/src/launch/engine.rs
+++ b/crates/node/builder/src/launch/engine.rs
@@ -2,10 +2,7 @@
 
 use alloy_consensus::BlockHeader;
 use futures::{future::Either, stream, stream_select, StreamExt};
-use reth_beacon_consensus::{
-    hooks::{EngineHooks, StaticFileHook},
-    BeaconConsensusEngineHandle,
-};
+use reth_beacon_consensus::BeaconConsensusEngineHandle;
 use reth_chainspec::EthChainSpec;
 use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider};
 use reth_db_api::{
@@ -166,14 +163,9 @@ where
             .maybe_store_messages(node_config.debug.engine_api_store.clone());
 
         let max_block = ctx.max_block(network_client.clone()).await?;
-        let mut hooks = EngineHooks::new();
 
         let static_file_producer = ctx.static_file_producer();
         let static_file_producer_events = static_file_producer.lock().events();
-        hooks.add(StaticFileHook::new(
-            static_file_producer.clone(),
-            Box::new(ctx.task_executor().clone()),
-        ));
         info!(target: "reth::cli", "StaticFileProducer initialized");
 
         let consensus = Arc::new(ctx.components().consensus().clone());
From baf92e33fb4367fc1d4f5fc5a207d0cdba039291 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 8 Jan 2025 12:49:54 +0100
Subject: [PATCH 018/113] chore: rm beacon consensus deps everywhere (#13722)

---
 Cargo.lock                                          | 7 +------
 bin/reth/Cargo.toml                                 | 1 -
 bin/reth/src/commands/debug_cmd/build_block.rs      | 3 +--
 bin/reth/src/commands/debug_cmd/execution.rs        | 3 +--
 bin/reth/src/commands/debug_cmd/in_memory_merkle.rs | 3 +--
 bin/reth/src/commands/debug_cmd/merkle.rs           | 3 +--
 bin/reth/src/lib.rs                                 | 4 ++--
 crates/engine/local/Cargo.toml                      | 1 -
 crates/engine/local/src/service.rs                  | 8 +++++---
 crates/engine/service/Cargo.toml                    | 2 +-
 crates/engine/service/src/service.rs                | 10 ++++++----
 crates/ethereum/node/src/lib.rs                     | 1 +
 crates/node/builder/Cargo.toml                      | 1 -
 crates/node/builder/src/launch/engine.rs            | 5 ++---
 crates/node/events/Cargo.toml                       | 1 -
 crates/node/events/src/node.rs                      | 5 +++--
 crates/optimism/node/Cargo.toml                     | 2 --
 crates/storage/provider/src/providers/mod.rs        | 5 +++++
 18 files changed, 30 insertions(+), 35 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 2b68d5275d742..0d37ea75b5ec9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6400,7 +6400,6 @@ dependencies = [
 "eyre",
 "futures",
 "reth-basic-payload-builder",
- "reth-beacon-consensus",
 "reth-chainspec",
 "reth-cli",
 "reth-cli-commands",
@@ -7204,7 +7203,6 @@ dependencies = [
 "eyre",
 "futures-util",
 "op-alloy-rpc-types-engine",
- "reth-beacon-consensus",
 "reth-chainspec",
 "reth-consensus",
 "reth-engine-primitives",
@@ -7251,11 +7249,11 @@ dependencies = [
 "futures",
 "pin-project",
- "reth-beacon-consensus",
 "reth-chainspec",
 "reth-consensus",
 "reth-engine-primitives",
 "reth-engine-tree",
+ "reth-ethereum-consensus",
 "reth-ethereum-engine-primitives",
 "reth-evm",
 "reth-evm-ethereum",
@@ -8043,7 +8041,6 @@ dependencies = [
 "futures",
 "jsonrpsee",
 "rayon",
- "reth-beacon-consensus",
 "reth-chain-state",
 "reth-chainspec",
 "reth-cli-util",
@@ -8203,7 +8200,6 @@ dependencies = [
 "futures",
 "humantime",
 "pin-project",
- "reth-beacon-consensus",
 "reth-engine-primitives",
 "reth-network-api",
 "reth-primitives-traits",
@@ -8398,7 +8394,6 @@ dependencies = [
 "op-alloy-rpc-types-engine",
 "parking_lot",
 "reth-basic-payload-builder",
- "reth-beacon-consensus",
 "reth-chainspec",
 "reth-consensus",
 "reth-db",
diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml
index 9730c18044b07..f7bdfd8ceed20 100644
--- a/bin/reth/Cargo.toml
+++ b/bin/reth/Cargo.toml
@@ -31,7 +31,6 @@ reth-stages.workspace = true
 reth-execution-types.workspace = true
 reth-errors.workspace = true
 reth-transaction-pool.workspace = true
-reth-beacon-consensus.workspace = true
 reth-cli-runner.workspace = true
 reth-cli-commands.workspace = true
 reth-cli-util.workspace = true
diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs
index 1fd437697c995..f4ab8150eb2d3 100644
--- a/bin/reth/src/commands/debug_cmd/build_block.rs
+++ b/bin/reth/src/commands/debug_cmd/build_block.rs
@@ -12,7 +12,6 @@ use eyre::Context;
 use reth_basic_payload_builder::{
     BuildArguments, BuildOutcome, Cancelled, PayloadBuilder, PayloadConfig,
 };
-use reth_beacon_consensus::EthBeaconConsensus;
 use reth_chainspec::ChainSpec;
 use reth_cli::chainspec::ChainSpecParser;
 use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
@@ -24,7 +23,7 @@ use reth_evm::execute::{BlockExecutorProvider, Executor};
 use reth_execution_types::ExecutionOutcome;
 use reth_fs_util as fs;
 use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes};
-use reth_node_ethereum::{EthEvmConfig, EthExecutorProvider};
+use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthExecutorProvider};
 use reth_primitives::{
     BlockExt, EthPrimitives, SealedBlockFor, SealedBlockWithSenders, SealedHeader, Transaction,
     TransactionSigned,
diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs
index a7af54d573b7e..cc0c701067507 100644
--- a/bin/reth/src/commands/debug_cmd/execution.rs
+++ b/bin/reth/src/commands/debug_cmd/execution.rs
@@ -5,7 +5,6 @@ use alloy_eips::BlockHashOrNumber;
 use alloy_primitives::{BlockNumber, B256};
 use clap::Parser;
 use futures::StreamExt;
-use reth_beacon_consensus::EthBeaconConsensus;
 use reth_chainspec::ChainSpec;
 use reth_cli::chainspec::ChainSpecParser;
 use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
@@ -24,7 +23,7 @@ use reth_network::{BlockDownloaderProvider, NetworkHandle};
 use reth_network_api::NetworkInfo;
 use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient};
 use reth_node_api::NodeTypesWithDBAdapter;
-use reth_node_ethereum::EthExecutorProvider;
+use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider};
 use reth_node_events::node::NodeEvent;
 use reth_primitives::EthPrimitives;
 use reth_provider::{
diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs
index b0ac35ee577bb..d4c0f3c6c4088 100644
--- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs
+++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs
@@ -7,7 +7,6 @@ use crate::{
 use alloy_eips::BlockHashOrNumber;
 use backon::{ConstantBuilder, Retryable};
 use clap::Parser;
-use reth_beacon_consensus::EthBeaconConsensus;
 use reth_chainspec::ChainSpec;
 use reth_cli::chainspec::ChainSpecParser;
 use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
@@ -20,7 +19,7 @@ use reth_execution_types::ExecutionOutcome;
 use reth_network::{BlockDownloaderProvider, NetworkHandle};
 use reth_network_api::NetworkInfo;
 use reth_node_api::{BlockTy, NodePrimitives};
-use reth_node_ethereum::EthExecutorProvider;
+use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider};
 use reth_primitives::{BlockExt, EthPrimitives};
 use reth_provider::{
     providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory,
diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs
index 98d0889c89c0c..acc346d9e3ad9 100644
--- a/bin/reth/src/commands/debug_cmd/merkle.rs
+++ b/bin/reth/src/commands/debug_cmd/merkle.rs
@@ -3,7 +3,6 @@ use crate::{args::NetworkArgs, utils::get_single_header};
 use alloy_eips::BlockHashOrNumber;
 use backon::{ConstantBuilder, Retryable};
 use clap::Parser;
-use reth_beacon_consensus::EthBeaconConsensus;
 use reth_chainspec::ChainSpec;
 use reth_cli::chainspec::ChainSpecParser;
 use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs};
@@ -18,7 +17,7 @@ use reth_network::{BlockDownloaderProvider, NetworkHandle};
 use reth_network_api::NetworkInfo;
 use reth_network_p2p::full_block::FullBlockClient;
 use reth_node_api::{BlockTy, NodePrimitives};
-use reth_node_ethereum::EthExecutorProvider;
+use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider};
 use reth_primitives::EthPrimitives;
 use reth_provider::{
     providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider,
diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs
index 1c55669bec919..cbe1a1660773f 100644
--- a/bin/reth/src/lib.rs
+++ b/bin/reth/src/lib.rs
@@ -103,9 +103,9 @@ pub mod primitives {
     pub use reth_primitives::*;
 }
 
-/// Re-exported from `reth_beacon_consensus`.
+/// Re-exported from `reth_ethereum_consensus`.
 pub mod beacon_consensus {
-    pub use reth_beacon_consensus::*;
+    pub use reth_node_ethereum::consensus::*;
 }
 
 /// Re-exported from `reth_consensus`.
diff --git a/crates/engine/local/Cargo.toml b/crates/engine/local/Cargo.toml
index 10837b174053c..ad7657de80518 100644
--- a/crates/engine/local/Cargo.toml
+++ b/crates/engine/local/Cargo.toml
@@ -10,7 +10,6 @@ exclude.workspace = true
 
 [dependencies]
 # reth
-reth-beacon-consensus.workspace = true
 reth-chainspec.workspace = true
 reth-consensus.workspace = true
 reth-engine-primitives.workspace = true
diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs
index 12c24bd6816af..1b2888cee60f3 100644
--- a/crates/engine/local/src/service.rs
+++ b/crates/engine/local/src/service.rs
@@ -16,10 +16,9 @@ use std::{
 
 use crate::miner::{LocalMiner, MiningMode};
 use futures_util::{Stream, StreamExt};
-use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes};
 use reth_chainspec::EthChainSpec;
 use reth_consensus::{ConsensusError, FullConsensus};
-use reth_engine_primitives::{BeaconEngineMessage, EngineValidator};
+use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineValidator};
 use reth_engine_service::service::EngineMessageStream;
 use reth_engine_tree::{
     chain::{ChainEvent, HandlerEvent},
@@ -34,7 +33,10 @@ use reth_evm::execute::BlockExecutorProvider;
 use reth_node_types::BlockTy;
 use reth_payload_builder::PayloadBuilderHandle;
 use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes};
-use reth_provider::{providers::BlockchainProvider2, ChainSpecProvider, ProviderFactory};
+use reth_provider::{
+    providers::{BlockchainProvider2, EngineNodeTypes},
+    ChainSpecProvider, ProviderFactory,
+};
 use reth_prune::PrunerWithFactory;
 use reth_stages_api::MetricEventsSender;
 use tokio::sync::mpsc::UnboundedSender;
diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml
index 326bc06b5e350..ea5ce0e3a4609 100644
--- a/crates/engine/service/Cargo.toml
+++ b/crates/engine/service/Cargo.toml
@@ -12,7 +12,6 @@ workspace = true
 
 [dependencies]
 # reth
-reth-beacon-consensus.workspace = true
 reth-consensus.workspace = true
 reth-engine-tree.workspace = true
 reth-evm.workspace = true
@@ -36,6 +35,7 @@ thiserror.workspace = true
 
 [dev-dependencies]
 reth-engine-tree = { workspace = true, features = ["test-utils"] }
+reth-ethereum-consensus.workspace = true
 reth-ethereum-engine-primitives.workspace = true
 reth-evm-ethereum.workspace = true
 reth-exex-types.workspace = true
diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs
index 5d60182b6e994..9ce69220be80a 100644
--- a/crates/engine/service/src/service.rs
+++ b/crates/engine/service/src/service.rs
@@ -1,9 +1,8 @@
 use futures::{Stream, StreamExt};
 use pin_project::pin_project;
-use reth_beacon_consensus::{BeaconConsensusEngineEvent, EngineNodeTypes};
 use reth_chainspec::EthChainSpec;
 use reth_consensus::{ConsensusError, FullConsensus};
-use reth_engine_primitives::{BeaconEngineMessage, EngineValidator};
+use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineValidator};
 use reth_engine_tree::{
     backfill::PipelineSync,
     download::BasicBlockDownloader,
@@ -20,7 +19,10 @@ use reth_network_p2p::BlockClient;
 use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine};
 use reth_payload_builder::PayloadBuilderHandle;
 use reth_primitives::EthPrimitives;
-use reth_provider::{providers::BlockchainProvider2, ProviderFactory};
+use reth_provider::{
+    providers::{BlockchainProvider2, EngineNodeTypes},
+    ProviderFactory,
+};
 use reth_prune::PrunerWithFactory;
 use reth_stages_api::{MetricEventsSender, Pipeline};
 use reth_tasks::TaskSpawner;
@@ -150,10 +152,10 @@ pub struct EngineServiceError {}
 #[cfg(test)]
 mod tests {
     use super::*;
-    use reth_beacon_consensus::EthBeaconConsensus;
     use reth_chainspec::{ChainSpecBuilder, MAINNET};
     use reth_engine_primitives::BeaconEngineMessage;
     use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook};
+    use reth_ethereum_consensus::EthBeaconConsensus;
     use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator};
     use reth_evm_ethereum::execute::EthExecutorProvider;
     use reth_exex_types::FinishedExExHeight;
diff --git a/crates/ethereum/node/src/lib.rs b/crates/ethereum/node/src/lib.rs
index a51886dd1c410..0db3f2d17a110 100644
--- a/crates/ethereum/node/src/lib.rs
+++ b/crates/ethereum/node/src/lib.rs
@@ -21,6 +21,7 @@ pub use evm::{
     BasicBlockExecutorProvider, EthEvmConfig, EthExecutionStrategyFactory, EthExecutorProvider,
 };
 
+pub use reth_ethereum_consensus as consensus;
 pub mod node;
 pub use node::EthereumNode;
 
diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml
index 5218bc2d5e3db..9f08507f9f21b 100644
--- a/crates/node/builder/Cargo.toml
+++ b/crates/node/builder/Cargo.toml
@@ -13,7 +13,6 @@ workspace = true
 
 [dependencies]
 ## reth
-reth-beacon-consensus.workspace = true
 reth-chain-state.workspace = true
 reth-chainspec.workspace = true
 reth-cli-util.workspace = true
diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs
index f1e5249be40de..6a6cab80c26b4 100644
--- a/crates/node/builder/src/launch/engine.rs
+++ b/crates/node/builder/src/launch/engine.rs
@@ -2,7 +2,6 @@
 
 use alloy_consensus::BlockHeader;
 use futures::{future::Either, stream, stream_select, StreamExt};
-use reth_beacon_consensus::BeaconConsensusEngineHandle;
 use reth_chainspec::EthChainSpec;
 use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider};
 use reth_db_api::{
@@ -20,8 +19,8 @@ use reth_exex::ExExManagerHandle;
 use reth_network::{NetworkSyncUpdater, SyncState};
 use reth_network_api::BlockDownloaderProvider;
 use reth_node_api::{
-    BuiltPayload, FullNodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine,
-    PayloadAttributesBuilder, PayloadBuilder, PayloadTypes,
+    BeaconConsensusEngineHandle, BuiltPayload, FullNodeTypes, NodeTypesWithDBAdapter,
+    NodeTypesWithEngine, PayloadAttributesBuilder, PayloadBuilder, PayloadTypes,
 };
 use reth_node_core::{
     dirs::{ChainPath, DataDirPath},
diff --git a/crates/node/events/Cargo.toml b/crates/node/events/Cargo.toml
index 9629aecef9a20..0b57fa0110c65 100644
--- a/crates/node/events/Cargo.toml
+++ b/crates/node/events/Cargo.toml
@@ -13,7 +13,6 @@ workspace = true
 [dependencies]
 # reth
 reth-storage-api.workspace = true
-reth-beacon-consensus.workspace = true
 reth-network-api.workspace = true
 reth-stages.workspace = true
 reth-prune-types.workspace = true
diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs
index 4b38b4050bb40..00817b6a8834b 100644
--- a/crates/node/events/src/node.rs
+++ b/crates/node/events/src/node.rs
@@ -5,8 +5,9 @@ use alloy_consensus::{constants::GWEI_TO_WEI, BlockHeader};
 use alloy_primitives::{BlockNumber, B256};
 use alloy_rpc_types_engine::ForkchoiceState;
 use futures::Stream;
-use reth_beacon_consensus::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress};
-use reth_engine_primitives::ForkchoiceStatus;
+use reth_engine_primitives::{
+    BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress, ForkchoiceStatus,
+};
 use reth_network_api::PeersInfo;
 use reth_primitives_traits::{format_gas, format_gas_throughput, BlockBody, NodePrimitives};
 use reth_prune_types::PrunerEvent;
diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml
index cad466dfc8da7..25adb3d108932 100644
--- a/crates/optimism/node/Cargo.toml
+++ b/crates/optimism/node/Cargo.toml
@@ -30,7 +30,6 @@ reth-transaction-pool.workspace = true
 reth-network.workspace = true
 reth-evm.workspace = true
 reth-revm = { workspace = true, features = ["std"] }
-reth-beacon-consensus.workspace = true
 reth-trie-db.workspace = true
 reth-rpc-server-types.workspace = true
 reth-rpc-types-compat.workspace = true
@@ -94,7 +93,6 @@ optimism = [
     "reth-provider/optimism",
     "reth-optimism-evm/optimism",
     "reth-optimism-payload-builder/optimism",
-    "reth-beacon-consensus/optimism",
     "revm/optimism",
     "reth-optimism-rpc/optimism",
     "reth-engine-local/op",
diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs
index b8c7ce0c8b81a..f1c799cd2e30a 100644
--- a/crates/storage/provider/src/providers/mod.rs
+++ b/crates/storage/provider/src/providers/mod.rs
@@ -114,6 +114,11 @@ impl<T> NodeTypesForTree for T where
 {
 }
 
+/// Helper trait expressing requirements for node types to be used in engine.
+pub trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {}
+
+impl<T> EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {}
+
 /// Helper trait with requirements for [`ProviderNodeTypes`] to be used within legacy blockchain
 /// tree.
 pub trait TreeNodeTypes: ProviderNodeTypes + NodeTypesForTree {}
From dcd4b24ae1ce6f86642f2706820eb1a8e34bcd38 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 8 Jan 2025 13:13:41 +0100
Subject: [PATCH 019/113] chore!: rm beacon consensus crate (#13723)

---
 Cargo.lock                                    |   56 -
 Cargo.toml                                    |    2 -
 crates/consensus/beacon/Cargo.toml            |   94 -
 crates/consensus/beacon/src/engine/error.rs   |   44 -
 crates/consensus/beacon/src/engine/handle.rs  |    3 -
 .../beacon/src/engine/hooks/controller.rs     |  390 ---
 .../consensus/beacon/src/engine/hooks/mod.rs  |  129 -
 .../beacon/src/engine/hooks/prune.rs          |  203 --
 .../beacon/src/engine/hooks/static_file.rs    |  209 --
 .../beacon/src/engine/invalid_headers.rs      |  125 -
 crates/consensus/beacon/src/engine/metrics.rs |   32 -
 crates/consensus/beacon/src/engine/mod.rs     | 2961 -----------------
 crates/consensus/beacon/src/engine/sync.rs    |  672 ----
 .../consensus/beacon/src/engine/test_utils.rs |  467 ---
 crates/consensus/beacon/src/lib.rs            |   14 -
 15 files changed, 5401 deletions(-)
 delete mode 100644 crates/consensus/beacon/Cargo.toml
 delete mode 100644 crates/consensus/beacon/src/engine/error.rs
 delete mode 100644 crates/consensus/beacon/src/engine/handle.rs
 delete mode 100644 crates/consensus/beacon/src/engine/hooks/controller.rs
 delete mode 100644 crates/consensus/beacon/src/engine/hooks/mod.rs
 delete mode 100644 crates/consensus/beacon/src/engine/hooks/prune.rs
 delete mode 100644 crates/consensus/beacon/src/engine/hooks/static_file.rs
 delete mode 100644 crates/consensus/beacon/src/engine/invalid_headers.rs
 delete mode 100644 crates/consensus/beacon/src/engine/metrics.rs
 delete mode 100644 crates/consensus/beacon/src/engine/mod.rs
 delete mode 100644 crates/consensus/beacon/src/engine/sync.rs
 delete mode 100644 crates/consensus/beacon/src/engine/test_utils.rs
 delete mode 100644 crates/consensus/beacon/src/lib.rs

diff --git a/Cargo.lock b/Cargo.lock
index 0d37ea75b5ec9..c5dff17cf4c4d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6481,62 +6481,6 @@ dependencies = [
 "tracing",
 ]
 
-[[package]]
-name = "reth-beacon-consensus"
-version = "1.1.5"
-dependencies = [
- "alloy-consensus",
- "alloy-eips",
- "alloy-genesis",
- "alloy-primitives",
- "alloy-rpc-types-engine",
- "assert_matches",
- "futures",
- "itertools 0.13.0",
- "metrics",
- "reth-blockchain-tree",
- "reth-blockchain-tree-api",
- "reth-chainspec",
- "reth-codecs",
- "reth-config",
- "reth-consensus",
- "reth-db",
- "reth-db-api",
- "reth-downloaders",
- "reth-engine-primitives",
- "reth-errors",
- "reth-ethereum-consensus",
- "reth-ethereum-engine-primitives",
- "reth-evm",
- "reth-evm-ethereum",
- "reth-exex-types",
- "reth-metrics",
- "reth-network-p2p",
- "reth-node-types",
- "reth-payload-builder",
- "reth-payload-builder-primitives",
- "reth-payload-primitives",
- "reth-payload-validator",
- "reth-primitives",
- "reth-primitives-traits",
- "reth-provider",
- "reth-prune",
- "reth-prune-types",
- "reth-rpc-types-compat",
- "reth-stages",
- "reth-stages-api",
- "reth-static-file",
- "reth-tasks",
- "reth-testing-utils",
- "reth-tokio-util",
- "reth-tracing",
- "schnellru",
- "thiserror 2.0.9",
- "tokio",
- "tokio-stream",
- "tracing",
-]
-
 [[package]]
 name = "reth-bench"
 version = "1.1.5"
diff --git a/Cargo.toml b/Cargo.toml
index 0f0ea08e530f2..47d802c5ac148 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -20,7 +20,6 @@ members = [
     "crates/cli/runner/",
     "crates/cli/util/",
     "crates/config/",
-    "crates/consensus/beacon/",
    "crates/consensus/common/",
    "crates/consensus/consensus/",
    "crates/consensus/debug-client/",
@@ -304,7 +303,6 @@ overflow-checks = true
 op-reth = { path = "crates/optimism/bin" }
 reth = { path = "bin/reth" }
 reth-basic-payload-builder = { path = "crates/payload/basic" }
-reth-beacon-consensus = { path = "crates/consensus/beacon" }
 reth-bench = { path = "bin/reth-bench" }
 reth-blockchain-tree = { path = "crates/blockchain-tree" }
 reth-blockchain-tree-api = { path = "crates/blockchain-tree-api" }
diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml
deleted file mode 100644
index b937eb2b4683b..0000000000000
--- a/crates/consensus/beacon/Cargo.toml
+++ /dev/null
@@ -1,94 +0,0 @@
-[package]
-name = "reth-beacon-consensus"
-version.workspace = true
-edition.workspace = true
-rust-version.workspace = true
-license.workspace = true
-homepage.workspace = true
-repository.workspace = true
-
-[lints]
-workspace = true
-
-[dependencies]
-# reth
-reth-ethereum-consensus.workspace = true
-reth-blockchain-tree-api.workspace = true
-reth-codecs.workspace = true
-reth-db-api.workspace = true
-reth-primitives.workspace = true
-reth-primitives-traits.workspace = true
-reth-stages-api.workspace = true
-reth-errors.workspace = true
-reth-provider.workspace = true
-reth-tasks.workspace = true
-reth-payload-builder.workspace = true
-reth-payload-builder-primitives.workspace = true
-reth-payload-primitives.workspace = true
-reth-payload-validator.workspace = true
-reth-prune.workspace = true
-reth-static-file.workspace = true
-reth-tokio-util.workspace = true
-reth-engine-primitives.workspace = true
-reth-network-p2p.workspace = true
-reth-node-types.workspace = true
-reth-chainspec = { workspace = true, optional = true }
-
-# ethereum
-alloy-primitives.workspace = true
-alloy-rpc-types-engine = { workspace = true, features = ["std"] }
-alloy-eips.workspace = true
-alloy-consensus.workspace = true
-
-# async
-tokio = { workspace = true, features = ["sync"] }
-tokio-stream.workspace = true
-futures.workspace = true
-
-# metrics
-reth-metrics.workspace = true
-metrics.workspace = true
-
-# misc
-tracing.workspace = true
-thiserror.workspace = true
-schnellru.workspace = true
-itertools.workspace = true
-
-[dev-dependencies]
-# reth
-reth-payload-builder = { workspace = true, features = ["test-utils"] }
-reth-primitives = { workspace = true, features = ["test-utils"] }
-reth-consensus = { workspace = true, features = ["test-utils"] }
-reth-stages = { workspace = true, features = ["test-utils"] }
-reth-blockchain-tree = { workspace = true, features = ["test-utils"] }
-reth-db = { workspace = true, features = ["test-utils"] }
-reth-db-api.workspace = true
-reth-provider = { workspace = true, features = ["test-utils"] }
-reth-evm = { workspace = true, features = ["test-utils"] }
-reth-network-p2p = { workspace = true, features = ["test-utils"] }
-reth-rpc-types-compat.workspace = true
-reth-tracing.workspace = true
-reth-downloaders.workspace = true
-reth-evm-ethereum.workspace = true
-reth-ethereum-engine-primitives.workspace = true
-reth-config.workspace = true
-reth-testing-utils.workspace = true
-reth-exex-types.workspace = true
-reth-prune-types.workspace = true
-reth-chainspec.workspace = true
-alloy-genesis.workspace = true
-assert_matches.workspace = true
-
-[features]
-optimism = [
-    "reth-blockchain-tree/optimism",
-    "reth-codecs/op",
-    "reth-chainspec",
-    "reth-db-api/optimism",
-    "reth-db/optimism",
-    "reth-downloaders/optimism",
-    "reth-primitives/optimism",
-    "reth-provider/optimism",
-    "reth-downloaders/optimism",
-]
diff --git a/crates/consensus/beacon/src/engine/error.rs b/crates/consensus/beacon/src/engine/error.rs
deleted file mode 100644
index 6fabfbf031b13..0000000000000
--- a/crates/consensus/beacon/src/engine/error.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-use crate::engine::hooks::EngineHookError;
-pub use reth_engine_primitives::BeaconForkChoiceUpdateError;
-use reth_errors::{DatabaseError, RethError};
-use reth_stages_api::PipelineError;
-
-/// Beacon engine result.
-pub type BeaconEngineResult<Ok> = Result<Ok, BeaconConsensusEngineError>;
-
-/// The error type for the beacon consensus engine service
-/// [`BeaconConsensusEngine`](crate::BeaconConsensusEngine)
-///
-/// Represents all possible error cases for the beacon consensus engine.
-#[derive(Debug, thiserror::Error)]
-pub enum BeaconConsensusEngineError {
-    /// Pipeline channel closed.
-    #[error("pipeline channel closed")]
-    PipelineChannelClosed,
-    /// Pipeline error.
-    #[error(transparent)]
-    Pipeline(#[from] Box<PipelineError>),
-    /// Pruner channel closed.
-    #[error("pruner channel closed")]
-    PrunerChannelClosed,
-    /// Hook error.
-    #[error(transparent)]
-    Hook(#[from] EngineHookError),
-    /// Common error. Wrapper around [`RethError`].
-    #[error(transparent)]
-    Common(#[from] RethError),
-}
-
-// box the pipeline error as it is a large enum.
-impl From<PipelineError> for BeaconConsensusEngineError {
-    fn from(e: PipelineError) -> Self {
-        Self::Pipeline(Box::new(e))
-    }
-}
-
-// for convenience in the beacon engine
-impl From<DatabaseError> for BeaconConsensusEngineError {
-    fn from(e: DatabaseError) -> Self {
-        Self::Common(e.into())
-    }
-}
diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs
deleted file mode 100644
index e4f291c06451a..0000000000000
--- a/crates/consensus/beacon/src/engine/handle.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-//! `BeaconConsensusEngine` external API
-
-pub use reth_engine_primitives::BeaconConsensusEngineHandle;
diff --git a/crates/consensus/beacon/src/engine/hooks/controller.rs b/crates/consensus/beacon/src/engine/hooks/controller.rs
deleted file mode 100644
index 544a4c564b786..0000000000000
--- a/crates/consensus/beacon/src/engine/hooks/controller.rs
+++ /dev/null
@@ -1,390 +0,0 @@
-use crate::hooks::{
-    EngineHook, EngineHookContext, EngineHookDBAccessLevel, EngineHookError, EngineHookEvent,
-    EngineHooks,
-};
-use std::{
-    collections::VecDeque,
-    task::{Context, Poll},
-};
-use tracing::debug;
-
-#[derive(Debug)]
-pub(crate) struct PolledHook {
-    pub(crate) name: &'static str,
-    pub(crate) event: EngineHookEvent,
-    pub(crate) db_access_level: EngineHookDBAccessLevel,
-}
-
-/// Manages hooks under the control of the engine.
-///
-/// This type polls the initialized hooks one by one, respecting the DB access level
-/// (i.e. [`crate::hooks::EngineHookDBAccessLevel::ReadWrite`] that enforces running at most one
-/// such hook).
-pub(crate) struct EngineHooksController {
-    /// Collection of hooks.
-    ///
-    /// Hooks might be removed from the collection, and returned upon completion.
-    /// In the current implementation, it only happens when moved to `active_db_write_hook`.
-    hooks: VecDeque<Box<dyn EngineHook>>,
-    /// Currently running hook with DB write access, if any.
-    active_db_write_hook: Option<Box<dyn EngineHook>>,
-}
-
-impl EngineHooksController {
-    /// Creates a new [`EngineHooksController`].
-    pub(crate) fn new(hooks: EngineHooks) -> Self {
-        Self { hooks: hooks.inner.into(), active_db_write_hook: None }
-    }
-
-    /// Polls currently running hook with DB write access, if any.
-    ///
-    /// Returns [`Poll::Ready`] if currently running hook with DB write access returned
-    /// an [event][`crate::hooks::EngineHookEvent`].
-    ///
-    /// Returns [`Poll::Pending`] in all other cases:
-    /// 1. No hook with DB write access is running.
-    /// 2. Currently running hook with DB write access returned [`Poll::Pending`] on polling.
-    /// 3. Currently running hook with DB write access returned [`Poll::Ready`] on polling, but no
-    ///    action to act upon.
-    pub(crate) fn poll_active_db_write_hook(
-        &mut self,
-        cx: &mut Context<'_>,
-        args: EngineHookContext,
-    ) -> Poll<RethResult<PolledHook>> {
-        let Some(mut hook) = self.active_db_write_hook.take() else { return Poll::Pending };
-
-        match hook.poll(cx, args)? {
-            Poll::Ready(event) => {
-                let result = PolledHook {
-                    name: hook.name(),
-                    event,
-                    db_access_level: hook.db_access_level(),
-                };
-
-                debug!(
-                    target: "consensus::engine::hooks",
-                    hook = hook.name(),
-                    ?result,
-                    "Polled running hook with db write access"
-                );
-
-                if result.event.is_finished() {
-                    self.hooks.push_back(hook);
-                } else {
-                    self.active_db_write_hook = Some(hook);
-                }
-
-                return Poll::Ready(Ok(result))
-            }
-            Poll::Pending => {
-                self.active_db_write_hook = Some(hook);
-            }
-        }
-
-        Poll::Pending
-    }
-
-    /// Polls next engine from the collection.
-    ///
-    /// Returns [`Poll::Ready`] if next hook returned an [event][`crate::hooks::EngineHookEvent`].
-    ///
-    /// Returns [`Poll::Pending`] in all other cases:
-    /// 1. Next hook is [`Option::None`], i.e. taken, meaning it's currently running and has a DB
-    ///    write access.
-    /// 2. Next hook needs a DB write access, but either there's another hook with DB write access
-    ///    running, or `db_write_active` passed into arguments is `true`.
-    /// 3. Next hook returned [`Poll::Pending`] on polling.
-    /// 4. Next hook returned [`Poll::Ready`] on polling, but no action to act upon.
-    pub(crate) fn poll_next_hook(
-        &mut self,
-        cx: &mut Context<'_>,
-        args: EngineHookContext,
-        db_write_active: bool,
-    ) -> Poll<RethResult<PolledHook>> {
-        let Some(mut hook) = self.hooks.pop_front() else { return Poll::Pending };
-
-        let result = self.poll_next_hook_inner(cx, &mut hook, args, db_write_active);
-
-        if matches!(
-            result,
-            Poll::Ready(Ok(PolledHook {
-                event: EngineHookEvent::Started,
-                db_access_level: EngineHookDBAccessLevel::ReadWrite,
-                ..
-            }))
-        ) {
-            // If a read-write hook started, set `active_db_write_hook` to it
-            self.active_db_write_hook = Some(hook);
-        } else {
-            // Otherwise, push it back to the collection of hooks to poll it next time
-            self.hooks.push_back(hook);
-        }
-
-        result
-    }
-
-    fn poll_next_hook_inner(
-        &self,
-        cx: &mut Context<'_>,
-        hook: &mut Box<dyn EngineHook>,
-        args: EngineHookContext,
-        db_write_active: bool,
-    ) -> Poll<RethResult<PolledHook>> {
-        // Hook with DB write access level is not allowed to run due to any of the following
-        // reasons:
-        // - An already running hook with DB write access level
-        // - Active DB write according to passed argument
-        // - Missing a finalized block number. We might be on an optimistic sync scenario where we
-        //   cannot skip the FCU with the finalized hash, otherwise CL might misbehave.
-        if hook.db_access_level().is_read_write() &&
-            (self.active_db_write_hook.is_some() ||
-                db_write_active ||
-                args.finalized_block_number.is_none())
-        {
-            return Poll::Pending
-        }
-
-        if let Poll::Ready(event) = hook.poll(cx, args)? {
-            let result =
-                PolledHook { name: hook.name(), event, db_access_level: hook.db_access_level() };
-
-            debug!(
-                target: "consensus::engine::hooks",
-                hook = hook.name(),
-                ?result,
-                "Polled next hook"
-            );
-
-            return Poll::Ready(Ok(result))
-        }
-        debug!(target: "consensus::engine::hooks", hook = hook.name(), "Next hook is not ready");
-
-        Poll::Pending
-    }
-
-    /// Returns a running hook with DB write access, if there's any.
-    pub(crate) fn active_db_write_hook(&self) -> Option<&dyn EngineHook> {
-        self.active_db_write_hook.as_ref().map(|hook| hook.as_ref())
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use crate::hooks::{
-        EngineHook, EngineHookContext, EngineHookDBAccessLevel, EngineHookEvent, EngineHooks,
-        EngineHooksController,
-    };
-    use futures::poll;
-    use reth_errors::{RethError, RethResult};
-    use std::{
-        collections::VecDeque,
-        future::poll_fn,
-        task::{Context, Poll},
-    };
-
-    struct TestHook {
-        results: VecDeque<RethResult<EngineHookEvent>>,
-        name: &'static str,
-        access_level: EngineHookDBAccessLevel,
-    }
-
-    impl TestHook {
-        fn new_ro(name: &'static str) -> Self {
-            Self {
-                results: Default::default(),
-                name,
-                access_level: EngineHookDBAccessLevel::ReadOnly,
-            }
-        }
-        fn new_rw(name: &'static str) -> Self {
-            Self {
-                results: Default::default(),
-                name,
-                access_level: EngineHookDBAccessLevel::ReadWrite,
-            }
-        }
-
-        fn add_result(&mut self, result: RethResult<EngineHookEvent>) {
-            self.results.push_back(result);
-        }
-    }
-
-    impl EngineHook for TestHook {
-        fn name(&self) -> &'static str {
-            self.name
-        }
-
-        fn poll(
-            &mut self,
-            _cx: &mut Context<'_>,
-            _ctx: EngineHookContext,
-        ) -> Poll<RethResult<EngineHookEvent>> {
-            self.results.pop_front().map_or(Poll::Pending, Poll::Ready)
-        }
-
-        fn db_access_level(&self) -> EngineHookDBAccessLevel {
-            self.access_level
-        }
-    }
-
-    #[tokio::test]
-    async fn poll_active_db_write_hook() {
-        let mut controller = EngineHooksController::new(EngineHooks::new());
-
-        let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) };
-
-        // No currently running hook with DB write access is set
-        let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context)));
-        assert!(result.is_pending());
-
-        // Currently running hook with DB write access returned `Pending` on polling
-        controller.active_db_write_hook = Some(Box::new(TestHook::new_rw("read-write")));
-
-        let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context)));
-        assert!(result.is_pending());
-
-        // Currently running hook with DB write access returned `Ready` on polling, but didn't
-        // return `EngineHookEvent::Finished` yet.
-        // Currently running hooks with DB write should still be set.
-        let mut hook = TestHook::new_rw("read-write");
-        hook.add_result(Ok(EngineHookEvent::Started));
-        controller.active_db_write_hook = Some(Box::new(hook));
-
-        let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context)));
-        assert_eq!(
-            result.map(|result| {
-                let polled_hook = result.unwrap();
-                polled_hook.event.is_started() && polled_hook.db_access_level.is_read_write()
-            }),
-            Poll::Ready(true)
-        );
-        assert!(controller.active_db_write_hook.is_some());
-        assert!(controller.hooks.is_empty());
-
-        // Currently running hook with DB write access returned `Ready` on polling and
-        // `EngineHookEvent::Finished` inside.
-        // Currently running hooks with DB write should be moved to collection of hooks.
-        let mut hook = TestHook::new_rw("read-write");
-        hook.add_result(Ok(EngineHookEvent::Finished(Ok(()))));
-        controller.active_db_write_hook = Some(Box::new(hook));
-
-        let result = poll!(poll_fn(|cx| controller.poll_active_db_write_hook(cx, context)));
-        assert_eq!(
-            result.map(|result| {
-                let polled_hook = result.unwrap();
-                polled_hook.event.is_finished() && polled_hook.db_access_level.is_read_write()
-            }),
-            Poll::Ready(true)
-        );
-        assert!(controller.active_db_write_hook.is_none());
-        assert!(controller.hooks.pop_front().is_some());
-    }
-
-    #[tokio::test]
-    async fn poll_next_hook_db_write_active() {
-        let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) };
-
-        let mut hook_rw = TestHook::new_rw("read-write");
-        hook_rw.add_result(Ok(EngineHookEvent::Started));
-
-        let hook_ro_name = "read-only";
-        let mut hook_ro = TestHook::new_ro(hook_ro_name);
-        hook_ro.add_result(Ok(EngineHookEvent::Started));
-
-        let mut hooks = EngineHooks::new();
-        hooks.add(hook_rw);
-        hooks.add(hook_ro);
-        let mut controller = EngineHooksController::new(hooks);
-
-        // Read-write hook can't be polled when external DB write is active
-        let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, true)));
-        assert!(result.is_pending());
-        assert!(controller.active_db_write_hook.is_none());
-
-        // Read-only hook can be polled when external DB write is active
-        let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, true)));
-        assert_eq!(
-            result.map(|result| {
-                let polled_hook = result.unwrap();
-                polled_hook.name == hook_ro_name &&
-                    polled_hook.event.is_started() &&
-                    polled_hook.db_access_level.is_read_only()
-            }),
-            Poll::Ready(true)
-        );
-    }
-
-    #[tokio::test]
-    async fn poll_next_hook_db_write_inactive() {
-        let context = EngineHookContext { tip_block_number: 2, finalized_block_number: Some(1) };
-
-        let hook_rw_1_name = "read-write-1";
-        let mut hook_rw_1 = TestHook::new_rw(hook_rw_1_name);
-        hook_rw_1.add_result(Ok(EngineHookEvent::Started));
-
-        let hook_rw_2_name = "read-write-2";
-        let mut hook_rw_2 = TestHook::new_rw(hook_rw_2_name);
-        hook_rw_2.add_result(Ok(EngineHookEvent::Started));
-
-        let hook_ro_name = "read-only";
-        let mut hook_ro = TestHook::new_ro(hook_ro_name);
-        hook_ro.add_result(Ok(EngineHookEvent::Started));
-        hook_ro.add_result(Err(RethError::msg("something went wrong")));
-
-        let mut hooks = EngineHooks::new();
-        hooks.add(hook_rw_1);
-        hooks.add(hook_rw_2);
-        hooks.add(hook_ro);
-
-        let mut controller = EngineHooksController::new(hooks);
-        let hooks_len = controller.hooks.len();
-
-        // Read-write hook can be polled because external DB write is not active
-        assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_rw_1_name));
-        let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false)));
-        assert_eq!(
-            result.map(|result| {
-                let polled_hook = result.unwrap();
-                polled_hook.name == hook_rw_1_name &&
-                    polled_hook.event.is_started() &&
-                    polled_hook.db_access_level.is_read_write()
-            }),
-            Poll::Ready(true)
-        );
-        assert_eq!(
-            controller.active_db_write_hook.as_ref().map(|hook| hook.name()),
-            Some(hook_rw_1_name)
-        );
-
-        // Read-write hook cannot be polled because another read-write hook is running
-        assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_rw_2_name));
-        let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false)));
-        assert!(result.is_pending());
-
-        // Read-only hook can be polled in parallel with already running read-write hook
-        assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_ro_name));
-        let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false)));
-        assert_eq!(
-            result.map(|result| {
-                let polled_hook = result.unwrap();
-                polled_hook.name == hook_ro_name &&
-                    polled_hook.event.is_started() &&
-                    polled_hook.db_access_level.is_read_only()
-            }),
-            Poll::Ready(true)
-        );
-
-        // Read-write hook still cannot be polled because another read-write hook is running
-        assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_rw_2_name));
-        let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false)));
-        assert!(result.is_pending());
-
-        // Read-only hook has finished with error
-        assert_eq!(controller.hooks.front().map(|hook| hook.name()), Some(hook_ro_name));
-        let result = poll!(poll_fn(|cx| controller.poll_next_hook(cx, context, false)));
-        assert_eq!(result.map(|result| { result.is_err() }), Poll::Ready(true));
-
-        assert!(controller.active_db_write_hook.is_some());
-        assert_eq!(controller.hooks.len(), hooks_len - 1)
-    }
-}
diff --git a/crates/consensus/beacon/src/engine/hooks/mod.rs b/crates/consensus/beacon/src/engine/hooks/mod.rs
deleted file mode 100644
index 828a6f9685001..0000000000000
--- a/crates/consensus/beacon/src/engine/hooks/mod.rs
+++ /dev/null
@@ -1,129 +0,0 @@
-use alloy_primitives::BlockNumber;
-use reth_errors::{RethError, RethResult};
-use std::{
-    fmt,
-    task::{Context, Poll},
-};
-
-mod controller;
-pub(crate) use controller::{EngineHooksController, PolledHook};
-
-mod prune;
-pub use prune::PruneHook;
-
-mod static_file;
-pub use static_file::StaticFileHook;
-
-/// Collection of [engine hooks][`EngineHook`].
-#[derive(Default)]
-pub struct EngineHooks {
-    inner: Vec<Box<dyn EngineHook>>,
-}
-
-impl fmt::Debug for EngineHooks {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("EngineHooks").field("inner", &self.inner.len()).finish()
-    }
-}
-
-impl EngineHooks {
-    /// Creates a new empty collection of [engine hooks][`EngineHook`].
-    pub fn new() -> Self {
-        Self { inner: Vec::new() }
-    }
-
-    /// Adds a new [engine hook][`EngineHook`] to the collection.
-    pub fn add<H: EngineHook>(&mut self, hook: H) {
-        self.inner.push(Box::new(hook))
-    }
-}
-
-/// Hook that will be run during the main loop of
-/// [consensus engine][`crate::engine::BeaconConsensusEngine`].
-pub trait EngineHook: Send + Sync + 'static {
-    /// Returns a human-readable name for the hook.
-    fn name(&self) -> &'static str;
-
-    /// Advances the hook execution, emitting an [event][`EngineHookEvent`].
-    fn poll(
-        &mut self,
-        cx: &mut Context<'_>,
-        ctx: EngineHookContext,
-    ) -> Poll<RethResult<EngineHookEvent>>;
-
-    /// Returns [db access level][`EngineHookDBAccessLevel`] the hook needs.
-    fn db_access_level(&self) -> EngineHookDBAccessLevel;
-}
-
-/// Engine context passed to the [hook polling function][`EngineHook::poll`].
-#[derive(Copy, Clone, Debug)]
-pub struct EngineHookContext {
-    /// Tip block number.
-    pub tip_block_number: BlockNumber,
-    /// Finalized block number, if known.
-    pub finalized_block_number: Option<BlockNumber>,
-}
-
-/// An event emitted when [hook][`EngineHook`] is polled.
-#[derive(Debug)]
-pub enum EngineHookEvent {
-    /// Hook is not ready.
-    ///
-    /// If this is returned, the hook is idle.
-    NotReady,
-    /// Hook started.
-    ///
-    /// If this is returned, the hook is running.
-    Started,
-    /// Hook finished.
-    ///
-    /// If this is returned, the hook is idle.
-    Finished(Result<(), EngineHookError>),
-}
-
-impl EngineHookEvent {
-    /// Returns `true` if the event is [`EngineHookEvent::Started`].
-    pub const fn is_started(&self) -> bool {
-        matches!(self, Self::Started)
-    }
-
-    /// Returns `true` if the event is [`EngineHookEvent::Finished`].
-    pub const fn is_finished(&self) -> bool {
-        matches!(self, Self::Finished(_))
-    }
-}
-
-/// An error returned by [hook][`EngineHook`].
-#[derive(Debug, thiserror::Error)]
-pub enum EngineHookError {
-    /// Hook channel closed.
-    #[error("hook channel closed")]
-    ChannelClosed,
-    /// Common error. Wrapper around [`RethError`].
-    #[error(transparent)]
-    Common(#[from] RethError),
-    /// An internal error occurred.
-    #[error(transparent)]
-    Internal(#[from] Box<dyn core::error::Error + Send + Sync>),
-}
-
-/// Level of database access the hook needs for execution.
-#[derive(Debug, Copy, Clone, Eq, PartialEq)]
-pub enum EngineHookDBAccessLevel {
-    /// Read-only database access.
-    ReadOnly,
-    /// Read-write database access.
-    ReadWrite,
-}
-
-impl EngineHookDBAccessLevel {
-    /// Returns `true` if the hook needs read-only access to the database.
-    pub const fn is_read_only(&self) -> bool {
-        matches!(self, Self::ReadOnly)
-    }
-
-    /// Returns `true` if the hook needs read-write access to the database.
-    pub const fn is_read_write(&self) -> bool {
-        matches!(self, Self::ReadWrite)
-    }
-}
diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs
deleted file mode 100644
index 409fc98b80bb2..0000000000000
--- a/crates/consensus/beacon/src/engine/hooks/prune.rs
+++ /dev/null
@@ -1,203 +0,0 @@
-//! Prune hook for the engine implementation.
-
-use crate::{
-    engine::hooks::{EngineHook, EngineHookContext, EngineHookError, EngineHookEvent},
-    hooks::EngineHookDBAccessLevel,
-};
-use alloy_primitives::BlockNumber;
-use futures::FutureExt;
-use metrics::Counter;
-use reth_errors::{RethError, RethResult};
-use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader, PruneCheckpointWriter};
-use reth_prune::{Pruner, PrunerError, PrunerWithResult};
-use reth_tasks::TaskSpawner;
-use std::{
-    fmt::{self, Debug},
-    task::{ready, Context, Poll},
-};
-use tokio::sync::oneshot;
-
-/// Manages pruning under the control of the engine.
-///
-/// This type controls the [Pruner].
-pub struct PruneHook<PF> {
-    /// The current state of the pruner.
-    pruner_state: PrunerState<PF>,
-    /// The type that can spawn the pruner task.
-    pruner_task_spawner: Box<dyn TaskSpawner>,
-    metrics: Metrics,
-}
-
-impl<PF> fmt::Debug for PruneHook<PF>
-where
-    PF: DatabaseProviderFactory + fmt::Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("PruneHook")
-            .field("pruner_state", &self.pruner_state)
-            .field("metrics", &self.metrics)
-            .finish()
-    }
-}
-
-impl<PF> PruneHook<PF> {
-    /// Create a new instance
-    pub fn new(
-        pruner: Pruner<PF>,
-        pruner_task_spawner: Box<dyn TaskSpawner>,
-    ) -> Self {
-        Self {
-            pruner_state: PrunerState::Idle(Some(pruner)),
-            pruner_task_spawner,
-            metrics: Metrics::default(),
-        }
-    }
-
-    /// Advances the pruner state.
-    ///
-    /// This checks for the result in the channel, or returns pending if the pruner is idle.
-    fn poll_pruner(&mut self, cx: &mut Context<'_>) -> Poll<RethResult<EngineHookEvent>> {
-        let result = match self.pruner_state {
-            PrunerState::Idle(_) => return Poll::Pending,
-            PrunerState::Running(ref mut fut) => {
-                ready!(fut.poll_unpin(cx))
-            }
-        };
-
-        let event = match result {
-            Ok((pruner, result)) => {
-                self.pruner_state = PrunerState::Idle(Some(pruner));
-
-                match result {
-                    Ok(_) => EngineHookEvent::Finished(Ok(())),
-                    Err(err) => EngineHookEvent::Finished(Err(err.into())),
-                }
-            }
-            Err(_) => {
-                // failed to receive the pruner
-                EngineHookEvent::Finished(Err(EngineHookError::ChannelClosed))
-            }
-        };
-
-        Poll::Ready(Ok(event))
-    }
-}
-
-impl<PF> PruneHook<PF>
-where
-    PF: DatabaseProviderFactory<Provider: PruneCheckpointReader + PruneCheckpointWriter>
-        + 'static,
-{
-    /// This will try to spawn the pruner if it is idle:
-    /// 1. Check if pruning is needed through [`Pruner::is_pruning_needed`].
-    ///
-    /// 2.1. If pruning is needed, pass tip block number to the [`Pruner::run`] and spawn it in a
-    ///      separate task. Set pruner state to [`PrunerState::Running`].
-    /// 2.2. If pruning is not needed, set pruner state back to [`PrunerState::Idle`].
-    ///
-    /// If pruner is already running, do nothing.
-    fn try_spawn_pruner(&mut self, tip_block_number: BlockNumber) -> Option<EngineHookEvent> {
-        match &mut self.pruner_state {
-            PrunerState::Idle(pruner) => {
-                let mut pruner = pruner.take()?;
-
-                // Check tip for pruning
-                if pruner.is_pruning_needed(tip_block_number) {
-                    let (tx, rx) = oneshot::channel();
-                    self.pruner_task_spawner.spawn_critical_blocking(
-                        "pruner task",
-                        Box::pin(async move {
-                            let result = pruner.run(tip_block_number);
-                            let _ = tx.send((pruner, result));
-                        }),
-                    );
-                    self.metrics.runs_total.increment(1);
-                    self.pruner_state = PrunerState::Running(rx);
-
-                    Some(EngineHookEvent::Started)
-                } else {
-                    self.pruner_state = PrunerState::Idle(Some(pruner));
-                    Some(EngineHookEvent::NotReady)
-                }
-            }
-            PrunerState::Running(_) => None,
-        }
-    }
-}
-
-impl<PF> EngineHook for PruneHook<PF>
-where
-    PF: DatabaseProviderFactory<Provider: PruneCheckpointReader + PruneCheckpointWriter>
-        + 'static,
-{
-    fn name(&self) -> &'static str {
-        "Prune"
-    }
-
-    fn poll(
-        &mut self,
-        cx: &mut Context<'_>,
-        ctx: EngineHookContext,
-    ) -> Poll<RethResult<EngineHookEvent>> {
-        // Try to spawn a pruner
-        match self.try_spawn_pruner(ctx.tip_block_number) {
-            Some(EngineHookEvent::NotReady) => return Poll::Pending,
-            Some(event) => return Poll::Ready(Ok(event)),
-            None => (),
-        }
-
-        // Poll pruner and check its status
-        self.poll_pruner(cx)
-    }
-
-    fn db_access_level(&self) -> EngineHookDBAccessLevel {
-        EngineHookDBAccessLevel::ReadWrite
-    }
-}
-
-/// The possible pruner states within the sync controller.
-///
-/// [`PrunerState::Idle`] means that the pruner is currently idle.
-/// [`PrunerState::Running`] means that the pruner is currently running.
-///
-/// NOTE: The differentiation between these two states is important, because when the pruner is
-/// running, it acquires the write lock over the database. This means that we cannot forward to the
-/// blockchain tree any messages that would result in database writes, since it would result in a
-/// deadlock.
-enum PrunerState<PF> {
-    /// Pruner is idle.
-    Idle(Option<Pruner<PF>>),
-    /// Pruner is running and waiting for a response
-    Running(oneshot::Receiver<PrunerWithResult<PF>>),
-}
-
-impl<PF> fmt::Debug for PrunerState<PF>
-where
-    PF: DatabaseProviderFactory + Debug,
-{
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            Self::Idle(f0) => f.debug_tuple("Idle").field(&f0).finish(),
-            Self::Running(f0) => f.debug_tuple("Running").field(&f0).finish(),
-        }
-    }
-}
-
-#[derive(reth_metrics::Metrics)]
-#[metrics(scope = "consensus.engine.prune")]
-struct Metrics {
-    /// The number of times the pruner was run.
-    runs_total: Counter,
-}
-
-impl From<PrunerError> for EngineHookError {
-    fn from(err: PrunerError) -> Self {
-        match err {
-            PrunerError::PruneSegment(_) | PrunerError::InconsistentData(_) => {
-                Self::Internal(Box::new(err))
-            }
-            PrunerError::Database(err) => RethError::Database(err).into(),
-            PrunerError::Provider(err) => RethError::Provider(err).into(),
-        }
-    }
-}
diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs
deleted file mode 100644
index 99387492c3bfa..0000000000000
--- a/crates/consensus/beacon/src/engine/hooks/static_file.rs
+++ /dev/null
@@ -1,209 +0,0 @@
-//! `StaticFile` hook for the engine implementation.
-
-use crate::{
-    engine::hooks::{EngineHook, EngineHookContext, EngineHookError, EngineHookEvent},
-    hooks::EngineHookDBAccessLevel,
-};
-use alloy_primitives::BlockNumber;
-use futures::FutureExt;
-use reth_codecs::Compact;
-use reth_db_api::table::Value;
-use reth_errors::RethResult;
-use reth_primitives::{static_file::HighestStaticFiles, NodePrimitives};
-use reth_provider::{
-    BlockReader, ChainStateBlockReader, DatabaseProviderFactory, StageCheckpointReader,
-    StaticFileProviderFactory,
-};
-use reth_static_file::{StaticFileProducer, StaticFileProducerWithResult};
-use reth_tasks::TaskSpawner;
-use std::task::{ready, Context, Poll};
-use tokio::sync::oneshot;
-use tracing::trace;
-
-/// Manages producing static files under the control of the engine.
-///
-/// This type controls the [`StaticFileProducer`].
-#[derive(Debug)]
-pub struct StaticFileHook<Provider> {
-    /// The current state of the `static_file_producer`.
-    state: StaticFileProducerState<Provider>,
-    /// The type that can spawn the `static_file_producer` task.
-    task_spawner: Box<dyn TaskSpawner>,
-}
-
-impl<Provider> StaticFileHook<Provider>
-where
-    Provider: StaticFileProviderFactory
-        + DatabaseProviderFactory<
-            Provider: StaticFileProviderFactory<
-                Primitives: NodePrimitives<
-                    SignedTx: Value + Compact,
-                    BlockHeader: Value + Compact,
-                    Receipt: Value + Compact,
-                >,
-            > + StageCheckpointReader
-            + BlockReader
-            + ChainStateBlockReader,
-        > + 'static,
-{
-    /// Create a new instance
-    pub fn new(
-        static_file_producer: StaticFileProducer<Provider>,
-        task_spawner: Box<dyn TaskSpawner>,
-    ) -> Self {
-        Self { state: StaticFileProducerState::Idle(Some(static_file_producer)), task_spawner }
-    }
-
-    /// Advances the `static_file_producer` state.
-    ///
-    /// This checks for the result in the channel, or returns pending if the `static_file_producer`
-    /// is idle.
-    fn poll_static_file_producer(
-        &mut self,
-        cx: &mut Context<'_>,
-    ) -> Poll<RethResult<EngineHookEvent>> {
-        let result = match self.state {
-            StaticFileProducerState::Idle(_) => return Poll::Pending,
-            StaticFileProducerState::Running(ref mut fut) => {
-                ready!(fut.poll_unpin(cx))
-            }
-        };
-
-        let event = match result {
-            Ok((static_file_producer, result)) => {
-                self.state = StaticFileProducerState::Idle(Some(static_file_producer));
-
-                match result {
-                    Ok(_) => EngineHookEvent::Finished(Ok(())),
-                    Err(err) => EngineHookEvent::Finished(Err(EngineHookError::Common(err.into()))),
-                }
-            }
-            Err(_) => {
-                // failed to receive the static_file_producer
-                EngineHookEvent::Finished(Err(EngineHookError::ChannelClosed))
-            }
-        };
-
-        Poll::Ready(Ok(event))
-    }
-
-    /// This will try to spawn the `static_file_producer` if it is idle:
-    /// 1. Check if producing static files is needed through
-    ///    [`StaticFileProducer::get_static_file_targets`](reth_static_file::StaticFileProducerInner::get_static_file_targets)
-    ///    and then [`StaticFileTargets::any`](reth_static_file::StaticFileTargets::any).
-    ///
-    /// 2.1. If producing static files is needed, pass static file request to the
-    ///      [`StaticFileProducer::run`](reth_static_file::StaticFileProducerInner::run) and
-    ///      spawn it in a separate task. Set static file producer state to
-    ///      [`StaticFileProducerState::Running`].
-    /// 2.2. If producing static files is not needed, set static file producer state back to
-    ///      [`StaticFileProducerState::Idle`].
-    ///
-    /// If `static_file_producer` is already running, do nothing.
-    fn try_spawn_static_file_producer(
-        &mut self,
-        finalized_block_number: BlockNumber,
-    ) -> RethResult<Option<EngineHookEvent>> {
-        Ok(match &mut self.state {
-            StaticFileProducerState::Idle(static_file_producer) => {
-                let Some(static_file_producer) = static_file_producer.take() else {
-                    trace!(target: "consensus::engine::hooks::static_file", "StaticFileProducer is already running but the state is idle");
-                    return Ok(None)
-                };
-
-                let Some(locked_static_file_producer) = static_file_producer.try_lock_arc() else {
-                    trace!(target: "consensus::engine::hooks::static_file", "StaticFileProducer lock is already taken");
-                    return Ok(None)
-                };
-
-                let finalized_block_number = locked_static_file_producer
-                    .last_finalized_block()?
-                    .map(|on_disk| finalized_block_number.min(on_disk))
-                    .unwrap_or(finalized_block_number);
-
-                let targets =
-                    locked_static_file_producer.get_static_file_targets(HighestStaticFiles {
-                        headers: Some(finalized_block_number),
-                        receipts: Some(finalized_block_number),
-                        transactions: Some(finalized_block_number),
-                    })?;
-
-                // Check if the moving data to static files has been requested.
- if targets.any() { - let (tx, rx) = oneshot::channel(); - self.task_spawner.spawn_critical_blocking( - "static_file_producer task", - Box::pin(async move { - let result = locked_static_file_producer.run(targets); - let _ = tx.send((static_file_producer, result)); - }), - ); - self.state = StaticFileProducerState::Running(rx); - - Some(EngineHookEvent::Started) - } else { - self.state = StaticFileProducerState::Idle(Some(static_file_producer)); - Some(EngineHookEvent::NotReady) - } - } - StaticFileProducerState::Running(_) => None, - }) - } -} - -impl EngineHook for StaticFileHook -where - Provider: StaticFileProviderFactory - + DatabaseProviderFactory< - Provider: StaticFileProviderFactory< - Primitives: NodePrimitives< - SignedTx: Value + Compact, - BlockHeader: Value + Compact, - Receipt: Value + Compact, - >, - > + StageCheckpointReader - + BlockReader - + ChainStateBlockReader, - > + 'static, -{ - fn name(&self) -> &'static str { - "StaticFile" - } - - fn poll( - &mut self, - cx: &mut Context<'_>, - ctx: EngineHookContext, - ) -> Poll> { - let Some(finalized_block_number) = ctx.finalized_block_number else { - trace!(target: "consensus::engine::hooks::static_file", ?ctx, "Finalized block number is not available"); - return Poll::Pending - }; - - // Try to spawn a static_file_producer - match self.try_spawn_static_file_producer(finalized_block_number)? { - Some(EngineHookEvent::NotReady) => return Poll::Pending, - Some(event) => return Poll::Ready(Ok(event)), - None => (), - } - - // Poll static_file_producer and check its status - self.poll_static_file_producer(cx) - } - - fn db_access_level(&self) -> EngineHookDBAccessLevel { - EngineHookDBAccessLevel::ReadOnly - } -} - -/// The possible `static_file_producer` states within the sync controller. -/// -/// [`StaticFileProducerState::Idle`] means that the static file producer is currently idle. -/// [`StaticFileProducerState::Running`] means that the static file producer is currently running. -#[derive(Debug)] -enum StaticFileProducerState { - /// [`StaticFileProducer`] is idle. - Idle(Option>), - /// [`StaticFileProducer`] is running and waiting for a response - Running(oneshot::Receiver>), -} diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs deleted file mode 100644 index 384820ca9f3fc..0000000000000 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ /dev/null @@ -1,125 +0,0 @@ -use alloy_eips::eip1898::BlockWithParent; -use alloy_primitives::B256; -use reth_metrics::{ - metrics::{Counter, Gauge}, - Metrics, -}; -use schnellru::{ByLength, LruMap}; -use std::fmt::Debug; -use tracing::warn; - -/// The max hit counter for invalid headers in the cache before it is forcefully evicted. -/// -/// In other words, if a header is referenced more than this number of times, it will be evicted to -/// allow for reprocessing. -const INVALID_HEADER_HIT_EVICTION_THRESHOLD: u8 = 128; - -/// Keeps track of invalid headers. -#[derive(Debug)] -pub struct InvalidHeaderCache { - /// This maps a header hash to a reference to its invalid ancestor. - headers: LruMap, - /// Metrics for the cache. - metrics: InvalidHeaderCacheMetrics, -} - -impl InvalidHeaderCache { - /// Invalid header cache constructor. 
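The gating in `try_spawn_static_file_producer` boils down to: clamp the engine's finalized number to what is already finalized on disk, compute per-segment targets, and only spawn a run when at least one segment has work. Below is a self-contained sketch of that decision under simplified assumptions; the types and the range computation inside `segment_target` are stand-ins, not reth's actual `HighestStaticFiles`/targets logic.

```rust
#[derive(Debug, Clone, Copy)]
struct HighestStaticFiles {
    headers: Option<u64>,
    receipts: Option<u64>,
    transactions: Option<u64>,
}

#[derive(Debug)]
struct Targets {
    headers: Option<std::ops::RangeInclusive<u64>>,
    receipts: Option<std::ops::RangeInclusive<u64>>,
    transactions: Option<std::ops::RangeInclusive<u64>>,
}

impl Targets {
    fn any(&self) -> bool {
        self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some()
    }
}

/// Returns the block range a segment still needs to copy, if any.
fn segment_target(
    highest_on_disk: u64,
    requested: Option<u64>,
) -> Option<std::ops::RangeInclusive<u64>> {
    let requested = requested?;
    (requested > highest_on_disk).then(|| highest_on_disk + 1..=requested)
}

fn main() {
    // Engine-provided finalized number, clamped by the last finalized block on disk.
    let finalized = 1_000u64;
    let last_finalized_on_disk = Some(900u64);
    let finalized =
        last_finalized_on_disk.map(|on_disk| finalized.min(on_disk)).unwrap_or(finalized);

    let request = HighestStaticFiles {
        headers: Some(finalized),
        receipts: Some(finalized),
        transactions: Some(finalized),
    };

    // Highest block already moved to static files, per segment.
    let targets = Targets {
        headers: segment_target(850, request.headers),
        receipts: segment_target(900, request.receipts),
        transactions: segment_target(900, request.transactions),
    };

    // Only spawn the producer task when there is actual work.
    assert!(targets.any());
    assert_eq!(targets.headers, Some(851..=900));
}
```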
-    pub fn new(max_length: u32) -> Self {
-        Self { headers: LruMap::new(ByLength::new(max_length)), metrics: Default::default() }
-    }
-
-    fn insert_entry(&mut self, hash: B256, header: BlockWithParent) {
-        self.headers.insert(hash, HeaderEntry { header, hit_count: 0 });
-    }
-
-    /// Returns the invalid ancestor's header if it exists in the cache.
-    ///
-    /// If this is called, the hit count for the entry is incremented.
-    /// If the hit count exceeds the threshold, the entry is evicted and `None` is returned.
-    pub fn get(&mut self, hash: &B256) -> Option<BlockWithParent> {
-        {
-            let entry = self.headers.get(hash)?;
-            entry.hit_count += 1;
-            if entry.hit_count < INVALID_HEADER_HIT_EVICTION_THRESHOLD {
-                return Some(entry.header)
-            }
-        }
-        // if we get here, the entry has been hit too many times, so we evict it
-        self.headers.remove(hash);
-        self.metrics.hit_evictions.increment(1);
-        None
-    }
-
-    /// Inserts an invalid block into the cache, with a given invalid ancestor.
-    pub fn insert_with_invalid_ancestor(
-        &mut self,
-        header_hash: B256,
-        invalid_ancestor: BlockWithParent,
-    ) {
-        if self.get(&header_hash).is_none() {
-            warn!(target: "consensus::engine", hash=?header_hash, ?invalid_ancestor, "Bad block with existing invalid ancestor");
-            self.insert_entry(header_hash, invalid_ancestor);
-
-            // update metrics
-            self.metrics.known_ancestor_inserts.increment(1);
-            self.metrics.count.set(self.headers.len() as f64);
-        }
-    }
-
-    /// Inserts an invalid ancestor into the map.
-    pub fn insert(&mut self, invalid_ancestor: BlockWithParent) {
-        if self.get(&invalid_ancestor.block.hash).is_none() {
-            warn!(target: "consensus::engine", ?invalid_ancestor, "Bad block with hash");
-            self.insert_entry(invalid_ancestor.block.hash, invalid_ancestor);
-
-            // update metrics
-            self.metrics.unique_inserts.increment(1);
-            self.metrics.count.set(self.headers.len() as f64);
-        }
-    }
-}
-
-struct HeaderEntry {
-    /// Keeps track of how many times this header has been hit.
-    hit_count: u8,
-    /// The actual header entry
-    header: BlockWithParent,
-}
-
-/// Metrics for the invalid headers cache.
-#[derive(Metrics)]
-#[metrics(scope = "consensus.engine.beacon.invalid_headers")]
-struct InvalidHeaderCacheMetrics {
-    /// The total number of invalid headers in the cache.
-    count: Gauge,
-    /// The number of inserts with a known ancestor.
-    known_ancestor_inserts: Counter,
-    /// The number of unique invalid header inserts (i.e. without a known ancestor).
-    unique_inserts: Counter,
-    /// The number of times a header was evicted from the cache because it was hit too many times.
- hit_evictions: Counter, -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::Header; - use reth_primitives::SealedHeader; - - #[test] - fn test_hit_eviction() { - let mut cache = InvalidHeaderCache::new(10); - let header = Header::default(); - let header = SealedHeader::seal(header); - cache.insert(header.block_with_parent()); - assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, 0); - - for hit in 1..INVALID_HEADER_HIT_EVICTION_THRESHOLD { - assert!(cache.get(&header.hash()).is_some()); - assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, hit); - } - - assert!(cache.get(&header.hash()).is_none()); - } -} diff --git a/crates/consensus/beacon/src/engine/metrics.rs b/crates/consensus/beacon/src/engine/metrics.rs deleted file mode 100644 index 67bae71be8b70..0000000000000 --- a/crates/consensus/beacon/src/engine/metrics.rs +++ /dev/null @@ -1,32 +0,0 @@ -use reth_metrics::{ - metrics::{Counter, Gauge, Histogram}, - Metrics, -}; - -/// Beacon consensus engine metrics. -#[derive(Metrics)] -#[metrics(scope = "consensus.engine.beacon")] -pub(crate) struct EngineMetrics { - /// The number of times the pipeline was run. - pub(crate) pipeline_runs: Counter, - /// The total count of forkchoice updated messages received. - pub(crate) forkchoice_updated_messages: Counter, - /// The total count of new payload messages received. - pub(crate) new_payload_messages: Counter, - /// Latency for making canonical already canonical block - pub(crate) make_canonical_already_canonical_latency: Histogram, - /// Latency for making canonical committed block - pub(crate) make_canonical_committed_latency: Histogram, - /// Latency for making canonical returns error - pub(crate) make_canonical_error_latency: Histogram, - /// Latency for all making canonical results - pub(crate) make_canonical_latency: Histogram, -} - -/// Metrics for the `EngineSyncController`. -#[derive(Metrics)] -#[metrics(scope = "consensus.engine.beacon")] -pub(crate) struct EngineSyncMetrics { - /// How many blocks are currently being downloaded. 
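The eviction semantics exercised by `test_hit_eviction` above can be captured in a few lines: every lookup increments a hit counter, and once an entry has been served the threshold number of times it is dropped so the header can be revalidated from scratch. A minimal sketch, with a plain `HashMap` standing in for the LRU map and `u64` stubs for hashes (none of these names are reth APIs):

```rust
use std::collections::HashMap;

const HIT_EVICTION_THRESHOLD: u8 = 128;

struct Entry<T> {
    value: T,
    hit_count: u8,
}

struct InvalidCache<T: Copy> {
    entries: HashMap<u64, Entry<T>>,
}

impl<T: Copy> InvalidCache<T> {
    fn insert(&mut self, hash: u64, value: T) {
        self.entries.insert(hash, Entry { value, hit_count: 0 });
    }

    /// Returns the cached value, evicting it after too many hits.
    fn get(&mut self, hash: u64) -> Option<T> {
        let entry = self.entries.get_mut(&hash)?;
        entry.hit_count += 1;
        if entry.hit_count < HIT_EVICTION_THRESHOLD {
            return Some(entry.value);
        }
        // Hit too many times: evict so the header is no longer auto-rejected.
        self.entries.remove(&hash);
        None
    }
}

fn main() {
    let mut cache = InvalidCache { entries: HashMap::new() };
    cache.insert(0xdead, "invalid ancestor");

    // The first `threshold - 1` lookups are served from the cache...
    for _ in 1..HIT_EVICTION_THRESHOLD {
        assert!(cache.get(0xdead).is_some());
    }
    // ...after which the entry is forcefully evicted.
    assert!(cache.get(0xdead).is_none());
    assert!(cache.entries.is_empty());
}
```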
- pub(crate) active_block_downloads: Gauge, -} diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs deleted file mode 100644 index 0412e98776ab8..0000000000000 --- a/crates/consensus/beacon/src/engine/mod.rs +++ /dev/null @@ -1,2961 +0,0 @@ -use alloy_consensus::{BlockHeader, Header}; -use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash}; -use alloy_primitives::{BlockNumber, B256}; -use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, PayloadStatus, PayloadStatusEnum, - PayloadValidationError, -}; -use futures::{stream::BoxStream, Future, StreamExt}; -use itertools::Either; -use reth_blockchain_tree_api::{ - error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockStatus, BlockValidationKind, BlockchainTreeEngine, CanonicalOutcome, InsertPayloadOk, -}; -use reth_engine_primitives::{ - BeaconEngineMessage, BeaconOnNewPayloadError, EngineApiMessageVersion, EngineTypes, - ForkchoiceStateHash, ForkchoiceStateTracker, ForkchoiceStatus, OnForkChoiceUpdated, - PayloadTypes, -}; -use reth_errors::{BlockValidationError, ProviderResult, RethError, RethResult}; -use reth_network_p2p::{ - sync::{NetworkSyncUpdater, SyncState}, - EthBlockClient, -}; -use reth_node_types::{Block, BlockTy, HeaderTy, NodeTypesWithEngine}; -use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_builder_primitives::PayloadBuilder; -use reth_payload_primitives::{PayloadAttributes, PayloadBuilderAttributes}; -use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Head, SealedBlock, SealedHeader}; -use reth_provider::{ - providers::{ProviderNodeTypes, TreeNodeTypes}, - BlockIdReader, BlockReader, BlockSource, CanonChainTracker, ChainSpecProvider, ProviderError, - StageCheckpointReader, -}; -use reth_stages_api::{ControlFlow, Pipeline, PipelineTarget, StageId}; -use reth_tasks::TaskSpawner; -use reth_tokio_util::EventSender; -use std::{ - pin::Pin, - sync::Arc, - task::{Context, Poll}, - time::{Duration, Instant}, -}; -use tokio::sync::{ - mpsc::{self, UnboundedSender}, - oneshot, -}; -use tokio_stream::wrappers::UnboundedReceiverStream; -use tracing::*; - -mod error; -pub use error::{BeaconConsensusEngineError, BeaconEngineResult, BeaconForkChoiceUpdateError}; - -mod invalid_headers; -pub use invalid_headers::InvalidHeaderCache; - -pub use reth_engine_primitives::{BeaconConsensusEngineEvent, ConsensusEngineLiveSyncProgress}; - -mod handle; -pub use handle::BeaconConsensusEngineHandle; - -mod metrics; -use metrics::EngineMetrics; - -pub mod sync; -use sync::{EngineSyncController, EngineSyncEvent}; - -/// Hooks for running during the main loop of -/// [consensus engine][`crate::engine::BeaconConsensusEngine`]. -pub mod hooks; -use hooks::{EngineHookContext, EngineHookEvent, EngineHooks, EngineHooksController, PolledHook}; - -#[cfg(test)] -pub mod test_utils; - -/// The maximum number of invalid headers that can be tracked by the engine. -const MAX_INVALID_HEADERS: u32 = 512u32; - -/// The largest gap for which the tree will be used for sync. See docs for `pipeline_run_threshold` -/// for more information. -/// -/// This is the default threshold, the distance to the head that the tree will be used for sync. -/// If the distance exceeds this threshold, the pipeline will be used for sync. -pub const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = EPOCH_SLOTS; - -/// Helper trait expressing requirements for node types to be used in engine. 
-pub trait EngineNodeTypes: ProviderNodeTypes + NodeTypesWithEngine {}
-
-impl<T> EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine {}
-
-/// Represents a pending forkchoice update.
-///
-/// This type encapsulates the necessary components for a pending forkchoice update
-/// in the context of a beacon consensus engine.
-///
-/// It consists of:
-/// - The current fork choice state.
-/// - Optional payload attributes specific to the engine type.
-/// - Sender for the result of a oneshot channel, conveying the outcome of the fork choice update.
-type PendingForkchoiceUpdate<PayloadAttributes> =
-    (ForkchoiceState, Option<PayloadAttributes>, oneshot::Sender<RethResult<OnForkChoiceUpdated>>);
-
-/// The beacon consensus engine is the driver that switches between historical and live sync.
-///
-/// The beacon consensus engine is itself driven by messages from the Consensus Layer, which are
-/// received by Engine API (JSON-RPC).
-///
-/// The consensus engine is idle until it receives the first
-/// [`BeaconEngineMessage::ForkchoiceUpdated`] message from the CL which would initiate the sync. At
-/// first, the consensus engine would run the [Pipeline] until the latest known block hash.
-/// Afterward, it would attempt to create/restore the [`BlockchainTreeEngine`] from the blocks
-/// that are currently available. In case the restoration is successful, the consensus engine would
-/// run in a live sync mode, populating the [`BlockchainTreeEngine`] with new blocks as they arrive
-/// via engine API and downloading any missing blocks from the network to fill potential gaps.
-///
-/// The consensus engine has two data input sources:
-///
-/// ## New Payload (`engine_newPayloadV{}`)
-///
-/// The engine receives new payloads from the CL. If the payload is connected to the canonical
-/// chain, it will be fully validated and added to a chain in the [`BlockchainTreeEngine`]: `VALID`
-///
-/// If the payload's chain is disconnected (at least 1 block is missing) then it will be buffered:
-/// `SYNCING` ([`BlockStatus::Disconnected`]).
-///
-/// ## Forkchoice Update (FCU) (`engine_forkchoiceUpdatedV{}`)
-///
-/// This contains the latest forkchoice state and the payload attributes. The engine will attempt to
-/// make a new canonical chain based on the `head_hash` of the update and trigger payload building
-/// if the `payload_attrs` are present and the FCU is `VALID`.
-///
-/// The `head_hash` forms a chain by walking backwards from the `head_hash` towards the canonical
-/// blocks of the chain.
-///
-/// Making a new canonical chain can result in the following relevant outcomes:
-///
-/// ### The chain is connected
-///
-/// All blocks of the `head_hash`'s chain are present in the [`BlockchainTreeEngine`] and are
-/// committed to the canonical chain. This also includes reorgs.
-///
-/// ### The chain is disconnected
-///
-/// In this case the [`BlockchainTreeEngine`] doesn't know how the new chain connects to the
-/// existing canonical chain. It could be a simple commit (new blocks extend the current head) or a
-/// re-org that requires unwinding the canonical chain.
-///
-/// This further distinguishes between two variants:
-///
-/// #### `head_hash`'s block exists
-///
-/// The `head_hash`'s block was already received/downloaded, but at least one block is missing to
-/// form a _connected_ chain. The engine will attempt to download the missing blocks from the
-/// network by walking backwards (`parent_hash`), and then try to make the block canonical as soon
-/// as the chain becomes connected.
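The docs above describe the pipeline-versus-tree split that `MIN_BLOCKS_FOR_PIPELINE_RUN` (one epoch of slots) controls. A tiny sketch of that gap rule under simplified assumptions; the enum and function names are illustrative, not reth's:

```rust
/// Default threshold: one epoch's worth of slots.
const MIN_BLOCKS_FOR_PIPELINE_RUN: u64 = 32;

#[derive(Debug, PartialEq)]
enum SyncRoute {
    /// The target is already covered by the local tip; nothing to do.
    UpToDate,
    /// Small gap: download blocks and feed them to the blockchain tree.
    Tree { distance: u64 },
    /// Large gap: run the pipeline to the target.
    Pipeline,
}

fn route(local_tip: u64, target: u64, threshold: u64) -> SyncRoute {
    if target <= local_tip {
        SyncRoute::UpToDate
    } else if target - local_tip > threshold {
        SyncRoute::Pipeline
    } else {
        SyncRoute::Tree { distance: target - local_tip }
    }
}

fn main() {
    let tip = 10_000;
    assert_eq!(route(tip, tip - 5, MIN_BLOCKS_FOR_PIPELINE_RUN), SyncRoute::UpToDate);
    assert_eq!(route(tip, tip + 8, MIN_BLOCKS_FOR_PIPELINE_RUN), SyncRoute::Tree { distance: 8 });
    assert_eq!(route(tip, tip + 500, MIN_BLOCKS_FOR_PIPELINE_RUN), SyncRoute::Pipeline);
}
```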
-///
-/// However, it can still be the case that the chain and the FCU are `INVALID`.
-///
-/// #### `head_hash` block is missing
-///
-/// This is similar to the previous case, but the `head_hash`'s block is missing. At which point the
-/// engine doesn't know where the new head will point to: the new chain could be a re-org or a simple
-/// commit. The engine will download the missing head first and then proceed as in the previous
-/// case.
-///
-/// # Panics
-///
-/// If the future is polled more than once. Leads to undefined state.
-///
-/// Note: soon deprecated. See `reth_engine_service::EngineService`.
-#[must_use = "Future does nothing unless polled"]
-#[allow(missing_debug_implementations)]
-pub struct BeaconConsensusEngine<N, Client, BT>
-where
-    N: EngineNodeTypes,
-    Client: EthBlockClient,
-    BT: BlockchainTreeEngine
-        + BlockReader
-        + BlockIdReader
-        + CanonChainTracker
-        + StageCheckpointReader,
-{
-    /// Controls syncing triggered by engine updates.
-    sync: EngineSyncController<N, Client>,
-    /// The type we can use to query both the database and the blockchain tree.
-    blockchain: BT,
-    /// Used for emitting updates about whether the engine is syncing or not.
-    sync_state_updater: Box<dyn NetworkSyncUpdater>,
-    /// The Engine API message receiver.
-    engine_message_stream: BoxStream<'static, BeaconEngineMessage<N::Engine>>,
-    /// A clone of the handle
-    handle: BeaconConsensusEngineHandle<N::Engine>,
-    /// Tracks the received forkchoice state updates received by the CL.
-    forkchoice_state_tracker: ForkchoiceStateTracker,
-    /// The payload store.
-    payload_builder: PayloadBuilderHandle<N::Engine>,
-    /// Validator for execution payloads
-    payload_validator: ExecutionPayloadValidator<N::ChainSpec>,
-    /// Current blockchain tree action.
-    blockchain_tree_action: Option<BlockchainTreeAction<N::Engine>>,
-    /// Pending forkchoice update.
-    /// It is recorded if we cannot process the forkchoice update because
-    /// a hook with database read-write access is active.
-    /// This is a temporary solution to always process missed FCUs.
-    pending_forkchoice_update:
-        Option<PendingForkchoiceUpdate<<N::Engine as PayloadTypes>::PayloadAttributes>>,
-    /// Tracks the header of invalid payloads that were rejected by the engine because they're
-    /// invalid.
-    invalid_headers: InvalidHeaderCache,
-    /// After downloading a block corresponding to a recent forkchoice update, the engine will
-    /// check whether or not we can connect the block to the current canonical chain. If we can't,
-    /// we need to download and execute the missing parents of that block.
-    ///
-    /// When the block can't be connected, its block number will be compared to the canonical head,
-    /// resulting in a heuristic for the number of missing blocks, or the size of the gap between
-    /// the new block and the canonical head.
-    ///
-    /// If the gap is larger than this threshold, the engine will download and execute the missing
-    /// blocks using the pipeline. Otherwise, the engine, sync controller, and blockchain tree will
-    /// be used to download and execute the missing blocks.
-    pipeline_run_threshold: u64,
-    hooks: EngineHooksController,
-    /// Sender for engine events.
-    event_sender: EventSender<BeaconConsensusEngineEvent>,
-    /// Consensus engine metrics.
-    metrics: EngineMetrics,
-}
-
-impl<N, Client, BT> BeaconConsensusEngine<N, Client, BT>
-where
-    N: TreeNodeTypes,
-    BT: BlockchainTreeEngine
-        + BlockReader<Block = BlockTy<N>, Header = HeaderTy<N>>
-        + BlockIdReader
-        + CanonChainTracker<
-            Header = HeaderTy<N>,
> - + StageCheckpointReader - + ChainSpecProvider - + 'static, - Client: EthBlockClient + 'static, -{ - /// Create a new instance of the [`BeaconConsensusEngine`]. - #[allow(clippy::too_many_arguments)] - pub fn new( - client: Client, - pipeline: Pipeline, - blockchain: BT, - task_spawner: Box, - sync_state_updater: Box, - max_block: Option, - payload_builder: PayloadBuilderHandle, - target: Option, - pipeline_run_threshold: u64, - hooks: EngineHooks, - ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { - let (to_engine, rx) = mpsc::unbounded_channel(); - Self::with_channel( - client, - pipeline, - blockchain, - task_spawner, - sync_state_updater, - max_block, - payload_builder, - target, - pipeline_run_threshold, - to_engine, - Box::pin(UnboundedReceiverStream::from(rx)), - hooks, - ) - } - - /// Create a new instance of the [`BeaconConsensusEngine`] using the given channel to configure - /// the [`BeaconEngineMessage`] communication channel. - /// - /// By default the engine is started with idle pipeline. - /// The pipeline can be launched immediately in one of the following ways descending in - /// priority: - /// - Explicit [`Option::Some`] target block hash provided via a constructor argument. - /// - The process was previously interrupted amidst the pipeline run. This is checked by - /// comparing the checkpoints of the first ([`StageId::Headers`]) and last - /// ([`StageId::Finish`]) stages. In this case, the latest available header in the database is - /// used as the target. - /// - /// Propagates any database related error. - #[allow(clippy::too_many_arguments)] - pub fn with_channel( - client: Client, - pipeline: Pipeline, - blockchain: BT, - task_spawner: Box, - sync_state_updater: Box, - max_block: Option, - payload_builder: PayloadBuilderHandle, - target: Option, - pipeline_run_threshold: u64, - to_engine: UnboundedSender>, - engine_message_stream: BoxStream<'static, BeaconEngineMessage>, - hooks: EngineHooks, - ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { - let event_sender = EventSender::default(); - let handle = BeaconConsensusEngineHandle::new(to_engine); - let sync = EngineSyncController::new( - pipeline, - client, - task_spawner.clone(), - max_block, - blockchain.chain_spec(), - event_sender.clone(), - ); - let mut this = Self { - sync, - payload_validator: ExecutionPayloadValidator::new(blockchain.chain_spec()), - blockchain, - sync_state_updater, - engine_message_stream, - handle: handle.clone(), - forkchoice_state_tracker: Default::default(), - payload_builder, - invalid_headers: InvalidHeaderCache::new(MAX_INVALID_HEADERS), - blockchain_tree_action: None, - pending_forkchoice_update: None, - pipeline_run_threshold, - hooks: EngineHooksController::new(hooks), - event_sender, - metrics: EngineMetrics::default(), - }; - - let maybe_pipeline_target = match target { - // Provided target always takes precedence. - target @ Some(_) => target, - None => this.check_pipeline_consistency()?, - }; - - if let Some(target) = maybe_pipeline_target { - this.sync.set_pipeline_sync_target(target.into()); - } - - Ok((this, handle)) - } - - /// Returns current [`EngineHookContext`] that's used for polling engine hooks. - fn current_engine_hook_context(&self) -> RethResult { - Ok(EngineHookContext { - tip_block_number: self.blockchain.canonical_tip().number, - finalized_block_number: self - .blockchain - .finalized_block_number() - .map_err(RethError::Provider)?, - }) - } - - /// Set the next blockchain tree action. 
- fn set_blockchain_tree_action(&mut self, action: BlockchainTreeAction) { - let previous_action = self.blockchain_tree_action.replace(action); - debug_assert!(previous_action.is_none(), "Pre-existing action found"); - } - - /// Pre-validate forkchoice update and check whether it can be processed. - /// - /// This method returns the update outcome if validation fails or - /// the node is syncing and the update cannot be processed at the moment. - fn pre_validate_forkchoice_update( - &mut self, - state: ForkchoiceState, - ) -> ProviderResult> { - if state.head_block_hash.is_zero() { - return Ok(Some(OnForkChoiceUpdated::invalid_state())) - } - - // check if the new head hash is connected to any ancestor that we previously marked as - // invalid - let lowest_buffered_ancestor_fcu = self.lowest_buffered_ancestor_or(state.head_block_hash); - if let Some(status) = self.check_invalid_ancestor(lowest_buffered_ancestor_fcu)? { - return Ok(Some(OnForkChoiceUpdated::with_invalid(status))) - } - - if self.sync.is_pipeline_active() { - // We can only process new forkchoice updates if the pipeline is idle, since it requires - // exclusive access to the database - trace!(target: "consensus::engine", "Pipeline is syncing, skipping forkchoice update"); - return Ok(Some(OnForkChoiceUpdated::syncing())) - } - - Ok(None) - } - - /// Process the result of attempting to make forkchoice state head hash canonical. - /// - /// # Returns - /// - /// A forkchoice state update outcome or fatal error. - fn on_forkchoice_updated_make_canonical_result( - &mut self, - state: ForkchoiceState, - mut attrs: Option<::PayloadAttributes>, - make_canonical_result: Result, - elapsed: Duration, - ) -> Result { - match make_canonical_result { - Ok(outcome) => { - let should_update_head = match &outcome { - CanonicalOutcome::AlreadyCanonical { head, header } => { - self.on_head_already_canonical(head, header, &mut attrs) - } - CanonicalOutcome::Committed { head } => { - // new VALID update that moved the canonical chain forward - debug!(target: "consensus::engine", hash=?state.head_block_hash, number=head.number, "Canonicalized new head"); - true - } - }; - - if should_update_head { - let head = outcome.header(); - let _ = self.update_head(head.clone()); - self.event_sender.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( - Box::new(head.clone()), - elapsed, - )); - } - - // Validate that the forkchoice state is consistent. - let on_updated = if let Some(invalid_fcu_response) = - self.ensure_consistent_forkchoice_state(state)? - { - trace!(target: "consensus::engine", ?state, "Forkchoice state is inconsistent"); - invalid_fcu_response - } else if let Some(attrs) = attrs { - // the CL requested to build a new payload on top of this new VALID head - let head = outcome.into_header().unseal(); - self.process_payload_attributes( - attrs, - head, - state, - EngineApiMessageVersion::default(), - ) - } else { - OnForkChoiceUpdated::valid(PayloadStatus::new( - PayloadStatusEnum::Valid, - Some(state.head_block_hash), - )) - }; - Ok(on_updated) - } - Err(err) => { - if err.is_fatal() { - error!(target: "consensus::engine", %err, "Encountered fatal error"); - Err(err) - } else { - Ok(OnForkChoiceUpdated::valid( - self.on_failed_canonical_forkchoice_update(&state, err)?, - )) - } - } - } - } - - /// Invoked when head hash references a `VALID` block that is already canonical. - /// - /// Returns `true` if the head needs to be updated. 
- fn on_head_already_canonical( - &self, - head: &BlockNumHash, - header: &SealedHeader, - attrs: &mut Option<::PayloadAttributes>, - ) -> bool { - // On Optimism, the proposers are allowed to reorg their own chain at will. - #[cfg(feature = "optimism")] - if reth_chainspec::EthChainSpec::is_optimism(&self.blockchain.chain_spec()) { - debug!( - target: "consensus::engine", - fcu_head_num=?header.number, - current_head_num=?head.number, - "[Optimism] Allowing beacon reorg to old head" - ); - return true - } - - // 2. Client software MAY skip an update of the forkchoice state and MUST NOT begin a - // payload build process if `forkchoiceState.headBlockHash` references a `VALID` ancestor - // of the head of canonical chain, i.e. the ancestor passed payload validation process - // and deemed `VALID`. In the case of such an event, client software MUST return - // `{payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash, - // validationError: null}, payloadId: null}` - if head != &header.num_hash() { - attrs.take(); - } - - debug!( - target: "consensus::engine", - fcu_head_num=?header.number, - current_head_num=?head.number, - "Ignoring beacon update to old head" - ); - false - } - - /// Invoked when we receive a new forkchoice update message. Calls into the blockchain tree - /// to resolve chain forks and ensure that the Execution Layer is working with the latest valid - /// chain. - /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_forkchoiceUpdated`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification-1). - /// - /// Returns an error if an internal error occurred like a database error. - fn on_forkchoice_updated( - &mut self, - state: ForkchoiceState, - attrs: Option<::PayloadAttributes>, - tx: oneshot::Sender>, - ) { - self.metrics.forkchoice_updated_messages.increment(1); - self.blockchain.on_forkchoice_update_received(&state); - trace!(target: "consensus::engine", ?state, "Received new forkchoice state update"); - - match self.pre_validate_forkchoice_update(state) { - Ok(on_updated_result) => { - if let Some(on_updated) = on_updated_result { - // Pre-validate forkchoice state update and return if it's invalid - // or cannot be processed at the moment. - self.on_forkchoice_updated_status(state, on_updated, tx); - } else if let Some(hook) = self.hooks.active_db_write_hook() { - // We can only process new forkchoice updates if no hook with db write is - // running, since it requires exclusive access to the - // database - let replaced_pending = - self.pending_forkchoice_update.replace((state, attrs, tx)); - warn!( - target: "consensus::engine", - hook = %hook.name(), - head_block_hash = ?state.head_block_hash, - safe_block_hash = ?state.safe_block_hash, - finalized_block_hash = ?state.finalized_block_hash, - replaced_pending = ?replaced_pending.map(|(state, _, _)| state), - "Hook is in progress, delaying forkchoice update. \ - This may affect the performance of your node as a validator." - ); - } else { - self.set_blockchain_tree_action( - BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx }, - ); - } - } - Err(error) => { - let _ = tx.send(Err(error.into())); - } - } - } - - /// Called after the forkchoice update status has been resolved. - /// Depending on the outcome, the method updates the sync state and notifies the listeners - /// about new processed FCU. 
-    fn on_forkchoice_updated_status(
-        &mut self,
-        state: ForkchoiceState,
-        on_updated: OnForkChoiceUpdated,
-        tx: oneshot::Sender<RethResult<OnForkChoiceUpdated>>,
-    ) {
-        // send the response to the CL ASAP
-        let status = on_updated.forkchoice_status();
-        let _ = tx.send(Ok(on_updated));
-
-        // update the forkchoice state tracker
-        self.forkchoice_state_tracker.set_latest(state, status);
-
-        match status {
-            ForkchoiceStatus::Invalid => {}
-            ForkchoiceStatus::Valid => {
-                // FCU head is valid, we're no longer syncing
-                self.sync_state_updater.update_sync_state(SyncState::Idle);
-                // node's fully synced, clear active download requests
-                self.sync.clear_block_download_requests();
-            }
-            ForkchoiceStatus::Syncing => {
-                // we're syncing
-                self.sync_state_updater.update_sync_state(SyncState::Syncing);
-            }
-        }
-
-        // notify listeners about new processed FCU
-        self.event_sender.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status));
-    }
-
-    /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less
-    /// than the checkpoint of the first stage).
-    ///
-    /// This will return the pipeline target if:
-    /// * the pipeline was interrupted during its previous run
-    /// * a new stage was added
-    /// * stage data was dropped manually through `reth stage drop ...`
-    ///
-    /// # Returns
-    ///
-    /// A target block hash if the pipeline is inconsistent, otherwise `None`.
-    fn check_pipeline_consistency(&self) -> RethResult<Option<B256>> {
-        // If no target was provided, check if the stages are congruent - check if the
-        // checkpoint of the last stage matches the checkpoint of the first.
-        let first_stage_checkpoint = self
-            .blockchain
-            .get_stage_checkpoint(*StageId::ALL.first().unwrap())?
-            .unwrap_or_default()
-            .block_number;
-
-        // Skip the first stage as we've already retrieved it, and compare all other checkpoints
-        // against it.
-        for stage_id in StageId::ALL.iter().skip(1) {
-            let stage_checkpoint =
-                self.blockchain.get_stage_checkpoint(*stage_id)?.unwrap_or_default().block_number;
-
-            // If the checkpoint of any stage is less than the checkpoint of the first stage,
-            // retrieve and return the block hash of the latest header and use it as the target.
-            if stage_checkpoint < first_stage_checkpoint {
-                debug!(
-                    target: "consensus::engine",
-                    first_stage_checkpoint,
-                    inconsistent_stage_id = %stage_id,
-                    inconsistent_stage_checkpoint = stage_checkpoint,
-                    "Pipeline sync progress is inconsistent"
-                );
-                return Ok(self.blockchain.block_hash(first_stage_checkpoint)?)
-            }
-        }
-
-        Ok(None)
-    }
-
-    /// Returns a new [`BeaconConsensusEngineHandle`] that can be cloned and shared.
-    ///
-    /// The [`BeaconConsensusEngineHandle`] can be used to interact with this
-    /// [`BeaconConsensusEngine`]
-    pub fn handle(&self) -> BeaconConsensusEngineHandle<N::Engine> {
-        self.handle.clone()
-    }
-
-    /// Returns true if the distance from the local tip to the block is greater than the configured
-    /// threshold.
-    ///
-    /// If the `local_tip` is greater than the `block`, then this will return false.
-    #[inline]
-    const fn exceeds_pipeline_run_threshold(&self, local_tip: u64, block: u64) -> bool {
-        block > local_tip && block - local_tip > self.pipeline_run_threshold
-    }
-
-    /// Returns the finalized hash to sync to if the distance from the local tip to the block is
-    /// greater than the configured threshold and we're not synced to the finalized block yet
-    /// (if we've seen that block already).
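The consistency check in `check_pipeline_consistency` above treats the first stage's checkpoint as the reference: if any later stage has progressed less far, the previous run was interrupted (or a stage was added or dropped) and the pipeline must be re-targeted to the first stage's height. A self-contained sketch with made-up stage names:

```rust
/// Returns the block number to re-target if the stage checkpoints are inconsistent.
fn inconsistent_target(checkpoints: &[(&str, u64)]) -> Option<u64> {
    let (_, first) = *checkpoints.first()?;
    for (stage, checkpoint) in checkpoints.iter().skip(1) {
        if *checkpoint < first {
            println!("stage {stage} is behind: {checkpoint} < {first}");
            return Some(first);
        }
    }
    None
}

fn main() {
    // All stages caught up with the first one: consistent.
    let ok = [("Headers", 100), ("Bodies", 100), ("Execution", 100), ("Finish", 100)];
    assert_eq!(inconsistent_target(&ok), None);

    // Execution lagging behind Headers: the previous run was interrupted.
    let interrupted = [("Headers", 120), ("Bodies", 120), ("Execution", 100), ("Finish", 100)];
    assert_eq!(inconsistent_target(&interrupted), Some(120));
}
```

The real method then resolves that number to a block hash, which becomes the pipeline target.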
- /// - /// If this is invoked after a new block has been downloaded, the downloaded block could be the - /// (missing) finalized block. - fn can_pipeline_sync_to_finalized( - &self, - canonical_tip_num: u64, - target_block_number: u64, - downloaded_block: Option, - ) -> Option { - let sync_target_state = self.forkchoice_state_tracker.sync_target_state(); - - // check if the distance exceeds the threshold for pipeline sync - let mut exceeds_pipeline_run_threshold = - self.exceeds_pipeline_run_threshold(canonical_tip_num, target_block_number); - - // check if the downloaded block is the tracked finalized block - if let Some(ref buffered_finalized) = sync_target_state - .as_ref() - .and_then(|state| self.blockchain.buffered_header_by_hash(state.finalized_block_hash)) - { - // if we have buffered the finalized block, we should check how far - // we're off - exceeds_pipeline_run_threshold = - self.exceeds_pipeline_run_threshold(canonical_tip_num, buffered_finalized.number); - } - - // If this is invoked after we downloaded a block we can check if this block is the - // finalized block - if let (Some(downloaded_block), Some(ref state)) = (downloaded_block, sync_target_state) { - if downloaded_block.hash == state.finalized_block_hash { - // we downloaded the finalized block - exceeds_pipeline_run_threshold = - self.exceeds_pipeline_run_threshold(canonical_tip_num, downloaded_block.number); - } - } - - // if the number of missing blocks is greater than the max, run the - // pipeline - if exceeds_pipeline_run_threshold { - if let Some(state) = sync_target_state { - // if we have already canonicalized the finalized block, we should - // skip the pipeline run - match self.blockchain.header_by_hash_or_number(state.finalized_block_hash.into()) { - Err(err) => { - warn!(target: "consensus::engine", %err, "Failed to get finalized block header"); - } - Ok(None) => { - // ensure the finalized block is known (not the zero hash) - if !state.finalized_block_hash.is_zero() { - // we don't have the block yet and the distance exceeds the allowed - // threshold - return Some(state.finalized_block_hash) - } - - // OPTIMISTIC SYNCING - // - // It can happen when the node is doing an - // optimistic sync, where the CL has no knowledge of the finalized hash, - // but is expecting the EL to sync as high - // as possible before finalizing. - // - // This usually doesn't happen on ETH mainnet since CLs use the more - // secure checkpoint syncing. - // - // However, optimism chains will do this. The risk of a reorg is however - // low. - debug!(target: "consensus::engine", hash=?state.head_block_hash, "Setting head hash as an optimistic pipeline target."); - return Some(state.head_block_hash) - } - Ok(Some(_)) => { - // we're fully synced to the finalized block - // but we want to continue downloading the missing parent - } - } - } - } - - None - } - - /// Returns how far the local tip is from the given block. If the local tip is at the same - /// height or its block number is greater than the given block, this returns None. 
-    #[inline]
-    const fn distance_from_local_tip(&self, local_tip: u64, block: u64) -> Option<u64> {
-        if block > local_tip {
-            Some(block - local_tip)
-        } else {
-            None
-        }
-    }
-
-    /// If validation fails, the response MUST contain the latest valid hash:
-    ///
-    /// - The block hash of the ancestor of the invalid payload satisfying the following two
-    ///   conditions:
-    ///   - It is fully validated and deemed VALID
-    ///   - Any other ancestor of the invalid payload with a higher blockNumber is INVALID
-    /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above
-    ///   conditions are satisfied by a `PoW` block.
-    /// - null if client software cannot determine the ancestor of the invalid payload satisfying
-    ///   the above conditions.
-    fn latest_valid_hash_for_invalid_payload(
-        &mut self,
-        parent_hash: B256,
-    ) -> ProviderResult<Option<B256>> {
-        // Check if parent exists in side chain or in canonical chain.
-        if self.blockchain.find_block_by_hash(parent_hash, BlockSource::Any)?.is_some() {
-            return Ok(Some(parent_hash))
-        }
-
-        // iterate over ancestors in the invalid cache
-        // until we encounter the first valid ancestor
-        let mut current_hash = parent_hash;
-        let mut current_block = self.invalid_headers.get(&current_hash);
-        while let Some(block) = current_block {
-            current_hash = block.parent;
-            current_block = self.invalid_headers.get(&current_hash);
-
-            // If `current_block` is `None`, then the `current_hash` does not have an invalid
-            // ancestor in the cache, check its presence in blockchain tree
-            if current_block.is_none() &&
-                self.blockchain.find_block_by_hash(current_hash, BlockSource::Any)?.is_some()
-            {
-                return Ok(Some(current_hash))
-            }
-        }
-        Ok(None)
-    }
-
-    /// Prepares the invalid payload response for the given hash, checking the
-    /// database for the parent hash and populating the payload status with the latest valid hash
-    /// according to the engine api spec.
-    fn prepare_invalid_response(&mut self, mut parent_hash: B256) -> ProviderResult<PayloadStatus> {
-        // Edge case: the `latestValid` field is the zero hash if the parent block is the terminal
-        // PoW block, which we need to identify by looking at the parent's block difficulty
-        if let Ok(Some(parent)) = self.blockchain.header_by_hash_or_number(parent_hash.into()) {
-            if !parent.is_zero_difficulty() {
-                parent_hash = B256::ZERO;
-            }
-        }
-
-        let valid_parent_hash = self.latest_valid_hash_for_invalid_payload(parent_hash)?;
-        Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid {
-            validation_error: PayloadValidationError::LinksToRejectedPayload.to_string(),
-        })
-        .with_latest_valid_hash(valid_parent_hash.unwrap_or_default()))
-    }
-
-    /// Checks if the given `check` hash points to an invalid header, inserting the given `head`
-    /// block into the invalid header cache if the `check` hash has a known invalid ancestor.
-    ///
-    /// Returns a payload status response according to the engine API spec if the block is known to
-    /// be invalid.
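The ancestor walk in `latest_valid_hash_for_invalid_payload` above is easy to get wrong, so here is a compact, self-contained sketch of the same idea: starting from the invalid payload's parent, follow `parent` links through the invalid-header cache until a hash that is not in the cache, then check whether the chain actually knows that block. Hashes are `u64` stubs and `known_blocks` stands in for the canonical/side-chain lookup; none of this is reth's API.

```rust
use std::collections::{HashMap, HashSet};

fn latest_valid_hash(
    parent: u64,
    invalid: &HashMap<u64, u64>, // invalid block hash -> its parent hash
    known_blocks: &HashSet<u64>,
) -> Option<u64> {
    if known_blocks.contains(&parent) {
        return Some(parent);
    }
    let mut current = parent;
    while let Some(&next_parent) = invalid.get(&current) {
        current = next_parent;
        // First hash without an invalid-cache entry: valid iff the chain knows it.
        if !invalid.contains_key(&current) && known_blocks.contains(&current) {
            return Some(current);
        }
    }
    None
}

fn main() {
    // 1 (valid, known) <- 2 (invalid) <- 3 (invalid) <- new payload with parent 3
    let invalid = HashMap::from([(2, 1), (3, 2)]);
    let known = HashSet::from([1]);
    assert_eq!(latest_valid_hash(3, &invalid, &known), Some(1));
    // If even the first valid ancestor is unknown locally, there is no answer.
    assert_eq!(latest_valid_hash(3, &invalid, &HashSet::new()), None);
}
```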
-    fn check_invalid_ancestor_with_head(
-        &mut self,
-        check: B256,
-        head: B256,
-    ) -> ProviderResult<Option<PayloadStatus>> {
-        // check if the check hash was previously marked as invalid
-        let Some(block) = self.invalid_headers.get(&check) else { return Ok(None) };
-
-        // populate the latest valid hash field
-        let status = self.prepare_invalid_response(block.parent)?;
-
-        // insert the head block into the invalid header cache
-        self.invalid_headers.insert_with_invalid_ancestor(head, block);
-
-        Ok(Some(status))
-    }
-
-    /// Checks if the given `head` points to an invalid header, which requires a specific response
-    /// to a forkchoice update.
-    fn check_invalid_ancestor(&mut self, head: B256) -> ProviderResult<Option<PayloadStatus>> {
-        // check if the head was previously marked as invalid
-        let Some(block) = self.invalid_headers.get(&head) else { return Ok(None) };
-
-        // populate the latest valid hash field
-        Ok(Some(self.prepare_invalid_response(block.parent)?))
-    }
-
-    /// Record latency metrics for one call to make a block canonical.
-    /// Takes the start time of the call and the result of the make canonical call.
-    ///
-    /// Handles cases for error, already canonical and committed blocks
-    fn record_make_canonical_latency(
-        &self,
-        start: Instant,
-        outcome: &Result<CanonicalOutcome, CanonicalError>,
-    ) -> Duration {
-        let elapsed = start.elapsed();
-        self.metrics.make_canonical_latency.record(elapsed);
-        match outcome {
-            Ok(CanonicalOutcome::AlreadyCanonical { .. }) => {
-                self.metrics.make_canonical_already_canonical_latency.record(elapsed)
-            }
-            Ok(CanonicalOutcome::Committed { .. }) => {
-                self.metrics.make_canonical_committed_latency.record(elapsed)
-            }
-            Err(_) => self.metrics.make_canonical_error_latency.record(elapsed),
-        }
-        elapsed
-    }
-
-    /// Ensures that the given forkchoice state is consistent, assuming the head block has been
-    /// made canonical.
-    ///
-    /// If the forkchoice state is consistent, this will return Ok(None). Otherwise, this will
-    /// return an instance of [`OnForkChoiceUpdated`] that is INVALID.
-    ///
-    /// This also updates the safe and finalized blocks in the [`CanonChainTracker`], if they are
-    /// consistent with the head block.
-    fn ensure_consistent_forkchoice_state(
-        &self,
-        state: ForkchoiceState,
-    ) -> ProviderResult<Option<OnForkChoiceUpdated>> {
-        // Ensure that the finalized block, if not zero, is known and in the canonical chain
-        // after the head block is canonicalized.
-        //
-        // This ensures that the finalized block is consistent with the head block, i.e. the
-        // finalized block is an ancestor of the head block.
-        if !state.finalized_block_hash.is_zero() &&
-            !self.blockchain.is_canonical(state.finalized_block_hash)?
-        {
-            return Ok(Some(OnForkChoiceUpdated::invalid_state()))
-        }
-
-        // Finalized block is consistent, so update it in the canon chain tracker.
-        self.update_finalized_block(state.finalized_block_hash)?;
-
-        // Also ensure that the safe block, if not zero, is known and in the canonical chain
-        // after the head block is canonicalized.
-        //
-        // This ensures that the safe block is consistent with the head block, i.e. the safe
-        // block is an ancestor of the head block.
-        if !state.safe_block_hash.is_zero() &&
-            !self.blockchain.is_canonical(state.safe_block_hash)?
-        {
-            return Ok(Some(OnForkChoiceUpdated::invalid_state()))
-        }
-
-        // Safe block is consistent, so update it in the canon chain tracker.
-        self.update_safe_block(state.safe_block_hash)?;
-
-        Ok(None)
-    }
-
-    /// Sets the state of the canon chain tracker based on the given head.
-    ///
-    /// This expects the given head to be the new canonical head.
-    ///
-    /// Additionally, updates the head used for p2p handshakes.
-    ///
-    /// This also updates the tracked safe and finalized blocks, and should be called before
-    /// returning a VALID forkchoice update response
-    fn update_canon_chain(&self, head: SealedHeader, update: &ForkchoiceState) -> RethResult<()> {
-        self.update_head(head)?;
-        self.update_finalized_block(update.finalized_block_hash)?;
-        self.update_safe_block(update.safe_block_hash)?;
-        Ok(())
-    }
-
-    /// Updates the state of the canon chain tracker based on the given head.
-    ///
-    /// This expects the given head to be the new canonical head.
-    /// Additionally, updates the head used for p2p handshakes.
-    ///
-    /// This should be called before returning a VALID forkchoice update response
-    #[inline]
-    fn update_head(&self, head: SealedHeader) -> RethResult<()> {
-        let mut head_block = Head {
-            number: head.number,
-            hash: head.hash(),
-            difficulty: head.difficulty,
-            timestamp: head.timestamp,
-            // NOTE: this will be set later
-            total_difficulty: Default::default(),
-        };
-
-        // we update the tracked header first
-        self.blockchain.set_canonical_head(head);
-
-        head_block.total_difficulty =
-            self.blockchain.header_td_by_number(head_block.number)?.ok_or_else(|| {
-                RethError::Provider(ProviderError::TotalDifficultyNotFound(head_block.number))
-            })?;
-        self.sync_state_updater.update_status(head_block);
-
-        Ok(())
-    }
-
-    /// Updates the tracked safe block if we have it
-    ///
-    /// Returns an error if the block is not found.
-    #[inline]
-    fn update_safe_block(&self, safe_block_hash: B256) -> ProviderResult<()> {
-        if !safe_block_hash.is_zero() {
-            if self.blockchain.safe_block_hash()? == Some(safe_block_hash) {
-                // nothing to update
-                return Ok(())
-            }
-
-            let safe = self
-                .blockchain
-                .find_block_by_hash(safe_block_hash, BlockSource::Any)?
-                .ok_or(ProviderError::UnknownBlockHash(safe_block_hash))?;
-            self.blockchain.set_safe(SealedHeader::new(safe.split().0, safe_block_hash));
-        }
-        Ok(())
-    }
-
-    /// Updates the tracked finalized block if we have it
-    ///
-    /// Returns an error if the block is not found.
-    #[inline]
-    fn update_finalized_block(&self, finalized_block_hash: B256) -> ProviderResult<()> {
-        if !finalized_block_hash.is_zero() {
-            if self.blockchain.finalized_block_hash()? == Some(finalized_block_hash) {
-                // nothing to update
-                return Ok(())
-            }
-
-            let finalized = self
-                .blockchain
-                .find_block_by_hash(finalized_block_hash, BlockSource::Any)?
-                .ok_or(ProviderError::UnknownBlockHash(finalized_block_hash))?;
-            self.blockchain.finalize_block(finalized.header().number())?;
-            self.blockchain
-                .set_finalized(SealedHeader::new(finalized.split().0, finalized_block_hash));
-        }
-        Ok(())
-    }
-
-    /// Handler for a failed forkchoice update due to a canonicalization error.
-    ///
-    /// This will determine if the state's head is invalid, and if so, return immediately.
-    ///
-    /// If the newest head is not invalid, then this will trigger a new pipeline run to sync the gap
-    ///
-    /// See [`Self::on_forkchoice_updated`] and [`BlockchainTreeEngine::make_canonical`].
-    fn on_failed_canonical_forkchoice_update(
-        &mut self,
-        state: &ForkchoiceState,
-        error: CanonicalError,
-    ) -> ProviderResult<PayloadStatus> {
-        debug_assert!(self.sync.is_pipeline_idle(), "pipeline must be idle");
-
-        // check if the new head was previously invalidated, if so then we deem this FCU
-        // as invalid
-        if let Some(invalid_ancestor) = self.check_invalid_ancestor(state.head_block_hash)?
{ - warn!(target: "consensus::engine", %error, ?state, ?invalid_ancestor, head=?state.head_block_hash, "Failed to canonicalize the head hash, head is also considered invalid"); - debug!(target: "consensus::engine", head=?state.head_block_hash, current_error=%error, "Head was previously marked as invalid"); - return Ok(invalid_ancestor) - } - - match &error { - CanonicalError::Validation(BlockValidationError::BlockPreMerge { .. }) => { - warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash"); - return Ok(PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: error.to_string(), - }) - .with_latest_valid_hash(B256::ZERO)) - } - CanonicalError::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { - .. - }) => { - // This just means we couldn't find the block when attempting to make it canonical, - // so we should not warn the user, since this will result in us attempting to sync - // to a new target and is considered normal operation during sync - } - CanonicalError::OptimisticTargetRevert(block_number) => { - self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(*block_number)); - return Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing)) - } - _ => { - warn!(target: "consensus::engine", %error, ?state, "Failed to canonicalize the head hash"); - // TODO(mattsse) better error handling before attempting to sync (FCU could be - // invalid): only trigger sync if we can't determine whether the FCU is invalid - } - } - - // we assume the FCU is valid and at least the head is missing, - // so we need to start syncing to it - // - // find the appropriate target to sync to, if we don't have the safe block hash then we - // start syncing to the safe block via pipeline first - let target = if self.forkchoice_state_tracker.is_empty() && - // check that safe block is valid and missing - !state.safe_block_hash.is_zero() && - self.blockchain.block_number(state.safe_block_hash).ok().flatten().is_none() - { - state.safe_block_hash - } else { - state.head_block_hash - }; - - // we need to first check the buffer for the target and its ancestors - let target = self.lowest_buffered_ancestor_or(target); - - // if the threshold is zero, we should not download the block first, and just use the - // pipeline. Otherwise we use the tree to insert the block first - if self.pipeline_run_threshold == 0 { - // use the pipeline to sync to the target - trace!(target: "consensus::engine", %target, "Triggering pipeline run to sync missing ancestors of the new head"); - self.sync.set_pipeline_sync_target(target.into()); - } else { - // trigger a full block download for missing hash, or the parent of its lowest buffered - // ancestor - trace!(target: "consensus::engine", request=%target, "Triggering full block download for missing ancestors of the new head"); - self.sync.download_full_block(target); - } - - debug!(target: "consensus::engine", %target, "Syncing to new target"); - Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing)) - } - - /// Return the parent hash of the lowest buffered ancestor for the requested block, if there - /// are any buffered ancestors. If there are no buffered ancestors, and the block itself does - /// not exist in the buffer, this returns the hash that is passed in. - /// - /// Returns the parent hash of the block itself if the block is buffered and has no other - /// buffered ancestors. 
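The target selection at the end of `on_failed_canonical_forkchoice_update` above picks the safe block over the head only on the very first FCU, when the safe hash is set and unknown locally; a zero pipeline threshold then forces a pipeline run instead of a block download. A simplified sketch of that decision, with illustrative names and `u64` stubs instead of hashes (the real code additionally resolves the lowest buffered ancestor first):

```rust
#[derive(Debug, PartialEq)]
enum SyncAction {
    RunPipelineTo(u64),
    DownloadBlock(u64),
}

fn pick_sync_action(
    first_fcu: bool,
    safe_block: Option<u64>, // `None` models the zero hash
    is_known: impl Fn(u64) -> bool,
    head: u64,
    pipeline_run_threshold: u64,
) -> SyncAction {
    let target = match safe_block {
        Some(safe) if first_fcu && !is_known(safe) => safe,
        _ => head,
    };
    if pipeline_run_threshold == 0 {
        SyncAction::RunPipelineTo(target)
    } else {
        SyncAction::DownloadBlock(target)
    }
}

fn main() {
    // First FCU with an unknown safe block: sync to the safe block first.
    let action = pick_sync_action(true, Some(90), |_| false, 100, 32);
    assert_eq!(action, SyncAction::DownloadBlock(90));

    // Later FCUs target the head; a zero threshold forces the pipeline.
    let action = pick_sync_action(false, Some(90), |_| false, 100, 0);
    assert_eq!(action, SyncAction::RunPipelineTo(100));
}
```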
- fn lowest_buffered_ancestor_or(&self, hash: B256) -> B256 { - self.blockchain - .lowest_buffered_ancestor(hash) - .map(|block| block.parent_hash) - .unwrap_or_else(|| hash) - } - - /// When the Consensus layer receives a new block via the consensus gossip protocol, - /// the transactions in the block are sent to the execution layer in the form of a - /// [`ExecutionPayload`]. The Execution layer executes the transactions and validates the - /// state in the block header, then passes validation data back to Consensus layer, that - /// adds the block to the head of its own blockchain and attests to it. The block is then - /// broadcast over the consensus p2p network in the form of a "Beacon block". - /// - /// These responses should adhere to the [Engine API Spec for - /// `engine_newPayload`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#specification). - /// - /// This returns a [`PayloadStatus`] that represents the outcome of a processed new payload and - /// returns an error if an internal error occurred. - #[instrument(level = "trace", skip(self, payload, sidecar), fields(block_hash = ?payload.block_hash(), block_number = %payload.block_number(), is_pipeline_idle = %self.sync.is_pipeline_idle()), target = "consensus::engine")] - fn on_new_payload( - &mut self, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - ) -> Result, BeaconOnNewPayloadError> { - self.metrics.new_payload_messages.increment(1); - - // Ensures that the given payload does not violate any consensus rules that concern the - // block's layout, like: - // - missing or invalid base fee - // - invalid extra data - // - invalid transactions - // - incorrect hash - // - the versioned hashes passed with the payload do not exactly match transaction - // versioned hashes - // - the block does not contain blob transactions if it is pre-cancun - // - // This validates the following engine API rule: - // - // 3. Given the expected array of blob versioned hashes client software **MUST** run its - // validation by taking the following steps: - // - // 1. Obtain the actual array by concatenating blob versioned hashes lists - // (`tx.blob_versioned_hashes`) of each [blob - // transaction](https://eips.ethereum.org/EIPS/eip-4844#new-transaction-type) included - // in the payload, respecting the order of inclusion. If the payload has no blob - // transactions the expected array **MUST** be `[]`. - // - // 2. Return `{status: INVALID, latestValidHash: null, validationError: errorMessage | - // null}` if the expected and the actual arrays don't match. - // - // This validation **MUST** be instantly run in all cases even during active sync process. - let parent_hash = payload.parent_hash(); - let block = match self.payload_validator.ensure_well_formed_payload(payload, sidecar) { - Ok(block) => block, - Err(error) => { - error!(target: "consensus::engine", %error, "Invalid payload"); - // we need to convert the error to a payload status (response to the CL) - - let latest_valid_hash = - if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { - // Engine-API rules: - // > `latestValidHash: null` if the blockHash validation has failed () - // > `latestValidHash: null` if the expected and the actual arrays don't match () - None - } else { - self.latest_valid_hash_for_invalid_payload(parent_hash) - .map_err(BeaconOnNewPayloadError::internal)? 
- }; - - let status = PayloadStatusEnum::from(error); - return Ok(Either::Left(PayloadStatus::new(status, latest_valid_hash))) - } - }; - - let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block.hash()); - if lowest_buffered_ancestor == block.hash() { - lowest_buffered_ancestor = block.parent_hash; - } - - // now check the block itself - if let Some(status) = self - .check_invalid_ancestor_with_head(lowest_buffered_ancestor, block.hash()) - .map_err(BeaconOnNewPayloadError::internal)? - { - Ok(Either::Left(status)) - } else { - Ok(Either::Right(block)) - } - } - - /// Validates the payload attributes with respect to the header and fork choice state. - /// - /// Note: At this point, the fork choice update is considered to be VALID, however, we can still - /// return an error if the payload attributes are invalid. - fn process_payload_attributes( - &self, - attrs: ::PayloadAttributes, - head: Header, - state: ForkchoiceState, - version: EngineApiMessageVersion, - ) -> OnForkChoiceUpdated { - // 7. Client software MUST ensure that payloadAttributes.timestamp is greater than timestamp - // of a block referenced by forkchoiceState.headBlockHash. If this condition isn't held - // client software MUST respond with -38003: `Invalid payload attributes` and MUST NOT - // begin a payload build process. In such an event, the forkchoiceState update MUST NOT - // be rolled back. - if attrs.timestamp() <= head.timestamp { - return OnForkChoiceUpdated::invalid_payload_attributes() - } - - // 8. Client software MUST begin a payload build process building on top of - // forkchoiceState.headBlockHash and identified via buildProcessId value if - // payloadAttributes is not null and the forkchoice state has been updated successfully. - // The build process is specified in the Payload building section. - match <::PayloadBuilderAttributes as PayloadBuilderAttributes>::try_new( - state.head_block_hash, - attrs, - version as u8 - ) { - Ok(attributes) => { - // send the payload to the builder and return the receiver for the pending payload - // id, initiating payload job is handled asynchronously - let pending_payload_id = self.payload_builder.send_new_payload(attributes); - - // Client software MUST respond to this method call in the following way: - // { - // payloadStatus: { - // status: VALID, - // latestValidHash: forkchoiceState.headBlockHash, - // validationError: null - // }, - // payloadId: buildProcessId - // } - // - // if the payload is deemed VALID and the build process has begun. - OnForkChoiceUpdated::updated_with_pending_payload_id( - PayloadStatus::new(PayloadStatusEnum::Valid, Some(state.head_block_hash)), - pending_payload_id, - ) - } - Err(_) => OnForkChoiceUpdated::invalid_payload_attributes(), - } - } - - /// When the pipeline is active, the tree is unable to commit any additional blocks since the - /// pipeline holds exclusive access to the database. - /// - /// In this scenario we buffer the payload in the tree if the payload is valid, once the - /// pipeline is finished, the tree is then able to also use the buffered payloads to commit to a - /// (newer) canonical chain. - /// - /// This will return `SYNCING` if the block was buffered successfully, and an error if an error - /// occurred while buffering the block. 
-    #[instrument(level = "trace", skip_all, target = "consensus::engine", ret)]
-    fn try_buffer_payload(
-        &mut self,
-        block: SealedBlock,
-    ) -> Result<PayloadStatus, InsertBlockError> {
-        self.blockchain.buffer_block_without_senders(block)?;
-        Ok(PayloadStatus::from_status(PayloadStatusEnum::Syncing))
-    }
-
-    /// Attempts to insert a new payload into the tree.
-    ///
-    /// Caution: This expects that the pipeline is idle.
-    #[instrument(level = "trace", skip_all, target = "consensus::engine", ret)]
-    fn try_insert_new_payload(
-        &mut self,
-        block: SealedBlock,
-    ) -> Result<PayloadStatus, InsertBlockError> {
-        debug_assert!(self.sync.is_pipeline_idle(), "pipeline must be idle");
-
-        let block_hash = block.hash();
-        let start = Instant::now();
-        let status = self
-            .blockchain
-            .insert_block_without_senders(block.clone(), BlockValidationKind::Exhaustive)?;
-
-        let elapsed = start.elapsed();
-        let mut latest_valid_hash = None;
-        let status = match status {
-            InsertPayloadOk::Inserted(BlockStatus::Valid(attachment)) => {
-                latest_valid_hash = Some(block_hash);
-                let block = Arc::new(block);
-                let event = if attachment.is_canonical() {
-                    BeaconConsensusEngineEvent::CanonicalBlockAdded(block, elapsed)
-                } else {
-                    BeaconConsensusEngineEvent::ForkBlockAdded(block, elapsed)
-                };
-                self.event_sender.notify(event);
-                PayloadStatusEnum::Valid
-            }
-            InsertPayloadOk::AlreadySeen(BlockStatus::Valid(_)) => {
-                latest_valid_hash = Some(block_hash);
-                PayloadStatusEnum::Valid
-            }
-            InsertPayloadOk::Inserted(BlockStatus::Disconnected { .. }) |
-            InsertPayloadOk::AlreadySeen(BlockStatus::Disconnected { .. }) => {
-                // check if the block's parent is already marked as invalid
-                if let Some(status) =
-                    self.check_invalid_ancestor_with_head(block.parent_hash, block.hash()).map_err(
-                        |error| InsertBlockError::new(block, InsertBlockErrorKind::Provider(error)),
-                    )?
-                {
-                    return Ok(status)
-                }
-
-                // not known to be invalid, but we don't know anything else
-                PayloadStatusEnum::Syncing
-            }
-        };
-        Ok(PayloadStatus::new(status, latest_valid_hash))
-    }
-
-    /// This handles downloaded blocks that are shown to be disconnected from the canonical chain.
-    ///
-    /// This mainly compares the missing parent of the downloaded block with the current canonical
-    /// tip, and decides whether or not the pipeline should be run.
-    ///
-    /// The canonical tip is compared to the missing parent using `exceeds_pipeline_run_threshold`,
-    /// which returns true if the missing parent is sufficiently ahead of the canonical tip. If so,
-    /// the pipeline is run. Otherwise, we need to insert blocks using the blockchain tree, and
-    /// must download blocks outside of the pipeline. In this case, the distance is used to
-    /// determine how many blocks we should download at once.
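A stand-alone sketch of the decision the function below (`on_disconnected_block`) makes; the threshold constant and the `SyncAction` enum are illustrative stand-ins for `exceeds_pipeline_run_threshold` and the engine's real control flow:

```rust
/// Illustrative threshold; the real value is configuration-driven.
const PIPELINE_RUN_THRESHOLD: u64 = 32;

enum SyncAction {
    /// The gap is too large for the tree: hand the target to the pipeline.
    RunPipeline { target: u64 },
    /// The gap is small, or the parent is behind the tip (a short fork):
    /// download the missing range and insert it via the blockchain tree.
    DownloadBlocks { count: u64 },
}

fn on_missing_parent(canonical_tip: u64, missing_parent: u64) -> SyncAction {
    if missing_parent > canonical_tip &&
        missing_parent - canonical_tip > PIPELINE_RUN_THRESHOLD
    {
        SyncAction::RunPipeline { target: missing_parent }
    } else {
        // download at least one block, even when the parent is behind the tip
        let count = missing_parent.saturating_sub(canonical_tip).max(1);
        SyncAction::DownloadBlocks { count }
    }
}

fn main() {
    // tip at 100, missing parent at 200: the gap of 100 exceeds the threshold
    assert!(matches!(on_missing_parent(100, 200), SyncAction::RunPipeline { .. }));
    // missing parent behind the tip: a short fork, download a single block
    assert!(matches!(on_missing_parent(100, 90), SyncAction::DownloadBlocks { count: 1 }));
}
```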
- fn on_disconnected_block( - &mut self, - downloaded_block: BlockNumHash, - missing_parent: BlockNumHash, - head: BlockNumHash, - ) { - // compare the missing parent with the canonical tip - if let Some(target) = self.can_pipeline_sync_to_finalized( - head.number, - missing_parent.number, - Some(downloaded_block), - ) { - // we don't have the block yet and the distance exceeds the allowed - // threshold - self.sync.set_pipeline_sync_target(target.into()); - // we can exit early here because the pipeline will take care of syncing - return - } - - // continue downloading the missing parent - // - // this happens if either: - // * the missing parent block num < canonical tip num - // * this case represents a missing block on a fork that is shorter than the canonical - // chain - // * the missing parent block num >= canonical tip num, but the number of missing blocks is - // less than the pipeline threshold - // * this case represents a potentially long range of blocks to download and execute - if let Some(distance) = self.distance_from_local_tip(head.number, missing_parent.number) { - self.sync.download_block_range(missing_parent.hash, distance) - } else { - // This happens when the missing parent is on an outdated - // sidechain - self.sync.download_full_block(missing_parent.hash); - } - } - - /// Attempt to form a new canonical chain based on the current sync target. - /// - /// This is invoked when we successfully __downloaded__ a new block from the network which - /// resulted in [`BlockStatus::Valid`]. - /// - /// Note: This will not succeed if the sync target has changed since the block download request - /// was issued and the new target is still disconnected and additional missing blocks are - /// downloaded - fn try_make_sync_target_canonical( - &mut self, - inserted: BlockNumHash, - ) -> Result<(), (B256, CanonicalError)> { - let Some(target) = self.forkchoice_state_tracker.sync_target_state() else { return Ok(()) }; - - // optimistically try to make the head of the current FCU target canonical, the sync - // target might have changed since the block download request was issued - // (new FCU received) - let start = Instant::now(); - let make_canonical_result = self.blockchain.make_canonical(target.head_block_hash); - let elapsed = self.record_make_canonical_latency(start, &make_canonical_result); - match make_canonical_result { - Ok(outcome) => { - if let CanonicalOutcome::Committed { head } = &outcome { - self.event_sender.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( - Box::new(head.clone()), - elapsed, - )); - } - - let new_head = outcome.into_header(); - debug!(target: "consensus::engine", hash=?new_head.hash(), number=new_head.number, "Canonicalized new head"); - - // we can update the FCU blocks - if let Err(err) = self.update_canon_chain(new_head, &target) { - debug!(target: "consensus::engine", ?err, ?target, "Failed to update the canonical chain tracker"); - } - - // we're no longer syncing - self.sync_state_updater.update_sync_state(SyncState::Idle); - - // clear any active block requests - self.sync.clear_block_download_requests(); - Ok(()) - } - Err(err) => { - // if we failed to make the FCU's head canonical, because we don't have that - // block yet, then we can try to make the inserted block canonical if we know - // it's part of the canonical chain: if it's the safe or the finalized block - if err.is_block_hash_not_found() { - // if the inserted block is the currently targeted `finalized` or `safe` - // block, we will attempt to make them canonical, - 
-                        // because they are also part of the canonical chain and
-                        // their missing block range might already be downloaded (buffered).
-                        if let Some(target_hash) =
-                            ForkchoiceStateHash::find(&target, inserted.hash).filter(|h| !h.is_head())
-                        {
-                            // TODO: do not ignore this
-                            let _ = self.blockchain.make_canonical(*target_hash.as_ref());
-                        }
-                    } else if let Some(block_number) = err.optimistic_revert_block_number() {
-                        self.sync.set_pipeline_sync_target(PipelineTarget::Unwind(block_number));
-                    }
-
-                    Err((target.head_block_hash, err))
-                }
-            }
-        }
-
-    /// Event handler for events emitted by the [`EngineSyncController`].
-    ///
-    /// This returns a result to indicate whether the engine future should resolve (fatal error).
-    fn on_sync_event(
-        &mut self,
-        event: EngineSyncEvent,
-    ) -> Result<EngineEventOutcome, BeaconConsensusEngineError> {
-        let outcome = match event {
-            EngineSyncEvent::FetchedFullBlock(block) => {
-                trace!(target: "consensus::engine", hash=?block.hash(), number=%block.number, "Downloaded full block");
-                // Insert block only if the block's parent is not marked as invalid
-                if self
-                    .check_invalid_ancestor_with_head(block.parent_hash, block.hash())
-                    .map_err(|error| BeaconConsensusEngineError::Common(error.into()))?
-                    .is_none()
-                {
-                    self.set_blockchain_tree_action(
-                        BlockchainTreeAction::InsertDownloadedPayload { block },
-                    );
-                }
-                EngineEventOutcome::Processed
-            }
-            EngineSyncEvent::PipelineStarted(target) => {
-                trace!(target: "consensus::engine", ?target, continuous = target.is_none(), "Started the pipeline");
-                self.metrics.pipeline_runs.increment(1);
-                self.sync_state_updater.update_sync_state(SyncState::Syncing);
-                EngineEventOutcome::Processed
-            }
-            EngineSyncEvent::PipelineFinished { result, reached_max_block } => {
-                trace!(target: "consensus::engine", ?result, ?reached_max_block, "Pipeline finished");
-                // Any pipeline error at this point is fatal.
-                let ctrl = result?;
-                if reached_max_block {
-                    // Terminate the sync early if it's reached the maximum user-configured block.
-                    EngineEventOutcome::ReachedMaxBlock
-                } else {
-                    self.on_pipeline_outcome(ctrl)?;
-                    EngineEventOutcome::Processed
-                }
-            }
-            EngineSyncEvent::PipelineTaskDropped => {
-                error!(target: "consensus::engine", "Failed to receive spawned pipeline");
-                return Err(BeaconConsensusEngineError::PipelineChannelClosed)
-            }
-        };
-
-        Ok(outcome)
-    }
-
-    /// Invoked when the pipeline has successfully finished.
-    ///
-    /// Updates the internal sync state depending on the pipeline configuration,
-    /// the outcome of the pipeline run and the last observed forkchoice state.
-    fn on_pipeline_outcome(&mut self, ctrl: ControlFlow) -> RethResult<()> {
-        // Pipeline unwound, memorize the invalid block and wait for CL for next sync target.
-        if let ControlFlow::Unwind { bad_block, .. } = ctrl {
-            warn!(target: "consensus::engine", invalid_num_hash=?bad_block.block, "Bad block detected in unwind");
-            // update the `invalid_headers` cache with the new invalid header
-            self.invalid_headers.insert(*bad_block);
-            return Ok(())
-        }
-
-        let sync_target_state = match self.forkchoice_state_tracker.sync_target_state() {
-            Some(current_state) => current_state,
-            None => {
-                // This is only possible if the node was run with `debug.tip`
-                // argument and without CL.
- warn!(target: "consensus::engine", "No fork choice state available"); - return Ok(()) - } - }; - - if sync_target_state.finalized_block_hash.is_zero() { - self.set_canonical_head(ctrl.block_number().unwrap_or_default())?; - self.blockchain.update_block_hashes_and_clear_buffered()?; - self.blockchain.connect_buffered_blocks_to_canonical_hashes()?; - // We are on an optimistic syncing process, better to wait for the next FCU to handle - return Ok(()) - } - - // Next, we check if we need to schedule another pipeline run or transition - // to live sync via tree. - // This can arise if we buffer the forkchoice head, and if the head is an - // ancestor of an invalid block. - // - // * The forkchoice head could be buffered if it were first sent as a `newPayload` request. - // - // In this case, we won't have the head hash in the database, so we would - // set the pipeline sync target to a known-invalid head. - // - // This is why we check the invalid header cache here. - let lowest_buffered_ancestor = - self.lowest_buffered_ancestor_or(sync_target_state.head_block_hash); - - // this inserts the head into invalid headers cache - // if the lowest buffered ancestor is invalid - if self - .check_invalid_ancestor_with_head( - lowest_buffered_ancestor, - sync_target_state.head_block_hash, - )? - .is_some() - { - warn!( - target: "consensus::engine", - invalid_ancestor = %lowest_buffered_ancestor, - head = %sync_target_state.head_block_hash, - "Current head has an invalid ancestor" - ); - return Ok(()) - } - - // get the block number of the finalized block, if we have it - let newest_finalized = self - .blockchain - .buffered_header_by_hash(sync_target_state.finalized_block_hash) - .map(|header| header.number); - - // The block number that the pipeline finished at - if the progress or newest - // finalized is None then we can't check the distance anyways. - // - // If both are Some, we perform another distance check and return the desired - // pipeline target - let pipeline_target = - ctrl.block_number().zip(newest_finalized).and_then(|(progress, finalized_number)| { - // Determines whether or not we should run the pipeline again, in case - // the new gap is large enough to warrant - // running the pipeline. - self.can_pipeline_sync_to_finalized(progress, finalized_number, None) - }); - - // If the distance is large enough, we should run the pipeline again to prevent - // the tree update from executing too many blocks and blocking. - if let Some(target) = pipeline_target { - // run the pipeline to the target since the distance is sufficient - self.sync.set_pipeline_sync_target(target.into()); - } else if let Some(number) = - self.blockchain.block_number(sync_target_state.finalized_block_hash)? - { - // Finalized block is in the database, attempt to restore the tree with - // the most recent canonical hashes. - self.blockchain.connect_buffered_blocks_to_canonical_hashes_and_finalize(number).inspect_err(|error| { - error!(target: "consensus::engine", %error, "Error restoring blockchain tree state"); - })?; - } else { - // We don't have the finalized block in the database, so we need to - // trigger another pipeline run. - self.sync.set_pipeline_sync_target(sync_target_state.finalized_block_hash.into()); - } - - Ok(()) - } - - fn set_canonical_head(&self, max_block: BlockNumber) -> RethResult<()> { - let max_header = self.blockchain.sealed_header(max_block) - .inspect_err(|error| { - error!(target: "consensus::engine", %error, "Error getting canonical header for continuous sync"); - })? 
-            .ok_or_else(|| ProviderError::HeaderNotFound(max_block.into()))?;
-        self.blockchain.set_canonical_head(max_header);
-
-        Ok(())
-    }
-
-    fn on_hook_result(&self, polled_hook: PolledHook) -> Result<(), BeaconConsensusEngineError> {
-        if let EngineHookEvent::Finished(Err(error)) = &polled_hook.event {
-            error!(
-                target: "consensus::engine",
-                name = %polled_hook.name,
-                ?error,
-                "Hook finished with error"
-            )
-        }
-
-        if polled_hook.db_access_level.is_read_write() {
-            match polled_hook.event {
-                EngineHookEvent::NotReady => {}
-                EngineHookEvent::Started => {
-                    // If the hook has read-write access to the database, it means that the engine
-                    // can't process any FCU messages from CL. To prevent CL from sending us
-                    // unneeded updates, we need to respond `true` on `eth_syncing` request.
-                    self.sync_state_updater.update_sync_state(SyncState::Syncing)
-                }
-                EngineHookEvent::Finished(_) => {
-                    // Hook with read-write access to the database has finished running, so engine
-                    // can process new FCU messages from CL again. It's safe to
-                    // return `false` on `eth_syncing` request.
-                    self.sync_state_updater.update_sync_state(SyncState::Idle);
-                    // If the hook had read-write access to the database, it means that the engine
-                    // may have accumulated some buffered blocks.
-                    if let Err(error) =
-                        self.blockchain.connect_buffered_blocks_to_canonical_hashes()
-                    {
-                        error!(target: "consensus::engine", %error, "Error connecting buffered blocks to canonical hashes on hook result");
-                        return Err(RethError::Canonical(error).into())
-                    }
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Process the next blockchain tree action that was set.
-    /// The handler might itself set the next blockchain tree action to perform,
-    /// so the resulting state change should be handled accordingly.
-    fn on_blockchain_tree_action(
-        &mut self,
-        action: BlockchainTreeAction,
-    ) -> RethResult<EngineEventOutcome> {
-        match action {
-            BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx } => {
-                let start = Instant::now();
-                let result = self.blockchain.make_canonical(state.head_block_hash);
-                let elapsed = self.record_make_canonical_latency(start, &result);
-                match self
-                    .on_forkchoice_updated_make_canonical_result(state, attrs, result, elapsed)
-                {
-                    Ok(on_updated) => {
-                        trace!(target: "consensus::engine", status = ?on_updated, ?state, "Returning forkchoice status");
-                        let fcu_status = on_updated.forkchoice_status();
-                        self.on_forkchoice_updated_status(state, on_updated, tx);
-
-                        if fcu_status.is_valid() {
-                            let tip_number = self.blockchain.canonical_tip().number;
-                            if self.sync.has_reached_max_block(tip_number) {
-                                // Terminate the sync early if it's reached
-                                // the maximum user-configured block.
- return Ok(EngineEventOutcome::ReachedMaxBlock) - } - } - } - Err(error) => { - let _ = tx.send(Err(RethError::Canonical(error.clone()))); - if error.is_fatal() { - return Err(RethError::Canonical(error)) - } - } - }; - } - BlockchainTreeAction::InsertNewPayload { block, tx } => { - let block_hash = block.hash(); - let block_num_hash = block.num_hash(); - let result = if self.sync.is_pipeline_idle() { - // we can only insert new payloads if the pipeline is _not_ running, because it - // holds exclusive access to the database - self.try_insert_new_payload(block) - } else { - self.try_buffer_payload(block) - }; - - let status = match result { - Ok(status) => status, - Err(error) => { - warn!(target: "consensus::engine", %error, "Error while processing payload"); - - let (block, error) = error.split(); - if !error.is_invalid_block() { - // TODO: revise if any error should be considered fatal at this point. - let _ = - tx.send(Err(BeaconOnNewPayloadError::Internal(Box::new(error)))); - return Ok(EngineEventOutcome::Processed) - } - - // If the error was due to an invalid payload, the payload is added to the - // invalid headers cache and `Ok` with [PayloadStatusEnum::Invalid] is - // returned. - warn!(target: "consensus::engine", invalid_hash=?block.hash(), invalid_number=?block.number, %error, "Invalid block error on new payload"); - let latest_valid_hash = if error.is_block_pre_merge() { - // zero hash must be returned if block is pre-merge - Some(B256::ZERO) - } else { - self.latest_valid_hash_for_invalid_payload(block.parent_hash)? - }; - // keep track of the invalid header - self.invalid_headers.insert(block.sealed_header().block_with_parent()); - PayloadStatus::new( - PayloadStatusEnum::Invalid { validation_error: error.to_string() }, - latest_valid_hash, - ) - } - }; - - if status.is_valid() { - if let Some(target) = self.forkchoice_state_tracker.sync_target_state() { - // if we're currently syncing and the inserted block is the targeted - // FCU head block, we can try to make it canonical. - if block_hash == target.head_block_hash { - self.set_blockchain_tree_action( - BlockchainTreeAction::MakeNewPayloadCanonical { - payload_num_hash: block_num_hash, - status, - tx, - }, - ); - return Ok(EngineEventOutcome::Processed) - } - } - // block was successfully inserted, so we can cancel the full block - // request, if any exists - self.sync.cancel_full_block_request(block_hash); - } - - trace!(target: "consensus::engine", ?status, "Returning payload status"); - let _ = tx.send(Ok(status)); - } - BlockchainTreeAction::MakeNewPayloadCanonical { payload_num_hash, status, tx } => { - let status = match self.try_make_sync_target_canonical(payload_num_hash) { - Ok(()) => status, - Err((_hash, error)) => { - if error.is_fatal() { - let response = - Err(BeaconOnNewPayloadError::Internal(Box::new(error.clone()))); - let _ = tx.send(response); - return Err(RethError::Canonical(error)) - } else if error.optimistic_revert_block_number().is_some() { - // engine already set the pipeline unwind target on - // `try_make_sync_target_canonical` - PayloadStatus::from_status(PayloadStatusEnum::Syncing) - } else { - // If we could not make the sync target block canonical, - // we should return the error as an invalid payload status. 
- PayloadStatus::new( - PayloadStatusEnum::Invalid { validation_error: error.to_string() }, - // TODO: return a proper latest valid hash - // See: - self.forkchoice_state_tracker.last_valid_head(), - ) - } - } - }; - - trace!(target: "consensus::engine", ?status, "Returning payload status"); - let _ = tx.send(Ok(status)); - } - - BlockchainTreeAction::InsertDownloadedPayload { block } => { - let downloaded_num_hash = block.num_hash(); - match self.blockchain.insert_block_without_senders( - block, - BlockValidationKind::SkipStateRootValidation, - ) { - Ok(status) => { - match status { - InsertPayloadOk::Inserted(BlockStatus::Valid(_)) => { - // block is connected to the canonical chain and is valid. - // if it's not connected to current canonical head, the state root - // has not been validated. - if let Err((hash, error)) = - self.try_make_sync_target_canonical(downloaded_num_hash) - { - if error.is_fatal() { - error!(target: "consensus::engine", %error, "Encountered fatal error while making sync target canonical: {:?}, {:?}", error, hash); - } else if !error.is_block_hash_not_found() { - debug!( - target: "consensus::engine", - "Unexpected error while making sync target canonical: {:?}, {:?}", - error, - hash - ) - } - } - } - InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head, - missing_ancestor: missing_parent, - }) => { - // block is not connected to the canonical head, we need to download - // its missing branch first - self.on_disconnected_block( - downloaded_num_hash, - missing_parent, - head, - ); - } - _ => (), - } - } - Err(err) => { - warn!(target: "consensus::engine", %err, "Failed to insert downloaded block"); - if err.kind().is_invalid_block() { - let (block, err) = err.split(); - warn!(target: "consensus::engine", invalid_number=?block.number, invalid_hash=?block.hash(), %err, "Marking block as invalid"); - - self.invalid_headers.insert(block.sealed_header().block_with_parent()); - } - } - } - } - }; - Ok(EngineEventOutcome::Processed) - } -} - -/// On initialization, the consensus engine will poll the message receiver and return -/// [`Poll::Pending`] until the first forkchoice update message is received. -/// -/// As soon as the consensus engine receives the first forkchoice updated message and updates the -/// local forkchoice state, it will launch the pipeline to sync to the head hash. -/// While the pipeline is syncing, the consensus engine will keep processing messages from the -/// receiver and forwarding them to the blockchain tree. -impl Future for BeaconConsensusEngine -where - N: TreeNodeTypes, - Client: EthBlockClient + 'static, - BT: BlockchainTreeEngine - + BlockReader, Header = HeaderTy> - + BlockIdReader - + CanonChainTracker
> - + StageCheckpointReader - + ChainSpecProvider - + Unpin - + 'static, -{ - type Output = Result<(), BeaconConsensusEngineError>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.get_mut(); - - // Control loop that advances the state - 'main: loop { - // Poll a running hook with db write access (if any) and CL messages first, draining - // both and then proceeding to polling other parts such as SyncController and hooks. - loop { - // Poll a running hook with db write access first, as we will not be able to process - // any engine messages until it's finished. - if let Poll::Ready(result) = - this.hooks.poll_active_db_write_hook(cx, this.current_engine_hook_context()?)? - { - this.on_hook_result(result)?; - continue - } - - // Process any blockchain tree action result as set forth during engine message - // processing. - if let Some(action) = this.blockchain_tree_action.take() { - match this.on_blockchain_tree_action(action) { - Ok(EngineEventOutcome::Processed) => {} - Ok(EngineEventOutcome::ReachedMaxBlock) => return Poll::Ready(Ok(())), - Err(error) => { - error!(target: "consensus::engine", %error, "Encountered fatal error"); - return Poll::Ready(Err(error.into())) - } - }; - - // Blockchain tree action handler might set next action to take. - continue - } - - // If the db write hook is no longer active and we have a pending forkchoice update, - // process it first. - if this.hooks.active_db_write_hook().is_none() { - if let Some((state, attrs, tx)) = this.pending_forkchoice_update.take() { - this.set_blockchain_tree_action( - BlockchainTreeAction::MakeForkchoiceHeadCanonical { state, attrs, tx }, - ); - continue - } - } - - // Process one incoming message from the CL. We don't drain the messages right away, - // because we want to sneak a polling of running hook in between them. - // - // These messages can affect the state of the SyncController and they're also time - // sensitive, hence they are polled first. - if let Poll::Ready(Some(msg)) = this.engine_message_stream.poll_next_unpin(cx) { - match msg { - BeaconEngineMessage::ForkchoiceUpdated { - state, - payload_attrs, - tx, - version: _version, - } => { - this.on_forkchoice_updated(state, payload_attrs, tx); - } - BeaconEngineMessage::NewPayload { payload, sidecar, tx } => { - match this.on_new_payload(payload, sidecar) { - Ok(Either::Right(block)) => { - this.set_blockchain_tree_action( - BlockchainTreeAction::InsertNewPayload { block, tx }, - ); - } - Ok(Either::Left(status)) => { - let _ = tx.send(Ok(status)); - } - Err(error) => { - let _ = tx.send(Err(error)); - } - } - } - BeaconEngineMessage::TransitionConfigurationExchanged => { - this.blockchain.on_transition_configuration_exchanged(); - } - } - continue - } - - // Both running hook with db write access and engine messages are pending, - // proceed to other polls - break - } - - // process sync events if any - if let Poll::Ready(sync_event) = this.sync.poll(cx) { - match this.on_sync_event(sync_event)? { - // Sync event was successfully processed - EngineEventOutcome::Processed => (), - // Max block has been reached, exit the engine loop - EngineEventOutcome::ReachedMaxBlock => return Poll::Ready(Ok(())), - } - - // this could have taken a while, so we start the next cycle to handle any new - // engine messages - continue 'main - } - - // at this point, all engine messages and sync events are fully drained - - // Poll next hook if all conditions are met: - // 1. Engine and sync messages are fully drained (both pending) - // 2. 
Latest FCU status is not INVALID - if !this.forkchoice_state_tracker.is_latest_invalid() { - if let Poll::Ready(result) = this.hooks.poll_next_hook( - cx, - this.current_engine_hook_context()?, - this.sync.is_pipeline_active(), - )? { - this.on_hook_result(result)?; - - // ensure we're polling until pending while also checking for new engine - // messages before polling the next hook - continue 'main - } - } - - // incoming engine messages and sync events are drained, so we can yield back - // control - return Poll::Pending - } - } -} - -enum BlockchainTreeAction { - MakeForkchoiceHeadCanonical { - state: ForkchoiceState, - attrs: Option, - tx: oneshot::Sender>, - }, - InsertNewPayload { - block: SealedBlock, - tx: oneshot::Sender>, - }, - MakeNewPayloadCanonical { - payload_num_hash: BlockNumHash, - status: PayloadStatus, - tx: oneshot::Sender>, - }, - /// Action to insert a new block that we successfully downloaded from the network. - /// There are several outcomes for inserting a downloaded block into the tree: - /// - /// ## [`BlockStatus::Valid`] - /// - /// The block is connected to the current canonical chain and is valid. - /// If the block is an ancestor of the current forkchoice head, then we can try again to - /// make the chain canonical. - /// - /// ## [`BlockStatus::Disconnected`] - /// - /// The block is not connected to the canonical chain, and we need to download the - /// missing parent first. - /// - /// ## Insert Error - /// - /// If the insertion into the tree failed, then the block was well-formed (valid hash), - /// but its chain is invalid, which means the FCU that triggered the - /// download is invalid. Here we can stop because there's nothing to do here - /// and the engine needs to wait for another FCU. - InsertDownloadedPayload { block: SealedBlock }, -} - -/// Represents outcomes of processing an engine event -#[derive(Debug)] -enum EngineEventOutcome { - /// Engine event was processed successfully, engine should continue. - Processed, - /// Engine event was processed successfully and reached max block. - ReachedMaxBlock, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - test_utils::{spawn_consensus_engine, TestConsensusEngineBuilder}, - BeaconForkChoiceUpdateError, - }; - use alloy_rpc_types_engine::{ForkchoiceState, ForkchoiceUpdated, PayloadStatus}; - use assert_matches::assert_matches; - use reth_chainspec::{ChainSpecBuilder, MAINNET}; - use reth_node_types::FullNodePrimitives; - use reth_primitives::BlockExt; - use reth_provider::{BlockWriter, ProviderFactory, StorageLocation}; - use reth_rpc_types_compat::engine::payload::block_to_payload_v1; - use reth_stages::{ExecOutput, PipelineError, StageError}; - use reth_stages_api::StageCheckpoint; - use reth_testing_utils::generators::{self, Rng}; - use std::{collections::VecDeque, sync::Arc}; - use tokio::sync::oneshot::error::TryRecvError; - - // Pipeline error is propagated. 
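The polling priority enforced by the `poll` implementation above can be reduced to a schematic; the string queues below are hypothetical placeholders for the db-write hook, CL-message, sync-event, and remaining-hook sources, and the loop is synchronous rather than a real `Future`:

```rust
use std::collections::VecDeque;

enum Step {
    MadeProgress,
    Idle,
}

struct Loops {
    db_write_hook: VecDeque<&'static str>,
    engine_messages: VecDeque<&'static str>,
    sync_events: VecDeque<&'static str>,
    other_hooks: VecDeque<&'static str>,
}

impl Loops {
    fn poll_once(&mut self) -> Step {
        // 1. a running hook with DB write access blocks everything else
        if let Some(hook) = self.db_write_hook.pop_front() {
            println!("db-write hook: {hook}");
            return Step::MadeProgress;
        }
        // 2. CL messages are time-sensitive, so they are drained next
        if let Some(msg) = self.engine_messages.pop_front() {
            println!("engine message: {msg}");
            return Step::MadeProgress;
        }
        // 3. then sync events (pipeline progress, finished block downloads)
        if let Some(event) = self.sync_events.pop_front() {
            println!("sync event: {event}");
            return Step::MadeProgress;
        }
        // 4. only once everything is drained do other hooks get a turn
        if let Some(hook) = self.other_hooks.pop_front() {
            println!("hook: {hook}");
            return Step::MadeProgress;
        }
        Step::Idle // the real loop returns `Poll::Pending` here
    }
}

fn main() {
    let mut loops = Loops {
        db_write_hook: VecDeque::new(),
        engine_messages: VecDeque::from(["newPayload", "forkchoiceUpdated"]),
        sync_events: VecDeque::from(["PipelineFinished"]),
        other_hooks: VecDeque::from(["prune"]),
    };
    while let Step::MadeProgress = loops.poll_once() {}
}
```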
- #[tokio::test] - async fn pipeline_error_is_propagated() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) - .disable_blockchain_tree_sync() - .with_max_block(1) - .build(); - - let res = spawn_consensus_engine(consensus_engine); - - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - assert_matches!( - res.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) - ); - } - - // Test that the consensus engine is idle until first forkchoice updated is received. - #[tokio::test] - async fn is_idle_until_forkchoice_is_set() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Err(StageError::ChannelClosed)])) - .disable_blockchain_tree_sync() - .with_max_block(1) - .build(); - - let mut rx = spawn_consensus_engine(consensus_engine); - - // consensus engine is idle - tokio::time::sleep(Duration::from_millis(100)).await; - assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); - - // consensus engine is still idle because no FCUs were received - let _ = env - .send_new_payload( - block_to_payload_v1(SealedBlock::<_>::default()), - ExecutionPayloadSidecar::none(), - ) - .await; - - assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); - - // consensus engine is still idle because pruning is running - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - assert_matches!(rx.try_recv(), Err(TryRecvError::Empty)); - - // consensus engine receives a forkchoice state and triggers the pipeline when pruning is - // finished - loop { - match rx.try_recv() { - Ok(result) => { - assert_matches!( - result, - Err(BeaconConsensusEngineError::Pipeline(n)) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) - ); - break - } - Err(TryRecvError::Empty) => { - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - } - Err(err) => panic!("receive error: {err}"), - } - } - } - - // Test that the consensus engine runs the pipeline again if the tree cannot be restored. - // The consensus engine will propagate the second result (error) only if it runs the pipeline - // for the second time. 
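The test that follows leans on the mocked-pipeline pattern used throughout these tests: per-run outcomes are queued up front, so observing the second (error) outcome proves the pipeline ran twice. In miniature, with hypothetical stand-ins for `ExecOutput` and `StageError`:

```rust
use std::collections::VecDeque;

#[derive(Debug, PartialEq)]
enum RunResult {
    Done(u64),
    ChannelClosed,
}

struct MockPipeline {
    outputs: VecDeque<RunResult>,
}

impl MockPipeline {
    fn run(&mut self) -> RunResult {
        self.outputs.pop_front().expect("test queued too few pipeline outputs")
    }
}

fn main() {
    let mut pipeline = MockPipeline {
        outputs: VecDeque::from([RunResult::Done(1), RunResult::ChannelClosed]),
    };
    assert_eq!(pipeline.run(), RunResult::Done(1));
    // the error only surfaces if the engine decided to run the pipeline a second time
    assert_eq!(pipeline.run(), RunResult::ChannelClosed);
}
```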
- #[tokio::test] - async fn runs_pipeline_again_if_tree_not_restored() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(1), done: true }), - Err(StageError::ChannelClosed), - ])) - .disable_blockchain_tree_sync() - .with_max_block(2) - .build(); - - let rx = spawn_consensus_engine(consensus_engine); - - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - finalized_block_hash: rng.gen(), - ..Default::default() - }) - .await; - - assert_matches!( - rx.await, - Ok(Err(BeaconConsensusEngineError::Pipeline(n))) if matches!(*n.as_ref(), PipelineError::Stage(StageError::ChannelClosed)) - ); - } - - #[tokio::test] - async fn terminates_upon_reaching_max_block() { - let mut rng = generators::rng(); - let max_block = 1000; - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(max_block), - done: true, - })])) - .with_max_block(max_block) - .disable_blockchain_tree_sync() - .build(); - - let rx = spawn_consensus_engine(consensus_engine); - - let _ = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - ..Default::default() - }) - .await; - assert_matches!(rx.await, Ok(Ok(()))); - } - - fn insert_blocks< - 'a, - N: ProviderNodeTypes< - Primitives: FullNodePrimitives< - BlockBody = reth_primitives::BlockBody, - BlockHeader = reth_primitives::Header, - >, - >, - >( - provider_factory: ProviderFactory, - mut blocks: impl Iterator, - ) { - let provider = provider_factory.provider_rw().unwrap(); - blocks - .try_for_each(|b| { - provider - .insert_block( - b.clone().try_seal_with_senders().expect("invalid tx signature in block"), - StorageLocation::Database, - ) - .map(drop) - }) - .expect("failed to insert"); - provider.commit().unwrap(); - } - - mod fork_choice_updated { - use super::*; - use alloy_primitives::U256; - use alloy_rpc_types_engine::ForkchoiceUpdateError; - use generators::BlockParams; - use reth_db::{tables, test_utils::create_test_static_files_dir, Database}; - use reth_db_api::transaction::DbTxMut; - use reth_provider::{providers::StaticFileProvider, test_utils::MockNodeTypesWithDB}; - use reth_testing_utils::generators::random_block; - - #[tokio::test] - async fn empty_head() { - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - let res = env.send_forkchoice_updated(ForkchoiceState::default()).await; - assert_matches!( - res, - Err(BeaconForkChoiceUpdateError::ForkchoiceUpdateError( - ForkchoiceUpdateError::InvalidState - )) - ); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn 
valid_forkchoice() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - env.db - .update(|tx| { - tx.put::( - StageId::Finish.to_string(), - StageCheckpoint::new(block1.number), - ) - }) - .unwrap() - .unwrap(); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - let forkchoice = ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }; - - let result = env.send_forkchoice_updated(forkchoice).await.unwrap(); - let expected_result = ForkchoiceUpdated::new(PayloadStatus::new( - PayloadStatusEnum::Valid, - Some(block1.hash()), - )); - assert_eq!(result, expected_result); - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn unknown_head_hash() { - let mut rng = generators::rng(); - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .disable_blockchain_tree_sync() - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { parent: Some(genesis.hash()), ..Default::default() }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - let next_head = random_block( - &mut rng, - 2, - BlockParams { - parent: Some(block1.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - let next_forkchoice_state = ForkchoiceState { - head_block_hash: next_head.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }; - - // if we `await` in the assert, the forkchoice will poll after we've inserted the block, - // and it will return VALID instead of SYNCING - let invalid_rx = env.send_forkchoice_updated(next_forkchoice_state).await; - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - // Insert next head immediately after sending forkchoice update - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - 
StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - std::iter::once(&next_head), - ); - - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Syncing); - assert_matches!(invalid_rx, Ok(result) => assert_eq!(result, expected_result)); - - let result = env.send_forkchoice_retry_on_syncing(next_forkchoice_state).await.unwrap(); - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(next_head.hash()); - assert_eq!(result, expected_result); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn unknown_finalized_hash() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .disable_blockchain_tree_sync() - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let engine = spawn_consensus_engine(consensus_engine); - - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: rng.gen(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Syncing); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - drop(engine); - } - - #[tokio::test] - async fn forkchoice_updated_pre_merge() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .london_activated() - .paris_at_ttd(U256::from(3), 3) - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let mut block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - block1.set_difficulty(U256::from(1)); - - // a second pre-merge block - let mut block2 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - block2.set_difficulty(U256::from(1)); - - // a transition block - let mut block3 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - block3.set_difficulty(U256::from(1)); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - 
StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1, &block2, &block3].into_iter(), - ); - - let _engine = spawn_consensus_engine(consensus_engine); - - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - - assert_matches!(res, Ok(result) => { - let ForkchoiceUpdated { payload_status, .. } = result; - assert_matches!(payload_status.status, PayloadStatusEnum::Invalid { .. }); - assert_eq!(payload_status.latest_valid_hash, Some(B256::ZERO)); - }); - } - - #[tokio::test] - async fn forkchoice_updated_invalid_pow() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .london_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([ - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - Ok(ExecOutput { checkpoint: StageCheckpoint::new(0), done: true }), - ])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - let (_temp_dir, temp_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(temp_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let _engine = spawn_consensus_engine(consensus_engine); - - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = ForkchoiceUpdated::from_status(PayloadStatusEnum::Invalid { - validation_error: BlockValidationError::BlockPreMerge { hash: block1.hash() } - .to_string(), - }) - .with_latest_valid_hash(B256::ZERO); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - } - } - - mod new_payload { - use super::*; - use alloy_genesis::Genesis; - use alloy_primitives::U256; - use generators::BlockParams; - use reth_db::test_utils::create_test_static_files_dir; - use reth_primitives::EthereumHardfork; - use reth_provider::{ - providers::StaticFileProvider, - test_utils::{blocks::BlockchainTestData, MockNodeTypesWithDB}, - }; - use reth_testing_utils::{generators::random_block, GenesisAllocator}; - #[tokio::test] - async fn new_payload_before_forkchoice() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send new payload - let res = env - .send_new_payload( - block_to_payload_v1(random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - )), - ExecutionPayloadSidecar::none(), - ) - .await; - - // Invalid, because this is a genesis block - assert_matches!(res, Ok(result) => assert_matches!(result.status, 
PayloadStatusEnum::Invalid { .. })); - - // Send new payload - let res = env - .send_new_payload( - block_to_payload_v1(random_block( - &mut rng, - 1, - BlockParams { ommers_count: Some(0), ..Default::default() }, - )), - ExecutionPayloadSidecar::none(), - ) - .await; - - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn payload_known() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(genesis.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - let block2 = random_block( - &mut rng, - 2, - BlockParams { - parent: Some(block1.hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1, &block2].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(block1.hash()); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. 
}) => assert_eq!(payload_status, expected_result)); - - // Send new payload - let result = env - .send_new_payload_retry_on_syncing( - block_to_payload_v1(block2.clone()), - ExecutionPayloadSidecar::none(), - ) - .await - .unwrap(); - - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(block2.hash()); - assert_eq!(result, expected_result); - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn simple_validate_block() { - let mut rng = generators::rng(); - let amount = U256::from(1000000000000000000u64); - let mut allocator = GenesisAllocator::default().with_rng(&mut rng); - for _ in 0..16 { - // add 16 new accounts - allocator.new_funded_account(amount); - } - - let alloc = allocator.build(); - - let genesis = Genesis::default().extend_accounts(alloc); - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(genesis) - .shanghai_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_real_pipeline() - .with_real_executor() - .with_real_consensus() - .build(); - - let genesis = SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()); - let block1 = random_block( - &mut rng, - 1, - BlockParams { - parent: Some(chain_spec.genesis_hash()), - ommers_count: Some(0), - ..Default::default() - }, - ); - - // TODO: add transactions that transfer from the alloc accounts, generating the new - // block tx and state root - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&genesis, &block1].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(block1.hash()); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. 
}) => assert_eq!(payload_status, expected_result)); - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn payload_parent_unknown() { - let mut rng = generators::rng(); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .build(); - let genesis = random_block( - &mut rng, - 0, - BlockParams { ommers_count: Some(0), ..Default::default() }, - ); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - std::iter::once(&genesis), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: genesis.hash(), - finalized_block_hash: genesis.hash(), - ..Default::default() - }) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Valid) - .with_latest_valid_hash(genesis.hash()); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. }) => assert_eq!(payload_status, expected_result)); - - // Send new payload - let parent = rng.gen(); - let block = random_block( - &mut rng, - 2, - BlockParams { parent: Some(parent), ommers_count: Some(0), ..Default::default() }, - ); - let res = env - .send_new_payload(block_to_payload_v1(block), ExecutionPayloadSidecar::none()) - .await; - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Syncing); - assert_matches!(res, Ok(result) => assert_eq!(result, expected_result)); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[tokio::test] - async fn payload_pre_merge() { - let data = BlockchainTestData::default(); - let mut block1 = data.blocks[0].0.block.clone(); - block1.set_difficulty( - MAINNET.fork(EthereumHardfork::Paris).ttd().unwrap() - U256::from(1), - ); - block1 = block1.unseal::().seal_slow(); - let (block2, exec_result2) = data.blocks[1].clone(); - let mut block2 = block2.unseal().block; - block2.body.withdrawals = None; - block2.header.parent_hash = block1.hash(); - block2.header.base_fee_per_gas = Some(100); - block2.header.difficulty = U256::ZERO; - let block2 = block2.clone().seal_slow(); - - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .london_activated() - .build(), - ); - - let (consensus_engine, env) = TestConsensusEngineBuilder::new(chain_spec.clone()) - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(0), - done: true, - })])) - .with_executor_results(Vec::from([exec_result2])) - .build(); - - let (_static_dir, static_dir_path) = create_test_static_files_dir(); - - insert_blocks( - ProviderFactory::::new( - env.db.clone(), - chain_spec.clone(), - StaticFileProvider::read_write(static_dir_path).unwrap(), - ), - [&data.genesis, &block1].into_iter(), - ); - - let mut engine_rx = spawn_consensus_engine(consensus_engine); - - // Send forkchoice - let res = env - .send_forkchoice_updated(ForkchoiceState { - head_block_hash: block1.hash(), - finalized_block_hash: block1.hash(), - ..Default::default() - }) - .await; - - let 
expected_result = PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: BlockValidationError::BlockPreMerge { hash: block1.hash() } - .to_string(), - }) - .with_latest_valid_hash(B256::ZERO); - assert_matches!(res, Ok(ForkchoiceUpdated { payload_status, .. }) => assert_eq!(payload_status, expected_result)); - - // Send new payload - let result = env - .send_new_payload_retry_on_syncing( - block_to_payload_v1(block2.clone()), - ExecutionPayloadSidecar::none(), - ) - .await - .unwrap(); - - let expected_result = PayloadStatus::from_status(PayloadStatusEnum::Invalid { - validation_error: BlockValidationError::BlockPreMerge { hash: block2.hash() } - .to_string(), - }) - .with_latest_valid_hash(B256::ZERO); - assert_eq!(result, expected_result); - - assert_matches!(engine_rx.try_recv(), Err(TryRecvError::Empty)); - } - } -} diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs deleted file mode 100644 index adbb531b22fd3..0000000000000 --- a/crates/consensus/beacon/src/engine/sync.rs +++ /dev/null @@ -1,672 +0,0 @@ -//! Sync management for the engine implementation. - -use crate::{ - engine::metrics::EngineSyncMetrics, BeaconConsensusEngineEvent, - ConsensusEngineLiveSyncProgress, EthBeaconConsensus, -}; -use alloy_consensus::Header; -use alloy_primitives::{BlockNumber, B256}; -use futures::FutureExt; -use reth_network_p2p::{ - full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient}, - BlockClient, -}; -use reth_node_types::{BodyTy, HeaderTy}; -use reth_primitives::{BlockBody, EthPrimitives, NodePrimitives, SealedBlock}; -use reth_provider::providers::ProviderNodeTypes; -use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineTarget, PipelineWithResult}; -use reth_tasks::TaskSpawner; -use reth_tokio_util::EventSender; -use std::{ - cmp::{Ordering, Reverse}, - collections::{binary_heap::PeekMut, BinaryHeap}, - sync::Arc, - task::{ready, Context, Poll}, -}; -use tokio::sync::oneshot; -use tracing::trace; - -/// Manages syncing under the control of the engine. -/// -/// This type controls the [Pipeline] and supports (single) full block downloads. -/// -/// Caution: If the pipeline is running, this type will not emit blocks downloaded from the network -/// [`EngineSyncEvent::FetchedFullBlock`] until the pipeline is idle to prevent commits to the -/// database while the pipeline is still active. -pub(crate) struct EngineSyncController -where - N: ProviderNodeTypes, - Client: BlockClient, -{ - /// A downloader that can download full blocks from the network. - full_block_client: FullBlockClient, - /// The type that can spawn the pipeline task. - pipeline_task_spawner: Box, - /// The current state of the pipeline. - /// The pipeline is used for large ranges. - pipeline_state: PipelineState, - /// Pending target block for the pipeline to sync - pending_pipeline_target: Option, - /// In-flight full block requests in progress. - inflight_full_block_requests: Vec>, - /// In-flight full block _range_ requests in progress. - inflight_block_range_requests: Vec>, - /// Sender for engine events. - event_sender: EventSender>, - /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for - /// ordering. This means the blocks will be popped from the heap with ascending block numbers. - range_buffered_blocks: BinaryHeap, BodyTy>>>, - /// Max block after which the consensus engine would terminate the sync. Used for debugging - /// purposes. 
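The ascending pop order promised by the field documentation above comes from a standard trick: `std::collections::BinaryHeap` is a max-heap, so entries are wrapped in `core::cmp::Reverse` and compared by block number. A runnable illustration with a simplified `Block` stand-in for the ordered sealed-block wrapper:

```rust
use std::{cmp::Reverse, collections::BinaryHeap};

/// Simplified stand-in for the buffered sealed-block wrapper; ordered by number.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct Block {
    number: u64,
}

fn main() {
    let mut heap: BinaryHeap<Reverse<Block>> = BinaryHeap::new();
    for number in [3u64, 1, 2] {
        heap.push(Reverse(Block { number }));
    }
    // `Reverse` turns the max-heap into a min-heap: pops come out ascending
    let order: Vec<u64> =
        std::iter::from_fn(|| heap.pop().map(|Reverse(block)| block.number)).collect();
    assert_eq!(order, vec![1, 2, 3]);
}
```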
- max_block: Option<BlockNumber>,
- /// Engine sync metrics.
- metrics: EngineSyncMetrics,
-}
-
-impl<N, Client> EngineSyncController<N, Client>
-where
- N: ProviderNodeTypes,
- Client: BlockClient,
-{
- /// Create a new instance
- pub(crate) fn new(
- pipeline: Pipeline<N>,
- client: Client,
- pipeline_task_spawner: Box<dyn TaskSpawner>,
- max_block: Option<BlockNumber>,
- chain_spec: Arc<N::ChainSpec>,
- event_sender: EventSender<BeaconConsensusEngineEvent<N::Primitives>>,
- ) -> Self {
- Self {
- full_block_client: FullBlockClient::new(
- client,
- Arc::new(EthBeaconConsensus::new(chain_spec)),
- ),
- pipeline_task_spawner,
- pipeline_state: PipelineState::Idle(Some(pipeline)),
- pending_pipeline_target: None,
- inflight_full_block_requests: Vec::new(),
- inflight_block_range_requests: Vec::new(),
- range_buffered_blocks: BinaryHeap::new(),
- event_sender,
- max_block,
- metrics: EngineSyncMetrics::default(),
- }
- }
-}
-
-impl<N, Client> EngineSyncController<N, Client>
-where
- N: ProviderNodeTypes,
- Client: BlockClient<Header = HeaderTy<N>, Body = BodyTy<N>> + 'static,
-{
- /// Sets the metrics for the active downloads
- fn update_block_download_metrics(&self) {
- self.metrics.active_block_downloads.set(self.inflight_full_block_requests.len() as f64);
- // TODO: full block range metrics
- }
-
- /// Sets the max block value for testing
- #[cfg(test)]
- pub(crate) fn set_max_block(&mut self, block: BlockNumber) {
- self.max_block = Some(block);
- }
-
- /// Cancels all download requests that are in progress and buffered blocks.
- pub(crate) fn clear_block_download_requests(&mut self) {
- self.inflight_full_block_requests.clear();
- self.inflight_block_range_requests.clear();
- self.range_buffered_blocks.clear();
- self.update_block_download_metrics();
- }
-
- /// Cancels the full block request with the given hash.
- pub(crate) fn cancel_full_block_request(&mut self, hash: B256) {
- self.inflight_full_block_requests.retain(|req| *req.hash() != hash);
- self.update_block_download_metrics();
- }
-
- /// Returns `true` if a pipeline target is queued and will be triggered on the next `poll`.
- #[allow(dead_code)]
- pub(crate) const fn is_pipeline_sync_pending(&self) -> bool {
- self.pending_pipeline_target.is_some() && self.pipeline_state.is_idle()
- }
-
- /// Returns `true` if the pipeline is idle.
- pub(crate) const fn is_pipeline_idle(&self) -> bool {
- self.pipeline_state.is_idle()
- }
-
- /// Returns `true` if the pipeline is active.
- pub(crate) const fn is_pipeline_active(&self) -> bool {
- !self.is_pipeline_idle()
- }
-
- /// Returns true if there's already a request for the given hash.
- pub(crate) fn is_inflight_request(&self, hash: B256) -> bool {
- self.inflight_full_block_requests.iter().any(|req| *req.hash() == hash)
- }
-
- /// Starts requesting a range of blocks from the network, in reverse from the given hash.
- ///
- /// If the `count` is 1, this will use the `download_full_block` method instead, because it
- /// downloads headers and bodies for the block concurrently.
- pub(crate) fn download_block_range(&mut self, hash: B256, count: u64) {
- if count == 1 {
- self.download_full_block(hash);
- } else {
- trace!(
- target: "consensus::engine",
- ?hash,
- ?count,
- "start downloading full block range."
- );
-
- // notify listeners that we're downloading a block range
- self.event_sender.notify(BeaconConsensusEngineEvent::LiveSyncProgress(
- ConsensusEngineLiveSyncProgress::DownloadingBlocks {
- remaining_blocks: count,
- target: hash,
- },
- ));
- let request = self.full_block_client.get_full_block_range(hash, count);
- self.inflight_block_range_requests.push(request);
- }
-
- // // TODO: need more metrics for block ranges
- // self.update_block_download_metrics();
- }
-
- /// Starts requesting a full block from the network.
- ///
- /// Returns `true` if the request was started, `false` if there's already a request for the
- /// given hash.
- pub(crate) fn download_full_block(&mut self, hash: B256) -> bool {
- if self.is_inflight_request(hash) {
- return false
- }
- trace!(
- target: "consensus::engine::sync",
- ?hash,
- "Start downloading full block"
- );
-
- // notify listeners that we're downloading a block
- self.event_sender.notify(BeaconConsensusEngineEvent::LiveSyncProgress(
- ConsensusEngineLiveSyncProgress::DownloadingBlocks {
- remaining_blocks: 1,
- target: hash,
- },
- ));
-
- let request = self.full_block_client.get_full_block(hash);
- self.inflight_full_block_requests.push(request);
-
- self.update_block_download_metrics();
-
- true
- }
-
- /// Sets a new target to sync the pipeline to.
- /// - /// But ensures the target is not the zero hash. - pub(crate) fn set_pipeline_sync_target(&mut self, target: PipelineTarget) { - if target.sync_target().is_some_and(|target| target.is_zero()) { - trace!( - target: "consensus::engine::sync", - "Pipeline target cannot be zero hash." - ); - // precaution to never sync to the zero hash - return - } - self.pending_pipeline_target = Some(target); - } - - /// Check if the engine reached max block as specified by `max_block` parameter. - /// - /// Note: this is mainly for debugging purposes. - pub(crate) fn has_reached_max_block(&self, progress: BlockNumber) -> bool { - let has_reached_max_block = self.max_block.is_some_and(|target| progress >= target); - if has_reached_max_block { - trace!( - target: "consensus::engine::sync", - ?progress, - max_block = ?self.max_block, - "Consensus engine reached max block" - ); - } - has_reached_max_block - } - - /// Advances the pipeline state. - /// - /// This checks for the result in the channel, or returns pending if the pipeline is idle. - fn poll_pipeline(&mut self, cx: &mut Context<'_>) -> Poll> { - let res = match self.pipeline_state { - PipelineState::Idle(_) => return Poll::Pending, - PipelineState::Running(ref mut fut) => { - ready!(fut.poll_unpin(cx)) - } - }; - let ev = match res { - Ok((pipeline, result)) => { - let minimum_block_number = pipeline.minimum_block_number(); - let reached_max_block = - self.has_reached_max_block(minimum_block_number.unwrap_or_default()); - self.pipeline_state = PipelineState::Idle(Some(pipeline)); - EngineSyncEvent::PipelineFinished { result, reached_max_block } - } - Err(_) => { - // failed to receive the pipeline - EngineSyncEvent::PipelineTaskDropped - } - }; - Poll::Ready(ev) - } - - /// This will spawn the pipeline if it is idle and a target is set or if the pipeline is set to - /// run continuously. - fn try_spawn_pipeline(&mut self) -> Option> { - match &mut self.pipeline_state { - PipelineState::Idle(pipeline) => { - let target = self.pending_pipeline_target.take()?; - let (tx, rx) = oneshot::channel(); - - let pipeline = pipeline.take().expect("exists"); - self.pipeline_task_spawner.spawn_critical_blocking( - "pipeline task", - Box::pin(async move { - let result = pipeline.run_as_fut(Some(target)).await; - let _ = tx.send(result); - }), - ); - self.pipeline_state = PipelineState::Running(rx); - - // we also clear any pending full block requests because we expect them to be - // outdated (included in the range the pipeline is syncing anyway) - self.clear_block_download_requests(); - - Some(EngineSyncEvent::PipelineStarted(Some(target))) - } - PipelineState::Running(_) => None, - } - } - - /// Advances the sync process. 
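A note on the buffering discipline the `poll` implementation below relies on: Rust's `BinaryHeap` is a max-heap, so the controller wraps every buffered block in `Reverse` to get min-heap behavior (blocks pop in ascending number order) and uses `PeekMut` to drop duplicate entries while draining. A self-contained sketch of just that heap discipline, with plain `u64` block numbers standing in for sealed blocks:

use std::cmp::Reverse;
use std::collections::{binary_heap::PeekMut, BinaryHeap};

fn main() {
    // `BinaryHeap` is a max-heap; `Reverse` flips the ordering into a min-heap,
    // which is why buffered blocks pop in ascending block-number order.
    let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::new();
    for n in [7u64, 3, 3, 9, 3] {
        heap.push(Reverse(n));
    }
    while let Some(Reverse(block)) = heap.pop() {
        // Peek ahead and pop duplicates, mirroring the dedup loop in `poll`.
        while let Some(peek) = heap.peek_mut() {
            if peek.0 == block {
                PeekMut::pop(peek);
            } else {
                break;
            }
        }
        println!("next block: {block}");
    }
    // Prints 3, 7, 9: ascending and deduplicated.
}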
- pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll> { - // try to spawn a pipeline if a target is set - if let Some(event) = self.try_spawn_pipeline() { - return Poll::Ready(event) - } - - // make sure we poll the pipeline if it's active, and return any ready pipeline events - if !self.is_pipeline_idle() { - // advance the pipeline - if let Poll::Ready(event) = self.poll_pipeline(cx) { - return Poll::Ready(event) - } - } - - // advance all full block requests - for idx in (0..self.inflight_full_block_requests.len()).rev() { - let mut request = self.inflight_full_block_requests.swap_remove(idx); - if let Poll::Ready(block) = request.poll_unpin(cx) { - trace!(target: "consensus::engine", block=?block.num_hash(), "Received single full block, buffering"); - self.range_buffered_blocks.push(Reverse(OrderedSealedBlock(block))); - } else { - // still pending - self.inflight_full_block_requests.push(request); - } - } - - // advance all full block range requests - for idx in (0..self.inflight_block_range_requests.len()).rev() { - let mut request = self.inflight_block_range_requests.swap_remove(idx); - if let Poll::Ready(blocks) = request.poll_unpin(cx) { - trace!(target: "consensus::engine", len=?blocks.len(), first=?blocks.first().map(|b| b.num_hash()), last=?blocks.last().map(|b| b.num_hash()), "Received full block range, buffering"); - self.range_buffered_blocks - .extend(blocks.into_iter().map(OrderedSealedBlock).map(Reverse)); - } else { - // still pending - self.inflight_block_range_requests.push(request); - } - } - - self.update_block_download_metrics(); - - // drain an element of the block buffer if there are any - if let Some(block) = self.range_buffered_blocks.pop() { - // peek ahead and pop duplicates - while let Some(peek) = self.range_buffered_blocks.peek_mut() { - if peek.0 .0.hash() == block.0 .0.hash() { - PeekMut::pop(peek); - } else { - break - } - } - return Poll::Ready(EngineSyncEvent::FetchedFullBlock(block.0 .0)) - } - - Poll::Pending - } -} - -/// A wrapper type around [`SealedBlock`] that implements the [Ord] trait by block number. -#[derive(Debug, Clone, PartialEq, Eq)] -struct OrderedSealedBlock(SealedBlock); - -impl PartialOrd for OrderedSealedBlock -where - H: reth_primitives_traits::BlockHeader + 'static, - B: reth_primitives_traits::BlockBody + 'static, -{ - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for OrderedSealedBlock -where - H: reth_primitives_traits::BlockHeader + 'static, - B: reth_primitives_traits::BlockBody + 'static, -{ - fn cmp(&self, other: &Self) -> Ordering { - self.0.number().cmp(&other.0.number()) - } -} - -/// The event type emitted by the [`EngineSyncController`]. -#[derive(Debug)] -pub(crate) enum EngineSyncEvent { - /// A full block has been downloaded from the network. - FetchedFullBlock(SealedBlock), - /// Pipeline started syncing - /// - /// This is none if the pipeline is triggered without a specific target. - PipelineStarted(Option), - /// Pipeline finished - /// - /// If this is returned, the pipeline is idle. - PipelineFinished { - /// Final result of the pipeline run. - result: Result, - /// Whether the pipeline reached the configured `max_block`. - /// - /// Note: this is only relevant in debugging scenarios. - reached_max_block: bool, - }, - /// Pipeline task was dropped after it was started, unable to receive it because channel - /// closed. This would indicate a panicked pipeline task - PipelineTaskDropped, -} - -/// The possible pipeline states within the sync controller. 
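The `PipelineState` enum defined next holds either the idle pipeline or the oneshot receiver for a running pipeline task; `poll_pipeline` above maps a closed channel to `PipelineTaskDropped`, since the sender can only vanish if the spawned task died before reporting. A minimal sketch of that hand-off, assuming only tokio and using hypothetical names rather than reth code:

use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    // Stand-in for spawning the pipeline: the task reports its result over a
    // oneshot channel, mirroring `try_spawn_pipeline` above.
    let (tx, rx) = oneshot::channel::<Result<u64, &'static str>>();
    tokio::spawn(async move {
        // If this task panicked before sending, `tx` would be dropped and the
        // receiver below would see `Err(RecvError)`, i.e. the "task dropped" case.
        let _ = tx.send(Ok(5));
    });

    match rx.await {
        Ok(result) => println!("pipeline finished: {result:?}"),
        Err(_) => println!("pipeline task dropped (channel closed)"),
    }
}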
-/// -/// [`PipelineState::Idle`] means that the pipeline is currently idle. -/// [`PipelineState::Running`] means that the pipeline is currently running. -/// -/// NOTE: The differentiation between these two states is important, because when the pipeline is -/// running, it acquires the write lock over the database. This means that we cannot forward to the -/// blockchain tree any messages that would result in database writes, since it would result in a -/// deadlock. -enum PipelineState { - /// Pipeline is idle. - Idle(Option>), - /// Pipeline is running and waiting for a response - Running(oneshot::Receiver>), -} - -impl PipelineState { - /// Returns `true` if the state matches idle. - const fn is_idle(&self) -> bool { - matches!(self, Self::Idle(_)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::Header; - use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT; - use assert_matches::assert_matches; - use futures::poll; - use reth_chainspec::{ChainSpec, ChainSpecBuilder, MAINNET}; - use reth_network_p2p::{either::Either, test_utils::TestFullBlockClient, EthBlockClient}; - use reth_primitives::{BlockBody, SealedHeader}; - use reth_provider::{ - test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, - ExecutionOutcome, - }; - use reth_prune_types::PruneModes; - use reth_stages::{test_utils::TestStages, ExecOutput, StageError}; - use reth_stages_api::StageCheckpoint; - use reth_static_file::StaticFileProducer; - use reth_tasks::TokioTaskExecutor; - use std::{collections::VecDeque, future::poll_fn, ops::Range}; - use tokio::sync::watch; - - struct TestPipelineBuilder { - pipeline_exec_outputs: VecDeque>, - executor_results: Vec, - max_block: Option, - } - - impl TestPipelineBuilder { - /// Create a new [`TestPipelineBuilder`]. - const fn new() -> Self { - Self { - pipeline_exec_outputs: VecDeque::new(), - executor_results: Vec::new(), - max_block: None, - } - } - - /// Set the pipeline execution outputs to use for the test consensus engine. - fn with_pipeline_exec_outputs( - mut self, - pipeline_exec_outputs: VecDeque>, - ) -> Self { - self.pipeline_exec_outputs = pipeline_exec_outputs; - self - } - - /// Set the executor results to use for the test consensus engine. - #[allow(dead_code)] - fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.executor_results = executor_results; - self - } - - /// Sets the max block for the pipeline to run. - #[allow(dead_code)] - const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.max_block = Some(max_block); - self - } - - /// Builds the pipeline. - fn build(self, chain_spec: Arc) -> Pipeline { - reth_tracing::init_test_tracing(); - - // Setup pipeline - let (tip_tx, _tip_rx) = watch::channel(B256::default()); - let mut pipeline = Pipeline::::builder() - .add_stages(TestStages::new(self.pipeline_exec_outputs, Default::default())) - .with_tip_sender(tip_tx); - - if let Some(max_block) = self.max_block { - pipeline = pipeline.with_max_block(max_block); - } - - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); - - let static_file_producer = - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); - - pipeline.build(provider_factory, static_file_producer) - } - } - - struct TestSyncControllerBuilder { - max_block: Option, - client: Option, - } - - impl TestSyncControllerBuilder { - /// Create a new [`TestSyncControllerBuilder`]. 
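`TestPipelineBuilder` above scripts the pipeline with a `VecDeque` of pre-baked results, so each run consumes the next scripted outcome; that is what lets the tests below assert exact checkpoints like `StageCheckpoint::new(5)`. A stripped-down sketch of the same idea, with illustrative types in place of the real `ExecOutput`/`StageError`:

use std::collections::VecDeque;

// Each call to `run` pops the next scripted result, so a test can drive the
// consumer through an exact sequence of successes and failures.
struct ScriptedPipeline {
    outputs: VecDeque<Result<u64, String>>,
}

impl ScriptedPipeline {
    fn run(&mut self) -> Result<u64, String> {
        self.outputs.pop_front().unwrap_or_else(|| Err("script exhausted".to_string()))
    }
}

fn main() {
    let mut pipeline = ScriptedPipeline {
        outputs: VecDeque::from([Ok(5), Err("stage error".to_string())]),
    };
    assert_eq!(pipeline.run(), Ok(5));
    assert!(pipeline.run().is_err());
    assert_eq!(pipeline.run(), Err("script exhausted".to_string()));
    println!("scripted pipeline behaved as configured");
}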
- const fn new() -> Self { - Self { max_block: None, client: None } - } - - /// Sets the max block for the pipeline to run. - #[allow(dead_code)] - const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.max_block = Some(max_block); - self - } - - /// Sets the client to use for network operations. - fn with_client(mut self, client: Client) -> Self { - self.client = Some(client); - self - } - - /// Builds the sync controller. - fn build( - self, - pipeline: Pipeline, - chain_spec: Arc, - ) -> EngineSyncController> - where - N: ProviderNodeTypes, - Client: EthBlockClient + 'static, - { - let client = self - .client - .map(Either::Left) - .unwrap_or_else(|| Either::Right(TestFullBlockClient::default())); - - EngineSyncController::new( - pipeline, - client, - Box::::default(), - self.max_block, - chain_spec, - Default::default(), - ) - } - } - - #[tokio::test] - async fn pipeline_started_after_setting_target() { - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let client = TestFullBlockClient::default(); - insert_headers_into_client(&client, SealedHeader::default(), 0..10); - // force the pipeline to be "done" after 5 blocks - let pipeline = TestPipelineBuilder::new() - .with_pipeline_exec_outputs(VecDeque::from([Ok(ExecOutput { - checkpoint: StageCheckpoint::new(5), - done: true, - })])) - .build(chain_spec.clone()); - - let mut sync_controller = TestSyncControllerBuilder::new() - .with_client(client.clone()) - .build(pipeline, chain_spec); - - let tip = client.highest_block().expect("there should be blocks here"); - sync_controller.set_pipeline_sync_target(tip.hash().into()); - - let sync_future = poll_fn(|cx| sync_controller.poll(cx)); - let next_event = poll!(sync_future); - - // can assert that the first event here is PipelineStarted because we set the sync target, - // and we should get Ready because the pipeline should be spawned immediately - assert_matches!(next_event, Poll::Ready(EngineSyncEvent::PipelineStarted(Some(target))) => { - assert_eq!(target.sync_target().unwrap(), tip.hash()); - }); - - // the next event should be the pipeline finishing in a good state - let sync_future = poll_fn(|cx| sync_controller.poll(cx)); - let next_ready = sync_future.await; - assert_matches!(next_ready, EngineSyncEvent::PipelineFinished { result, reached_max_block } => { - assert_matches!(result, Ok(control_flow) => assert_eq!(control_flow, ControlFlow::Continue { block_number: 5 })); - // no max block configured - assert!(!reached_max_block); - }); - } - - fn insert_headers_into_client( - client: &TestFullBlockClient, - genesis_header: SealedHeader, - range: Range, - ) { - let mut sealed_header = genesis_header; - let body = BlockBody::default(); - for _ in range { - let (mut header, hash) = sealed_header.split(); - // update to the next header - header.parent_hash = hash; - header.number += 1; - header.timestamp += 1; - sealed_header = SealedHeader::seal(header); - client.insert(sealed_header.clone(), body.clone()); - } - } - - #[tokio::test] - async fn controller_sends_range_request() { - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .paris_activated() - .build(), - ); - - let client = TestFullBlockClient::default(); - let header = Header { - base_fee_per_gas: Some(7), - gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, - ..Default::default() - }; - let header = SealedHeader::seal(header); - 
insert_headers_into_client(&client, header, 0..10); - - // set up a pipeline - let pipeline = TestPipelineBuilder::new().build(chain_spec.clone()); - - let mut sync_controller = TestSyncControllerBuilder::new() - .with_client(client.clone()) - .build(pipeline, chain_spec); - - let tip = client.highest_block().expect("there should be blocks here"); - - // call the download range method - sync_controller.download_block_range(tip.hash(), tip.number); - - // ensure we have one in flight range request - assert_eq!(sync_controller.inflight_block_range_requests.len(), 1); - - // ensure the range request is made correctly - let first_req = sync_controller.inflight_block_range_requests.first().unwrap(); - assert_eq!(first_req.start_hash(), tip.hash()); - assert_eq!(first_req.count(), tip.number); - - // ensure they are in ascending order - for num in 1..=10 { - let sync_future = poll_fn(|cx| sync_controller.poll(cx)); - let next_ready = sync_future.await; - assert_matches!(next_ready, EngineSyncEvent::FetchedFullBlock(block) => { - assert_eq!(block.number, num); - }); - } - } -} diff --git a/crates/consensus/beacon/src/engine/test_utils.rs b/crates/consensus/beacon/src/engine/test_utils.rs deleted file mode 100644 index 56de724aded22..0000000000000 --- a/crates/consensus/beacon/src/engine/test_utils.rs +++ /dev/null @@ -1,467 +0,0 @@ -#![allow(missing_docs)] -use crate::{ - engine::hooks::PruneHook, hooks::EngineHooks, BeaconConsensusEngine, - BeaconConsensusEngineError, BeaconConsensusEngineHandle, BeaconForkChoiceUpdateError, - EthBeaconConsensus, MIN_BLOCKS_FOR_PIPELINE_RUN, -}; -use alloy_primitives::{BlockNumber, B256}; -use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadSidecar, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, -}; -use reth_blockchain_tree::{ - config::BlockchainTreeConfig, externals::TreeExternals, BlockchainTree, ShareableBlockchainTree, -}; -use reth_chainspec::ChainSpec; -use reth_config::config::StageConfig; -use reth_consensus::{test_utils::TestConsensus, ConsensusError, FullConsensus}; -use reth_db::{test_utils::TempDatabase, DatabaseEnv as DE}; -use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, -}; -use reth_engine_primitives::{BeaconOnNewPayloadError, EngineApiMessageVersion}; -use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_evm::{either::Either, test_utils::MockExecutorProvider}; -use reth_evm_ethereum::execute::EthExecutorProvider; -use reth_exex_types::FinishedExExHeight; -use reth_network_p2p::{ - sync::NoopSyncStateUpdater, test_utils::NoopFullBlockClient, EthBlockClient, -}; -use reth_payload_builder::test_utils::spawn_test_payload_service; -use reth_primitives::SealedHeader; -use reth_provider::{ - providers::BlockchainProvider, - test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB}, - ExecutionOutcome, -}; -use reth_prune::Pruner; -use reth_prune_types::PruneModes; -use reth_stages::{sets::DefaultStages, test_utils::TestStages, ExecOutput, Pipeline, StageError}; -use reth_static_file::StaticFileProducer; -use reth_tasks::TokioTaskExecutor; -use std::{collections::VecDeque, sync::Arc}; -use tokio::sync::{oneshot, watch}; - -type DatabaseEnv = TempDatabase; - -type TestBeaconConsensusEngine = BeaconConsensusEngine< - MockNodeTypesWithDB, - BlockchainProvider, - Arc>, ->; - -#[derive(Debug)] -pub struct TestEnv { - pub db: DB, - // Keep the tip receiver around, so it's not dropped. 
- #[allow(dead_code)] - tip_rx: watch::Receiver, - engine_handle: BeaconConsensusEngineHandle, -} - -impl TestEnv { - const fn new( - db: DB, - tip_rx: watch::Receiver, - engine_handle: BeaconConsensusEngineHandle, - ) -> Self { - Self { db, tip_rx, engine_handle } - } - - pub async fn send_new_payload>( - &self, - payload: T, - sidecar: ExecutionPayloadSidecar, - ) -> Result { - self.engine_handle.new_payload(payload.into(), sidecar).await - } - - /// Sends the `ExecutionPayload` message to the consensus engine and retries if the engine - /// is syncing. - pub async fn send_new_payload_retry_on_syncing>( - &self, - payload: T, - sidecar: ExecutionPayloadSidecar, - ) -> Result { - let payload: ExecutionPayload = payload.into(); - loop { - let result = self.send_new_payload(payload.clone(), sidecar.clone()).await?; - if !result.is_syncing() { - return Ok(result) - } - } - } - - pub async fn send_forkchoice_updated( - &self, - state: ForkchoiceState, - ) -> Result { - self.engine_handle - .fork_choice_updated(state, None, EngineApiMessageVersion::default()) - .await - } - - /// Sends the `ForkchoiceUpdated` message to the consensus engine and retries if the engine - /// is syncing. - pub async fn send_forkchoice_retry_on_syncing( - &self, - state: ForkchoiceState, - ) -> Result { - loop { - let result = self - .engine_handle - .fork_choice_updated(state, None, EngineApiMessageVersion::default()) - .await?; - if !result.is_syncing() { - return Ok(result) - } - } - } -} - -// TODO: add with_consensus in case we want to use the TestConsensus purposeful failure - this -// would require similar patterns to how we use with_client and the downloader -/// Represents either a real consensus engine, or a test consensus engine. -#[derive(Debug, Default)] -enum TestConsensusConfig { - /// Test consensus engine - #[default] - Test, - /// Real consensus engine - Real, -} - -/// Represents either test pipeline outputs, or real pipeline configuration. -#[derive(Debug)] -enum TestPipelineConfig { - /// Test pipeline outputs. - Test(VecDeque>), - /// Real pipeline configuration. - Real, -} - -impl Default for TestPipelineConfig { - fn default() -> Self { - Self::Test(VecDeque::new()) - } -} - -/// Represents either test executor results, or real executor configuration. -#[derive(Debug)] -enum TestExecutorConfig { - /// Test executor results. - Test(Vec), - /// Real executor configuration. - Real, -} - -impl Default for TestExecutorConfig { - fn default() -> Self { - Self::Test(Vec::new()) - } -} - -/// The basic configuration for a `TestConsensusEngine`, without generics for the client or -/// consensus engine. -#[derive(Debug)] -pub struct TestConsensusEngineBuilder { - chain_spec: Arc, - pipeline_config: TestPipelineConfig, - executor_config: TestExecutorConfig, - pipeline_run_threshold: Option, - max_block: Option, - consensus: TestConsensusConfig, -} - -impl TestConsensusEngineBuilder { - /// Create a new `TestConsensusEngineBuilder` with the given `ChainSpec`. - pub fn new(chain_spec: Arc) -> Self { - Self { - chain_spec, - pipeline_config: Default::default(), - executor_config: Default::default(), - pipeline_run_threshold: None, - max_block: None, - consensus: Default::default(), - } - } - - /// Set the pipeline execution outputs to use for the test consensus engine. 
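The two `*_retry_on_syncing` helpers above share one shape: resend the same message until the engine stops reporting SYNCING, while real errors still propagate immediately through `?`. A compact sketch of that loop, with a hypothetical `Status` enum standing in for the real `PayloadStatus::is_syncing` check:

// Hypothetical stand-ins: the real helpers loop on `PayloadStatus::is_syncing`.
#[derive(Clone, Debug, PartialEq)]
enum Status {
    Syncing,
    Valid,
}

// Pretend transport that reports SYNCING for the first two attempts.
fn send(attempt: &mut u32) -> Result<Status, &'static str> {
    *attempt += 1;
    Ok(if *attempt < 3 { Status::Syncing } else { Status::Valid })
}

fn send_retry(attempt: &mut u32) -> Result<Status, &'static str> {
    loop {
        // A hard error returns immediately; only SYNCING triggers a resend.
        let result = send(attempt)?;
        if result != Status::Syncing {
            return Ok(result);
        }
    }
}

fn main() {
    let mut attempts = 0;
    let status = send_retry(&mut attempts).unwrap();
    println!("{status:?} after {attempts} attempts"); // Valid after 3 attempts
}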
- pub fn with_pipeline_exec_outputs( - mut self, - pipeline_exec_outputs: VecDeque>, - ) -> Self { - self.pipeline_config = TestPipelineConfig::Test(pipeline_exec_outputs); - self - } - - /// Set the executor results to use for the test consensus engine. - pub fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.executor_config = TestExecutorConfig::Test(executor_results); - self - } - - /// Sets the max block for the pipeline to run. - pub const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.max_block = Some(max_block); - self - } - - /// Uses the real pipeline instead of a pipeline with empty exec outputs. - pub fn with_real_pipeline(mut self) -> Self { - self.pipeline_config = TestPipelineConfig::Real; - self - } - - /// Uses the real executor instead of a executor with empty results. - pub fn with_real_executor(mut self) -> Self { - self.executor_config = TestExecutorConfig::Real; - self - } - - /// Uses a real consensus engine instead of a test consensus engine. - pub const fn with_real_consensus(mut self) -> Self { - self.consensus = TestConsensusConfig::Real; - self - } - - /// Disables blockchain tree driven sync. This is the same as setting the pipeline run - /// threshold to 0. - pub const fn disable_blockchain_tree_sync(mut self) -> Self { - self.pipeline_run_threshold = Some(0); - self - } - - /// Sets the client to use for network operations. - #[allow(dead_code)] - pub const fn with_client( - self, - client: Client, - ) -> NetworkedTestConsensusEngineBuilder - where - Client: EthBlockClient + 'static, - { - NetworkedTestConsensusEngineBuilder { base_config: self, client: Some(client) } - } - - /// Builds the test consensus engine into a `TestConsensusEngine` and `TestEnv`. - pub fn build( - self, - ) -> (TestBeaconConsensusEngine, TestEnv>) { - let networked = NetworkedTestConsensusEngineBuilder { base_config: self, client: None }; - - networked.build() - } -} - -/// A builder for `TestConsensusEngine`, allows configuration of mocked pipeline outputs and -/// mocked executor results. -/// -/// This optionally includes a client for network operations. -#[derive(Debug)] -pub struct NetworkedTestConsensusEngineBuilder { - base_config: TestConsensusEngineBuilder, - client: Option, -} - -impl NetworkedTestConsensusEngineBuilder -where - Client: EthBlockClient + 'static, -{ - /// Set the pipeline execution outputs to use for the test consensus engine. - #[allow(dead_code)] - pub fn with_pipeline_exec_outputs( - mut self, - pipeline_exec_outputs: VecDeque>, - ) -> Self { - self.base_config.pipeline_config = TestPipelineConfig::Test(pipeline_exec_outputs); - self - } - - /// Set the executor results to use for the test consensus engine. - #[allow(dead_code)] - pub fn with_executor_results(mut self, executor_results: Vec) -> Self { - self.base_config.executor_config = TestExecutorConfig::Test(executor_results); - self - } - - /// Sets the max block for the pipeline to run. - #[allow(dead_code)] - pub const fn with_max_block(mut self, max_block: BlockNumber) -> Self { - self.base_config.max_block = Some(max_block); - self - } - - /// Uses the real pipeline instead of a pipeline with empty exec outputs. - #[allow(dead_code)] - pub fn with_real_pipeline(mut self) -> Self { - self.base_config.pipeline_config = TestPipelineConfig::Real; - self - } - - /// Uses the real executor instead of a executor with empty results. 
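Every setter in these builders follows the same consuming-builder shape: take `mut self`, set one field, return `Self`, so configuration chains fluently and `build` finally consumes the builder. A minimal sketch with illustrative fields:

// Simplified stand-in for the test engine builders above.
#[derive(Debug, Default)]
struct EngineBuilder {
    max_block: Option<u64>,
    real_pipeline: bool,
}

impl EngineBuilder {
    fn with_max_block(mut self, max_block: u64) -> Self {
        self.max_block = Some(max_block);
        self
    }

    fn with_real_pipeline(mut self) -> Self {
        self.real_pipeline = true;
        self
    }

    fn build(self) -> String {
        format!("engine(max_block={:?}, real_pipeline={})", self.max_block, self.real_pipeline)
    }
}

fn main() {
    let engine = EngineBuilder::default().with_max_block(5).with_real_pipeline().build();
    println!("{engine}");
}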
- #[allow(dead_code)] - pub fn with_real_executor(mut self) -> Self { - self.base_config.executor_config = TestExecutorConfig::Real; - self - } - - /// Disables blockchain tree driven sync. This is the same as setting the pipeline run - /// threshold to 0. - #[allow(dead_code)] - pub const fn disable_blockchain_tree_sync(mut self) -> Self { - self.base_config.pipeline_run_threshold = Some(0); - self - } - - /// Sets the client to use for network operations. - #[allow(dead_code)] - pub fn with_client( - self, - client: ClientType, - ) -> NetworkedTestConsensusEngineBuilder - where - ClientType: EthBlockClient + 'static, - { - NetworkedTestConsensusEngineBuilder { base_config: self.base_config, client: Some(client) } - } - - /// Builds the test consensus engine into a `TestConsensusEngine` and `TestEnv`. - pub fn build(self) -> (TestBeaconConsensusEngine, TestEnv>) { - reth_tracing::init_test_tracing(); - let provider_factory = - create_test_provider_factory_with_chain_spec(self.base_config.chain_spec.clone()); - - let consensus: Arc> = - match self.base_config.consensus { - TestConsensusConfig::Real => { - Arc::new(EthBeaconConsensus::new(Arc::clone(&self.base_config.chain_spec))) - } - TestConsensusConfig::Test => Arc::new(TestConsensus::default()), - }; - let payload_builder = spawn_test_payload_service::(); - - // use either noop client or a user provided client (for example TestFullBlockClient) - let client = Arc::new( - self.client - .map(Either::Left) - .unwrap_or_else(|| Either::Right(NoopFullBlockClient::default())), - ); - - // use either test executor or real executor - let executor_factory = match self.base_config.executor_config { - TestExecutorConfig::Test(results) => { - let executor_factory = MockExecutorProvider::default(); - executor_factory.extend(results); - Either::Left(executor_factory) - } - TestExecutorConfig::Real => { - Either::Right(EthExecutorProvider::ethereum(self.base_config.chain_spec.clone())) - } - }; - - let static_file_producer = - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); - - // Setup pipeline - let (tip_tx, tip_rx) = watch::channel(B256::default()); - let mut pipeline = match self.base_config.pipeline_config { - TestPipelineConfig::Test(outputs) => Pipeline::::builder() - .add_stages(TestStages::new(outputs, Default::default())) - .with_tip_sender(tip_tx), - TestPipelineConfig::Real => { - let header_downloader = ReverseHeadersDownloaderBuilder::default() - .build(client.clone(), consensus.clone().as_header_validator()) - .into_task(); - - let body_downloader = BodiesDownloaderBuilder::default() - .build( - client.clone(), - consensus.clone().as_consensus(), - provider_factory.clone(), - ) - .into_task(); - - Pipeline::::builder().add_stages(DefaultStages::new( - provider_factory.clone(), - tip_rx.clone(), - consensus.clone().as_consensus(), - header_downloader, - body_downloader, - executor_factory.clone(), - StageConfig::default(), - PruneModes::default(), - )) - } - }; - - if let Some(max_block) = self.base_config.max_block { - pipeline = pipeline.with_max_block(max_block); - } - - let pipeline = pipeline.build(provider_factory.clone(), static_file_producer); - - // Setup blockchain tree - let externals = TreeExternals::new(provider_factory.clone(), consensus, executor_factory); - let tree = Arc::new(ShareableBlockchainTree::new( - BlockchainTree::new(externals, BlockchainTreeConfig::new(1, 2, 3, 2)) - .expect("failed to create tree"), - )); - let header = self.base_config.chain_spec.genesis_header().clone(); - let 
genesis_block = SealedHeader::seal(header); - - let blockchain_provider = BlockchainProvider::with_blocks( - provider_factory.clone(), - tree, - genesis_block, - None, - None, - ); - - let pruner = Pruner::new_with_factory( - provider_factory.clone(), - vec![], - 5, - self.base_config.chain_spec.prune_delete_limit, - None, - watch::channel(FinishedExExHeight::NoExExs).1, - ); - - let mut hooks = EngineHooks::new(); - hooks.add(PruneHook::new(pruner, Box::::default())); - - let (mut engine, handle) = BeaconConsensusEngine::new( - client, - pipeline, - blockchain_provider, - Box::::default(), - Box::::default(), - None, - payload_builder, - None, - self.base_config.pipeline_run_threshold.unwrap_or(MIN_BLOCKS_FOR_PIPELINE_RUN), - hooks, - ) - .expect("failed to create consensus engine"); - - if let Some(max_block) = self.base_config.max_block { - engine.sync.set_max_block(max_block) - } - - (engine, TestEnv::new(provider_factory.db_ref().clone(), tip_rx, handle)) - } -} - -pub fn spawn_consensus_engine( - engine: TestBeaconConsensusEngine, -) -> oneshot::Receiver> -where - Client: EthBlockClient + 'static, -{ - let (tx, rx) = oneshot::channel(); - tokio::spawn(async move { - let result = engine.await; - tx.send(result).expect("failed to forward consensus engine result"); - }); - rx -} diff --git a/crates/consensus/beacon/src/lib.rs b/crates/consensus/beacon/src/lib.rs deleted file mode 100644 index f62a75f94d516..0000000000000 --- a/crates/consensus/beacon/src/lib.rs +++ /dev/null @@ -1,14 +0,0 @@ -//! Beacon consensus implementation. - -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -pub use reth_ethereum_consensus::EthBeaconConsensus; - -mod engine; -pub use engine::*; From 2f94aeebedcb3c3faf38f199d272121eca216d31 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 13:35:07 +0100 Subject: [PATCH 020/113] chore!: rm legacy blockchain provider (#13725) --- Cargo.lock | 1 - crates/storage/provider/Cargo.toml | 3 +- crates/storage/provider/src/providers/mod.rs | 892 +----------------- crates/storage/provider/src/traits/mod.rs | 3 - .../provider/src/traits/tree_viewer.rs | 22 - 5 files changed, 5 insertions(+), 916 deletions(-) delete mode 100644 crates/storage/provider/src/traits/tree_viewer.rs diff --git a/Cargo.lock b/Cargo.lock index c5dff17cf4c4d..22f123e02d1e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8666,7 +8666,6 @@ dependencies = [ "parking_lot", "rand 0.8.5", "rayon", - "reth-blockchain-tree-api", "reth-chain-state", "reth-chainspec", "reth-codecs", diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 84808ed7c3811..0955821b42372 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-blockchain-tree-api.workspace = true reth-execution-types.workspace = true reth-primitives = { workspace = true, features = ["reth-codec", "secp256k1"] } reth-primitives-traits = { workspace = true, features = ["reth-codec"] } @@ -43,7 +42,7 @@ alloy-consensus.workspace = true revm.workspace = true # optimism -reth-optimism-primitives = { workspace = true, optional = true } +reth-optimism-primitives 
= { workspace = true, features = ["reth-codec"], optional = true } # async tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index f1c799cd2e30a..6ff53e4afeae6 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -1,51 +1,9 @@ -use core::fmt; +//! Contains the main provider types and traits for interacting with the blockchain's storage. -use crate::{ - AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, - BlockSource, BlockchainTreePendingStateProvider, CanonStateNotifications, - CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, - DatabaseProviderFactory, FullExecutionDataProvider, HeaderProvider, NodePrimitivesProvider, - ProviderError, PruneCheckpointReader, ReceiptProvider, ReceiptProviderIdExt, - StageCheckpointReader, StateProviderBox, StateProviderFactory, StaticFileProviderFactory, - TransactionVariant, TransactionsProvider, TreeViewer, WithdrawalsProvider, -}; -use alloy_consensus::{transaction::TransactionMeta, Header}; -use alloy_eips::{ - eip4895::Withdrawals, BlockHashOrNumber, BlockId, BlockNumHash, BlockNumberOrTag, -}; -use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; -use alloy_rpc_types_engine::ForkchoiceState; -use reth_blockchain_tree_api::{ - error::{CanonicalError, InsertBlockError}, - BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, - InsertPayloadOk, -}; -use reth_chain_state::{ChainInfoTracker, ForkChoiceNotifications, ForkChoiceSubscriptions}; -use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_chainspec::EthereumHardforks; use reth_db::table::Value; -use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; -use reth_node_types::{ - BlockTy, FullNodePrimitives, HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine, - ReceiptTy, TxTy, -}; -use reth_primitives::{ - Account, BlockWithSenders, EthPrimitives, Receipt, SealedBlock, SealedBlockFor, - SealedBlockWithSenders, SealedHeader, -}; -use reth_prune_types::{PruneCheckpoint, PruneSegment}; -use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::{ - BlockBodyIndicesProvider, CanonChainTracker, OmmersProvider, StateCommitmentProvider, -}; -use reth_storage_errors::provider::ProviderResult; -use std::{ - collections::BTreeMap, - ops::{RangeBounds, RangeInclusive}, - sync::Arc, - time::Instant, -}; - -use tracing::trace; +use reth_node_types::{FullNodePrimitives, NodeTypes, NodeTypesWithDB, NodeTypesWithEngine}; +use reth_primitives::EthPrimitives; mod database; pub use database::*; @@ -123,845 +81,3 @@ impl EngineNodeTypes for T where T: ProviderNodeTypes + NodeTypesWithEngine { /// tree. pub trait TreeNodeTypes: ProviderNodeTypes + NodeTypesForTree {} impl TreeNodeTypes for T where T: ProviderNodeTypes + NodeTypesForTree {} - -/// The main type for interacting with the blockchain. -/// -/// This type serves as the main entry point for interacting with the blockchain and provides data -/// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper -/// type that holds an instance of the database and the blockchain tree. -pub struct BlockchainProvider { - /// Provider type used to access the database. - database: ProviderFactory, - /// The blockchain tree instance. 
- tree: Arc>, - /// Tracks the chain info wrt forkchoice updates - chain_info: ChainInfoTracker, -} - -impl Clone for BlockchainProvider { - fn clone(&self) -> Self { - Self { - database: self.database.clone(), - tree: self.tree.clone(), - chain_info: self.chain_info.clone(), - } - } -} - -impl BlockchainProvider { - /// Sets the treeviewer for the provider. - #[doc(hidden)] - pub fn with_tree(mut self, tree: Arc>) -> Self { - self.tree = tree; - self - } -} - -impl BlockchainProvider { - /// Create new provider instance that wraps the database and the blockchain tree, using the - /// provided latest header to initialize the chain info tracker, alongside the finalized header - /// if it exists. - pub fn with_blocks( - database: ProviderFactory, - tree: Arc>, - latest: SealedHeader, - finalized: Option, - safe: Option, - ) -> Self { - Self { database, tree, chain_info: ChainInfoTracker::new(latest, finalized, safe) } - } - - /// Create a new provider using only the database and the tree, fetching the latest header from - /// the database to initialize the provider. - pub fn new( - database: ProviderFactory, - tree: Arc>, - ) -> ProviderResult { - let provider = database.provider()?; - let best = provider.chain_info()?; - let latest_header = provider - .header_by_number(best.best_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(best.best_number.into()))?; - - let finalized_header = provider - .last_finalized_block_number()? - .map(|num| provider.sealed_header(num)) - .transpose()? - .flatten(); - - let safe_header = provider - .last_safe_block_number()? - .map(|num| provider.sealed_header(num)) - .transpose()? - .flatten(); - - Ok(Self::with_blocks( - database, - tree, - SealedHeader::new(latest_header, best.best_hash), - finalized_header, - safe_header, - )) - } - - /// Ensures that the given block number is canonical (synced) - /// - /// This is a helper for guarding the [`HistoricalStateProvider`] against block numbers that are - /// out of range and would lead to invalid results, mainly during initial sync. - /// - /// Verifying the `block_number` would be expensive since we need to lookup sync table - /// Instead, we ensure that the `block_number` is within the range of the - /// [`Self::best_block_number`] which is updated when a block is synced. - #[inline] - fn ensure_canonical_block(&self, block_number: BlockNumber) -> ProviderResult<()> { - let latest = self.best_block_number()?; - if block_number > latest { - Err(ProviderError::HeaderNotFound(block_number.into())) - } else { - Ok(()) - } - } -} - -impl BlockchainProvider -where - Self: StateProviderFactory, - N: NodeTypesWithDB, -{ - /// Return a [`StateProviderBox`] that contains bundle state data provider. - /// Used to inspect or execute transaction on the pending state. 
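One likely reason for the hand-written `Clone` impl above rather than `#[derive(Clone)]`: a derive would put an `N: Clone` bound on the node-types parameter, even though only the field handles actually need cloning. A small sketch of the difference, using simplified stand-in types:

use std::{marker::PhantomData, sync::Arc};

// `N` is a pure marker (like the node-types parameter) and is never cloned.
struct Provider<N> {
    database: Arc<String>, // stands in for the shared database handle
    _marker: PhantomData<N>,
}

// Manual impl: clones the fields without requiring `N: Clone`.
impl<N> Clone for Provider<N> {
    fn clone(&self) -> Self {
        Self { database: self.database.clone(), _marker: PhantomData }
    }
}

struct NotClone; // a marker type that does not implement Clone

fn main() {
    let p: Provider<NotClone> =
        Provider { database: Arc::new("db".into()), _marker: PhantomData };
    let p2 = p.clone(); // compiles because no `N: Clone` bound is required
    println!("cloned provider sharing db: {}", p2.database);
}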
- fn pending_with_provider( - &self, - bundle_state_data: Box, - ) -> ProviderResult { - let canonical_fork = bundle_state_data.canonical_fork(); - trace!(target: "providers::blockchain", ?canonical_fork, "Returning post state provider"); - - let state_provider = self.history_by_block_hash(canonical_fork.hash)?; - let bundle_state_provider = BundleStateProvider::new(state_provider, bundle_state_data); - Ok(Box::new(bundle_state_provider)) - } -} - -impl NodePrimitivesProvider for BlockchainProvider { - type Primitives = N::Primitives; -} - -impl DatabaseProviderFactory for BlockchainProvider { - type DB = N::DB; - type Provider = as DatabaseProviderFactory>::Provider; - type ProviderRW = as DatabaseProviderFactory>::ProviderRW; - - fn database_provider_ro(&self) -> ProviderResult { - self.database.database_provider_ro() - } - - fn database_provider_rw(&self) -> ProviderResult { - self.database.database_provider_rw() - } -} - -impl StateCommitmentProvider for BlockchainProvider { - type StateCommitment = N::StateCommitment; -} - -impl StaticFileProviderFactory for BlockchainProvider { - fn static_file_provider(&self) -> StaticFileProvider { - self.database.static_file_provider() - } -} - -impl HeaderProvider for BlockchainProvider { - type Header = Header; - - fn header(&self, block_hash: &BlockHash) -> ProviderResult> { - self.database.header(block_hash) - } - - fn header_by_number(&self, num: BlockNumber) -> ProviderResult> { - self.database.header_by_number(num) - } - - fn header_td(&self, hash: &BlockHash) -> ProviderResult> { - self.database.header_td(hash) - } - - fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { - self.database.header_td_by_number(number) - } - - fn headers_range(&self, range: impl RangeBounds) -> ProviderResult> { - self.database.headers_range(range) - } - - fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - self.database.sealed_header(number) - } - - fn sealed_headers_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.database.sealed_headers_range(range) - } - - fn sealed_headers_while( - &self, - range: impl RangeBounds, - predicate: impl FnMut(&SealedHeader) -> bool, - ) -> ProviderResult> { - self.database.sealed_headers_while(range, predicate) - } -} - -impl BlockHashReader for BlockchainProvider { - fn block_hash(&self, number: u64) -> ProviderResult> { - self.database.block_hash(number) - } - - fn canonical_hashes_range( - &self, - start: BlockNumber, - end: BlockNumber, - ) -> ProviderResult> { - self.database.canonical_hashes_range(start, end) - } -} - -impl BlockNumReader for BlockchainProvider { - fn chain_info(&self) -> ProviderResult { - Ok(self.chain_info.chain_info()) - } - - fn best_block_number(&self) -> ProviderResult { - Ok(self.chain_info.get_canonical_block_number()) - } - - fn last_block_number(&self) -> ProviderResult { - self.database.last_block_number() - } - - fn block_number(&self, hash: B256) -> ProviderResult> { - self.database.block_number(hash) - } -} - -impl BlockIdReader for BlockchainProvider { - fn pending_block_num_hash(&self) -> ProviderResult> { - Ok(self.tree.pending_block_num_hash()) - } - - fn safe_block_num_hash(&self) -> ProviderResult> { - Ok(self.chain_info.get_safe_num_hash()) - } - - fn finalized_block_num_hash(&self) -> ProviderResult> { - Ok(self.chain_info.get_finalized_num_hash()) - } -} - -impl BlockReader for BlockchainProvider { - type Block = BlockTy; - - fn find_block_by_hash( - &self, - hash: B256, - source: BlockSource, - ) -> ProviderResult> { - 
let block = match source { - BlockSource::Any => { - // check database first - let mut block = self.database.block_by_hash(hash)?; - if block.is_none() { - // Note: it's fine to return the unsealed block because the caller already has - // the hash - block = self.tree.block_by_hash(hash).map(|block| block.unseal()); - } - block - } - BlockSource::Pending => self.tree.block_by_hash(hash).map(|block| block.unseal()), - BlockSource::Canonical => self.database.block_by_hash(hash)?, - }; - - Ok(block) - } - - fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { - match id { - BlockHashOrNumber::Hash(hash) => self.find_block_by_hash(hash, BlockSource::Any), - BlockHashOrNumber::Number(num) => self.database.block_by_number(num), - } - } - - fn pending_block(&self) -> ProviderResult>> { - Ok(self.tree.pending_block()) - } - - fn pending_block_with_senders( - &self, - ) -> ProviderResult>> { - Ok(self.tree.pending_block_with_senders()) - } - - fn pending_block_and_receipts( - &self, - ) -> ProviderResult, Vec)>> { - Ok(self.tree.pending_block_and_receipts()) - } - - /// Returns the block with senders with matching number or hash from database. - /// - /// **NOTE: If [`TransactionVariant::NoHash`] is provided then the transactions have invalid - /// hashes, since they would need to be calculated on the spot, and we want fast querying.** - /// - /// Returns `None` if block is not found. - fn block_with_senders( - &self, - id: BlockHashOrNumber, - transaction_kind: TransactionVariant, - ) -> ProviderResult>> { - self.database.block_with_senders(id, transaction_kind) - } - - fn sealed_block_with_senders( - &self, - id: BlockHashOrNumber, - transaction_kind: TransactionVariant, - ) -> ProviderResult>> { - self.database.sealed_block_with_senders(id, transaction_kind) - } - - fn block_range(&self, range: RangeInclusive) -> ProviderResult> { - self.database.block_range(range) - } - - fn block_with_senders_range( - &self, - range: RangeInclusive, - ) -> ProviderResult>> { - self.database.block_with_senders_range(range) - } - - fn sealed_block_with_senders_range( - &self, - range: RangeInclusive, - ) -> ProviderResult>> { - self.database.sealed_block_with_senders_range(range) - } -} - -impl TransactionsProvider for BlockchainProvider { - type Transaction = TxTy; - - fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { - self.database.transaction_id(tx_hash) - } - - fn transaction_by_id(&self, id: TxNumber) -> ProviderResult> { - self.database.transaction_by_id(id) - } - - fn transaction_by_id_unhashed( - &self, - id: TxNumber, - ) -> ProviderResult> { - self.database.transaction_by_id_unhashed(id) - } - - fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { - self.database.transaction_by_hash(hash) - } - - fn transaction_by_hash_with_meta( - &self, - tx_hash: TxHash, - ) -> ProviderResult> { - self.database.transaction_by_hash_with_meta(tx_hash) - } - - fn transaction_block(&self, id: TxNumber) -> ProviderResult> { - self.database.transaction_block(id) - } - - fn transactions_by_block( - &self, - id: BlockHashOrNumber, - ) -> ProviderResult>> { - self.database.transactions_by_block(id) - } - - fn transactions_by_block_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult>> { - self.database.transactions_by_block_range(range) - } - - fn transactions_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.database.transactions_by_tx_range(range) - } - - fn senders_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - 
self.database.senders_by_tx_range(range) - } - - fn transaction_sender(&self, id: TxNumber) -> ProviderResult> { - self.database.transaction_sender(id) - } -} - -impl ReceiptProvider for BlockchainProvider { - type Receipt = ReceiptTy; - - fn receipt(&self, id: TxNumber) -> ProviderResult> { - self.database.receipt(id) - } - - fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { - self.database.receipt_by_hash(hash) - } - - fn receipts_by_block( - &self, - block: BlockHashOrNumber, - ) -> ProviderResult>> { - self.database.receipts_by_block(block) - } - - fn receipts_by_tx_range( - &self, - range: impl RangeBounds, - ) -> ProviderResult> { - self.database.receipts_by_tx_range(range) - } -} - -impl ReceiptProviderIdExt for BlockchainProvider { - fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { - match block { - BlockId::Hash(rpc_block_hash) => { - let mut receipts = self.receipts_by_block(rpc_block_hash.block_hash.into())?; - if receipts.is_none() && !rpc_block_hash.require_canonical.unwrap_or(false) { - receipts = self.tree.receipts_by_block_hash(rpc_block_hash.block_hash); - } - Ok(receipts) - } - BlockId::Number(num_tag) => match num_tag { - BlockNumberOrTag::Pending => Ok(self.tree.pending_receipts()), - _ => { - if let Some(num) = self.convert_block_number(num_tag)? { - self.receipts_by_block(num.into()) - } else { - Ok(None) - } - } - }, - } - } -} - -impl WithdrawalsProvider for BlockchainProvider { - fn withdrawals_by_block( - &self, - id: BlockHashOrNumber, - timestamp: u64, - ) -> ProviderResult> { - self.database.withdrawals_by_block(id, timestamp) - } -} - -impl OmmersProvider for BlockchainProvider { - fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { - self.database.ommers(id) - } -} - -impl BlockBodyIndicesProvider for BlockchainProvider { - fn block_body_indices( - &self, - number: BlockNumber, - ) -> ProviderResult> { - self.database.block_body_indices(number) - } -} - -impl StageCheckpointReader for BlockchainProvider { - fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { - self.database.provider()?.get_stage_checkpoint(id) - } - - fn get_stage_checkpoint_progress(&self, id: StageId) -> ProviderResult>> { - self.database.provider()?.get_stage_checkpoint_progress(id) - } - - fn get_all_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_all_checkpoints() - } -} - -impl PruneCheckpointReader for BlockchainProvider { - fn get_prune_checkpoint( - &self, - segment: PruneSegment, - ) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoint(segment) - } - - fn get_prune_checkpoints(&self) -> ProviderResult> { - self.database.provider()?.get_prune_checkpoints() - } -} - -impl ChainSpecProvider for BlockchainProvider { - type ChainSpec = N::ChainSpec; - - fn chain_spec(&self) -> Arc { - self.database.chain_spec() - } -} - -impl StateProviderFactory for BlockchainProvider { - /// Storage provider for latest block - fn latest(&self) -> ProviderResult { - trace!(target: "providers::blockchain", "Getting latest block state provider"); - self.database.latest() - } - - /// Returns a [`StateProviderBox`] indexed by the given block number or tag. - /// - /// Note: if a number is provided this will only look at historical(canonical) state. 
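The `state_by_block_number_or_tag` body that follows resolves named tags to a concrete block first (finalized and safe state can only be looked up by hash) and treats plain numbers as canonical history only. A simplified, hypothetical sketch of that dispatch shape, not the real provider API:

// Illustrative stand-in for BlockNumberOrTag resolution.
#[derive(Clone, Copy, Debug)]
enum Tag {
    Latest,
    Finalized,
    Safe,
    Earliest,
    Pending,
    Number(u64),
}

fn resolve(tag: Tag, latest: u64, finalized: Option<u64>, safe: Option<u64>) -> Result<u64, &'static str> {
    match tag {
        // Pending falls back to latest here, like `pending()` does when no
        // pending block exists.
        Tag::Latest | Tag::Pending => Ok(latest),
        Tag::Finalized => finalized.ok_or("finalized block not found"),
        Tag::Safe => safe.ok_or("safe block not found"),
        Tag::Earliest => Ok(0),
        // Mirrors `ensure_canonical_block`: numbers beyond the best block are
        // rejected instead of consulting any pending state.
        Tag::Number(n) if n <= latest => Ok(n),
        Tag::Number(_) => Err("header not found"),
    }
}

fn main() {
    assert_eq!(resolve(Tag::Number(3), 10, Some(8), Some(9)), Ok(3));
    assert_eq!(resolve(Tag::Number(11), 10, None, None), Err("header not found"));
    println!("dispatch resolves tags as expected");
}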
- fn state_by_block_number_or_tag(
- &self,
- number_or_tag: BlockNumberOrTag,
- ) -> ProviderResult<StateProviderBox> {
- match number_or_tag {
- BlockNumberOrTag::Latest => self.latest(),
- BlockNumberOrTag::Finalized => {
- // we can only get the finalized state by hash, not by num
- let hash =
- self.finalized_block_hash()?.ok_or(ProviderError::FinalizedBlockNotFound)?;
-
- // only look at historical state
- self.history_by_block_hash(hash)
- }
- BlockNumberOrTag::Safe => {
- // we can only get the safe state by hash, not by num
- let hash = self.safe_block_hash()?.ok_or(ProviderError::SafeBlockNotFound)?;
-
- self.history_by_block_hash(hash)
- }
- BlockNumberOrTag::Earliest => self.history_by_block_number(0),
- BlockNumberOrTag::Pending => self.pending(),
- BlockNumberOrTag::Number(num) => {
- // Note: The `BlockchainProvider` could also lookup the tree for the given block number, if for example the block number is `latest + 1`, however this should only support canonical state:
- self.history_by_block_number(num)
- }
- }
- }
-
- fn history_by_block_number(
- &self,
- block_number: BlockNumber,
- ) -> ProviderResult<StateProviderBox> {
- trace!(target: "providers::blockchain", ?block_number, "Getting history by block number");
- self.ensure_canonical_block(block_number)?;
- self.database.history_by_block_number(block_number)
- }
-
- fn history_by_block_hash(&self, block_hash: BlockHash) -> ProviderResult<StateProviderBox> {
- trace!(target: "providers::blockchain", ?block_hash, "Getting history by block hash");
- self.database.history_by_block_hash(block_hash)
- }
-
- fn state_by_block_hash(&self, block: BlockHash) -> ProviderResult<StateProviderBox> {
- trace!(target: "providers::blockchain", ?block, "Getting state by block hash");
- let mut state = self.history_by_block_hash(block);
-
- // we failed to get the state by hash, from disk, hash block be the pending block
- if state.is_err() {
- if let Ok(Some(pending)) = self.pending_state_by_hash(block) {
- // we found pending block by hash
- state = Ok(pending)
- }
- }
-
- state
- }
-
- /// Returns the state provider for pending state.
- /// - /// If there's no pending block available then the latest state provider is returned: - /// [`Self::latest`] - fn pending(&self) -> ProviderResult { - trace!(target: "providers::blockchain", "Getting provider for pending state"); - - if let Some(block) = self.tree.pending_block_num_hash() { - if let Ok(pending) = self.tree.pending_state_provider(block.hash) { - return self.pending_with_provider(pending) - } - } - - // fallback to latest state if the pending block is not available - self.latest() - } - - fn pending_state_by_hash(&self, block_hash: B256) -> ProviderResult> { - if let Some(state) = self.tree.find_pending_state_provider(block_hash) { - return Ok(Some(self.pending_with_provider(state)?)) - } - Ok(None) - } -} - -impl BlockchainTreeEngine for BlockchainProvider { - fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { - self.tree.buffer_block(block) - } - - fn insert_block( - &self, - block: SealedBlockWithSenders, - validation_kind: BlockValidationKind, - ) -> Result { - self.tree.insert_block(block, validation_kind) - } - - fn finalize_block(&self, finalized_block: BlockNumber) -> ProviderResult<()> { - self.tree.finalize_block(finalized_block) - } - - fn connect_buffered_blocks_to_canonical_hashes_and_finalize( - &self, - last_finalized_block: BlockNumber, - ) -> Result<(), CanonicalError> { - self.tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block) - } - - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError> { - self.tree.update_block_hashes_and_clear_buffered() - } - - fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { - self.tree.connect_buffered_blocks_to_canonical_hashes() - } - - fn make_canonical(&self, block_hash: BlockHash) -> Result { - self.tree.make_canonical(block_hash) - } -} - -impl BlockchainTreeViewer for BlockchainProvider { - fn header_by_hash(&self, hash: BlockHash) -> Option { - self.tree.header_by_hash(hash) - } - - fn block_by_hash(&self, block_hash: BlockHash) -> Option { - self.tree.block_by_hash(block_hash) - } - - fn block_with_senders_by_hash(&self, block_hash: BlockHash) -> Option { - self.tree.block_with_senders_by_hash(block_hash) - } - - fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option { - self.tree.buffered_header_by_hash(block_hash) - } - - fn is_canonical(&self, hash: BlockHash) -> Result { - self.tree.is_canonical(hash) - } - - fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option { - self.tree.lowest_buffered_ancestor(hash) - } - - fn canonical_tip(&self) -> BlockNumHash { - self.tree.canonical_tip() - } - - fn pending_block_num_hash(&self) -> Option { - self.tree.pending_block_num_hash() - } - - fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { - self.tree.pending_block_and_receipts() - } - - fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { - self.tree.receipts_by_block_hash(block_hash) - } -} - -impl CanonChainTracker for BlockchainProvider { - type Header = HeaderTy; - - fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { - // update timestamp - self.chain_info.on_forkchoice_update_received(); - } - - fn last_received_update_timestamp(&self) -> Option { - self.chain_info.last_forkchoice_update_received_at() - } - - fn on_transition_configuration_exchanged(&self) { - self.chain_info.on_transition_configuration_exchanged(); - } - - fn last_exchanged_transition_configuration_timestamp(&self) -> Option { - 
self.chain_info.last_transition_configuration_exchanged_at() - } - - fn set_canonical_head(&self, header: SealedHeader) { - self.chain_info.set_canonical_head(header); - } - - fn set_safe(&self, header: SealedHeader) { - self.chain_info.set_safe(header); - } - - fn set_finalized(&self, header: SealedHeader) { - self.chain_info.set_finalized(header); - } -} - -impl BlockReaderIdExt for BlockchainProvider { - fn block_by_id(&self, id: BlockId) -> ProviderResult> { - match id { - BlockId::Number(num) => self.block_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: should we only apply this for the RPCs that are listed in EIP-1898? - // so not at the provider level? - // if we decide to do this at a higher level, then we can make this an automatic - // trait impl - if Some(true) == hash.require_canonical { - // check the database, canonical blocks are only stored in the database - self.find_block_by_hash(hash.block_hash, BlockSource::Canonical) - } else { - BlockReader::block_by_hash(self, hash.block_hash) - } - } - } - } - - fn header_by_number_or_tag( - &self, - id: BlockNumberOrTag, - ) -> ProviderResult> { - Ok(match id { - BlockNumberOrTag::Latest => Some(self.chain_info.get_canonical_head().unseal()), - BlockNumberOrTag::Finalized => { - self.chain_info.get_finalized_header().map(|h| h.unseal()) - } - BlockNumberOrTag::Safe => self.chain_info.get_safe_header().map(|h| h.unseal()), - BlockNumberOrTag::Earliest => self.header_by_number(0)?, - BlockNumberOrTag::Pending => self.tree.pending_header().map(|h| h.unseal()), - BlockNumberOrTag::Number(num) => self.header_by_number(num)?, - }) - } - - fn sealed_header_by_number_or_tag( - &self, - id: BlockNumberOrTag, - ) -> ProviderResult>> { - match id { - BlockNumberOrTag::Latest => Ok(Some(self.chain_info.get_canonical_head())), - BlockNumberOrTag::Finalized => Ok(self.chain_info.get_finalized_header()), - BlockNumberOrTag::Safe => Ok(self.chain_info.get_safe_header()), - BlockNumberOrTag::Earliest => self - .header_by_number(0)? - .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), - BlockNumberOrTag::Pending => Ok(self.tree.pending_header()), - BlockNumberOrTag::Number(num) => self - .header_by_number(num)? 
- .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), - } - } - - fn sealed_header_by_id( - &self, - id: BlockId, - ) -> ProviderResult>> { - Ok(match id { - BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), - }) - } - - fn header_by_id(&self, id: BlockId) -> ProviderResult> { - Ok(match id { - BlockId::Number(num) => self.header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?, - }) - } - - fn ommers_by_id(&self, id: BlockId) -> ProviderResult>> { - match id { - BlockId::Number(num) => self.ommers_by_number_or_tag(num), - BlockId::Hash(hash) => { - // TODO: EIP-1898 question, see above - // here it is not handled - self.ommers(BlockHashOrNumber::Hash(hash.block_hash)) - } - } - } -} - -impl BlockchainTreePendingStateProvider for BlockchainProvider { - fn find_pending_state_provider( - &self, - block_hash: BlockHash, - ) -> Option> { - self.tree.find_pending_state_provider(block_hash) - } -} - -impl CanonStateSubscriptions for BlockchainProvider { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - self.tree.subscribe_to_canonical_state() - } -} - -impl ForkChoiceSubscriptions for BlockchainProvider { - type Header = HeaderTy; - - fn subscribe_safe_block(&self) -> ForkChoiceNotifications { - let receiver = self.chain_info.subscribe_safe_block(); - ForkChoiceNotifications(receiver) - } - - fn subscribe_finalized_block(&self) -> ForkChoiceNotifications { - let receiver = self.chain_info.subscribe_finalized_block(); - ForkChoiceNotifications(receiver) - } -} - -impl ChangeSetReader for BlockchainProvider { - fn account_block_changeset( - &self, - block_number: BlockNumber, - ) -> ProviderResult> { - self.database.provider()?.account_block_changeset(block_number) - } -} - -impl AccountReader for BlockchainProvider { - /// Get basic account information. 
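// Note: this read goes straight to a fresh database provider; buffered or
// in-memory pending state from the tree is not consulted for account lookups.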
- fn basic_account(&self, address: &Address) -> ProviderResult> { - self.database.provider()?.basic_account(address) - } -} - -impl fmt::Debug for BlockchainProvider { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("BlockchainProvider").finish_non_exhaustive() - } -} diff --git a/crates/storage/provider/src/traits/mod.rs b/crates/storage/provider/src/traits/mod.rs index 4b3178fc6413f..09ba9f109bdf8 100644 --- a/crates/storage/provider/src/traits/mod.rs +++ b/crates/storage/provider/src/traits/mod.rs @@ -19,6 +19,3 @@ pub use static_file_provider::StaticFileProviderFactory; mod full; pub use full::{FullProvider, FullRpcProvider}; - -mod tree_viewer; -pub use tree_viewer::TreeViewer; diff --git a/crates/storage/provider/src/traits/tree_viewer.rs b/crates/storage/provider/src/traits/tree_viewer.rs deleted file mode 100644 index f75dbae24d22e..0000000000000 --- a/crates/storage/provider/src/traits/tree_viewer.rs +++ /dev/null @@ -1,22 +0,0 @@ -use crate::BlockchainTreePendingStateProvider; -use reth_blockchain_tree_api::{BlockchainTreeEngine, BlockchainTreeViewer}; -use reth_chain_state::CanonStateSubscriptions; - -/// Helper trait to combine all the traits we need for the `BlockchainProvider` -/// -/// This is a temporary solution -pub trait TreeViewer: - BlockchainTreeViewer - + BlockchainTreePendingStateProvider - + CanonStateSubscriptions - + BlockchainTreeEngine -{ -} - -impl TreeViewer for T where - T: BlockchainTreeViewer - + BlockchainTreePendingStateProvider - + CanonStateSubscriptions - + BlockchainTreeEngine -{ -} From 052a730e3c7b95e3055ac414e06dcfbbf1f828d1 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 13:51:51 +0100 Subject: [PATCH 021/113] chore!: rm legacy blockchain tree crate (#13726) --- Cargo.lock | 38 - Cargo.toml | 2 - crates/blockchain-tree/Cargo.toml | 89 - crates/blockchain-tree/docs/mermaid/tree.mmd | 21 - crates/blockchain-tree/src/block_buffer.rs | 494 ---- crates/blockchain-tree/src/block_indices.rs | 620 ----- crates/blockchain-tree/src/blockchain_tree.rs | 2442 ----------------- crates/blockchain-tree/src/bundle.rs | 69 - crates/blockchain-tree/src/canonical_chain.rs | 241 -- crates/blockchain-tree/src/chain.rs | 311 --- crates/blockchain-tree/src/config.rs | 91 - crates/blockchain-tree/src/externals.rs | 106 - crates/blockchain-tree/src/lib.rs | 59 - crates/blockchain-tree/src/metrics.rs | 153 -- crates/blockchain-tree/src/noop.rs | 140 - crates/blockchain-tree/src/shareable.rs | 205 -- crates/blockchain-tree/src/state.rs | 430 --- 17 files changed, 5511 deletions(-) delete mode 100644 crates/blockchain-tree/Cargo.toml delete mode 100644 crates/blockchain-tree/docs/mermaid/tree.mmd delete mode 100644 crates/blockchain-tree/src/block_buffer.rs delete mode 100644 crates/blockchain-tree/src/block_indices.rs delete mode 100644 crates/blockchain-tree/src/blockchain_tree.rs delete mode 100644 crates/blockchain-tree/src/bundle.rs delete mode 100644 crates/blockchain-tree/src/canonical_chain.rs delete mode 100644 crates/blockchain-tree/src/chain.rs delete mode 100644 crates/blockchain-tree/src/config.rs delete mode 100644 crates/blockchain-tree/src/externals.rs delete mode 100644 crates/blockchain-tree/src/lib.rs delete mode 100644 crates/blockchain-tree/src/metrics.rs delete mode 100644 crates/blockchain-tree/src/noop.rs delete mode 100644 crates/blockchain-tree/src/shareable.rs delete mode 100644 crates/blockchain-tree/src/state.rs diff --git a/Cargo.lock b/Cargo.lock index 22f123e02d1e5..0e3c08dc5eeb2 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -6517,44 +6517,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-blockchain-tree" -version = "1.1.5" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-genesis", - "alloy-primitives", - "aquamarine", - "assert_matches", - "linked_hash_set", - "metrics", - "parking_lot", - "reth-blockchain-tree-api", - "reth-chainspec", - "reth-consensus", - "reth-db", - "reth-db-api", - "reth-evm", - "reth-evm-ethereum", - "reth-execution-errors", - "reth-execution-types", - "reth-metrics", - "reth-network", - "reth-node-types", - "reth-primitives", - "reth-provider", - "reth-revm", - "reth-stages-api", - "reth-storage-errors", - "reth-testing-utils", - "reth-trie", - "reth-trie-db", - "reth-trie-parallel", - "tokio", - "tracing", -] - [[package]] name = "reth-blockchain-tree-api" version = "1.1.5" diff --git a/Cargo.toml b/Cargo.toml index 47d802c5ac148..890b79a14198b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,6 @@ members = [ "bin/reth-bench/", "bin/reth/", "crates/blockchain-tree-api/", - "crates/blockchain-tree/", "crates/chain-state/", "crates/chainspec/", "crates/cli/cli/", @@ -304,7 +303,6 @@ op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-bench = { path = "bin/reth-bench" } -reth-blockchain-tree = { path = "crates/blockchain-tree" } reth-blockchain-tree-api = { path = "crates/blockchain-tree-api" } reth-chain-state = { path = "crates/chain-state" } reth-chainspec = { path = "crates/chainspec", default-features = false } diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml deleted file mode 100644 index 1c42a292aea71..0000000000000 --- a/crates/blockchain-tree/Cargo.toml +++ /dev/null @@ -1,89 +0,0 @@ -[package] -name = "reth-blockchain-tree" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true - -[lints] -workspace = true - -[dependencies] -# reth -reth-blockchain-tree-api.workspace = true -reth-primitives.workspace = true -reth-storage-errors.workspace = true -reth-execution-errors.workspace = true -reth-db.workspace = true -reth-db-api.workspace = true -reth-evm.workspace = true -reth-revm.workspace = true -reth-provider.workspace = true -reth-execution-types.workspace = true -reth-stages-api.workspace = true -reth-trie = { workspace = true, features = ["metrics"] } -reth-trie-db = { workspace = true, features = ["metrics"] } -reth-trie-parallel.workspace = true -reth-network.workspace = true -reth-consensus.workspace = true -reth-node-types.workspace = true - -# ethereum -alloy-consensus.workspace = true -alloy-primitives.workspace = true -alloy-eips.workspace = true - -# common -parking_lot.workspace = true -tracing.workspace = true -tokio = { workspace = true, features = ["macros", "sync"] } - -# metrics -reth-metrics = { workspace = true, features = ["common"] } -metrics.workspace = true - -# misc -aquamarine.workspace = true -linked_hash_set.workspace = true - -[dev-dependencies] -reth-chainspec.workspace = true -reth-db = { workspace = true, features = ["test-utils"] } -reth-primitives = { workspace = true, features = ["test-utils"] } -reth-provider = { workspace = true, features = ["test-utils"] } -reth-evm = { workspace = true, features = ["test-utils"] } -reth-consensus = { workspace = true, features = ["test-utils"] } -reth-testing-utils.workspace = true -reth-revm.workspace = true 
-reth-evm-ethereum.workspace = true -reth-execution-types.workspace = true -parking_lot.workspace = true -assert_matches.workspace = true -alloy-genesis.workspace = true -alloy-consensus.workspace = true - -[features] -test-utils = [ - "reth-chainspec/test-utils", - "reth-consensus/test-utils", - "reth-evm/test-utils", - "reth-network/test-utils", - "reth-primitives/test-utils", - "reth-revm/test-utils", - "reth-stages-api/test-utils", - "reth-db/test-utils", - "reth-db-api/test-utils", - "reth-provider/test-utils", - "reth-trie-db/test-utils", - "reth-trie/test-utils", - "reth-trie-parallel/test-utils" -] -optimism = [ - "reth-primitives/optimism", - "reth-provider/optimism", - "reth-execution-types/optimism", - "reth-db/optimism", - "reth-db-api/optimism" -] diff --git a/crates/blockchain-tree/docs/mermaid/tree.mmd b/crates/blockchain-tree/docs/mermaid/tree.mmd deleted file mode 100644 index c9b41b857b17c..0000000000000 --- a/crates/blockchain-tree/docs/mermaid/tree.mmd +++ /dev/null @@ -1,21 +0,0 @@ -flowchart BT - subgraph canonical chain - CanonState:::state - block0canon:::canon -->block1canon:::canon -->block2canon:::canon -->block3canon:::canon --> - block4canon:::canon --> block5canon:::canon - end - block5canon --> block6pending1:::pending - block5canon --> block6pending2:::pending - subgraph sidechain2 - S2State:::state - block3canon --> block4s2:::sidechain --> block5s2:::sidechain - end - subgraph sidechain1 - S1State:::state - block2canon --> block3s1:::sidechain --> block4s1:::sidechain --> block5s1:::sidechain --> - block6s1:::sidechain - end - classDef state fill:#1882C4 - classDef canon fill:#8AC926 - classDef pending fill:#FFCA3A - classDef sidechain fill:#FF595E diff --git a/crates/blockchain-tree/src/block_buffer.rs b/crates/blockchain-tree/src/block_buffer.rs deleted file mode 100644 index 994ed82cfb947..0000000000000 --- a/crates/blockchain-tree/src/block_buffer.rs +++ /dev/null @@ -1,494 +0,0 @@ -use crate::metrics::BlockBufferMetrics; -use alloy_consensus::BlockHeader; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_network::cache::LruCache; -use reth_node_types::Block; -use reth_primitives::SealedBlockWithSenders; -use std::collections::{BTreeMap, HashMap, HashSet}; - -/// Contains the tree of pending blocks that cannot be executed due to missing parent. -/// It allows to store unconnected blocks for potential future inclusion. -/// -/// The buffer has three main functionalities: -/// * [`BlockBuffer::insert_block`] for inserting blocks inside the buffer. -/// * [`BlockBuffer::remove_block_with_children`] for connecting blocks if the parent gets received -/// and inserted. -/// * [`BlockBuffer::remove_old_blocks`] to remove old blocks that precede the finalized number. -/// -/// Note: Buffer is limited by number of blocks that it can contain and eviction of the block -/// is done by last recently used block. -#[derive(Debug)] -pub struct BlockBuffer { - /// All blocks in the buffer stored by their block hash. - pub(crate) blocks: HashMap>, - /// Map of any parent block hash (even the ones not currently in the buffer) - /// to the buffered children. - /// Allows connecting buffered blocks by parent. - pub(crate) parent_to_child: HashMap>, - /// `BTreeMap` tracking the earliest blocks by block number. - /// Used for removal of old blocks that precede finalization. - pub(crate) earliest_blocks: BTreeMap>, - /// LRU used for tracing oldest inserted blocks that are going to be - /// first in line for evicting if `max_blocks` limit is hit. 
- ///
- /// Used as counter of amount of blocks inside buffer.
- pub(crate) lru: LruCache<BlockHash>,
- /// Various metrics for the block buffer.
- pub(crate) metrics: BlockBufferMetrics,
-}
-
-impl<B: Block> BlockBuffer<B> {
- /// Create a new buffer with a max limit of blocks
- pub fn new(limit: u32) -> Self {
- Self {
- blocks: Default::default(),
- parent_to_child: Default::default(),
- earliest_blocks: Default::default(),
- lru: LruCache::new(limit),
- metrics: Default::default(),
- }
- }
-
- /// Return reference to buffered blocks
- pub const fn blocks(&self) -> &HashMap<BlockHash, SealedBlockWithSenders<B>> {
- &self.blocks
- }
-
- /// Return reference to the requested block.
- pub fn block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders<B>> {
- self.blocks.get(hash)
- }
-
- /// Return a reference to the lowest ancestor of the given block in the buffer.
- pub fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders<B>> {
- let mut current_block = self.blocks.get(hash)?;
- while let Some(parent) = self.blocks.get(&current_block.parent_hash()) {
- current_block = parent;
- }
- Some(current_block)
- }
-
- /// Insert a validated block into the buffer.
- pub fn insert_block(&mut self, block: SealedBlockWithSenders<B>) {
- let hash = block.hash();
-
- self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash);
- self.earliest_blocks.entry(block.number()).or_default().insert(hash);
- self.blocks.insert(hash, block);
-
- if let (_, Some(evicted_hash)) = self.lru.insert_and_get_evicted(hash) {
- // evict the block if limit is hit
- if let Some(evicted_block) = self.remove_block(&evicted_hash) {
- // and remove it from its parent's child set
- self.remove_from_parent(evicted_block.parent_hash(), &evicted_hash);
- }
- }
- self.metrics.blocks.set(self.blocks.len() as f64);
- }
-
- /// Removes the given block from the buffer and also all the children of the block.
- ///
- /// This is used to get all the blocks that are dependent on the block that is included.
- ///
- /// Note that the order of returned blocks is important: blocks with a lower block number
- /// in the chain come first so that they can be executed in the correct order.
- pub fn remove_block_with_children(
- &mut self,
- parent_hash: &BlockHash,
- ) -> Vec<SealedBlockWithSenders<B>> {
- let removed = self
- .remove_block(parent_hash)
- .into_iter()
- .chain(self.remove_children(vec![*parent_hash]))
- .collect();
- self.metrics.blocks.set(self.blocks.len() as f64);
- removed
- }
-
- /// Discard all blocks at or below the given block number from the buffer.
- pub fn remove_old_blocks(&mut self, block_number: BlockNumber) {
- let mut block_hashes_to_remove = Vec::new();
-
- // discard all blocks that are before the finalized number.
- while let Some(entry) = self.earliest_blocks.first_entry() {
- if *entry.key() > block_number {
- break
- }
- let block_hashes = entry.remove();
- block_hashes_to_remove.extend(block_hashes);
- }
-
- // remove from other collections.
- for block_hash in &block_hashes_to_remove {
- // it's fine to call this even if the block was already removed above
- self.remove_block(block_hash);
- }
-
- self.remove_children(block_hashes_to_remove);
- self.metrics.blocks.set(self.blocks.len() as f64);
- }
-
- /// Remove a block hash from the `earliest_blocks` index.
- fn remove_from_earliest_blocks(&mut self, number: BlockNumber, hash: &BlockHash) {
- if let Some(entry) = self.earliest_blocks.get_mut(&number) {
- entry.remove(hash);
- if entry.is_empty() {
- self.earliest_blocks.remove(&number);
- }
- }
- }
-
- /// Remove from parent child connection. This method does not remove children.
- fn remove_from_parent(&mut self, parent_hash: BlockHash, hash: &BlockHash) { - // remove from parent to child connection, but only for this block parent. - if let Some(entry) = self.parent_to_child.get_mut(&parent_hash) { - entry.remove(hash); - // if set is empty remove block entry. - if entry.is_empty() { - self.parent_to_child.remove(&parent_hash); - } - } - } - - /// Removes block from inner collections. - /// This method will only remove the block if it's present inside `self.blocks`. - /// The block might be missing from other collections, the method will only ensure that it has - /// been removed. - fn remove_block(&mut self, hash: &BlockHash) -> Option> { - let block = self.blocks.remove(hash)?; - self.remove_from_earliest_blocks(block.number(), hash); - self.remove_from_parent(block.parent_hash(), hash); - self.lru.remove(hash); - Some(block) - } - - /// Remove all children and their descendants for the given blocks and return them. - fn remove_children(&mut self, parent_hashes: Vec) -> Vec> { - // remove all parent child connection and all the child children blocks that are connected - // to the discarded parent blocks. - let mut remove_parent_children = parent_hashes; - let mut removed_blocks = Vec::new(); - while let Some(parent_hash) = remove_parent_children.pop() { - // get this child blocks children and add them to the remove list. - if let Some(parent_children) = self.parent_to_child.remove(&parent_hash) { - // remove child from buffer - for child_hash in &parent_children { - if let Some(block) = self.remove_block(child_hash) { - removed_blocks.push(block); - } - } - remove_parent_children.extend(parent_children); - } - } - removed_blocks - } -} - -#[cfg(test)] -mod tests { - use crate::BlockBuffer; - use alloy_eips::BlockNumHash; - use alloy_primitives::BlockHash; - use reth_primitives::SealedBlockWithSenders; - use reth_testing_utils::generators::{self, random_block, BlockParams, Rng}; - use std::collections::HashMap; - - /// Create random block with specified number and parent hash. - fn create_block(rng: &mut R, number: u64, parent: BlockHash) -> SealedBlockWithSenders { - let block = - random_block(rng, number, BlockParams { parent: Some(parent), ..Default::default() }); - block.seal_with_senders().unwrap() - } - - /// Assert that all buffer collections have the same data length. - fn assert_buffer_lengths(buffer: &BlockBuffer, expected: usize) { - assert_eq!(buffer.blocks.len(), expected); - assert_eq!(buffer.lru.len(), expected); - assert_eq!( - buffer.parent_to_child.iter().fold(0, |acc, (_, hashes)| acc + hashes.len()), - expected - ); - assert_eq!( - buffer.earliest_blocks.iter().fold(0, |acc, (_, hashes)| acc + hashes.len()), - expected - ); - } - - /// Assert that the block was removed from all buffer collections. 
- fn assert_block_removal(buffer: &BlockBuffer, block: &SealedBlockWithSenders) { - assert!(!buffer.blocks.contains_key(&block.hash())); - assert!(buffer - .parent_to_child - .get(&block.parent_hash) - .and_then(|p| p.get(&block.hash())) - .is_none()); - assert!(buffer - .earliest_blocks - .get(&block.number) - .and_then(|hashes| hashes.get(&block.hash())) - .is_none()); - } - - #[test] - fn simple_insertion() { - let mut rng = generators::rng(); - let parent = rng.gen(); - let block1 = create_block(&mut rng, 10, parent); - let mut buffer = BlockBuffer::new(3); - - buffer.insert_block(block1.clone()); - assert_buffer_lengths(&buffer, 1); - assert_eq!(buffer.block(&block1.hash()), Some(&block1)); - } - - #[test] - fn take_entire_chain_of_children() { - let mut rng = generators::rng(); - - let main_parent_hash = rng.gen(); - let block1 = create_block(&mut rng, 10, main_parent_hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 12, block2.hash()); - let parent4 = rng.gen(); - let block4 = create_block(&mut rng, 14, parent4); - - let mut buffer = BlockBuffer::new(5); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2.clone()); - buffer.insert_block(block3.clone()); - buffer.insert_block(block4.clone()); - - assert_buffer_lengths(&buffer, 4); - assert_eq!(buffer.block(&block4.hash()), Some(&block4)); - assert_eq!(buffer.block(&block2.hash()), Some(&block2)); - assert_eq!(buffer.block(&main_parent_hash), None); - - assert_eq!(buffer.lowest_ancestor(&block4.hash()), Some(&block4)); - assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block1)); - assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1)); - assert_eq!( - buffer.remove_block_with_children(&main_parent_hash), - vec![block1, block2, block3] - ); - assert_buffer_lengths(&buffer, 1); - } - - #[test] - fn take_all_multi_level_children() { - let mut rng = generators::rng(); - - let main_parent_hash = rng.gen(); - let block1 = create_block(&mut rng, 10, main_parent_hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 11, block1.hash()); - let block4 = create_block(&mut rng, 12, block2.hash()); - - let mut buffer = BlockBuffer::new(5); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2.clone()); - buffer.insert_block(block3.clone()); - buffer.insert_block(block4.clone()); - - assert_buffer_lengths(&buffer, 4); - assert_eq!( - buffer - .remove_block_with_children(&main_parent_hash) - .into_iter() - .map(|b| (b.hash(), b)) - .collect::>(), - HashMap::from([ - (block1.hash(), block1), - (block2.hash(), block2), - (block3.hash(), block3), - (block4.hash(), block4) - ]) - ); - assert_buffer_lengths(&buffer, 0); - } - - #[test] - fn take_block_with_children() { - let mut rng = generators::rng(); - - let main_parent = BlockNumHash::new(9, rng.gen()); - let block1 = create_block(&mut rng, 10, main_parent.hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 11, block1.hash()); - let block4 = create_block(&mut rng, 12, block2.hash()); - - let mut buffer = BlockBuffer::new(5); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2.clone()); - buffer.insert_block(block3.clone()); - buffer.insert_block(block4.clone()); - - assert_buffer_lengths(&buffer, 4); - assert_eq!( - buffer - .remove_block_with_children(&block1.hash()) - .into_iter() - .map(|b| (b.hash(), b)) - .collect::>(), - HashMap::from([ - (block1.hash(), block1), - 
(block2.hash(), block2), - (block3.hash(), block3), - (block4.hash(), block4) - ]) - ); - assert_buffer_lengths(&buffer, 0); - } - - #[test] - fn remove_chain_of_children() { - let mut rng = generators::rng(); - - let main_parent = BlockNumHash::new(9, rng.gen()); - let block1 = create_block(&mut rng, 10, main_parent.hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 12, block2.hash()); - let parent4 = rng.gen(); - let block4 = create_block(&mut rng, 14, parent4); - - let mut buffer = BlockBuffer::new(5); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2); - buffer.insert_block(block3); - buffer.insert_block(block4); - - assert_buffer_lengths(&buffer, 4); - buffer.remove_old_blocks(block1.number); - assert_buffer_lengths(&buffer, 1); - } - - #[test] - fn remove_all_multi_level_children() { - let mut rng = generators::rng(); - - let main_parent = BlockNumHash::new(9, rng.gen()); - let block1 = create_block(&mut rng, 10, main_parent.hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block3 = create_block(&mut rng, 11, block1.hash()); - let block4 = create_block(&mut rng, 12, block2.hash()); - - let mut buffer = BlockBuffer::new(5); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block2); - buffer.insert_block(block3); - buffer.insert_block(block4); - - assert_buffer_lengths(&buffer, 4); - buffer.remove_old_blocks(block1.number); - assert_buffer_lengths(&buffer, 0); - } - - #[test] - fn remove_multi_chains() { - let mut rng = generators::rng(); - - let main_parent = BlockNumHash::new(9, rng.gen()); - let block1 = create_block(&mut rng, 10, main_parent.hash); - let block1a = create_block(&mut rng, 10, main_parent.hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - let block2a = create_block(&mut rng, 11, block1.hash()); - let random_parent1 = rng.gen(); - let random_block1 = create_block(&mut rng, 10, random_parent1); - let random_parent2 = rng.gen(); - let random_block2 = create_block(&mut rng, 11, random_parent2); - let random_parent3 = rng.gen(); - let random_block3 = create_block(&mut rng, 12, random_parent3); - - let mut buffer = BlockBuffer::new(10); - - buffer.insert_block(block1.clone()); - buffer.insert_block(block1a.clone()); - buffer.insert_block(block2.clone()); - buffer.insert_block(block2a.clone()); - buffer.insert_block(random_block1.clone()); - buffer.insert_block(random_block2.clone()); - buffer.insert_block(random_block3.clone()); - - // check that random blocks are their own ancestor, and that chains have proper ancestors - assert_eq!(buffer.lowest_ancestor(&random_block1.hash()), Some(&random_block1)); - assert_eq!(buffer.lowest_ancestor(&random_block2.hash()), Some(&random_block2)); - assert_eq!(buffer.lowest_ancestor(&random_block3.hash()), Some(&random_block3)); - - // descendants have ancestors - assert_eq!(buffer.lowest_ancestor(&block2a.hash()), Some(&block1)); - assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block1)); - - // roots are themselves - assert_eq!(buffer.lowest_ancestor(&block1a.hash()), Some(&block1a)); - assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1)); - - assert_buffer_lengths(&buffer, 7); - buffer.remove_old_blocks(10); - assert_buffer_lengths(&buffer, 2); - } - - #[test] - fn evict_with_gap() { - let mut rng = generators::rng(); - - let main_parent = BlockNumHash::new(9, rng.gen()); - let block1 = create_block(&mut rng, 10, main_parent.hash); - let block2 = create_block(&mut rng, 11, block1.hash()); - 
let block3 = create_block(&mut rng, 12, block2.hash());
- let parent4 = rng.gen();
- let block4 = create_block(&mut rng, 13, parent4);
-
- let mut buffer = BlockBuffer::new(3);
-
- buffer.insert_block(block1.clone());
- buffer.insert_block(block2.clone());
- buffer.insert_block(block3.clone());
-
- // pre-eviction block1 is the root
- assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block1));
- assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block1));
- assert_eq!(buffer.lowest_ancestor(&block1.hash()), Some(&block1));
-
- buffer.insert_block(block4.clone());
-
- assert_eq!(buffer.lowest_ancestor(&block4.hash()), Some(&block4));
-
- // block1 gets evicted
- assert_block_removal(&buffer, &block1);
-
- // check lowest ancestor results post eviction
- assert_eq!(buffer.lowest_ancestor(&block3.hash()), Some(&block2));
- assert_eq!(buffer.lowest_ancestor(&block2.hash()), Some(&block2));
- assert_eq!(buffer.lowest_ancestor(&block1.hash()), None);
-
- assert_buffer_lengths(&buffer, 3);
- }
-
- #[test]
- fn simple_eviction() {
- let mut rng = generators::rng();
-
- let main_parent = BlockNumHash::new(9, rng.gen());
- let block1 = create_block(&mut rng, 10, main_parent.hash);
- let block2 = create_block(&mut rng, 11, block1.hash());
- let block3 = create_block(&mut rng, 12, block2.hash());
- let parent4 = rng.gen();
- let block4 = create_block(&mut rng, 13, parent4);
-
- let mut buffer = BlockBuffer::new(3);
-
- buffer.insert_block(block1.clone());
- buffer.insert_block(block2);
- buffer.insert_block(block3);
- buffer.insert_block(block4);
-
- // block1 gets evicted
- assert_block_removal(&buffer, &block1);
-
- assert_buffer_lengths(&buffer, 3);
- }
-}
diff --git a/crates/blockchain-tree/src/block_indices.rs b/crates/blockchain-tree/src/block_indices.rs
deleted file mode 100644
index 26a676f4d36ce..0000000000000
--- a/crates/blockchain-tree/src/block_indices.rs
+++ /dev/null
@@ -1,620 +0,0 @@
-//! Implementation of [`BlockIndices`] related to [`super::BlockchainTree`]
-
-use super::state::SidechainId;
-use crate::canonical_chain::CanonicalChain;
-use alloy_eips::BlockNumHash;
-use alloy_primitives::{BlockHash, BlockNumber};
-use linked_hash_set::LinkedHashSet;
-use reth_execution_types::Chain;
-use reth_primitives::SealedBlockWithSenders;
-use std::collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet};
-
-/// Internal indices of the blocks and chains.
-///
-/// This is the main connection between blocks, chains and the canonical chain.
-///
-/// It contains a list of canonical block hashes, forks to child blocks, and a mapping of block hash
-/// to chain ID.
-#[derive(Debug, Clone)]
-pub struct BlockIndices {
- /// Last finalized block.
- last_finalized_block: BlockNumber,
- /// Non-finalized canonical chain. Contains a number of blocks (depending on `finalization_depth`).
- /// These blocks are found in `fork_to_child` but not inside `blocks_to_chain` or
- /// `number_to_block`, as those are sidechain-specific indices.
- canonical_chain: CanonicalChain,
- /// Index needed when discarding the chain, so we can remove connected chains from the tree.
- ///
- /// This maintains insertion order for all child blocks, so
- /// [`BlockIndices::pending_block_num_hash`] always returns the same block: the first child
- /// block we inserted.
- ///
- /// NOTE: It only contains blocks that are forks as keys, not all blocks.
- fork_to_child: HashMap<BlockHash, LinkedHashSet<BlockHash>>,
- /// Utility index from block number to block hash(es).
- ///
- /// This maps all blocks with the same block number to their hashes.
- /// - /// Can be used for RPC fetch block(s) in chain by its number. - /// - /// Note: This is a bijection: at all times `blocks_to_chain` and this map contain the block - /// hashes. - block_number_to_block_hashes: BTreeMap>, - /// Block hashes to the sidechain IDs they belong to. - blocks_to_chain: HashMap, -} - -impl BlockIndices { - /// Create new block indices structure - pub fn new( - last_finalized_block: BlockNumber, - canonical_chain: BTreeMap, - ) -> Self { - Self { - last_finalized_block, - canonical_chain: CanonicalChain::new(canonical_chain), - fork_to_child: Default::default(), - blocks_to_chain: Default::default(), - block_number_to_block_hashes: Default::default(), - } - } - - /// Return fork to child indices - pub const fn fork_to_child(&self) -> &HashMap> { - &self.fork_to_child - } - - /// Return block to sidechain id - #[allow(dead_code)] - pub(crate) const fn blocks_to_chain(&self) -> &HashMap { - &self.blocks_to_chain - } - - /// Returns the hash and number of the pending block. - /// - /// It is possible that multiple child blocks for the canonical tip exist. - /// This will always return the _first_ child we recorded for the canonical tip. - pub(crate) fn pending_block_num_hash(&self) -> Option { - let canonical_tip = self.canonical_tip(); - let hash = self.fork_to_child.get(&canonical_tip.hash)?.front().copied()?; - Some(BlockNumHash { number: canonical_tip.number + 1, hash }) - } - - /// Returns all pending block hashes. - /// - /// Pending blocks are considered blocks that are extending the canonical tip by one block - /// number and have their parent hash set to the canonical tip. - pub fn pending_blocks(&self) -> (BlockNumber, Vec) { - let canonical_tip = self.canonical_tip(); - let pending_blocks = self - .fork_to_child - .get(&canonical_tip.hash) - .cloned() - .unwrap_or_default() - .into_iter() - .collect(); - (canonical_tip.number + 1, pending_blocks) - } - - /// Last finalized block - pub const fn last_finalized_block(&self) -> BlockNumber { - self.last_finalized_block - } - - /// Insert non fork block. - pub(crate) fn insert_non_fork_block( - &mut self, - block_number: BlockNumber, - block_hash: BlockHash, - chain_id: SidechainId, - ) { - self.block_number_to_block_hashes.entry(block_number).or_default().insert(block_hash); - self.blocks_to_chain.insert(block_hash, chain_id); - } - - /// Insert block to chain and fork child indices of the new chain - pub(crate) fn insert_chain(&mut self, chain_id: SidechainId, chain: &Chain) { - for (number, block) in chain.blocks() { - // add block -> chain_id index - self.blocks_to_chain.insert(block.hash(), chain_id); - // add number -> block - self.block_number_to_block_hashes.entry(*number).or_default().insert(block.hash()); - } - let first = chain.first(); - // add parent block -> block index - self.fork_to_child.entry(first.parent_hash).or_default().insert_if_absent(first.hash()); - } - - /// Get the [`SidechainId`] for the given block hash if it exists. - pub(crate) fn get_side_chain_id(&self, block: &BlockHash) -> Option { - self.blocks_to_chain.get(block).copied() - } - - /// Update all block hashes. iterate over present and new list of canonical hashes and compare - /// them. Remove all mismatches, disconnect them and return all chains that needs to be - /// removed. - pub(crate) fn update_block_hashes( - &mut self, - hashes: BTreeMap, - ) -> (BTreeSet, Vec) { - // set new canonical hashes. 
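// After the new hashes are swapped in below, the loop that follows runs a
// two-pointer merge over the old and new mappings in ascending block-number
// order: numbers present only in the new chain are recorded as added,
// numbers present only in the old chain as removed, and equal numbers with
// differing hashes as both removed and re-added. Every removed hash then has
// its dependent sidechains folded into the returned removal set.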
- self.canonical_chain.replace(hashes.clone()); - - let mut new_hashes = hashes.into_iter(); - let mut old_hashes = self.canonical_chain().clone().into_iter(); - - let mut removed = Vec::new(); - let mut added = Vec::new(); - - let mut new_hash = new_hashes.next(); - let mut old_hash = old_hashes.next(); - - loop { - let Some(old_block_value) = old_hash else { - // end of old_hashes canonical chain. New chain has more blocks than old chain. - while let Some(new) = new_hash { - // add new blocks to added list. - added.push(new.into()); - new_hash = new_hashes.next(); - } - break - }; - let Some(new_block_value) = new_hash else { - // Old canonical chain had more block than new chain. - // remove all present block. - // this is mostly not going to happen as reorg should make new chain in Tree. - while let Some(rem) = old_hash { - removed.push(rem); - old_hash = old_hashes.next(); - } - break - }; - // compare old and new canonical block number - match new_block_value.0.cmp(&old_block_value.0) { - std::cmp::Ordering::Less => { - // new chain has more past blocks than old chain - added.push(new_block_value.into()); - new_hash = new_hashes.next(); - } - std::cmp::Ordering::Equal => { - if new_block_value.1 != old_block_value.1 { - // remove block hash as it is different - removed.push(old_block_value); - added.push(new_block_value.into()) - } - new_hash = new_hashes.next(); - old_hash = old_hashes.next(); - } - std::cmp::Ordering::Greater => { - // old chain has more past blocks than new chain - removed.push(old_block_value); - old_hash = old_hashes.next() - } - } - } - - // remove children of removed blocks - ( - removed.into_iter().fold(BTreeSet::new(), |mut fold, (number, hash)| { - fold.extend(self.remove_block(number, hash)); - fold - }), - added, - ) - } - - /// Remove chain from indices and return dependent chains that need to be removed. - /// Does the cleaning of the tree and removing blocks from the chain. - pub(crate) fn remove_chain(&mut self, chain: &Chain) -> BTreeSet { - chain - .blocks() - .iter() - .flat_map(|(block_number, block)| { - let block_hash = block.hash(); - self.remove_block(*block_number, block_hash) - }) - .collect() - } - - /// Remove Blocks from indices. - fn remove_block( - &mut self, - block_number: BlockNumber, - block_hash: BlockHash, - ) -> BTreeSet { - // rm number -> block - if let btree_map::Entry::Occupied(mut entry) = - self.block_number_to_block_hashes.entry(block_number) - { - let set = entry.get_mut(); - set.remove(&block_hash); - // remove set if empty - if set.is_empty() { - entry.remove(); - } - } - - // rm block -> chain_id - self.blocks_to_chain.remove(&block_hash); - - // rm fork -> child - let removed_fork = self.fork_to_child.remove(&block_hash); - removed_fork - .map(|fork_blocks| { - fork_blocks - .into_iter() - .filter_map(|fork_child| self.blocks_to_chain.remove(&fork_child)) - .collect() - }) - .unwrap_or_default() - } - - /// Remove all blocks from canonical list and insert new blocks to it. - /// - /// It is assumed that blocks are interconnected and that they connect to canonical chain - pub fn canonicalize_blocks(&mut self, blocks: &BTreeMap) { - if blocks.is_empty() { - return - } - - // Remove all blocks from canonical chain - let first_number = *blocks.first_key_value().unwrap().0; - - // this will remove all blocks numbers that are going to be replaced. 
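// (This keeps only canonical entries strictly below the first new block;
// everything from that number upward is re-inserted from `blocks` at the
// end of this function.)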
- self.canonical_chain.retain(|&number, _| number < first_number); - - // remove them from block to chain_id index - blocks.iter().map(|(_, b)| (b.number, b.hash(), b.parent_hash)).for_each( - |(number, hash, parent_hash)| { - // rm block -> chain_id - self.blocks_to_chain.remove(&hash); - - // rm number -> block - if let btree_map::Entry::Occupied(mut entry) = - self.block_number_to_block_hashes.entry(number) - { - let set = entry.get_mut(); - set.remove(&hash); - // remove set if empty - if set.is_empty() { - entry.remove(); - } - } - // rm fork block -> hash - if let hash_map::Entry::Occupied(mut entry) = self.fork_to_child.entry(parent_hash) - { - let set = entry.get_mut(); - set.remove(&hash); - // remove set if empty - if set.is_empty() { - entry.remove(); - } - } - }, - ); - - // insert new canonical - self.canonical_chain.extend(blocks.iter().map(|(number, block)| (*number, block.hash()))) - } - - /// this is function that is going to remove N number of last canonical hashes. - /// - /// NOTE: This is not safe standalone, as it will not disconnect - /// blocks that depend on unwinded canonical chain. And should be - /// used when canonical chain is reinserted inside Tree. - pub(crate) fn unwind_canonical_chain(&mut self, unwind_to: BlockNumber) { - // this will remove all blocks numbers that are going to be replaced. - self.canonical_chain.retain(|num, _| *num <= unwind_to); - } - - /// Used for finalization of block. - /// - /// Return list of chains for removal that depend on finalized canonical chain. - pub(crate) fn finalize_canonical_blocks( - &mut self, - finalized_block: BlockNumber, - num_of_additional_canonical_hashes_to_retain: u64, - ) -> BTreeSet { - // get finalized chains. blocks between [self.last_finalized,finalized_block). - // Dont remove finalized_block, as sidechain can point to it. - let finalized_blocks: Vec = self - .canonical_chain - .iter() - .filter(|(number, _)| *number >= self.last_finalized_block && *number < finalized_block) - .map(|(_, hash)| hash) - .collect(); - - // remove unneeded canonical hashes. - let remove_until = - finalized_block.saturating_sub(num_of_additional_canonical_hashes_to_retain); - self.canonical_chain.retain(|&number, _| number >= remove_until); - - let mut lose_chains = BTreeSet::new(); - - for block_hash in finalized_blocks { - // there is a fork block. - if let Some(fork_blocks) = self.fork_to_child.remove(&block_hash) { - lose_chains = fork_blocks.into_iter().fold(lose_chains, |mut fold, fork_child| { - if let Some(lose_chain) = self.blocks_to_chain.remove(&fork_child) { - fold.insert(lose_chain); - } - fold - }); - } - } - - // set last finalized block. - self.last_finalized_block = finalized_block; - - lose_chains - } - - /// Returns the block hash of the canonical block with the given number. - #[inline] - pub fn canonical_hash(&self, block_number: &BlockNumber) -> Option { - self.canonical_chain.canonical_hash(block_number) - } - - /// Returns the block number of the canonical block with the given hash. - #[inline] - pub fn canonical_number(&self, block_hash: &BlockHash) -> Option { - self.canonical_chain.canonical_number(block_hash) - } - - /// get canonical tip - #[inline] - pub fn canonical_tip(&self) -> BlockNumHash { - self.canonical_chain.tip() - } - - /// Canonical chain needed for execution of EVM. It should contain last 256 block hashes. 
- #[inline] - pub(crate) const fn canonical_chain(&self) -> &CanonicalChain { - &self.canonical_chain - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::Header; - use alloy_primitives::B256; - use reth_primitives::{SealedBlock, SealedHeader}; - - #[test] - fn pending_block_num_hash_returns_none_if_no_fork() { - // Create a new canonical chain with a single block (represented by its number and hash). - let canonical_chain = BTreeMap::from([(0, B256::from_slice(&[1; 32]))]); - - let block_indices = BlockIndices::new(0, canonical_chain); - - // No fork to child blocks, so there is no pending block. - assert_eq!(block_indices.pending_block_num_hash(), None); - } - - #[test] - fn pending_block_num_hash_works() { - // Create a canonical chain with multiple blocks at heights 1, 2, and 3. - let canonical_chain = BTreeMap::from([ - (1, B256::from_slice(&[1; 32])), - (2, B256::from_slice(&[2; 32])), - (3, B256::from_slice(&[3; 32])), - ]); - - let mut block_indices = BlockIndices::new(3, canonical_chain); - - // Define the hash of the parent block (the block at height 3 in the canonical chain). - let parent_hash = B256::from_slice(&[3; 32]); - - // Define the hashes of two child blocks that extend the canonical chain. - let child_hash_1 = B256::from_slice(&[2; 32]); - let child_hash_2 = B256::from_slice(&[3; 32]); - - // Create a set to store both child block hashes. - let mut child_set = LinkedHashSet::new(); - child_set.insert(child_hash_1); - child_set.insert(child_hash_2); - - // Associate the parent block hash with its children in the fork_to_child mapping. - block_indices.fork_to_child.insert(parent_hash, child_set); - - // Pending block should be the first child block. - assert_eq!( - block_indices.pending_block_num_hash(), - Some(BlockNumHash { number: 4, hash: child_hash_1 }) - ); - } - - #[test] - fn pending_blocks_returns_empty_if_no_fork() { - // Create a canonical chain with a single block at height 10. - let canonical_chain = BTreeMap::from([(10, B256::from_slice(&[1; 32]))]); - let block_indices = BlockIndices::new(0, canonical_chain); - - // No child blocks are associated with the canonical tip. - assert_eq!(block_indices.pending_blocks(), (11, Vec::new())); - } - - #[test] - fn pending_blocks_returns_multiple_children() { - // Define the hash of the parent block (the block at height 5 in the canonical chain). - let parent_hash = B256::from_slice(&[3; 32]); - - // Create a canonical chain with a block at height 5. - let canonical_chain = BTreeMap::from([(5, parent_hash)]); - let mut block_indices = BlockIndices::new(0, canonical_chain); - - // Define the hashes of two child blocks. - let child_hash_1 = B256::from_slice(&[4; 32]); - let child_hash_2 = B256::from_slice(&[5; 32]); - - // Create a set to store both child block hashes. - let mut child_set = LinkedHashSet::new(); - child_set.insert(child_hash_1); - child_set.insert(child_hash_2); - - // Associate the parent block hash with its children. - block_indices.fork_to_child.insert(parent_hash, child_set); - - // Pending blocks should be the two child blocks. - assert_eq!(block_indices.pending_blocks(), (6, vec![child_hash_1, child_hash_2])); - } - - #[test] - fn pending_blocks_with_multiple_forked_chains() { - // Define hashes for parent blocks and child blocks. - let parent_hash_1 = B256::from_slice(&[6; 32]); - let parent_hash_2 = B256::from_slice(&[7; 32]); - - // Create a canonical chain with blocks at heights 1 and 2. 
- let canonical_chain = BTreeMap::from([(1, parent_hash_1), (2, parent_hash_2)]); - - let mut block_indices = BlockIndices::new(2, canonical_chain); - - // Define hashes for child blocks. - let child_hash_1 = B256::from_slice(&[8; 32]); - let child_hash_2 = B256::from_slice(&[9; 32]); - - // Create sets to store child blocks for each parent block. - let mut child_set_1 = LinkedHashSet::new(); - let mut child_set_2 = LinkedHashSet::new(); - child_set_1.insert(child_hash_1); - child_set_2.insert(child_hash_2); - - // Associate parent block hashes with their child blocks. - block_indices.fork_to_child.insert(parent_hash_1, child_set_1); - block_indices.fork_to_child.insert(parent_hash_2, child_set_2); - - // Check that the pending blocks are only those extending the canonical tip. - assert_eq!(block_indices.pending_blocks(), (3, vec![child_hash_2])); - } - - #[test] - fn insert_non_fork_block_adds_block_correctly() { - // Create a new BlockIndices instance with an empty state. - let mut block_indices = BlockIndices::new(0, BTreeMap::new()); - - // Define test parameters. - let block_number = 1; - let block_hash = B256::from_slice(&[1; 32]); - let chain_id = SidechainId::from(42); - - // Insert the block into the BlockIndices instance. - block_indices.insert_non_fork_block(block_number, block_hash, chain_id); - - // Check that the block number to block hashes mapping includes the new block hash. - assert_eq!( - block_indices.block_number_to_block_hashes.get(&block_number), - Some(&HashSet::from([block_hash])) - ); - - // Check that the block hash to chain ID mapping includes the new entry. - assert_eq!(block_indices.blocks_to_chain.get(&block_hash), Some(&chain_id)); - } - - #[test] - fn insert_non_fork_block_combined_tests() { - // Create a new BlockIndices instance with an empty state. - let mut block_indices = BlockIndices::new(0, BTreeMap::new()); - - // Define test parameters. - let block_number_1 = 2; - let block_hash_1 = B256::from_slice(&[1; 32]); - let block_hash_2 = B256::from_slice(&[2; 32]); - let chain_id_1 = SidechainId::from(84); - - let block_number_2 = 4; - let block_hash_3 = B256::from_slice(&[3; 32]); - let chain_id_2 = SidechainId::from(200); - - // Insert multiple hashes for the same block number. - block_indices.insert_non_fork_block(block_number_1, block_hash_1, chain_id_1); - block_indices.insert_non_fork_block(block_number_1, block_hash_2, chain_id_1); - - // Insert blocks with different numbers. - block_indices.insert_non_fork_block(block_number_2, block_hash_3, chain_id_2); - - // Block number 1 should have two block hashes associated with it. - let mut expected_hashes_for_block_1 = HashSet::default(); - expected_hashes_for_block_1.insert(block_hash_1); - expected_hashes_for_block_1.insert(block_hash_2); - assert_eq!( - block_indices.block_number_to_block_hashes.get(&block_number_1), - Some(&expected_hashes_for_block_1) - ); - - // Check that the block hashes for block_number_1 are associated with the correct chain ID. - assert_eq!(block_indices.blocks_to_chain.get(&block_hash_1), Some(&chain_id_1)); - assert_eq!(block_indices.blocks_to_chain.get(&block_hash_2), Some(&chain_id_1)); - - // Block number 2 should have a single block hash associated with it. - assert_eq!( - block_indices.block_number_to_block_hashes.get(&block_number_2), - Some(&HashSet::from([block_hash_3])) - ); - - // Block hash 3 should be associated with the correct chain ID. 
- assert_eq!(block_indices.blocks_to_chain.get(&block_hash_3), Some(&chain_id_2)); - } - - #[test] - fn insert_chain_validates_insertion() { - // Create a new BlockIndices instance with an empty state. - let mut block_indices = BlockIndices::new(0, BTreeMap::new()); - - // Define test parameters. - let chain_id = SidechainId::from(42); - - // Define some example blocks and their hashes. - let block_hash_1 = B256::from_slice(&[1; 32]); - let block_hash_2 = B256::from_slice(&[2; 32]); - let parent_hash = B256::from_slice(&[0; 32]); - - // Define blocks with their numbers and parent hashes. - let block_1 = SealedBlockWithSenders { - block: SealedBlock::new( - SealedHeader::new( - Header { parent_hash, number: 1, ..Default::default() }, - block_hash_1, - ), - Default::default(), - ), - ..Default::default() - }; - let block_2 = SealedBlockWithSenders { - block: SealedBlock::new( - SealedHeader::new( - Header { parent_hash: block_hash_1, number: 2, ..Default::default() }, - block_hash_2, - ), - Default::default(), - ), - ..Default::default() - }; - - // Define a chain containing the blocks. - let chain = Chain::new(vec![block_1, block_2], Default::default(), Default::default()); - - // Insert the chain into the BlockIndices. - block_indices.insert_chain(chain_id, &chain); - - // Check that the blocks are correctly mapped to the chain ID. - assert_eq!(block_indices.blocks_to_chain.get(&block_hash_1), Some(&chain_id)); - assert_eq!(block_indices.blocks_to_chain.get(&block_hash_2), Some(&chain_id)); - - // Check that block numbers map to their respective hashes. - let mut expected_hashes_1 = HashSet::default(); - expected_hashes_1.insert(block_hash_1); - assert_eq!(block_indices.block_number_to_block_hashes.get(&1), Some(&expected_hashes_1)); - - let mut expected_hashes_2 = HashSet::default(); - expected_hashes_2.insert(block_hash_2); - assert_eq!(block_indices.block_number_to_block_hashes.get(&2), Some(&expected_hashes_2)); - - // Check that the fork_to_child mapping contains the correct parent-child relationship. - // We take the first block of the chain. - let mut expected_children = LinkedHashSet::new(); - expected_children.insert(block_hash_1); - assert_eq!(block_indices.fork_to_child.get(&parent_hash), Some(&expected_children)); - } -} diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs deleted file mode 100644 index 465f779e60bc6..0000000000000 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ /dev/null @@ -1,2442 +0,0 @@ -//! 
Implementation of [`BlockchainTree`] - -use crate::{ - externals::TreeNodeTypes, - metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics}, - state::{SidechainId, TreeState}, - AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals, -}; -use alloy_eips::{BlockNumHash, ForkBlock}; -use alloy_primitives::{BlockHash, BlockNumber, B256, U256}; -use reth_blockchain_tree_api::{ - error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, -}; -use reth_consensus::{Consensus, ConsensusError}; -use reth_evm::execute::BlockExecutorProvider; -use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_execution_types::{Chain, ExecutionOutcome}; -use reth_node_types::NodeTypesWithDB; -use reth_primitives::{ - EthereumHardfork, GotExpected, Hardforks, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, StaticFileSegment, -}; -use reth_provider::{ - BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, - CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, - ChainSplitTarget, DBProvider, DisplayBlocksChain, HashedPostStateProvider, HeaderProvider, - ProviderError, StaticFileProviderFactory, StorageLocation, -}; -use reth_stages_api::{MetricEvent, MetricEventsSender}; -use reth_storage_errors::provider::{ProviderResult, RootMismatch}; -use reth_trie::{hashed_cursor::HashedPostStateCursorFactory, StateRoot}; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot}; -use std::{ - collections::{btree_map::Entry, BTreeMap, HashSet}, - sync::Arc, -}; -use tracing::{debug, error, info, instrument, trace, warn}; - -#[cfg_attr(doc, aquamarine::aquamarine)] -/// A Tree of chains. -/// -/// The flowchart represents all the states a block can have inside the tree. -/// -/// - Green blocks belong to the canonical chain and are saved inside the database. -/// - Pending blocks and sidechains are found in-memory inside [`BlockchainTree`]. -/// -/// Both pending chains and sidechains have the same mechanisms, the only difference is when they -/// get committed to the database. -/// -/// For pending, it is an append operation, but for sidechains they need to move the current -/// canonical blocks to the tree (by removing them from the database), and commit the sidechain -/// blocks to the database to become the canonical chain (reorg). -/// -/// `include_mmd!("docs/mermaid/tree.mmd`") -/// -/// # Main functions -/// * [`BlockchainTree::insert_block`]: Connect a block to a chain, execute it, and if valid, insert -/// the block into the tree. -/// * [`BlockchainTree::finalize_block`]: Remove chains that branch off of the now finalized block. -/// * [`BlockchainTree::make_canonical`]: Check if we have the hash of a block that is the current -/// canonical head and commit it to db. -#[derive(Debug)] -pub struct BlockchainTree { - /// The state of the tree - /// - /// Tracks all the chains, the block indices, and the block buffer. - state: TreeState, - /// External components (the database, consensus engine etc.) - externals: TreeExternals, - /// Tree configuration - config: BlockchainTreeConfig, - /// Broadcast channel for canon state changes notifications. - canon_state_notification_sender: CanonStateNotificationSender, - /// Metrics for sync stages. - sync_metrics_tx: Option, - /// Metrics for the blockchain tree. 
- metrics: TreeMetrics, -} - -impl BlockchainTree { - /// Subscribe to new blocks events. - /// - /// Note: Only canonical blocks are emitted by the tree. - pub fn subscribe_canon_state(&self) -> CanonStateNotifications { - self.canon_state_notification_sender.subscribe() - } - - /// Returns a clone of the sender for the canonical state notifications. - pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender { - self.canon_state_notification_sender.clone() - } -} - -impl BlockchainTree -where - N: TreeNodeTypes, - E: BlockExecutorProvider, -{ - /// Builds the blockchain tree for the node. - /// - /// This method configures the blockchain tree, which is a critical component of the node, - /// responsible for managing the blockchain state, including blocks, transactions, and receipts. - /// It integrates with the consensus mechanism and the EVM for executing transactions. - /// - /// # Parameters - /// - `externals`: External components required by the blockchain tree: - /// - `provider_factory`: A factory for creating various blockchain-related providers, such - /// as for accessing the database or static files. - /// - `consensus`: The consensus configuration, which defines how the node reaches agreement - /// on the blockchain state with other nodes. - /// - `evm_config`: The EVM (Ethereum Virtual Machine) configuration, which affects how - /// smart contracts and transactions are executed. Proper validation of this configuration - /// is crucial for the correct execution of transactions. - /// - `tree_config`: Configuration for the blockchain tree, including any parameters that affect - /// its structure or performance. - pub fn new( - externals: TreeExternals, - config: BlockchainTreeConfig, - ) -> ProviderResult { - let max_reorg_depth = config.max_reorg_depth() as usize; - // The size of the broadcast is twice the maximum reorg depth, because at maximum reorg - // depth at least N blocks must be sent at once. - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(max_reorg_depth * 2); - - let last_canonical_hashes = - externals.fetch_latest_canonical_hashes(config.num_of_canonical_hashes() as usize)?; - - // If we haven't written the finalized block, assume it's zero - let last_finalized_block_number = - externals.fetch_latest_finalized_block_number()?.unwrap_or_default(); - - Ok(Self { - externals, - state: TreeState::new( - last_finalized_block_number, - last_canonical_hashes, - config.max_unconnected_blocks(), - ), - config, - canon_state_notification_sender, - sync_metrics_tx: None, - metrics: Default::default(), - }) - } - - /// Replaces the canon state notification sender. - /// - /// Caution: this will close any existing subscriptions to the previous sender. - #[doc(hidden)] - pub fn with_canon_state_notification_sender( - mut self, - canon_state_notification_sender: CanonStateNotificationSender, - ) -> Self { - self.canon_state_notification_sender = canon_state_notification_sender; - self - } - - /// Set the sync metric events sender. - /// - /// A transmitter for sending synchronization metrics. This is used for monitoring the node's - /// synchronization process with the blockchain network. - pub fn with_sync_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self { - self.sync_metrics_tx = Some(metrics_tx); - self - } - - /// Check if the block is known to blockchain tree or database and return its status. - /// - /// Function will check: - /// * if block is inside database returns [`BlockStatus::Valid`]. 
- /// * if block is inside buffer returns [`BlockStatus::Disconnected`]. - /// * if block is part of the canonical returns [`BlockStatus::Valid`]. - /// - /// Returns an error if - /// - an error occurred while reading from the database. - /// - the block is already finalized - pub(crate) fn is_block_known( - &self, - block: BlockNumHash, - ) -> Result, InsertBlockErrorKind> { - // check if block is canonical - if self.is_block_hash_canonical(&block.hash)? { - return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))); - } - - let last_finalized_block = self.block_indices().last_finalized_block(); - // check db if block is finalized. - if block.number <= last_finalized_block { - // check if block is inside database - if self.externals.provider_factory.provider()?.block_number(block.hash)?.is_some() { - return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))); - } - - return Err(BlockchainTreeError::PendingBlockIsFinalized { - last_finalized: last_finalized_block, - } - .into()) - } - - // is block inside chain - if let Some(attachment) = self.is_block_inside_sidechain(&block) { - return Ok(Some(BlockStatus::Valid(attachment))); - } - - // check if block is disconnected - if let Some(block) = self.state.buffered_blocks.block(&block.hash) { - return Ok(Some(BlockStatus::Disconnected { - head: self.state.block_indices.canonical_tip(), - missing_ancestor: block.parent_num_hash(), - })) - } - - Ok(None) - } - - /// Expose internal indices of the `BlockchainTree`. - #[inline] - pub const fn block_indices(&self) -> &BlockIndices { - self.state.block_indices() - } - - /// Returns the block with matching hash from any side-chain. - /// - /// Caution: This will not return blocks from the canonical chain. - #[inline] - pub fn sidechain_block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> { - self.state.block_by_hash(block_hash) - } - - /// Returns the block with matching hash from any side-chain. - /// - /// Caution: This will not return blocks from the canonical chain. - #[inline] - pub fn block_with_senders_by_hash( - &self, - block_hash: BlockHash, - ) -> Option<&SealedBlockWithSenders> { - self.state.block_with_senders_by_hash(block_hash) - } - - /// Returns the block's receipts with matching hash from any side-chain. - /// - /// Caution: This will not return blocks from the canonical chain. - pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { - self.state.receipts_by_block_hash(block_hash) - } - - /// Returns the block that's considered the `Pending` block, if it exists. - pub fn pending_block(&self) -> Option<&SealedBlock> { - let b = self.block_indices().pending_block_num_hash()?; - self.sidechain_block_by_hash(b.hash) - } - - /// Return items needed to execute on the pending state. - /// This includes: - /// * `BlockHash` of canonical block that chain connects to. Needed for creating database - /// provider for the rest of the state. - /// * `BundleState` changes that happened at the asked `block_hash` - /// * `BTreeMap` list of past pending and canonical hashes, That are - /// needed for evm `BLOCKHASH` opcode. - /// Return none if: - /// * block unknown. - /// * `chain_id` not present in state. - /// * there are no parent hashes stored. 
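// Implementation note: the lookups below mirror the doc above. The hash is
// first resolved to a sidechain to assemble its execution outcome and parent
// hashes (topped up with canonical hashes below the first pending block);
// otherwise it is treated as a canonical block with an empty execution
// outcome.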
- pub fn post_state_data(&self, block_hash: BlockHash) -> Option { - trace!(target: "blockchain_tree", ?block_hash, "Searching for post state data"); - - let canonical_chain = self.state.block_indices.canonical_chain(); - - // if it is part of the chain - if let Some(chain_id) = self.block_indices().get_side_chain_id(&block_hash) { - trace!(target: "blockchain_tree", ?block_hash, "Constructing post state data based on non-canonical chain"); - // get block state - let Some(chain) = self.state.chains.get(&chain_id) else { - debug!(target: "blockchain_tree", ?chain_id, "Chain with ID not present"); - return None; - }; - let block_number = chain.block_number(block_hash)?; - let execution_outcome = chain.execution_outcome_at_block(block_number)?; - - // get parent hashes - let mut parent_block_hashes = self.all_chain_hashes(chain_id); - let Some((first_pending_block_number, _)) = parent_block_hashes.first_key_value() - else { - debug!(target: "blockchain_tree", ?chain_id, "No block hashes stored"); - return None; - }; - let canonical_chain = canonical_chain - .iter() - .filter(|&(key, _)| &key < first_pending_block_number) - .collect::>(); - parent_block_hashes.extend(canonical_chain); - - // get canonical fork. - let canonical_fork = self.canonical_fork(chain_id)?; - return Some(ExecutionData { execution_outcome, parent_block_hashes, canonical_fork }); - } - - // check if there is canonical block - if let Some(canonical_number) = canonical_chain.canonical_number(&block_hash) { - trace!(target: "blockchain_tree", %block_hash, "Constructing post state data based on canonical chain"); - return Some(ExecutionData { - canonical_fork: ForkBlock { number: canonical_number, hash: block_hash }, - execution_outcome: ExecutionOutcome::default(), - parent_block_hashes: canonical_chain.inner().clone(), - }); - } - - None - } - - /// Try inserting a validated [Self::validate_block] block inside the tree. - /// - /// If the block's parent block is unknown, this returns [`BlockStatus::Disconnected`] and the - /// block will be buffered until the parent block is inserted and then attached to sidechain - #[instrument(level = "trace", skip_all, fields(block = ?block.num_hash()), target = "blockchain_tree", ret)] - fn try_insert_validated_block( - &mut self, - block: SealedBlockWithSenders, - block_validation_kind: BlockValidationKind, - ) -> Result { - debug_assert!(self.validate_block(&block).is_ok(), "Block must be validated"); - - let parent = block.parent_num_hash(); - - // check if block parent can be found in any side chain. - if let Some(chain_id) = self.block_indices().get_side_chain_id(&parent.hash) { - // found parent in side tree, try to insert there - return self.try_insert_block_into_side_chain(block, chain_id, block_validation_kind); - } - - // if not found, check if the parent can be found inside canonical chain. - if self.is_block_hash_canonical(&parent.hash)? { - return self.try_append_canonical_chain(block.clone(), block_validation_kind); - } - - // this is another check to ensure that if the block points to a canonical block its block - // is valid - if let Some(canonical_parent_number) = - self.block_indices().canonical_number(&block.parent_hash) - { - // we found the parent block in canonical chain - if canonical_parent_number != parent.number { - return Err(ConsensusError::ParentBlockNumberMismatch { - parent_block_number: canonical_parent_number, - block_number: block.number, - } - .into()) - } - } - - // if there is a parent inside the buffer, validate against it. 
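Before the buffered-parent validation below runs, the routing above has already been tried in a fixed order. A minimal sketch of that order (hypothetical names; integers stand in for hashes):

use std::collections::{HashMap, HashSet};

#[derive(Debug, PartialEq)]
enum Route {
    ExtendSidechain(u32),
    ForkCanonical,
    Buffer,
}

// The parent is looked up in a fixed order: a sidechain parent wins, then a
// canonical parent; with neither, the block can only be buffered.
fn route_block(
    parent_hash: u64,
    sidechain_of: &HashMap<u64, u32>,
    canonical: &HashSet<u64>,
) -> Route {
    if let Some(chain_id) = sidechain_of.get(&parent_hash) {
        Route::ExtendSidechain(*chain_id)
    } else if canonical.contains(&parent_hash) {
        Route::ForkCanonical
    } else {
        Route::Buffer
    }
}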
- if let Some(buffered_parent) = self.state.buffered_blocks.block(&parent.hash) { - self.externals.consensus.validate_header_against_parent(&block, buffered_parent)?; - } - - // insert block inside unconnected block buffer. Delaying its execution. - self.state.buffered_blocks.insert_block(block.clone()); - - let block_hash = block.hash(); - // find the lowest ancestor of the block in the buffer to return as the missing parent - // this shouldn't return None because that only happens if the block was evicted, which - // shouldn't happen right after insertion - let lowest_ancestor = self - .state - .buffered_blocks - .lowest_ancestor(&block_hash) - .ok_or(BlockchainTreeError::BlockBufferingFailed { block_hash })?; - - Ok(BlockStatus::Disconnected { - head: self.state.block_indices.canonical_tip(), - missing_ancestor: lowest_ancestor.parent_num_hash(), - }) - } - - /// This tries to append the given block to the canonical chain. - /// - /// WARNING: this expects that the block extends the canonical chain: The block's parent is - /// part of the canonical chain (e.g. the block's parent is the latest canonical hash). See also - /// [Self::is_block_hash_canonical]. - #[instrument(level = "trace", skip_all, target = "blockchain_tree")] - fn try_append_canonical_chain( - &mut self, - block: SealedBlockWithSenders, - block_validation_kind: BlockValidationKind, - ) -> Result { - let parent = block.parent_num_hash(); - let block_num_hash = block.num_hash(); - debug!(target: "blockchain_tree", head = ?block_num_hash.hash, ?parent, "Appending block to canonical chain"); - - let provider = self.externals.provider_factory.provider()?; - - // Validate that the block is post merge - let parent_td = provider - .header_td(&block.parent_hash)? - .ok_or_else(|| BlockchainTreeError::CanonicalChain { block_hash: block.parent_hash })?; - - if !self - .externals - .provider_factory - .chain_spec() - .fork(EthereumHardfork::Paris) - .active_at_ttd(parent_td, U256::ZERO) - { - return Err(BlockExecutionError::Validation(BlockValidationError::BlockPreMerge { - hash: block.hash(), - }) - .into()) - } - - let parent_header = provider - .header(&block.parent_hash)? - .ok_or_else(|| BlockchainTreeError::CanonicalChain { block_hash: block.parent_hash })?; - - let parent_sealed_header = SealedHeader::new(parent_header, block.parent_hash); - - let canonical_chain = self.state.block_indices.canonical_chain(); - - let block_attachment = if block.parent_hash == canonical_chain.tip().hash { - BlockAttachment::Canonical - } else { - BlockAttachment::HistoricalFork - }; - - let chain = AppendableChain::new_canonical_fork( - block, - &parent_sealed_header, - canonical_chain.inner(), - parent, - &self.externals, - block_attachment, - block_validation_kind, - )?; - - self.insert_chain(chain); - self.try_connect_buffered_blocks(block_num_hash); - - Ok(BlockStatus::Valid(block_attachment)) - } - - /// Try inserting a block into the given side chain. 
- /// - /// WARNING: This expects a valid side chain id, see [BlockIndices::get_side_chain_id] - #[instrument(level = "trace", skip_all, target = "blockchain_tree")] - fn try_insert_block_into_side_chain( - &mut self, - block: SealedBlockWithSenders, - chain_id: SidechainId, - block_validation_kind: BlockValidationKind, - ) -> Result { - let block_num_hash = block.num_hash(); - debug!(target: "blockchain_tree", ?block_num_hash, ?chain_id, "Inserting block into side chain"); - // Create a new sidechain by forking the given chain, or append the block if the parent - // block is the top of the given chain. - let block_hashes = self.all_chain_hashes(chain_id); - - // get canonical fork. - let canonical_fork = self.canonical_fork(chain_id).ok_or_else(|| { - BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() } - })?; - - // get chain that block needs to join to. - let parent_chain = self.state.chains.get_mut(&chain_id).ok_or_else(|| { - BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() } - })?; - - let chain_tip = parent_chain.tip().hash(); - let canonical_chain = self.state.block_indices.canonical_chain(); - - // append the block if it is continuing the side chain. - let block_attachment = if chain_tip == block.parent_hash { - // check if the chain extends the currently tracked canonical head - let block_attachment = if canonical_fork.hash == canonical_chain.tip().hash { - BlockAttachment::Canonical - } else { - BlockAttachment::HistoricalFork - }; - - let block_hash = block.hash(); - let block_number = block.number; - debug!(target: "blockchain_tree", ?block_hash, ?block_number, "Appending block to side chain"); - parent_chain.append_block( - block, - block_hashes, - canonical_chain.inner(), - &self.externals, - canonical_fork, - block_attachment, - block_validation_kind, - )?; - - self.state.block_indices.insert_non_fork_block(block_number, block_hash, chain_id); - block_attachment - } else { - debug!(target: "blockchain_tree", ?canonical_fork, "Starting new fork from side chain"); - // the block starts a new fork - let chain = parent_chain.new_chain_fork( - block, - block_hashes, - canonical_chain.inner(), - canonical_fork, - &self.externals, - block_validation_kind, - )?; - self.insert_chain(chain); - BlockAttachment::HistoricalFork - }; - - // After we inserted the block, we try to connect any buffered blocks - self.try_connect_buffered_blocks(block_num_hash); - - Ok(BlockStatus::Valid(block_attachment)) - } - - /// Get all block hashes from a sidechain that are not part of the canonical chain. - /// This is a one time operation per block. - /// - /// # Note - /// - /// This is not cached in order to save memory. - fn all_chain_hashes(&self, chain_id: SidechainId) -> BTreeMap { - let mut chain_id = chain_id; - let mut hashes = BTreeMap::new(); - loop { - let Some(chain) = self.state.chains.get(&chain_id) else { return hashes }; - - // The parent chains might contain blocks with overlapping numbers or numbers greater - // than original chain tip. Insert the block hash only if it's not present - // for the given block number and the block number does not exceed the - // original chain tip. 
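The `Entry::Vacant` guard in the next lines is what enforces that rule: the child chain's hash, inserted first, wins over any overlapping parent-chain hash. A runnable miniature of that first-wins insertion (illustrative values only):

use std::collections::{btree_map::Entry, BTreeMap};

fn main() {
    let mut hashes: BTreeMap<u64, &str> = BTreeMap::new();
    // The child chain's hash for block 3 is recorded first...
    hashes.insert(3, "child-chain-hash");
    // ...so a parent chain's overlapping block 3 must not overwrite it.
    if let Entry::Vacant(e) = hashes.entry(3) {
        e.insert("parent-chain-hash");
    }
    assert_eq!(hashes[&3], "child-chain-hash");
}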
-            let latest_block_number = hashes
-                .last_key_value()
-                .map(|(number, _)| *number)
-                .unwrap_or_else(|| chain.tip().number);
-            for block in chain.blocks().values().filter(|b| b.number <= latest_block_number) {
-                if let Entry::Vacant(e) = hashes.entry(block.number) {
-                    e.insert(block.hash());
-                }
-            }
-
-            let fork_block = chain.fork_block();
-            if let Some(next_chain_id) = self.block_indices().get_side_chain_id(&fork_block.hash) {
-                chain_id = next_chain_id;
-            } else {
-                // if there is no fork block that points to another chain, break the loop:
-                // this fork connects to a canonical block.
-                break
-            }
-        }
-        hashes
-    }
-
-    /// Get the block at which the given chain forks off the current canonical chain.
-    ///
-    /// This is used to figure out what kind of state provider the executor should use to execute
-    /// the block on top of.
-    ///
-    /// Returns `None` if the chain is unknown.
-    fn canonical_fork(&self, chain_id: SidechainId) -> Option<ForkBlock> {
-        let mut chain_id = chain_id;
-        let mut fork;
-        loop {
-            // chain fork block
-            fork = self.state.chains.get(&chain_id)?.fork_block();
-            // get fork block chain
-            if let Some(fork_chain_id) = self.block_indices().get_side_chain_id(&fork.hash) {
-                chain_id = fork_chain_id;
-                continue
-            }
-            break
-        }
-        (self.block_indices().canonical_hash(&fork.number) == Some(fork.hash)).then_some(fork)
-    }
-
-    /// Insert a chain into the tree.
-    ///
-    /// Inserts a chain into the tree and builds the block indices.
-    fn insert_chain(&mut self, chain: AppendableChain) -> Option<SidechainId> {
-        self.state.insert_chain(chain)
-    }
-
-    /// Iterate over all child chains that depend on this block and return
-    /// their ids.
-    fn find_all_dependent_chains(&self, block: &BlockHash) -> HashSet<SidechainId> {
-        // Find all forks of given block.
-        let mut dependent_block =
-            self.block_indices().fork_to_child().get(block).cloned().unwrap_or_default();
-        let mut dependent_chains = HashSet::default();
-
-        while let Some(block) = dependent_block.pop_back() {
-            // Get chain of dependent block.
-            let Some(chain_id) = self.block_indices().get_side_chain_id(&block) else {
-                debug!(target: "blockchain_tree", ?block, "Block not in tree");
-                return Default::default();
-            };
-
-            // Find all blocks that fork from this chain.
-            let Some(chain) = self.state.chains.get(&chain_id) else {
-                debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree");
-                return Default::default();
-            };
-            for chain_block in chain.blocks().values() {
-                if let Some(forks) = self.block_indices().fork_to_child().get(&chain_block.hash()) {
-                    // If there are sub forks append them for processing.
-                    dependent_block.extend(forks);
-                }
-            }
-            // Insert dependent chain id.
-            dependent_chains.insert(chain_id);
-        }
-        dependent_chains
-    }
-
-    /// Inserts unwound chain back into the tree and updates any dependent chains.
-    ///
-    /// This method searches for any chain that depended on this block being part of the canonical
-    /// chain. Each dependent chain's state is then updated with state entries removed from the
-    /// plain state during the unwind.
-    /// Returns the result of inserting the chain, or `None` if any of the dependent chains is not
-    /// in the tree.
-    fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option<SidechainId> {
-        // iterate over all blocks in chain and find any fork blocks that are in tree.
-        for (number, block) in chain.blocks() {
-            let hash = block.hash();
-
-            // find all chains that fork from this block.
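The same fork-walking shape appears in `canonical_fork` above: follow fork blocks from sidechain to sidechain until one is no longer claimed by the tree, then confirm it is canonical. A self-contained model of the walk, using plain maps and integer hashes (all names hypothetical):

use std::collections::HashMap;

// (fork_number, fork_hash) of the block a sidechain branched from.
type Fork = (u64, u64);

// Walk parent sidechains until a fork block that no sidechain claims, then
// confirm that block is actually on the canonical chain.
fn canonical_fork_of(
    mut chain_id: u32,
    fork_of: &HashMap<u32, Fork>,
    sidechain_by_hash: &HashMap<u64, u32>,
    canonical_hash_at: &HashMap<u64, u64>,
) -> Option<Fork> {
    let mut fork;
    loop {
        fork = *fork_of.get(&chain_id)?;
        match sidechain_by_hash.get(&fork.1) {
            Some(next) => chain_id = *next,
            None => break,
        }
    }
    (canonical_hash_at.get(&fork.0) == Some(&fork.1)).then_some(fork)
}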
- let chains_to_bump = self.find_all_dependent_chains(&hash); - if !chains_to_bump.is_empty() { - // if there is such chain, revert state to this block. - let mut cloned_execution_outcome = chain.execution_outcome().clone(); - cloned_execution_outcome.revert_to(*number); - - // prepend state to all chains that fork from this block. - for chain_id in chains_to_bump { - let Some(chain) = self.state.chains.get_mut(&chain_id) else { - debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree"); - return None; - }; - - debug!(target: "blockchain_tree", - unwound_block= ?block.num_hash(), - chain_id = ?chain_id, - chain_tip = ?chain.tip().num_hash(), - "Prepend unwound block state to blockchain tree chain"); - - chain.prepend_state(cloned_execution_outcome.state().clone()) - } - } - } - // Insert unwound chain to the tree. - self.insert_chain(chain) - } - - /// Checks the block buffer for the given block. - pub fn get_buffered_block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { - self.state.get_buffered_block(hash) - } - - /// Gets the lowest ancestor for the given block in the block buffer. - pub fn lowest_buffered_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { - self.state.lowest_buffered_ancestor(hash) - } - - /// Insert a new block into the tree. - /// - /// # Note - /// - /// This recovers transaction signers (unlike [`BlockchainTree::insert_block`]). - pub fn insert_block_without_senders( - &mut self, - block: SealedBlock, - ) -> Result { - match block.try_seal_with_senders() { - Ok(block) => self.insert_block(block, BlockValidationKind::Exhaustive), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), - } - } - - /// Insert block for future execution. - /// - /// Returns an error if the block is invalid. - pub fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { - // validate block consensus rules - if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.block)); - } - - self.state.buffered_blocks.insert_block(block); - Ok(()) - } - - /// Validate if block is correct and satisfies all the consensus rules that concern the header - /// and block body itself. - fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { - if let Err(e) = - self.externals.consensus.validate_header_with_total_difficulty(block, U256::MAX) - { - error!(?block, "Failed to validate total difficulty for block {}: {e}", block.hash()); - return Err(e); - } - - if let Err(e) = self.externals.consensus.validate_header(block) { - error!(?block, "Failed to validate header {}: {e}", block.hash()); - return Err(e); - } - - if let Err(e) = self.externals.consensus.validate_block_pre_execution(block) { - error!(?block, "Failed to validate block {}: {e}", block.hash()); - return Err(e); - } - - Ok(()) - } - - /// Check if block is found inside a sidechain and its attachment. 
- /// - /// if it is canonical or extends the canonical chain, return [`BlockAttachment::Canonical`] - /// if it does not extend the canonical chain, return [`BlockAttachment::HistoricalFork`] - /// if the block is not in the tree or its chain id is not valid, return None - #[track_caller] - fn is_block_inside_sidechain(&self, block: &BlockNumHash) -> Option { - // check if block known and is already in the tree - if let Some(chain_id) = self.block_indices().get_side_chain_id(&block.hash) { - // find the canonical fork of this chain - let Some(canonical_fork) = self.canonical_fork(chain_id) else { - debug!(target: "blockchain_tree", chain_id=?chain_id, block=?block.hash, "Chain id not valid"); - return None; - }; - // if the block's chain extends canonical chain - return if canonical_fork == self.block_indices().canonical_tip() { - Some(BlockAttachment::Canonical) - } else { - Some(BlockAttachment::HistoricalFork) - }; - } - None - } - - /// Insert a block (with recovered senders) into the tree. - /// - /// Returns the [`BlockStatus`] on success: - /// - /// - The block is already part of a sidechain in the tree, or - /// - The block is already part of the canonical chain, or - /// - The parent is part of a sidechain in the tree, and we can fork at this block, or - /// - The parent is part of the canonical chain, and we can fork at this block - /// - /// Otherwise, an error is returned, indicating that neither the block nor its parent are part - /// of the chain or any sidechains. - /// - /// This means that if the block becomes canonical, we need to fetch the missing blocks over - /// P2P. - /// - /// If the [`BlockValidationKind::SkipStateRootValidation`] variant is provided the state root - /// is not validated. - /// - /// # Note - /// - /// If the senders have not already been recovered, call - /// [`BlockchainTree::insert_block_without_senders`] instead. - pub fn insert_block( - &mut self, - block: SealedBlockWithSenders, - block_validation_kind: BlockValidationKind, - ) -> Result { - // check if we already have this block - match self.is_block_known(block.num_hash()) { - Ok(Some(status)) => return Ok(InsertPayloadOk::AlreadySeen(status)), - Err(err) => return Err(InsertBlockError::new(block.block, err)), - _ => {} - } - - // validate block consensus rules - if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.block)); - } - - let status = self - .try_insert_validated_block(block.clone(), block_validation_kind) - .map_err(|kind| InsertBlockError::new(block.block, kind))?; - Ok(InsertPayloadOk::Inserted(status)) - } - - /// Discard all blocks that precede block number from the buffer. - pub fn remove_old_blocks(&mut self, block: BlockNumber) { - self.state.buffered_blocks.remove_old_blocks(block); - } - - /// Finalize blocks up until and including `finalized_block`, and remove them from the tree. - pub fn finalize_block(&mut self, finalized_block: BlockNumber) -> ProviderResult<()> { - // remove blocks - let mut remove_chains = self.state.block_indices.finalize_canonical_blocks( - finalized_block, - self.config.num_of_additional_canonical_block_hashes(), - ); - // remove chains of removed blocks - while let Some(chain_id) = remove_chains.pop_first() { - if let Some(chain) = self.state.chains.remove(&chain_id) { - remove_chains.extend(self.state.block_indices.remove_chain(&chain)); - } - } - // clean block buffer. - self.remove_old_blocks(finalized_block); - - // save finalized block in db. 
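`finalize_block` drains `remove_chains` as a worklist: removing one chain re-queues every chain that forked off it, so the whole subtree of side chains is dropped, not just the direct forks. A standalone model of that transitive removal, with plain integers for chain ids (names hypothetical):

use std::collections::{BTreeSet, HashMap};

fn main() {
    // chain id -> ids of chains that fork off it (hypothetical shape).
    let children: HashMap<u32, Vec<u32>> =
        HashMap::from([(1, vec![2, 3]), (2, vec![4]), (3, vec![]), (4, vec![])]);
    let mut to_remove = BTreeSet::from([1]);
    let mut removed = Vec::new();

    // Popping re-queues each removed chain's descendants.
    while let Some(id) = to_remove.pop_first() {
        removed.push(id);
        if let Some(kids) = children.get(&id) {
            to_remove.extend(kids);
        }
    }
    assert_eq!(removed, vec![1, 2, 3, 4]);
}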
-        self.externals.save_finalized_block_number(finalized_block)?;
-
-        Ok(())
-    }
-
-    /// Reads the last `N` canonical hashes from the database and updates the block indices of the
-    /// tree by attempting to connect the buffered blocks to canonical hashes.
-    ///
-    /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the
-    /// `BLOCKHASH` opcode in the EVM.
-    ///
-    /// # Note
-    ///
-    /// This finalizes `last_finalized_block` prior to reading the canonical hashes (using
-    /// [`BlockchainTree::finalize_block`]).
-    pub fn connect_buffered_blocks_to_canonical_hashes_and_finalize(
-        &mut self,
-        last_finalized_block: BlockNumber,
-    ) -> ProviderResult<()> {
-        self.finalize_block(last_finalized_block)?;
-
-        let last_canonical_hashes = self.update_block_hashes()?;
-
-        self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?;
-
-        Ok(())
-    }
-
-    /// Updates all block hashes. Iterates over the present and new lists of canonical hashes and
-    /// compares them. Removes all mismatches, disconnects them and removes all affected chains.
-    pub fn update_block_hashes(&mut self) -> ProviderResult<BTreeMap<BlockNumber, BlockHash>> {
-        let last_canonical_hashes = self
-            .externals
-            .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?;
-
-        let (mut remove_chains, _) =
-            self.state.block_indices.update_block_hashes(last_canonical_hashes.clone());
-
-        // remove all chains that got discarded
-        while let Some(chain_id) = remove_chains.pop_first() {
-            if let Some(chain) = self.state.chains.remove(&chain_id) {
-                remove_chains.extend(self.state.block_indices.remove_chain(&chain));
-            }
-        }
-
-        Ok(last_canonical_hashes)
-    }
-
-    /// Updates all block hashes. Iterates over the present and new lists of canonical hashes and
-    /// compares them. Removes all mismatches, disconnects them, removes all affected chains and
-    /// clears all buffered blocks before the tip.
-    pub fn update_block_hashes_and_clear_buffered(
-        &mut self,
-    ) -> ProviderResult<BTreeMap<BlockNumber, BlockHash>> {
-        let chain = self.update_block_hashes()?;
-
-        if let Some((block, _)) = chain.last_key_value() {
-            self.remove_old_blocks(*block);
-        }
-
-        Ok(chain)
-    }
-
-    /// Reads the last `N` canonical hashes from the database and updates the block indices of the
-    /// tree by attempting to connect the buffered blocks to canonical hashes.
-    ///
-    /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the
-    /// `BLOCKHASH` opcode in the EVM.
-    pub fn connect_buffered_blocks_to_canonical_hashes(&mut self) -> ProviderResult<()> {
-        let last_canonical_hashes = self
-            .externals
-            .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?;
-        self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?;
-
-        Ok(())
-    }
-
-    fn connect_buffered_blocks_to_hashes(
-        &mut self,
-        hashes: impl IntoIterator<Item = impl Into<BlockNumHash>>,
-    ) -> ProviderResult<()> {
-        // check unconnected block buffer for children of the canonical hashes
-        for added_block in hashes {
-            self.try_connect_buffered_blocks(added_block.into())
-        }
-
-        // check unconnected block buffer for children of the chains
-        let mut all_chain_blocks = Vec::new();
-        for chain in self.state.chains.values() {
-            all_chain_blocks.reserve_exact(chain.blocks().len());
-            for (&number, block) in chain.blocks() {
-                all_chain_blocks.push(BlockNumHash { number, hash: block.hash() })
-            }
-        }
-        for block in all_chain_blocks {
-            self.try_connect_buffered_blocks(block)
-        }
-
-        Ok(())
-    }
-
-    /// Connect unconnected (buffered) blocks if the new block closes a gap.
- /// - /// This will try to insert all children of the new block, extending its chain. - /// - /// If all children are valid, then this essentially appends all child blocks to the - /// new block's chain. - fn try_connect_buffered_blocks(&mut self, new_block: BlockNumHash) { - trace!(target: "blockchain_tree", ?new_block, "try_connect_buffered_blocks"); - - // first remove all the children of the new block from the buffer - let include_blocks = self.state.buffered_blocks.remove_block_with_children(&new_block.hash); - // then try to reinsert them into the tree - for block in include_blocks { - // don't fail on error, just ignore the block. - let _ = self - .try_insert_validated_block(block, BlockValidationKind::SkipStateRootValidation) - .map_err(|err| { - debug!(target: "blockchain_tree", %err, "Failed to insert buffered block"); - err - }); - } - } - - /// Removes chain corresponding to provided chain id from block indices, - /// splits it at split target, and returns the canonical part of it. - /// Returns [None] if chain is missing. - /// - /// The pending part of the chain is reinserted back into the tree with the same `chain_id`. - fn remove_and_split_chain( - &mut self, - chain_id: SidechainId, - split_at: ChainSplitTarget, - ) -> Option { - let chain = self.state.chains.remove(&chain_id)?; - match chain.into_inner().split(split_at) { - ChainSplit::Split { canonical, pending } => { - trace!(target: "blockchain_tree", ?canonical, ?pending, "Split chain"); - // rest of split chain is inserted back with same chain_id. - self.state.block_indices.insert_chain(chain_id, &pending); - self.state.chains.insert(chain_id, AppendableChain::new(pending)); - Some(canonical) - } - ChainSplit::NoSplitCanonical(canonical) => { - trace!(target: "blockchain_tree", "No split on canonical chain"); - Some(canonical) - } - ChainSplit::NoSplitPending(_) => { - unreachable!("Should not happen as block indices guarantee structure of blocks") - } - } - } - - /// Attempts to find the header for the given block hash if it is canonical. - /// - /// Returns `Ok(None)` if the block hash is not canonical (block hash does not exist, or is - /// included in a sidechain). - /// - /// Note: this does not distinguish between a block that is finalized and a block that is not - /// finalized yet, only whether it is part of the canonical chain or not. - pub fn find_canonical_header( - &self, - hash: &BlockHash, - ) -> Result, ProviderError> { - // if the indices show that the block hash is not canonical, it's either in a sidechain or - // canonical, but in the db. If it is in a sidechain, it is not canonical. If it is missing - // in the db, then it is also not canonical. - - let provider = self.externals.provider_factory.provider()?; - - let mut header = None; - if let Some(num) = self.block_indices().canonical_number(hash) { - header = provider.header_by_number(num)?; - } - - if header.is_none() && self.sidechain_block_by_hash(*hash).is_some() { - return Ok(None) - } - - if header.is_none() { - header = provider.header(hash)? - } - - Ok(header.map(|header| SealedHeader::new(header, *hash))) - } - - /// Determines whether or not a block is canonical, checking the db if necessary. - /// - /// Note: this does not distinguish between a block that is finalized and a block that is not - /// finalized yet, only whether it is part of the canonical chain or not. 
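A standalone model of that three-way lookup (canonical index, sidechain short-circuit, database fallback), with integers for hashes and block numbers (all names hypothetical):

use std::collections::{HashMap, HashSet};

// Mirrors the lookup order of `find_canonical_header`.
fn find_canonical_number(
    hash: u64,
    canonical_index: &HashMap<u64, u64>,
    sidechain_blocks: &HashSet<u64>,
    database: &HashMap<u64, u64>,
) -> Option<u64> {
    if let Some(number) = canonical_index.get(&hash) {
        return Some(*number);
    }
    // A block that lives in a sidechain is by definition not canonical, so
    // the database is not consulted for it.
    if sidechain_blocks.contains(&hash) {
        return None;
    }
    database.get(&hash).copied()
}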
- pub fn is_block_hash_canonical(&self, hash: &BlockHash) -> Result { - self.find_canonical_header(hash).map(|header| header.is_some()) - } - - /// Make a block and its parent(s) part of the canonical chain and commit them to the database - /// - /// # Note - /// - /// This unwinds the database if necessary, i.e. if parts of the canonical chain have been - /// reorged. - /// - /// # Returns - /// - /// Returns `Ok` if the blocks were canonicalized, or if the blocks were already canonical. - #[track_caller] - #[instrument(level = "trace", skip(self), target = "blockchain_tree")] - pub fn make_canonical( - &mut self, - block_hash: BlockHash, - ) -> Result { - let mut durations_recorder = MakeCanonicalDurationsRecorder::default(); - - let old_block_indices = self.block_indices().clone(); - let old_buffered_blocks = self.state.buffered_blocks.parent_to_child.clone(); - durations_recorder.record_relative(MakeCanonicalAction::CloneOldBlocks); - - // If block is already canonical don't return error. - let canonical_header = self.find_canonical_header(&block_hash)?; - durations_recorder.record_relative(MakeCanonicalAction::FindCanonicalHeader); - if let Some(header) = canonical_header { - info!(target: "blockchain_tree", %block_hash, "Block is already canonical, ignoring."); - // TODO: this could be fetched from the chainspec first - let td = - self.externals.provider_factory.provider()?.header_td(&block_hash)?.ok_or_else( - || { - CanonicalError::from(BlockValidationError::MissingTotalDifficulty { - hash: block_hash, - }) - }, - )?; - - if !self - .externals - .provider_factory - .chain_spec() - .fork(EthereumHardfork::Paris) - .active_at_ttd(td, U256::ZERO) - { - return Err(CanonicalError::from(BlockValidationError::BlockPreMerge { - hash: block_hash, - })) - } - - let head = self.state.block_indices.canonical_tip(); - return Ok(CanonicalOutcome::AlreadyCanonical { header, head }); - } - - let Some(chain_id) = self.block_indices().get_side_chain_id(&block_hash) else { - debug!(target: "blockchain_tree", ?block_hash, "Block hash not found in block indices"); - return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { - block_hash, - })) - }; - - // we are splitting chain at the block hash that we want to make canonical - let Some(canonical) = self.remove_and_split_chain(chain_id, block_hash.into()) else { - debug!(target: "blockchain_tree", ?block_hash, ?chain_id, "Chain not present"); - return Err(CanonicalError::from(BlockchainTreeError::BlockSideChainIdConsistency { - chain_id: chain_id.into(), - })) - }; - trace!(target: "blockchain_tree", chain = ?canonical, "Found chain to make canonical"); - durations_recorder.record_relative(MakeCanonicalAction::SplitChain); - - let mut fork_block = canonical.fork_block(); - let mut chains_to_promote = vec![canonical]; - - // loop while fork blocks are found in Tree. - while let Some(chain_id) = self.block_indices().get_side_chain_id(&fork_block.hash) { - // canonical chain is lower part of the chain. 
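The loop being entered here repeatedly splits the lower, to-be-canonical part off each sidechain and follows its fork block downward. A runnable miniature of that gathering step (plain integers for hashes; names hypothetical):

use std::collections::HashMap;

fn main() {
    // fork hash -> (blocks of the chain above that fork, hash of its own fork block)
    let mut chains: HashMap<u64, (Vec<u64>, u64)> =
        HashMap::from([(0xAA, (vec![3, 4], 0xBB)), (0xBB, (vec![1, 2], 0x00))]);

    // Walk fork blocks downward, splitting off every chain segment that must
    // become canonical, until the fork no longer points into the tree.
    let mut fork_hash = 0xAA;
    let mut chains_to_promote = Vec::new();
    while let Some((segment, next_fork)) = chains.remove(&fork_hash) {
        chains_to_promote.push(segment);
        fork_hash = next_fork;
    }

    // Chains are later merged in reverse, lowest block numbers first.
    chains_to_promote.reverse();
    assert_eq!(chains_to_promote, vec![vec![1, 2], vec![3, 4]]);
}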
- let Some(canonical) = - self.remove_and_split_chain(chain_id, ChainSplitTarget::Number(fork_block.number)) - else { - debug!(target: "blockchain_tree", ?fork_block, ?chain_id, "Fork not present"); - return Err(CanonicalError::from( - BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() }, - )); - }; - fork_block = canonical.fork_block(); - chains_to_promote.push(canonical); - } - durations_recorder.record_relative(MakeCanonicalAction::SplitChainForks); - - let old_tip = self.block_indices().canonical_tip(); - // Merge all chains into one chain. - let Some(mut new_canon_chain) = chains_to_promote.pop() else { - debug!(target: "blockchain_tree", "No blocks in the chain to make canonical"); - return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { - block_hash: fork_block.hash, - })) - }; - trace!(target: "blockchain_tree", ?new_canon_chain, "Merging chains"); - let mut chain_appended = false; - for chain in chains_to_promote.into_iter().rev() { - trace!(target: "blockchain_tree", ?chain, "Appending chain"); - let block_hash = chain.fork_block().hash; - new_canon_chain.append_chain(chain).map_err(|_| { - CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }) - })?; - chain_appended = true; - } - durations_recorder.record_relative(MakeCanonicalAction::MergeAllChains); - - if chain_appended { - trace!(target: "blockchain_tree", ?new_canon_chain, "Canonical chain appended"); - } - // update canonical index - self.state.block_indices.canonicalize_blocks(new_canon_chain.blocks()); - durations_recorder.record_relative(MakeCanonicalAction::UpdateCanonicalIndex); - - debug!( - target: "blockchain_tree", - "Committing new canonical chain: {}", DisplayBlocksChain(new_canon_chain.blocks()) - ); - - // If chain extends the tip - let chain_notification = if new_canon_chain.fork_block().hash == old_tip.hash { - // Commit new canonical chain to database. - self.commit_canonical_to_database(new_canon_chain.clone(), &mut durations_recorder)?; - CanonStateNotification::Commit { new: Arc::new(new_canon_chain) } - } else { - // It forks to canonical block that is not the tip. - let canon_fork: BlockNumHash = new_canon_chain.fork_block(); - // sanity check - if self.block_indices().canonical_hash(&canon_fork.number) != Some(canon_fork.hash) { - error!( - target: "blockchain_tree", - ?canon_fork, - block_indices=?self.block_indices(), - "All chains should point to canonical chain" - ); - unreachable!("all chains should point to canonical chain."); - } - - let old_canon_chain = - self.revert_canonical_from_database(canon_fork.number).inspect_err(|error| { - error!( - target: "blockchain_tree", - "Reverting canonical chain failed with error: {:?}\n\ - Old BlockIndices are:{:?}\n\ - New BlockIndices are: {:?}\n\ - Old BufferedBlocks are:{:?}", - error, old_block_indices, self.block_indices(), old_buffered_blocks - ); - })?; - durations_recorder - .record_relative(MakeCanonicalAction::RevertCanonicalChainFromDatabase); - - // Commit new canonical chain. - self.commit_canonical_to_database(new_canon_chain.clone(), &mut durations_recorder)?; - - if let Some(old_canon_chain) = old_canon_chain { - self.update_reorg_metrics(old_canon_chain.len() as f64); - - // Insert old canonical chain back into tree. 
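The branch being closed here decides which notification subscribers receive: an extension of the old tip commits, anything else implies a database unwind and a reorg. A compact model of that decision (hypothetical names):

#[derive(Debug, PartialEq)]
enum Notification {
    Commit { new_tip: u64 },
    Reorg { old_tip: u64, new_tip: u64 },
}

// If the promoted chain forks exactly at the old tip it is a pure extension
// (Commit); otherwise the canonical chain was unwound and this is a Reorg.
fn classify(fork_hash: u64, old_tip: u64, new_tip: u64) -> Notification {
    if fork_hash == old_tip {
        Notification::Commit { new_tip }
    } else {
        Notification::Reorg { old_tip, new_tip }
    }
}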
- self.insert_unwound_chain(AppendableChain::new(old_canon_chain.clone())); - durations_recorder.record_relative(MakeCanonicalAction::InsertOldCanonicalChain); - - CanonStateNotification::Reorg { - old: Arc::new(old_canon_chain), - new: Arc::new(new_canon_chain), - } - } else { - // error here to confirm that we are reverting nothing from db. - error!(target: "blockchain_tree", %block_hash, "Nothing was removed from database"); - CanonStateNotification::Commit { new: Arc::new(new_canon_chain) } - } - }; - - debug!( - target: "blockchain_tree", - actions = ?durations_recorder.actions, - "Canonicalization finished" - ); - - // clear trie updates for other children - self.block_indices() - .fork_to_child() - .get(&old_tip.hash) - .cloned() - .unwrap_or_default() - .into_iter() - .for_each(|child| { - if let Some(chain_id) = self.block_indices().get_side_chain_id(&child) { - if let Some(chain) = self.state.chains.get_mut(&chain_id) { - chain.clear_trie_updates(); - } - } - }); - - durations_recorder.record_relative(MakeCanonicalAction::ClearTrieUpdatesForOtherChildren); - - // Send notification about new canonical chain and return outcome of canonicalization. - let outcome = - CanonicalOutcome::Committed { head: chain_notification.tip().sealed_header().clone() }; - let _ = self.canon_state_notification_sender.send(chain_notification); - Ok(outcome) - } - - /// Write the given chain to the database as canonical. - fn commit_canonical_to_database( - &self, - chain: Chain, - recorder: &mut MakeCanonicalDurationsRecorder, - ) -> Result<(), CanonicalError> { - let (blocks, state, chain_trie_updates) = chain.into_inner(); - let hashed_state = self.externals.provider_factory.hashed_post_state(state.state()); - let prefix_sets = hashed_state.construct_prefix_sets().freeze(); - let hashed_state_sorted = hashed_state.into_sorted(); - - // Compute state root or retrieve cached trie updates before opening write transaction. - let block_hash_numbers = - blocks.iter().map(|(number, b)| (number, b.hash())).collect::>(); - let trie_updates = match chain_trie_updates { - Some(updates) => { - debug!(target: "blockchain_tree", blocks = ?block_hash_numbers, "Using cached trie updates"); - self.metrics.trie_updates_insert_cached.increment(1); - updates - } - None => { - debug!(target: "blockchain_tree", blocks = ?block_hash_numbers, "Recomputing state root for insert"); - let provider = self - .externals - .provider_factory - .provider()? - // State root calculation can take a while, and we're sure no write transaction - // will be open in parallel. See https://github.com/paradigmxyz/reth/issues/6168. 
-                    .disable_long_read_transaction_safety();
-                let (state_root, trie_updates) = StateRoot::from_tx(provider.tx_ref())
-                    .with_hashed_cursor_factory(HashedPostStateCursorFactory::new(
-                        DatabaseHashedCursorFactory::new(provider.tx_ref()),
-                        &hashed_state_sorted,
-                    ))
-                    .with_prefix_sets(prefix_sets)
-                    .root_with_updates()
-                    .map_err(BlockValidationError::from)?;
-                let tip = blocks.tip();
-                if state_root != tip.state_root {
-                    return Err(ProviderError::StateRootMismatch(Box::new(RootMismatch {
-                        root: GotExpected { got: state_root, expected: tip.state_root },
-                        block_number: tip.number,
-                        block_hash: tip.hash(),
-                    }))
-                    .into())
-                }
-                self.metrics.trie_updates_insert_recomputed.increment(1);
-                trie_updates
-            }
-        };
-        recorder.record_relative(MakeCanonicalAction::RetrieveStateTrieUpdates);
-
-        let provider_rw = self.externals.provider_factory.provider_rw()?;
-        provider_rw
-            .append_blocks_with_state(
-                blocks.into_blocks().collect(),
-                &state,
-                hashed_state_sorted,
-                trie_updates,
-            )
-            .map_err(|e| CanonicalError::CanonicalCommit(e.to_string()))?;
-
-        provider_rw.commit()?;
-        recorder.record_relative(MakeCanonicalAction::CommitCanonicalChainToDatabase);
-
-        Ok(())
-    }
-
-    /// Unwinds tables and puts the unwound chain back into the tree state.
-    pub fn unwind(&mut self, unwind_to: BlockNumber) -> Result<(), CanonicalError> {
-        // nothing to be done if unwind_to is higher than the tip
-        if self.block_indices().canonical_tip().number <= unwind_to {
-            return Ok(());
-        }
-        // revert `N` blocks from current canonical chain and put them inside BlockchainTree
-        let old_canon_chain = self.revert_canonical_from_database(unwind_to)?;
-
-        // check if there are blocks in the chain
-        if let Some(old_canon_chain) = old_canon_chain {
-            self.state.block_indices.unwind_canonical_chain(unwind_to);
-            // insert old canonical chain into BlockchainTree.
-            self.insert_unwound_chain(AppendableChain::new(old_canon_chain));
-        }
-
-        Ok(())
-    }
-
-    /// Reverts the canonical chain down to the given block from the database and returns the
-    /// unwound chain.
-    ///
-    /// The block, `revert_until`, is __non-inclusive__, i.e. `revert_until` stays in the database.
-    fn revert_canonical_from_database(
-        &self,
-        revert_until: BlockNumber,
-    ) -> Result<Option<Chain>, CanonicalError> {
-        // This should only happen when an optimistic sync target was re-orged.
-        //
-        // Static files generally contain finalized data. The blockchain tree only deals
-        // with non-finalized data. The only scenario where canonical reverts go past the highest
-        // static file is when an optimistic sync occurred and non-finalized data was written to
-        // static files.
-        if self
-            .externals
-            .provider_factory
-            .static_file_provider()
-            .get_highest_static_file_block(StaticFileSegment::Headers)
-            .unwrap_or_default() >
-            revert_until
-        {
-            trace!(
-                target: "blockchain_tree",
-                "Reverting optimistic canonical chain to block {}",
-                revert_until
-            );
-            return Err(CanonicalError::OptimisticTargetRevert(revert_until));
-        }
-
-        // read data that is needed for new sidechain
-        let provider_rw = self.externals.provider_factory.provider_rw()?;
-
-        let tip = provider_rw.last_block_number()?;
-        let revert_range = (revert_until + 1)..=tip;
-        info!(target: "blockchain_tree", "REORG: revert canonical from database by unwinding chain blocks {:?}", revert_range);
-        // read block and execution result from database, and remove traces of block from tables.
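A tiny check of the range arithmetic: the lower bound is non-inclusive, so `revert_until` itself stays in the database and exactly the blocks above it move back into the tree (illustrative numbers only):

fn main() {
    let revert_until = 96_u64; // stays in the database
    let tip = 100_u64;
    let revert_range = (revert_until + 1)..=tip;
    assert_eq!(revert_range.clone().count(), (tip - revert_until) as usize);
    assert_eq!(revert_range.collect::<Vec<_>>(), vec![97, 98, 99, 100]);
}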
- let blocks_and_execution = provider_rw - .take_block_and_execution_above(revert_until, StorageLocation::Database) - .map_err(|e| CanonicalError::CanonicalRevert(e.to_string()))?; - - provider_rw.commit()?; - - if blocks_and_execution.is_empty() { - Ok(None) - } else { - Ok(Some(blocks_and_execution)) - } - } - - fn update_reorg_metrics(&self, reorg_depth: f64) { - self.metrics.reorgs.increment(1); - self.metrics.latest_reorg_depth.set(reorg_depth); - } - - /// Update blockchain tree chains (canonical and sidechains) and sync metrics. - /// - /// NOTE: this method should not be called during the pipeline sync, because otherwise the sync - /// checkpoint metric will get overwritten. Buffered blocks metrics are updated in - /// [`BlockBuffer`](crate::block_buffer::BlockBuffer) during the pipeline sync. - pub(crate) fn update_chains_metrics(&mut self) { - let height = self.state.block_indices.canonical_tip().number; - - let longest_sidechain_height = - self.state.chains.values().map(|chain| chain.tip().number).max(); - if let Some(longest_sidechain_height) = longest_sidechain_height { - self.metrics.longest_sidechain_height.set(longest_sidechain_height as f64); - } - - self.metrics.sidechains.set(self.state.chains.len() as f64); - self.metrics.canonical_chain_height.set(height as f64); - if let Some(metrics_tx) = self.sync_metrics_tx.as_mut() { - let _ = metrics_tx.send(MetricEvent::SyncHeight { height }); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::{Header, TxEip1559, EMPTY_ROOT_HASH}; - use alloy_eips::{ - eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, INITIAL_BASE_FEE}, - eip4895::Withdrawals, - }; - use alloy_genesis::{Genesis, GenesisAccount}; - use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, B256}; - use assert_matches::assert_matches; - use linked_hash_set::LinkedHashSet; - use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS}; - use reth_consensus::test_utils::TestConsensus; - use reth_db::tables; - use reth_db_api::transaction::DbTxMut; - use reth_evm::test_utils::MockExecutorProvider; - use reth_evm_ethereum::execute::EthExecutorProvider; - use reth_node_types::FullNodePrimitives; - use reth_primitives::{ - proofs::{calculate_receipt_root, calculate_transaction_root}, - Account, BlockBody, RecoveredTx, Transaction, TransactionSigned, - }; - use reth_provider::{ - providers::ProviderNodeTypes, - test_utils::{ - blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, - MockNodeTypesWithDB, - }, - ProviderFactory, StorageLocation, - }; - use reth_stages_api::StageCheckpoint; - use reth_trie::{root::state_root_unhashed, StateRoot}; - use std::collections::HashMap; - - fn setup_externals( - exec_res: Vec, - ) -> TreeExternals { - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .shanghai_activated() - .build(), - ); - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); - let consensus = Arc::new(TestConsensus::default()); - let executor_factory = MockExecutorProvider::default(); - executor_factory.extend(exec_res); - - TreeExternals::new(provider_factory, consensus, executor_factory) - } - - fn setup_genesis< - N: ProviderNodeTypes< - Primitives: FullNodePrimitives< - BlockBody = reth_primitives::BlockBody, - BlockHeader = reth_primitives::Header, - >, - >, - >( - factory: &ProviderFactory, - mut genesis: SealedBlock, - ) { - // insert genesis to db. 
- - genesis.set_block_number(10); - genesis.set_state_root(EMPTY_ROOT_HASH); - let provider = factory.provider_rw().unwrap(); - - provider - .insert_historical_block( - genesis.try_seal_with_senders().expect("invalid tx signature in genesis"), - ) - .unwrap(); - - // insert first 10 blocks - for i in 0..10 { - provider - .tx_ref() - .put::(i, B256::new([100 + i as u8; 32])) - .unwrap(); - } - provider - .tx_ref() - .put::("Finish".to_string(), StageCheckpoint::new(10)) - .unwrap(); - provider.commit().unwrap(); - } - - /// Test data structure that will check tree internals - #[derive(Default, Debug)] - struct TreeTester { - /// Number of chains - chain_num: Option, - /// Check block to chain index - block_to_chain: Option>, - /// Check fork to child index - fork_to_child: Option>>, - /// Pending blocks - pending_blocks: Option<(BlockNumber, HashSet)>, - /// Buffered blocks - buffered_blocks: Option>, - } - - impl TreeTester { - const fn with_chain_num(mut self, chain_num: usize) -> Self { - self.chain_num = Some(chain_num); - self - } - - fn with_block_to_chain(mut self, block_to_chain: HashMap) -> Self { - self.block_to_chain = Some(block_to_chain); - self - } - - fn with_fork_to_child( - mut self, - fork_to_child: HashMap>, - ) -> Self { - self.fork_to_child = Some(fork_to_child); - self - } - - fn with_buffered_blocks( - mut self, - buffered_blocks: HashMap, - ) -> Self { - self.buffered_blocks = Some(buffered_blocks); - self - } - - fn with_pending_blocks( - mut self, - pending_blocks: (BlockNumber, HashSet), - ) -> Self { - self.pending_blocks = Some(pending_blocks); - self - } - - fn assert(self, tree: &BlockchainTree) { - if let Some(chain_num) = self.chain_num { - assert_eq!(tree.state.chains.len(), chain_num); - } - if let Some(block_to_chain) = self.block_to_chain { - assert_eq!(*tree.state.block_indices.blocks_to_chain(), block_to_chain); - } - if let Some(fork_to_child) = self.fork_to_child { - let mut x: HashMap> = - HashMap::with_capacity(fork_to_child.len()); - for (key, hash_set) in fork_to_child { - x.insert(key, hash_set.into_iter().collect()); - } - assert_eq!(*tree.state.block_indices.fork_to_child(), x); - } - if let Some(pending_blocks) = self.pending_blocks { - let (num, hashes) = tree.state.block_indices.pending_blocks(); - let hashes = hashes.into_iter().collect::>(); - assert_eq!((num, hashes), pending_blocks); - } - if let Some(buffered_blocks) = self.buffered_blocks { - assert_eq!(*tree.state.buffered_blocks.blocks(), buffered_blocks); - } - } - } - - #[test] - fn consecutive_reorgs() { - let signer = Address::random(); - let initial_signer_balance = U256::from(10).pow(U256::from(18)); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(Genesis { - alloc: BTreeMap::from([( - signer, - GenesisAccount { balance: initial_signer_balance, ..Default::default() }, - )]), - ..MAINNET.genesis.clone() - }) - .shanghai_activated() - .build(), - ); - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); - let consensus = Arc::new(TestConsensus::default()); - let executor_provider = EthExecutorProvider::ethereum(chain_spec.clone()); - - { - let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw - .insert_block( - SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()) - .try_seal_with_senders() - .unwrap(), - StorageLocation::Database, - ) - .unwrap(); - let account = Account { balance: initial_signer_balance, ..Default::default() }; - 
provider_rw.tx_ref().put::(signer, account).unwrap(); - provider_rw.tx_ref().put::(keccak256(signer), account).unwrap(); - provider_rw.commit().unwrap(); - } - - let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); - let mock_tx = |nonce: u64| -> RecoveredTx<_> { - TransactionSigned::new_unhashed( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce, - gas_limit: MIN_TRANSACTION_GAS, - to: Address::ZERO.into(), - max_fee_per_gas: INITIAL_BASE_FEE as u128, - ..Default::default() - }), - Signature::test_signature(), - ) - .with_signer(signer) - }; - - let mock_block = |number: u64, - parent: Option, - body: Vec>, - num_of_signer_txs: u64| - -> SealedBlockWithSenders { - let signed_body = body.clone().into_iter().map(|tx| tx.into_tx()).collect::>(); - let transactions_root = calculate_transaction_root(&signed_body); - let receipts = body - .iter() - .enumerate() - .map(|(idx, tx)| { - Receipt { - tx_type: tx.tx_type(), - success: true, - cumulative_gas_used: (idx as u64 + 1) * MIN_TRANSACTION_GAS, - ..Default::default() - } - .with_bloom() - }) - .collect::>(); - - // receipts root computation is different for OP - let receipts_root = calculate_receipt_root(&receipts); - - let header = Header { - number, - parent_hash: parent.unwrap_or_default(), - gas_used: body.len() as u64 * MIN_TRANSACTION_GAS, - gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, - mix_hash: B256::random(), - base_fee_per_gas: Some(INITIAL_BASE_FEE), - transactions_root, - receipts_root, - state_root: state_root_unhashed(HashMap::from([( - signer, - Account { - balance: initial_signer_balance - - (single_tx_cost * U256::from(num_of_signer_txs)), - nonce: num_of_signer_txs, - ..Default::default() - } - .into_trie_account(EMPTY_ROOT_HASH), - )])), - ..Default::default() - }; - - SealedBlockWithSenders::new( - SealedBlock::new( - SealedHeader::seal(header), - BlockBody { - transactions: signed_body, - ommers: Vec::new(), - withdrawals: Some(Withdrawals::default()), - }, - ), - body.iter().map(|tx| tx.signer()).collect(), - ) - .unwrap() - }; - - let fork_block = mock_block(1, Some(chain_spec.genesis_hash()), Vec::from([mock_tx(0)]), 1); - - let canonical_block_1 = - mock_block(2, Some(fork_block.hash()), Vec::from([mock_tx(1), mock_tx(2)]), 3); - let canonical_block_2 = mock_block(3, Some(canonical_block_1.hash()), Vec::new(), 3); - let canonical_block_3 = - mock_block(4, Some(canonical_block_2.hash()), Vec::from([mock_tx(3)]), 4); - - let sidechain_block_1 = mock_block(2, Some(fork_block.hash()), Vec::from([mock_tx(1)]), 2); - let sidechain_block_2 = - mock_block(3, Some(sidechain_block_1.hash()), Vec::from([mock_tx(2)]), 3); - - let mut tree = BlockchainTree::new( - TreeExternals::new(provider_factory, consensus, executor_provider), - BlockchainTreeConfig::default(), - ) - .expect("failed to create tree"); - - tree.insert_block(fork_block.clone(), BlockValidationKind::Exhaustive).unwrap(); - - assert_eq!( - tree.make_canonical(fork_block.hash()).unwrap(), - CanonicalOutcome::Committed { head: fork_block.sealed_header().clone() } - ); - - assert_eq!( - tree.insert_block(canonical_block_1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.make_canonical(canonical_block_1.hash()).unwrap(), - CanonicalOutcome::Committed { head: canonical_block_1.sealed_header().clone() } - ); - - assert_eq!( - tree.insert_block(canonical_block_2, BlockValidationKind::Exhaustive).unwrap(), - 
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(sidechain_block_1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - assert_eq!( - tree.make_canonical(sidechain_block_1.hash()).unwrap(), - CanonicalOutcome::Committed { head: sidechain_block_1.sealed_header().clone() } - ); - - assert_eq!( - tree.make_canonical(canonical_block_1.hash()).unwrap(), - CanonicalOutcome::Committed { head: canonical_block_1.sealed_header().clone() } - ); - - assert_eq!( - tree.insert_block(sidechain_block_2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - assert_eq!( - tree.make_canonical(sidechain_block_2.hash()).unwrap(), - CanonicalOutcome::Committed { head: sidechain_block_2.sealed_header().clone() } - ); - - assert_eq!( - tree.insert_block(canonical_block_3.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - assert_eq!( - tree.make_canonical(canonical_block_3.hash()).unwrap(), - CanonicalOutcome::Committed { head: canonical_block_3.sealed_header().clone() } - ); - } - - #[test] - fn sidechain_block_hashes() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let (block3, exec3) = data.blocks[2].clone(); - let (block4, exec4) = data.blocks[3].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = - setup_externals(vec![exec3.clone(), exec2.clone(), exec4, exec3, exec2, exec1]); - - // last finalized block would be number 9. 
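A two-line illustration of why the execution results are supplied in reverse: the mock executor pops from the end of the vector, so the last element is consumed by the first block executed (illustrative values):

fn main() {
    let mut results = vec!["exec2", "exec1"];
    assert_eq!(results.pop(), Some("exec1")); // first block executed
    assert_eq!(results.pop(), Some("exec2")); // second block executed
}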
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - // genesis block 10 is already canonical - tree.make_canonical(B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(10).unwrap(); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block4, BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - let mut block2a = block2; - let block2a_hash = B256::new([0x34; 32]); - block2a.set_hash(block2a_hash); - - assert_eq!( - tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - let mut block3a = block3; - let block3a_hash = B256::new([0x35; 32]); - block3a.set_hash(block3a_hash); - block3a.set_parent_hash(block2a.hash()); - - assert_eq!( - tree.insert_block(block3a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) /* TODO: this is incorrect, figure out why */ - ); - - let block3a_chain_id = tree.state.block_indices.get_side_chain_id(&block3a.hash()).unwrap(); - assert_eq!( - tree.all_chain_hashes(block3a_chain_id), - BTreeMap::from([ - (block1.number, block1.hash()), - (block2a.number, block2a.hash()), - (block3a.number, block3a.hash()), - ]) - ); - } - - #[test] - fn cached_trie_updates() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let (block3, exec3) = data.blocks[2].clone(); - let (block4, exec4) = data.blocks[3].clone(); - let (block5, exec5) = data.blocks[4].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = setup_externals(vec![exec5.clone(), exec4, exec3, exec2, exec1]); - - // last finalized block would be number 9. 
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - // genesis block 10 is already canonical - tree.make_canonical(B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(10).unwrap(); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block1_chain_id = tree.state.block_indices.get_side_chain_id(&block1.hash()).unwrap(); - let block1_chain = tree.state.chains.get(&block1_chain_id).unwrap(); - assert!(block1_chain.trie_updates().is_some()); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block2_chain_id = tree.state.block_indices.get_side_chain_id(&block2.hash()).unwrap(); - let block2_chain = tree.state.chains.get(&block2_chain_id).unwrap(); - assert!(block2_chain.trie_updates().is_none()); - - assert_eq!( - tree.make_canonical(block2.hash()).unwrap(), - CanonicalOutcome::Committed { head: block2.sealed_header().clone() } - ); - - assert_eq!( - tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block3_chain_id = tree.state.block_indices.get_side_chain_id(&block3.hash()).unwrap(); - let block3_chain = tree.state.chains.get(&block3_chain_id).unwrap(); - assert!(block3_chain.trie_updates().is_some()); - - assert_eq!( - tree.make_canonical(block3.hash()).unwrap(), - CanonicalOutcome::Committed { head: block3.sealed_header().clone() } - ); - - assert_eq!( - tree.insert_block(block4.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block4_chain_id = tree.state.block_indices.get_side_chain_id(&block4.hash()).unwrap(); - let block4_chain = tree.state.chains.get(&block4_chain_id).unwrap(); - assert!(block4_chain.trie_updates().is_some()); - - assert_eq!( - tree.insert_block(block5.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - let block5_chain_id = tree.state.block_indices.get_side_chain_id(&block5.hash()).unwrap(); - let block5_chain = tree.state.chains.get(&block5_chain_id).unwrap(); - assert!(block5_chain.trie_updates().is_none()); - - assert_eq!( - tree.make_canonical(block5.hash()).unwrap(), - CanonicalOutcome::Committed { head: block5.sealed_header().clone() } - ); - - let provider = tree.externals.provider_factory.provider().unwrap(); - let prefix_sets = tree - .externals - .provider_factory - .hashed_post_state(exec5.state()) - .construct_prefix_sets() - .freeze(); - let state_root = - StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap(); - assert_eq!(state_root, block5.state_root); - } - - #[test] - fn test_side_chain_fork() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = setup_externals(vec![exec2.clone(), exec2, exec1]); - - // last finalized block would be number 9. 
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - // genesis block 10 is already canonical - tree.make_canonical(B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(10).unwrap(); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // we have one chain that has two blocks. - // Trie state: - // b2 (pending block) - // | - // | - // b1 (pending block) - // / - // / - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 0.into()), - (block2.hash(), 0.into()), - ])) - .with_fork_to_child(HashMap::from([( - block1.parent_hash, - HashSet::from([block1.hash()]), - )])) - .assert(&tree); - - let mut block2a = block2.clone(); - let block2a_hash = B256::new([0x34; 32]); - block2a.set_hash(block2a_hash); - - assert_eq!( - tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - // fork chain. - // Trie state: - // b2 b2a (pending blocks in tree) - // | / - // | / - // b1 - // / - // / - // g1 (canonical blocks) - // | - - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 0.into()), - (block2.hash(), 0.into()), - (block2a.hash(), 1.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1.hash()])), - (block2a.parent_hash, HashSet::from([block2a.hash()])), - ])) - .assert(&tree); - // chain 0 has two blocks so receipts and reverts len is 2 - let chain0 = tree.state.chains.get(&0.into()).unwrap().execution_outcome(); - assert_eq!(chain0.receipts().len(), 2); - assert_eq!(chain0.state().reverts.len(), 2); - assert_eq!(chain0.first_block(), block1.number); - // chain 1 has one block so receipts and reverts len is 1 - let chain1 = tree.state.chains.get(&1.into()).unwrap().execution_outcome(); - assert_eq!(chain1.receipts().len(), 1); - assert_eq!(chain1.state().reverts.len(), 1); - assert_eq!(chain1.first_block(), block2.number); - } - - #[test] - fn sanity_path() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = setup_externals(vec![exec2.clone(), exec1.clone(), exec2, exec1]); - - // last finalized block would be number 9. 
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - - let mut canon_notif = tree.subscribe_canon_state(); - // genesis block 10 is already canonical - let head = BlockNumHash::new(10, B256::ZERO); - tree.make_canonical(head.hash).unwrap(); - - // make sure is_block_hash_canonical returns true for genesis block - tree.is_block_hash_canonical(&B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(head.number).unwrap(); - - // block2's parent is not known, so block2 is buffered. - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head, - missing_ancestor: block2.parent_num_hash() - }) - ); - - // Buffered block: [block2] - // Trie state: - // | - // g1 (canonical blocks) - // | - - TreeTester::default() - .with_buffered_blocks(HashMap::from([(block2.hash(), block2.clone())])) - .assert(&tree); - - assert_eq!( - tree.is_block_known(block2.num_hash()).unwrap(), - Some(BlockStatus::Disconnected { head, missing_ancestor: block2.parent_num_hash() }) - ); - - // check if a random block is known - let old_block = BlockNumHash::new(1, B256::new([32; 32])); - let err = BlockchainTreeError::PendingBlockIsFinalized { last_finalized: 10 }; - - assert_eq!(tree.is_block_known(old_block).unwrap_err().as_tree_error(), Some(err)); - - // insert block1; the buffered block2 is then inserted as well - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // Buffered blocks: [] - // Trie state: - // b2 (pending block) - // | - // | - // b1 (pending block) - // / - // / - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 0.into()), - (block2.hash(), 0.into()), - ])) - .with_fork_to_child(HashMap::from([( - block1.parent_hash, - HashSet::from([block1.hash()]), - )])) - .with_pending_blocks((block1.number, HashSet::from([block1.hash()]))) - .assert(&tree); - - // an already inserted block will return `InsertPayloadOk::AlreadySeen(_)` - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::AlreadySeen(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // block two is already inserted. - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::AlreadySeen(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // make block1 canonical - tree.make_canonical(block1.hash()).unwrap(); - // check notification - assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block1.number,block1.clone())])); - - // make block2 canonical - tree.make_canonical(block2.hash()).unwrap(); - // check notification.
- assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block2.number,block2.clone())])); - - // Trie state: - // b2 (canonical block) - // | - // | - // b1 (canonical block) - // | - // | - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(0) - .with_block_to_chain(HashMap::from([])) - .with_fork_to_child(HashMap::from([])) - .assert(&tree); - - /**** INSERT SIDE BLOCKS *** */ - - let mut block1a = block1.clone(); - let block1a_hash = B256::new([0x33; 32]); - block1a.set_hash(block1a_hash); - let mut block2a = block2.clone(); - let block2a_hash = B256::new([0x34; 32]); - block2a.set_hash(block2a_hash); - - // reinsert two blocks that point to canonical chain - assert_eq!( - tree.insert_block(block1a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([(block1a_hash, 1.into())])) - .with_fork_to_child(HashMap::from([( - block1.parent_hash, - HashSet::from([block1a_hash]), - )])) - .with_pending_blocks((block2.number + 1, HashSet::from([]))) - .assert(&tree); - - assert_eq!( - tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 b1a (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1a_hash, 1.into()), - (block2a_hash, 2.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1a_hash])), - (block1.hash(), HashSet::from([block2a_hash])), - ])) - .with_pending_blocks((block2.number + 1, HashSet::from([]))) - .assert(&tree); - - // make b2a canonical - assert!(tree.make_canonical(block2a_hash).is_ok()); - // check notification. - assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Reorg{ old, new}) - if *old.blocks() == BTreeMap::from([(block2.number,block2.clone())]) - && *new.blocks() == BTreeMap::from([(block2a.number,block2a.clone())])); - - // Trie state: - // b2a b2 (side chain) - // | / - // | / - // b1 b1a (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1a_hash, 1.into()), - (block2.hash(), 3.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1a_hash])), - (block1.hash(), HashSet::from([block2.hash()])), - ])) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .assert(&tree); - - assert_matches!(tree.make_canonical(block1a_hash), Ok(_)); - // Trie state: - // b2a b2 (side chain) - // | / - // | / - // b1a b1 (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 4.into()), - (block2a_hash, 4.into()), - (block2.hash(), 3.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1.hash()])), - (block1.hash(), HashSet::from([block2.hash()])), - ])) - .with_pending_blocks((block1a.number + 1, HashSet::default())) - .assert(&tree); - - // check notification. 
- assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Reorg{ old, new}) - if *old.blocks() == BTreeMap::from([(block1.number,block1.clone()),(block2a.number,block2a.clone())]) - && *new.blocks() == BTreeMap::from([(block1a.number,block1a.clone())])); - - // check that b2 and b1 are not canonical - assert!(!tree.is_block_hash_canonical(&block2.hash()).unwrap()); - assert!(!tree.is_block_hash_canonical(&block1.hash()).unwrap()); - - // ensure that b1a is canonical - assert!(tree.is_block_hash_canonical(&block1a.hash()).unwrap()); - - // make b2 canonical - tree.make_canonical(block2.hash()).unwrap(); - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 b1a (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1a_hash, 5.into()), - (block2a_hash, 4.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1a_hash])), - (block1.hash(), HashSet::from([block2a_hash])), - ])) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .assert(&tree); - - // check notification. - assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Reorg{ old, new}) - if *old.blocks() == BTreeMap::from([(block1a.number,block1a.clone())]) - && *new.blocks() == BTreeMap::from([(block1.number,block1.clone()),(block2.number,block2.clone())])); - - // check that b2 is now canonical - assert!(tree.is_block_hash_canonical(&block2.hash()).unwrap()); - - // finalize b1, which removes b1a from the tree - tree.finalize_block(11).unwrap(); - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 (canon) - // | - // g1 (10) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([(block2a_hash, 4.into())])) - .with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))])) - .with_pending_blocks((block2.number + 1, HashSet::from([]))) - .assert(&tree); - - // unwind the canonical chain back to block1 - assert!(tree.unwind(block1.number).is_ok()); - // Trie state: - // b2 b2a (pending block) - // / / - // / / - // / / - // b1 (canonical block) - // | - // | - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block2a_hash, 4.into()), - (block2.hash(), 6.into()), - ])) - .with_fork_to_child(HashMap::from([( - block1.hash(), - HashSet::from([block2a_hash, block2.hash()]), - )])) - .with_pending_blocks((block2.number, HashSet::from([block2.hash(), block2a.hash()]))) - .assert(&tree); - - // make b2 canonical again - tree.make_canonical(block2.hash()).unwrap(); - - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 (finalized) - // | - // g1 (10) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([(block2a_hash, 4.into())])) - .with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))])) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .assert(&tree); - - // check notification.
- assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Commit{ new }) - if *new.blocks() == BTreeMap::from([(block2.number,block2.clone())])); - - // insert unconnected block2b - let mut block2b = block2a.clone(); - block2b.set_hash(B256::new([0x99; 32])); - block2b.set_parent_hash(B256::new([0x88; 32])); - - assert_eq!( - tree.insert_block(block2b.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head: block2.num_hash(), - missing_ancestor: block2b.parent_num_hash() - }) - ); - - TreeTester::default() - .with_buffered_blocks(HashMap::from([(block2b.hash(), block2b.clone())])) - .assert(&tree); - - // update canonical block to b2; this removes b2a - assert!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12).is_ok()); - - assert_eq!( - tree.is_block_known(block2.num_hash()).unwrap(), - Some(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // Trie state: - // b2 (finalized) - // | - // b1 (finalized) - // | - // g1 (10) - // | - TreeTester::default() - .with_chain_num(0) - .with_block_to_chain(HashMap::default()) - .with_fork_to_child(HashMap::default()) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .with_buffered_blocks(HashMap::default()) - .assert(&tree); - } - - #[test] - fn last_finalized_block_initialization() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let (block3, exec3) = data.blocks[2].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = - setup_externals(vec![exec3.clone(), exec2.clone(), exec1.clone(), exec3, exec2, exec1]); - let cloned_externals_1 = TreeExternals { - provider_factory: externals.provider_factory.clone(), - executor_factory: externals.executor_factory.clone(), - consensus: externals.consensus.clone(), - }; - let cloned_externals_2 = TreeExternals { - provider_factory: externals.provider_factory.clone(), - executor_factory: externals.executor_factory.clone(), - consensus: externals.consensus.clone(), - }; - - // last finalized block would be number 9.
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block3, BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - tree.make_canonical(block2.hash()).unwrap(); - - // restart - let mut tree = - BlockchainTree::new(cloned_externals_1, config).expect("failed to create tree"); - assert_eq!(tree.block_indices().last_finalized_block(), 0); - - let mut block1a = block1; - let block1a_hash = B256::new([0x33; 32]); - block1a.set_hash(block1a_hash); - - assert_eq!( - tree.insert_block(block1a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - tree.make_canonical(block1a.hash()).unwrap(); - tree.finalize_block(block1a.number).unwrap(); - - // restart - let tree = BlockchainTree::new(cloned_externals_2, config).expect("failed to create tree"); - - assert_eq!(tree.block_indices().last_finalized_block(), block1a.number); - } -} diff --git a/crates/blockchain-tree/src/bundle.rs b/crates/blockchain-tree/src/bundle.rs deleted file mode 100644 index ef9fc21670c83..0000000000000 --- a/crates/blockchain-tree/src/bundle.rs +++ /dev/null @@ -1,69 +0,0 @@ -//! [`ExecutionDataProvider`] implementations used by the tree. - -use alloy_eips::ForkBlock; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_provider::{BlockExecutionForkProvider, ExecutionDataProvider, ExecutionOutcome}; -use std::collections::BTreeMap; - -/// Structure that combines references to the data required to be an [`ExecutionDataProvider`]. -#[derive(Clone, Debug)] -pub struct BundleStateDataRef<'a> { - /// The execution outcome after execution of one or more transactions and/or blocks. - pub execution_outcome: &'a ExecutionOutcome, - /// The blocks in the sidechain. - pub sidechain_block_hashes: &'a BTreeMap, - /// The blocks in the canonical chain. - pub canonical_block_hashes: &'a BTreeMap, - /// Canonical fork - pub canonical_fork: ForkBlock, -} - -impl ExecutionDataProvider for BundleStateDataRef<'_> { - fn execution_outcome(&self) -> &ExecutionOutcome { - self.execution_outcome - } - - fn block_hash(&self, block_number: BlockNumber) -> Option { - let block_hash = self.sidechain_block_hashes.get(&block_number).copied(); - if block_hash.is_some() { - return block_hash; - } - - self.canonical_block_hashes.get(&block_number).copied() - } -} - -impl BlockExecutionForkProvider for BundleStateDataRef<'_> { - fn canonical_fork(&self) -> ForkBlock { - self.canonical_fork - } -} - -/// Structure that owns the relevant data needed to be an [`ExecutionDataProvider`] -#[derive(Clone, Debug)] -pub struct ExecutionData { - /// Execution outcome. - pub execution_outcome: ExecutionOutcome, - /// Parent block hashes needed for the EVM BLOCKHASH opcode. - /// NOTE: this does not mean that all hashes are present, but all hashes up to the finalized block are.
- /// Other hashes can be obtained from provider - pub parent_block_hashes: BTreeMap, - /// Canonical block where state forked from. - pub canonical_fork: ForkBlock, -} - -impl ExecutionDataProvider for ExecutionData { - fn execution_outcome(&self) -> &ExecutionOutcome { - &self.execution_outcome - } - - fn block_hash(&self, block_number: BlockNumber) -> Option { - self.parent_block_hashes.get(&block_number).copied() - } -} - -impl BlockExecutionForkProvider for ExecutionData { - fn canonical_fork(&self) -> ForkBlock { - self.canonical_fork - } -} diff --git a/crates/blockchain-tree/src/canonical_chain.rs b/crates/blockchain-tree/src/canonical_chain.rs deleted file mode 100644 index 253f799fe0f87..0000000000000 --- a/crates/blockchain-tree/src/canonical_chain.rs +++ /dev/null @@ -1,241 +0,0 @@ -use alloy_eips::BlockNumHash; -use alloy_primitives::{BlockHash, BlockNumber}; -use std::collections::BTreeMap; - -/// This keeps track of (non-finalized) blocks of the canonical chain. -/// -/// This is a wrapper type around an ordered set of block numbers and hashes that belong to the -/// canonical chain that is not yet finalized. -#[derive(Debug, Clone, Default)] -pub(crate) struct CanonicalChain { - /// All blocks of the canonical chain in order of their block number. - chain: BTreeMap, -} - -impl CanonicalChain { - pub(crate) const fn new(chain: BTreeMap) -> Self { - Self { chain } - } - - /// Replaces the current chain with the given one. - #[inline] - pub(crate) fn replace(&mut self, chain: BTreeMap) { - self.chain = chain; - } - - /// Returns the block hash of the (non-finalized) canonical block with the given number. - #[inline] - pub(crate) fn canonical_hash(&self, number: &BlockNumber) -> Option { - self.chain.get(number).copied() - } - - /// Returns the block number of the (non-finalized) canonical block with the given hash. - #[inline] - pub(crate) fn canonical_number(&self, block_hash: &BlockHash) -> Option { - self.chain.iter().find_map(|(number, hash)| (hash == block_hash).then_some(*number)) - } - - /// Extends all items from the given iterator to the chain. - #[inline] - pub(crate) fn extend(&mut self, blocks: impl Iterator) { - self.chain.extend(blocks) - } - - /// Retains only the elements specified by the predicate. 
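- /// For instance (illustrative), keeping only entries above a finalized height looks like
- /// `chain.retain(|number, _| *number > finalized)`.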
- #[inline] - pub(crate) fn retain(&mut self, f: F) - where - F: FnMut(&BlockNumber, &mut BlockHash) -> bool, - { - self.chain.retain(f) - } - - #[inline] - pub(crate) const fn inner(&self) -> &BTreeMap { - &self.chain - } - - #[inline] - pub(crate) fn tip(&self) -> BlockNumHash { - self.chain - .last_key_value() - .map(|(&number, &hash)| BlockNumHash { number, hash }) - .unwrap_or_default() - } - - #[inline] - pub(crate) fn iter(&self) -> impl Iterator + '_ { - self.chain.iter().map(|(&number, &hash)| (number, hash)) - } - - #[inline] - pub(crate) fn into_iter(self) -> impl Iterator { - self.chain.into_iter() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_replace_canonical_chain() { - // Initialize a chain with some blocks - let mut initial_chain = BTreeMap::new(); - initial_chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); - initial_chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); - - let mut canonical_chain = CanonicalChain::new(initial_chain.clone()); - - // Verify initial chain state - assert_eq!(canonical_chain.chain.len(), 2); - assert_eq!( - canonical_chain.chain.get(&BlockNumber::from(1u64)), - Some(&BlockHash::from([0x01; 32])) - ); - - // Replace with a new chain - let mut new_chain = BTreeMap::new(); - new_chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); - new_chain.insert(BlockNumber::from(4u64), BlockHash::from([0x04; 32])); - new_chain.insert(BlockNumber::from(5u64), BlockHash::from([0x05; 32])); - - canonical_chain.replace(new_chain.clone()); - - // Verify replaced chain state - assert_eq!(canonical_chain.chain.len(), 3); - assert!(!canonical_chain.chain.contains_key(&BlockNumber::from(1u64))); - assert_eq!( - canonical_chain.chain.get(&BlockNumber::from(3u64)), - Some(&BlockHash::from([0x03; 32])) - ); - } - - #[test] - fn test_canonical_hash_canonical_chain() { - // Initialize a chain with some blocks - let mut chain = BTreeMap::new(); - chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); - chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); - chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); - - // Create an instance of a canonical chain - let canonical_chain = CanonicalChain::new(chain.clone()); - - // Check that the function returns the correct hash for a given block number - let block_number = BlockNumber::from(2u64); - let expected_hash = BlockHash::from([0x02; 32]); - assert_eq!(canonical_chain.canonical_hash(&block_number), Some(expected_hash)); - - // Check that a non-existent block returns None - let non_existent_block = BlockNumber::from(5u64); - assert_eq!(canonical_chain.canonical_hash(&non_existent_block), None); - } - - #[test] - fn test_canonical_number_canonical_chain() { - // Initialize a chain with some blocks - let mut chain = BTreeMap::new(); - chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); - chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); - chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); - - // Create an instance of a canonical chain - let canonical_chain = CanonicalChain::new(chain.clone()); - - // Check that the function returns the correct block number for a given block hash - let block_hash = BlockHash::from([0x02; 32]); - let expected_number = BlockNumber::from(2u64); - assert_eq!(canonical_chain.canonical_number(&block_hash), Some(expected_number)); - - // Check that a non-existent block hash returns None - let non_existent_hash = BlockHash::from([0x05; 
32]); - assert_eq!(canonical_chain.canonical_number(&non_existent_hash), None); - } - - #[test] - fn test_extend_canonical_chain() { - // Initialize an empty chain - let mut canonical_chain = CanonicalChain::new(BTreeMap::new()); - - // Create an iterator with some blocks - let blocks = vec![ - (BlockNumber::from(1u64), BlockHash::from([0x01; 32])), - (BlockNumber::from(2u64), BlockHash::from([0x02; 32])), - ] - .into_iter(); - - // Extend the chain with the created blocks - canonical_chain.extend(blocks); - - // Check if the blocks were added correctly - assert_eq!(canonical_chain.chain.len(), 2); - assert_eq!( - canonical_chain.chain.get(&BlockNumber::from(1u64)), - Some(&BlockHash::from([0x01; 32])) - ); - assert_eq!( - canonical_chain.chain.get(&BlockNumber::from(2u64)), - Some(&BlockHash::from([0x02; 32])) - ); - - // Test extending with additional blocks again - let more_blocks = vec![(BlockNumber::from(3u64), BlockHash::from([0x03; 32]))].into_iter(); - canonical_chain.extend(more_blocks); - - assert_eq!(canonical_chain.chain.len(), 3); - assert_eq!( - canonical_chain.chain.get(&BlockNumber::from(3u64)), - Some(&BlockHash::from([0x03; 32])) - ); - } - - #[test] - fn test_retain_canonical_chain() { - // Initialize a chain with some blocks - let mut chain = BTreeMap::new(); - chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); - chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); - chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); - - // Create an instance of CanonicalChain - let mut canonical_chain = CanonicalChain::new(chain); - - // Retain only blocks with even block numbers - canonical_chain.retain(|number, _| number % 2 == 0); - - // Check if the chain only contains the block with number 2 - assert_eq!(canonical_chain.chain.len(), 1); - assert_eq!( - canonical_chain.chain.get(&BlockNumber::from(2u64)), - Some(&BlockHash::from([0x02; 32])) - ); - - // Ensure that the blocks with odd numbers were removed - assert_eq!(canonical_chain.chain.get(&BlockNumber::from(1u64)), None); - assert_eq!(canonical_chain.chain.get(&BlockNumber::from(3u64)), None); - } - - #[test] - fn test_tip_canonical_chain() { - // Initialize a chain with some blocks - let mut chain = BTreeMap::new(); - chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); - chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); - chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); - - // Create an instance of a canonical chain - let canonical_chain = CanonicalChain::new(chain); - - // Call the tip method and verify the returned value - let tip = canonical_chain.tip(); - assert_eq!(tip.number, BlockNumber::from(3u64)); - assert_eq!(tip.hash, BlockHash::from([0x03; 32])); - - // Test with an empty chain - let empty_chain = CanonicalChain::new(BTreeMap::new()); - let empty_tip = empty_chain.tip(); - assert_eq!(empty_tip.number, BlockNumber::default()); - assert_eq!(empty_tip.hash, BlockHash::default()); - } -} diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs deleted file mode 100644 index e607d00d2d9b4..0000000000000 --- a/crates/blockchain-tree/src/chain.rs +++ /dev/null @@ -1,311 +0,0 @@ -//! A chain in a [`BlockchainTree`][super::BlockchainTree]. -//! -//! A [`Chain`] contains the state of accounts for the chain after execution of its constituent -//! blocks, as well as a list of the blocks the chain is composed of. 
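To make the wrapper pattern of this file concrete before the code: a minimal, self-contained sketch, not part of the original sources, with the `Chain` internals simplified to hypothetical stand-ins. It shows the same shape as `AppendableChain` below: the wrapper adds execution-related behavior and forwards all reads to the inner `Chain` through `Deref`.

use std::ops::Deref;

/// Simplified stand-in for the real `Chain` (illustrative only).
struct Chain {
    block_numbers: Vec<u64>,
}

impl Chain {
    /// Number of the highest block in the chain, if any.
    fn tip_number(&self) -> Option<u64> {
        self.block_numbers.last().copied()
    }
}

/// Wrapper that only adds behavior; reads go through `Deref`.
struct AppendableChain {
    chain: Chain,
}

impl AppendableChain {
    /// The "added behavior": append a block number to the inner chain.
    fn append(&mut self, number: u64) {
        self.chain.block_numbers.push(number);
    }
}

impl Deref for AppendableChain {
    type Target = Chain;

    fn deref(&self) -> &Chain {
        &self.chain
    }
}

fn main() {
    let mut chain = AppendableChain { chain: Chain { block_numbers: vec![10, 11] } };
    chain.append(12);
    // `tip_number` is defined on `Chain` but callable here thanks to `Deref`.
    assert_eq!(chain.tip_number(), Some(12));
}

The real type derefs to the inner chain in the same way, so read-only chain queries need no forwarding boilerplate.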
- -use super::externals::TreeExternals; -use crate::BundleStateDataRef; -use alloy_eips::ForkBlock; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_blockchain_tree_api::{ - error::{BlockchainTreeError, InsertBlockErrorKind}, - BlockAttachment, BlockValidationKind, -}; -use reth_consensus::{ConsensusError, PostExecutionInput}; -use reth_evm::execute::{BlockExecutorProvider, Executor}; -use reth_execution_errors::BlockExecutionError; -use reth_execution_types::{Chain, ExecutionOutcome}; -use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; -use reth_provider::{ - providers::{BundleStateProvider, ConsistentDbView, TreeNodeTypes}, - DBProvider, FullExecutionDataProvider, HashedPostStateProvider, ProviderError, - StateRootProvider, TryIntoHistoricalStateProvider, -}; -use reth_revm::database::StateProviderDatabase; -use reth_trie::{updates::TrieUpdates, TrieInput}; -use reth_trie_parallel::root::ParallelStateRoot; -use std::{ - collections::BTreeMap, - ops::{Deref, DerefMut}, - time::Instant, -}; - -/// A chain in the blockchain tree that has functionality to execute blocks and append them to -/// itself. -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct AppendableChain { - chain: Chain, -} - -impl Deref for AppendableChain { - type Target = Chain; - - fn deref(&self) -> &Self::Target { - &self.chain - } -} - -impl DerefMut for AppendableChain { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.chain - } -} - -impl AppendableChain { - /// Create a new appendable chain from a given chain. - pub const fn new(chain: Chain) -> Self { - Self { chain } - } - - /// Get the chain. - pub fn into_inner(self) -> Chain { - self.chain - } - - /// Create a new chain that forks off of the canonical chain. - /// - /// if [`BlockValidationKind::Exhaustive`] is specified, the method will verify the state root - /// of the block. - pub fn new_canonical_fork( - block: SealedBlockWithSenders, - parent_header: &SealedHeader, - canonical_block_hashes: &BTreeMap, - canonical_fork: ForkBlock, - externals: &TreeExternals, - block_attachment: BlockAttachment, - block_validation_kind: BlockValidationKind, - ) -> Result - where - N: TreeNodeTypes, - E: BlockExecutorProvider, - { - let execution_outcome = ExecutionOutcome::default(); - let empty = BTreeMap::new(); - - let state_provider = BundleStateDataRef { - execution_outcome: &execution_outcome, - sidechain_block_hashes: &empty, - canonical_block_hashes, - canonical_fork, - }; - - let (bundle_state, trie_updates) = Self::validate_and_execute( - block.clone(), - parent_header, - state_provider, - externals, - block_attachment, - block_validation_kind, - )?; - - Ok(Self::new(Chain::new(vec![block], bundle_state, trie_updates))) - } - - /// Create a new chain that forks off of an existing sidechain. - /// - /// This differs from [`AppendableChain::new_canonical_fork`] in that this starts a new fork. 
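- /// (I.e. the fork point is a block inside this sidechain rather than a block on the
- /// canonical chain.)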
- pub(crate) fn new_chain_fork( - &self, - block: SealedBlockWithSenders, - side_chain_block_hashes: BTreeMap, - canonical_block_hashes: &BTreeMap, - canonical_fork: ForkBlock, - externals: &TreeExternals, - block_validation_kind: BlockValidationKind, - ) -> Result - where - N: TreeNodeTypes, - E: BlockExecutorProvider, - { - let parent_number = - block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; - let parent = self.blocks().get(&parent_number).ok_or( - BlockchainTreeError::BlockNumberNotFoundInChain { block_number: parent_number }, - )?; - - let mut execution_outcome = self.execution_outcome().clone(); - - // Revert state to the state after execution of the parent block - execution_outcome.revert_to(parent.number); - - // Revert changesets to get the state of the parent onto which we apply the change. - let bundle_state_data = BundleStateDataRef { - execution_outcome: &execution_outcome, - sidechain_block_hashes: &side_chain_block_hashes, - canonical_block_hashes, - canonical_fork, - }; - let (block_state, _) = Self::validate_and_execute( - block.clone(), - parent, - bundle_state_data, - externals, - BlockAttachment::HistoricalFork, - block_validation_kind, - )?; - // extending will also optimize a few things, mostly related to selfdestruct and wiping of - // storage. - execution_outcome.extend(block_state); - - // remove all receipts and reverts (except the last one), as they belong to the chain we - // forked from and not the new chain we are creating. - let size = execution_outcome.receipts().len(); - execution_outcome.receipts_mut().drain(0..size - 1); - execution_outcome.state_mut().take_n_reverts(size - 1); - execution_outcome.set_first_block(block.number); - - // If all is okay, return the new chain. The present chain is not modified. - Ok(Self::new(Chain::from_block(block, execution_outcome, None))) - } - - /// Validate and execute the given block that _extends the canonical chain_, validating its - /// state root after execution if possible and requested. - /// - /// Note: State root validation is limited to blocks that extend the canonical chain and is - /// optional, see [`BlockValidationKind`]. So this function takes two parameters to determine - /// if the state can and should be validated. - /// - [`BlockAttachment`] represents if the block extends the canonical chain, and thus we can - /// cache the trie state updates. - /// - [`BlockValidationKind`] determines if the state root __should__ be validated. - fn validate_and_execute( - block: SealedBlockWithSenders, - parent_block: &SealedHeader, - bundle_state_data_provider: EDP, - externals: &TreeExternals, - block_attachment: BlockAttachment, - block_validation_kind: BlockValidationKind, - ) -> Result<(ExecutionOutcome, Option), BlockExecutionError> - where - EDP: FullExecutionDataProvider, - N: TreeNodeTypes, - E: BlockExecutorProvider, - { - // some checks are done before the block comes here. - externals.consensus.validate_header_against_parent(&block, parent_block)?; - - // get the state provider. - let canonical_fork = bundle_state_data_provider.canonical_fork(); - - // SAFETY: For block execution and parallel state root computation below we open multiple - // independent database transactions. Upon opening the database transaction the consistent - // view will check the current tip in the database and throw an error if it doesn't match - // the one recorded during initialization.
- // It is safe to use consistent view without any special error handling as long as - // we guarantee that plain state cannot change during processing of a new payload. - // The usage has to be re-evaluated if that was ever to change. - let consistent_view = - ConsistentDbView::new_with_latest_tip(externals.provider_factory.clone())?; - let state_provider = consistent_view - .provider_ro()? - // State root calculation can take a while, and we're sure no write transaction - // will be open in parallel. See https://github.com/paradigmxyz/reth/issues/7509. - .disable_long_read_transaction_safety() - .try_into_history_at_block(canonical_fork.number)?; - - let provider = BundleStateProvider::new(state_provider, bundle_state_data_provider); - - let db = StateProviderDatabase::new(&provider); - let executor = externals.executor_factory.executor(db); - let block_hash = block.hash(); - let block = block.unseal(); - - let state = executor.execute(&block)?; - externals.consensus.validate_block_post_execution( - &block, - PostExecutionInput::new(&state.receipts, &state.requests), - )?; - - let initial_execution_outcome = ExecutionOutcome::from((state, block.number)); - - // check state root if the block extends the canonical chain __and__ if state root - // validation was requested. - if block_validation_kind.is_exhaustive() { - // calculate and check state root - let start = Instant::now(); - let (state_root, trie_updates) = if block_attachment.is_canonical() { - let mut execution_outcome = - provider.block_execution_data_provider.execution_outcome().clone(); - execution_outcome.extend(initial_execution_outcome.clone()); - ParallelStateRoot::new( - consistent_view, - TrieInput::from_state(provider.hashed_post_state(execution_outcome.state())), - ) - .incremental_root_with_updates() - .map(|(root, updates)| (root, Some(updates))) - .map_err(ProviderError::from)? - } else { - let hashed_state = provider.hashed_post_state(initial_execution_outcome.state()); - let state_root = provider.state_root(hashed_state)?; - (state_root, None) - }; - if block.state_root != state_root { - return Err(ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.state_root }.into(), - ) - .into()) - } - - tracing::debug!( - target: "blockchain_tree::chain", - number = block.number, - hash = %block_hash, - elapsed = ?start.elapsed(), - "Validated state root" - ); - - Ok((initial_execution_outcome, trie_updates)) - } else { - Ok((initial_execution_outcome, None)) - } - } - - /// Validate and execute the given block, and append it to this chain. - /// - /// This expects that the block's ancestors can be traced back to the `canonical_fork` (the - /// first parent block of the `block`'s chain that is in the canonical chain). - /// - /// In other words, expects a gapless (side-)chain: [`canonical_fork..block`] in order to be - /// able to __execute__ the block. - /// - /// CAUTION: This will only perform the state root check when it's possible, i.e. when the - /// `canonical_fork` is the canonical head; the state root check can't be performed if the - /// given `canonical_fork` is __not__ the canonical head.
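- /// For example (illustrative): appending `b3` to a sidechain `[b1, b2]` whose
- /// `canonical_fork` is `g1` assumes the gapless ancestry `g1 <- b1 <- b2 <- b3`.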
- #[track_caller] - #[allow(clippy::too_many_arguments)] - pub(crate) fn append_block( - &mut self, - block: SealedBlockWithSenders, - side_chain_block_hashes: BTreeMap, - canonical_block_hashes: &BTreeMap, - externals: &TreeExternals, - canonical_fork: ForkBlock, - block_attachment: BlockAttachment, - block_validation_kind: BlockValidationKind, - ) -> Result<(), InsertBlockErrorKind> - where - N: TreeNodeTypes, - E: BlockExecutorProvider, - { - let parent_block = self.chain.tip(); - - let bundle_state_data = BundleStateDataRef { - execution_outcome: self.execution_outcome(), - sidechain_block_hashes: &side_chain_block_hashes, - canonical_block_hashes, - canonical_fork, - }; - - let (block_state, _) = Self::validate_and_execute( - block.clone(), - parent_block, - bundle_state_data, - externals, - block_attachment, - block_validation_kind, - )?; - // extend the state. - self.chain.append_block(block, block_state); - - Ok(()) - } -} diff --git a/crates/blockchain-tree/src/config.rs b/crates/blockchain-tree/src/config.rs deleted file mode 100644 index 8dda5dc82098d..0000000000000 --- a/crates/blockchain-tree/src/config.rs +++ /dev/null @@ -1,91 +0,0 @@ -//! Blockchain tree configuration - -/// The configuration for the blockchain tree. -#[derive(Clone, Copy, Debug)] -pub struct BlockchainTreeConfig { - /// Number of blocks after the last finalized block that we are storing. - /// - /// It should be more than the finalization window for the canonical chain. - max_blocks_in_chain: u64, - /// The number of blocks that can be re-orged (finalization windows) - max_reorg_depth: u64, - /// The number of unconnected blocks that we are buffering - max_unconnected_blocks: u32, - /// Number of additional block hashes to save in blockchain tree. For `BLOCKHASH` EVM opcode we - /// need last 256 block hashes. - /// - /// The total number of block hashes retained in-memory will be - /// `max(additional_canonical_block_hashes, max_reorg_depth)`, and for Ethereum that would - /// be 256. It covers both number of blocks required for reorg, and number of blocks - /// required for `BLOCKHASH` EVM opcode. - num_of_additional_canonical_block_hashes: u64, -} - -impl Default for BlockchainTreeConfig { - fn default() -> Self { - // The defaults for Ethereum mainnet - Self { - // Gasper allows reorgs of any length from 1 to 64. - max_reorg_depth: 64, - // This default is just an assumption. Has to be greater than the `max_reorg_depth`. - max_blocks_in_chain: 65, - // EVM requires that last 256 block hashes are available. - num_of_additional_canonical_block_hashes: 256, - // max unconnected blocks. - max_unconnected_blocks: 200, - } - } -} - -impl BlockchainTreeConfig { - /// Create tree configuration. - pub fn new( - max_reorg_depth: u64, - max_blocks_in_chain: u64, - num_of_additional_canonical_block_hashes: u64, - max_unconnected_blocks: u32, - ) -> Self { - assert!( - max_reorg_depth <= max_blocks_in_chain, - "Side chain size should be more than finalization window" - ); - Self { - max_blocks_in_chain, - max_reorg_depth, - num_of_additional_canonical_block_hashes, - max_unconnected_blocks, - } - } - - /// Return the maximum reorg depth. - pub const fn max_reorg_depth(&self) -> u64 { - self.max_reorg_depth - } - - /// Return the maximum number of blocks in one chain. - pub const fn max_blocks_in_chain(&self) -> u64 { - self.max_blocks_in_chain - } - - /// Return number of additional canonical block hashes that we need to retain - /// in order to have enough information for EVM execution. 
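- /// With the Ethereum mainnet defaults above this is 256, matching the `BLOCKHASH` window.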
- pub const fn num_of_additional_canonical_block_hashes(&self) -> u64 { - self.num_of_additional_canonical_block_hashes - } - - /// Return total number of canonical hashes that we need to retain in order to have enough - /// information for reorg and EVM execution. - /// - /// It is calculated as the maximum of `max_reorg_depth` (which is the number of blocks required - /// for the deepest reorg possible according to the consensus protocol) and - /// `num_of_additional_canonical_block_hashes` (which is the number of block hashes needed to - /// satisfy the `BLOCKHASH` opcode in the EVM. See [`crate::BundleStateDataRef`]). - pub fn num_of_canonical_hashes(&self) -> u64 { - self.max_reorg_depth.max(self.num_of_additional_canonical_block_hashes) - } - - /// Return max number of unconnected blocks that we are buffering - pub const fn max_unconnected_blocks(&self) -> u32 { - self.max_unconnected_blocks - } -} diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs deleted file mode 100644 index ad22417a91d7a..0000000000000 --- a/crates/blockchain-tree/src/externals.rs +++ /dev/null @@ -1,106 +0,0 @@ -//! Blockchain tree externals. - -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_consensus::{ConsensusError, FullConsensus}; -use reth_db::{static_file::BlockHashMask, tables}; -use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; -use reth_node_types::NodeTypesWithDB; -use reth_primitives::StaticFileSegment; -use reth_provider::{ - providers::ProviderNodeTypes, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, - StaticFileProviderFactory, StatsReader, -}; -use reth_storage_errors::provider::ProviderResult; -use std::{collections::BTreeMap, sync::Arc}; - -pub use reth_provider::providers::{NodeTypesForTree, TreeNodeTypes}; - -/// A container for external components. -/// -/// This is a simple container for external components used throughout the blockchain tree -/// implementation: -/// -/// - A handle to the database -/// - A handle to the consensus engine -/// - The executor factory to execute blocks with -#[derive(Debug)] -pub struct TreeExternals { - /// The provider factory, used to commit the canonical chain, or unwind it. - pub(crate) provider_factory: ProviderFactory, - /// The consensus engine. - pub(crate) consensus: Arc>, - /// The executor factory to execute blocks with. - pub(crate) executor_factory: E, -} - -impl TreeExternals { - /// Create new tree externals. - pub fn new( - provider_factory: ProviderFactory, - consensus: Arc>, - executor_factory: E, - ) -> Self { - Self { provider_factory, consensus, executor_factory } - } -} - -impl TreeExternals { - /// Fetches the latest canonical block hashes by walking backwards from the head. - /// - /// Returns the hashes sorted by increasing block numbers - pub(crate) fn fetch_latest_canonical_hashes( - &self, - num_hashes: usize, - ) -> ProviderResult> { - // Fetch the latest canonical hashes from the database - let mut hashes = self - .provider_factory - .provider()? - .tx_ref() - .cursor_read::()? - .walk_back(None)? - .take(num_hashes) - .collect::, _>>()?; - - // Fetch the same number of latest canonical hashes from the static_files and merge them - // with the database hashes. 
It is needed due to the fact that we're writing - // directly to static_files in pipeline sync, but to the database in live sync, - // which means that the latest canonical hashes in the static file might be more recent - // than in the database, and vice versa, or even some ranges of the latest - // `num_hashes` blocks may be in database, and some ranges in static_files. - let static_file_provider = self.provider_factory.static_file_provider(); - let total_headers = static_file_provider.count_entries::()? as u64; - if total_headers > 0 { - let range = - total_headers.saturating_sub(1).saturating_sub(num_hashes as u64)..total_headers; - - hashes.extend(range.clone().zip(static_file_provider.fetch_range_with_predicate( - StaticFileSegment::Headers, - range, - |cursor, number| cursor.get_one::(number.into()), - |_| true, - )?)); - } - - // We may have fetched more than `num_hashes` hashes, so we need to truncate the result to - // the requested number. - let hashes = hashes.into_iter().rev().take(num_hashes).collect(); - Ok(hashes) - } - - pub(crate) fn fetch_latest_finalized_block_number( - &self, - ) -> ProviderResult> { - self.provider_factory.provider()?.last_finalized_block_number() - } - - pub(crate) fn save_finalized_block_number( - &self, - block_number: BlockNumber, - ) -> ProviderResult<()> { - let provider_rw = self.provider_factory.provider_rw()?; - provider_rw.save_finalized_block_number(block_number)?; - provider_rw.commit()?; - Ok(()) - } -} diff --git a/crates/blockchain-tree/src/lib.rs b/crates/blockchain-tree/src/lib.rs deleted file mode 100644 index 3f501bead071a..0000000000000 --- a/crates/blockchain-tree/src/lib.rs +++ /dev/null @@ -1,59 +0,0 @@ -//! Implementation of a tree-like structure for blockchains. -//! -//! The [`BlockchainTree`] can validate, execute, and revert blocks in multiple competing -//! sidechains. This structure is used for Reth's sync mode at the tip instead of the pipeline, and -//! is the primary executor and validator of payloads sent from the consensus layer. -//! -//! Blocks and their resulting state transitions are kept in-memory until they are persisted. -//! -//! ## Feature Flags -//! -//! - `test-utils`: Export utilities for testing - -#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] - -/// Re-export of the blockchain tree API. -pub use reth_blockchain_tree_api::*; - -pub mod blockchain_tree; -pub use blockchain_tree::BlockchainTree; - -pub mod block_indices; -pub use block_indices::BlockIndices; - -pub mod chain; -pub use chain::AppendableChain; - -pub mod config; -pub use config::BlockchainTreeConfig; - -pub mod externals; -pub use externals::TreeExternals; - -pub mod shareable; -pub use shareable::ShareableBlockchainTree; - -mod bundle; -pub use bundle::{BundleStateDataRef, ExecutionData}; - -/// Buffer of not executed blocks. -pub mod block_buffer; -mod canonical_chain; - -/// Common blockchain tree metrics. -pub mod metrics; - -pub use block_buffer::BlockBuffer; - -/// Implementation of Tree traits that does nothing. 
-pub mod noop; - -mod state; - -use aquamarine as _; diff --git a/crates/blockchain-tree/src/metrics.rs b/crates/blockchain-tree/src/metrics.rs deleted file mode 100644 index 121d0a69786f0..0000000000000 --- a/crates/blockchain-tree/src/metrics.rs +++ /dev/null @@ -1,153 +0,0 @@ -use metrics::Histogram; -use reth_metrics::{ - metrics::{Counter, Gauge}, - Metrics, -}; -use std::time::{Duration, Instant}; - -/// Metrics for the blockchain tree block buffer -#[derive(Metrics)] -#[metrics(scope = "blockchain_tree.block_buffer")] -pub struct BlockBufferMetrics { - /// Total blocks in the block buffer - pub blocks: Gauge, -} - -#[derive(Debug)] -pub(crate) struct MakeCanonicalDurationsRecorder { - start: Instant, - pub(crate) actions: Vec<(MakeCanonicalAction, Duration)>, - latest: Option, - current_metrics: MakeCanonicalMetrics, -} - -impl Default for MakeCanonicalDurationsRecorder { - fn default() -> Self { - Self { - start: Instant::now(), - actions: Vec::new(), - latest: None, - current_metrics: MakeCanonicalMetrics::default(), - } - } -} - -impl MakeCanonicalDurationsRecorder { - /// Records the duration since last record, saves it for future logging and instantly reports as - /// a metric with `action` label. - pub(crate) fn record_relative(&mut self, action: MakeCanonicalAction) { - let elapsed = self.start.elapsed(); - let duration = elapsed - self.latest.unwrap_or_default(); - - self.actions.push((action, duration)); - self.current_metrics.record(action, duration); - self.latest = Some(elapsed); - } -} - -/// Metrics for the entire blockchain tree -#[derive(Metrics)] -#[metrics(scope = "blockchain_tree")] -pub struct TreeMetrics { - /// Total number of sidechains (not including the canonical chain) - pub sidechains: Gauge, - /// The highest block number in the canonical chain - pub canonical_chain_height: Gauge, - /// The number of reorgs - pub reorgs: Counter, - /// The latest reorg depth - pub latest_reorg_depth: Gauge, - /// Longest sidechain height - pub longest_sidechain_height: Gauge, - /// The number of times cached trie updates were used for insert. - pub trie_updates_insert_cached: Counter, - /// The number of times trie updates were recomputed for insert. - pub trie_updates_insert_recomputed: Counter, -} - -/// Represents actions for making a canonical chain. -#[derive(Debug, Copy, Clone)] -pub(crate) enum MakeCanonicalAction { - /// Cloning old blocks for canonicalization. - CloneOldBlocks, - /// Finding the canonical header. - FindCanonicalHeader, - /// Splitting the chain for canonicalization. - SplitChain, - /// Splitting chain forks for canonicalization. - SplitChainForks, - /// Merging all chains for canonicalization. - MergeAllChains, - /// Updating the canonical index during canonicalization. - UpdateCanonicalIndex, - /// Retrieving (cached or recomputed) state trie updates - RetrieveStateTrieUpdates, - /// Committing the canonical chain to the database. - CommitCanonicalChainToDatabase, - /// Reverting the canonical chain from the database. - RevertCanonicalChainFromDatabase, - /// Inserting an old canonical chain. - InsertOldCanonicalChain, - /// Clearing trie updates of other children chains after fork choice update. - ClearTrieUpdatesForOtherChildren, -} - -/// Canonicalization metrics -#[derive(Metrics)] -#[metrics(scope = "blockchain_tree.make_canonical")] -struct MakeCanonicalMetrics { - /// Duration of the clone old blocks action. - clone_old_blocks: Histogram, - /// Duration of the find canonical header action. 
- find_canonical_header: Histogram, - /// Duration of the split chain action. - split_chain: Histogram, - /// Duration of the split chain forks action. - split_chain_forks: Histogram, - /// Duration of the merge all chains action. - merge_all_chains: Histogram, - /// Duration of the update canonical index action. - update_canonical_index: Histogram, - /// Duration of the retrieve state trie updates action. - retrieve_state_trie_updates: Histogram, - /// Duration of the commit canonical chain to database action. - commit_canonical_chain_to_database: Histogram, - /// Duration of the revert canonical chain from database action. - revert_canonical_chain_from_database: Histogram, - /// Duration of the insert old canonical chain action. - insert_old_canonical_chain: Histogram, - /// Duration of the clear trie updates of other children chains after fork choice update - /// action. - clear_trie_updates_for_other_children: Histogram, -} - -impl MakeCanonicalMetrics { - /// Records the duration for the given action. - pub(crate) fn record(&self, action: MakeCanonicalAction, duration: Duration) { - match action { - MakeCanonicalAction::CloneOldBlocks => self.clone_old_blocks.record(duration), - MakeCanonicalAction::FindCanonicalHeader => self.find_canonical_header.record(duration), - MakeCanonicalAction::SplitChain => self.split_chain.record(duration), - MakeCanonicalAction::SplitChainForks => self.split_chain_forks.record(duration), - MakeCanonicalAction::MergeAllChains => self.merge_all_chains.record(duration), - MakeCanonicalAction::UpdateCanonicalIndex => { - self.update_canonical_index.record(duration) - } - MakeCanonicalAction::RetrieveStateTrieUpdates => { - self.retrieve_state_trie_updates.record(duration) - } - MakeCanonicalAction::CommitCanonicalChainToDatabase => { - self.commit_canonical_chain_to_database.record(duration) - } - MakeCanonicalAction::RevertCanonicalChainFromDatabase => { - self.revert_canonical_chain_from_database.record(duration) - } - MakeCanonicalAction::InsertOldCanonicalChain => { - self.insert_old_canonical_chain.record(duration) - } - MakeCanonicalAction::ClearTrieUpdatesForOtherChildren => { - self.clear_trie_updates_for_other_children.record(duration) - } - } - } -} diff --git a/crates/blockchain-tree/src/noop.rs b/crates/blockchain-tree/src/noop.rs deleted file mode 100644 index f5d2ad8c6f786..0000000000000 --- a/crates/blockchain-tree/src/noop.rs +++ /dev/null @@ -1,140 +0,0 @@ -use alloy_eips::BlockNumHash; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_blockchain_tree_api::{ - self, - error::{BlockchainTreeError, CanonicalError, InsertBlockError, ProviderError}, - BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, - InsertPayloadOk, -}; -use reth_primitives::{EthPrimitives, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; -use reth_provider::{ - BlockchainTreePendingStateProvider, CanonStateNotificationSender, CanonStateNotifications, - CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, -}; -use reth_storage_errors::provider::ProviderResult; -use std::collections::BTreeMap; - -/// A `BlockchainTree` that does nothing. -/// -/// Caution: this is only intended for testing purposes, or for wiring components together. -#[derive(Debug, Clone, Default)] -#[non_exhaustive] -pub struct NoopBlockchainTree { - /// Broadcast channel for canon state changes notifications. 
- pub canon_state_notification_sender: Option, -} - -impl NoopBlockchainTree { - /// Create a new `NoopBlockchainTree` with a canon state notification sender. - pub const fn with_canon_state_notifications( - canon_state_notification_sender: CanonStateNotificationSender, - ) -> Self { - Self { canon_state_notification_sender: Some(canon_state_notification_sender) } - } -} - -impl BlockchainTreeEngine for NoopBlockchainTree { - fn buffer_block(&self, _block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { - Ok(()) - } - - fn insert_block( - &self, - block: SealedBlockWithSenders, - _validation_kind: BlockValidationKind, - ) -> Result { - Err(InsertBlockError::tree_error( - BlockchainTreeError::BlockHashNotFoundInChain { block_hash: block.hash() }, - block.block, - )) - } - - fn finalize_block(&self, _finalized_block: BlockNumber) -> ProviderResult<()> { - Ok(()) - } - - fn connect_buffered_blocks_to_canonical_hashes_and_finalize( - &self, - _last_finalized_block: BlockNumber, - ) -> Result<(), CanonicalError> { - Ok(()) - } - - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError> { - Ok(BTreeMap::new()) - } - - fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { - Ok(()) - } - - fn make_canonical(&self, block_hash: BlockHash) -> Result { - Err(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }.into()) - } -} - -impl BlockchainTreeViewer for NoopBlockchainTree { - fn header_by_hash(&self, _hash: BlockHash) -> Option { - None - } - - fn block_by_hash(&self, _hash: BlockHash) -> Option { - None - } - - fn block_with_senders_by_hash(&self, _hash: BlockHash) -> Option { - None - } - - fn buffered_header_by_hash(&self, _block_hash: BlockHash) -> Option { - None - } - - fn is_canonical(&self, _block_hash: BlockHash) -> Result { - Ok(false) - } - - fn lowest_buffered_ancestor(&self, _hash: BlockHash) -> Option { - None - } - - fn canonical_tip(&self) -> BlockNumHash { - Default::default() - } - - fn pending_block_num_hash(&self) -> Option { - None - } - - fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { - None - } - - fn receipts_by_block_hash(&self, _block_hash: BlockHash) -> Option> { - None - } -} - -impl BlockchainTreePendingStateProvider for NoopBlockchainTree { - fn find_pending_state_provider( - &self, - _block_hash: BlockHash, - ) -> Option> { - None - } -} - -impl NodePrimitivesProvider for NoopBlockchainTree { - type Primitives = EthPrimitives; -} - -impl CanonStateSubscriptions for NoopBlockchainTree { - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - self.canon_state_notification_sender - .as_ref() - .map(|sender| sender.subscribe()) - .unwrap_or_else(|| CanonStateNotificationSender::new(1).subscribe()) - } -} diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs deleted file mode 100644 index 6cb36cfab7cc2..0000000000000 --- a/crates/blockchain-tree/src/shareable.rs +++ /dev/null @@ -1,205 +0,0 @@ -//! Wrapper around `BlockchainTree` that allows for it to be shared. 
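Before the code, a minimal sketch of the sharing pattern this file implements, with hypothetical stand-in types (the real wrapper holds a `BlockchainTree` behind `parking_lot::RwLock`; std's lock is used here to keep the sketch dependency-free). Clones of the handle are cheap and all observe the same tree.

use std::sync::{Arc, RwLock};

// Hypothetical stand-in for `BlockchainTree`, for illustration only.
struct Tree {
    canonical_tip: u64,
}

// Every clone of the handle shares the same tree behind the lock.
#[derive(Clone)]
struct ShareableTree {
    tree: Arc<RwLock<Tree>>,
}

impl ShareableTree {
    fn make_canonical(&self, tip: u64) {
        // Mutations take the write lock, like the `BlockchainTreeEngine` methods below.
        self.tree.write().unwrap().canonical_tip = tip;
    }

    fn canonical_tip(&self) -> u64 {
        // Reads take the shared read lock, like the `BlockchainTreeViewer` methods below.
        self.tree.read().unwrap().canonical_tip
    }
}

fn main() {
    let shared = ShareableTree { tree: Arc::new(RwLock::new(Tree { canonical_tip: 10 })) };
    let handle = shared.clone();
    handle.make_canonical(11);
    assert_eq!(shared.canonical_tip(), 11);
}

This is the reason the tree can hand out many read handles while writers serialize on the lock.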
diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs
deleted file mode 100644
index 6cb36cfab7cc2..0000000000000
--- a/crates/blockchain-tree/src/shareable.rs
+++ /dev/null
@@ -1,205 +0,0 @@
-//! Wrapper around `BlockchainTree` that allows for it to be shared.
-
-use crate::externals::TreeNodeTypes;
-
-use super::BlockchainTree;
-use alloy_eips::BlockNumHash;
-use alloy_primitives::{BlockHash, BlockNumber};
-use parking_lot::RwLock;
-use reth_blockchain_tree_api::{
-    error::{CanonicalError, InsertBlockError},
-    BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome,
-    InsertPayloadOk,
-};
-use reth_evm::execute::BlockExecutorProvider;
-use reth_node_types::NodeTypesWithDB;
-use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader};
-use reth_provider::{
-    providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateNotifications,
-    CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, ProviderError,
-};
-use reth_storage_errors::provider::ProviderResult;
-use std::{collections::BTreeMap, sync::Arc};
-use tracing::trace;
-
-/// Shareable blockchain tree that is behind a `RwLock`
-#[derive(Clone, Debug)]
-pub struct ShareableBlockchainTree {
-    /// `BlockchainTree`
-    pub tree: Arc>>,
-}
-
-impl ShareableBlockchainTree {
-    /// Create a new shareable database.
-    pub fn new(tree: BlockchainTree) -> Self {
-        Self { tree: Arc::new(RwLock::new(tree)) }
-    }
-}
-
-impl BlockchainTreeEngine for ShareableBlockchainTree
-where
-    N: TreeNodeTypes,
-    E: BlockExecutorProvider,
-{
-    fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> {
-        let mut tree = self.tree.write();
-        // Blockchain tree metrics shouldn't be updated here, see
-        // `BlockchainTree::update_chains_metrics` documentation.
-        tree.buffer_block(block)
-    }
-
-    fn insert_block(
-        &self,
-        block: SealedBlockWithSenders,
-        validation_kind: BlockValidationKind,
-    ) -> Result {
-        trace!(target: "blockchain_tree", hash = %block.hash(), number = block.number, parent_hash = %block.parent_hash, "Inserting block");
-        let mut tree = self.tree.write();
-        let res = tree.insert_block(block, validation_kind);
-        tree.update_chains_metrics();
-        res
-    }
-
-    fn finalize_block(&self, finalized_block: BlockNumber) -> ProviderResult<()> {
-        trace!(target: "blockchain_tree", finalized_block, "Finalizing block");
-        let mut tree = self.tree.write();
-        tree.finalize_block(finalized_block)?;
-        tree.update_chains_metrics();
-
-        Ok(())
-    }
-
-    fn connect_buffered_blocks_to_canonical_hashes_and_finalize(
-        &self,
-        last_finalized_block: BlockNumber,
-    ) -> Result<(), CanonicalError> {
-        trace!(target: "blockchain_tree", last_finalized_block, "Connecting buffered blocks to canonical hashes and finalizing the tree");
-        let mut tree = self.tree.write();
-        let res =
-            tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block);
-        tree.update_chains_metrics();
-        Ok(res?)
-    }
-
-    fn update_block_hashes_and_clear_buffered(
-        &self,
-    ) -> Result, CanonicalError> {
-        let mut tree = self.tree.write();
-        let res = tree.update_block_hashes_and_clear_buffered();
-        tree.update_chains_metrics();
-        Ok(res?)
-    }
-
-    fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> {
-        trace!(target: "blockchain_tree", "Connecting buffered blocks to canonical hashes");
-        let mut tree = self.tree.write();
-        let res = tree.connect_buffered_blocks_to_canonical_hashes();
-        tree.update_chains_metrics();
-        Ok(res?)
-    }
-
-    fn make_canonical(&self, block_hash: BlockHash) -> Result {
-        trace!(target: "blockchain_tree", %block_hash, "Making block canonical");
-        let mut tree = self.tree.write();
-        let res = tree.make_canonical(block_hash);
-        tree.update_chains_metrics();
-        res
-    }
-}
-
-impl BlockchainTreeViewer for ShareableBlockchainTree
-where
-    N: TreeNodeTypes,
-    E: BlockExecutorProvider,
-{
-    fn header_by_hash(&self, hash: BlockHash) -> Option {
-        trace!(target: "blockchain_tree", ?hash, "Returning header by hash");
-        self.tree.read().sidechain_block_by_hash(hash).map(|b| b.sealed_header().clone())
-    }
-
-    fn block_by_hash(&self, block_hash: BlockHash) -> Option {
-        trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash");
-        self.tree.read().sidechain_block_by_hash(block_hash).cloned()
-    }
-
-    fn block_with_senders_by_hash(&self, block_hash: BlockHash) -> Option {
-        trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash");
-        self.tree.read().block_with_senders_by_hash(block_hash).cloned()
-    }
-
-    fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option {
-        self.tree.read().get_buffered_block(&block_hash).map(|b| b.sealed_header().clone())
-    }
-
-    fn is_canonical(&self, hash: BlockHash) -> Result {
-        trace!(target: "blockchain_tree", ?hash, "Checking if block is canonical");
-        self.tree.read().is_block_hash_canonical(&hash)
-    }
-
-    fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option {
-        trace!(target: "blockchain_tree", ?hash, "Returning lowest buffered ancestor");
-        self.tree.read().lowest_buffered_ancestor(&hash).cloned()
-    }
-
-    fn canonical_tip(&self) -> BlockNumHash {
-        trace!(target: "blockchain_tree", "Returning canonical tip");
-        self.tree.read().block_indices().canonical_tip()
-    }
-
-    fn pending_block_num_hash(&self) -> Option {
-        trace!(target: "blockchain_tree", "Returning first pending block");
-        self.tree.read().block_indices().pending_block_num_hash()
-    }
-
-    fn pending_block(&self) -> Option {
-        trace!(target: "blockchain_tree", "Returning first pending block");
-        self.tree.read().pending_block().cloned()
-    }
-
-    fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> {
-        let tree = self.tree.read();
-        let pending_block = tree.pending_block()?.clone();
-        let receipts =
-            tree.receipts_by_block_hash(pending_block.hash())?.into_iter().cloned().collect();
-        Some((pending_block, receipts))
-    }
-
-    fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> {
-        let tree = self.tree.read();
-        Some(tree.receipts_by_block_hash(block_hash)?.into_iter().cloned().collect())
-    }
-}
-
-impl BlockchainTreePendingStateProvider for ShareableBlockchainTree
-where
-    N: TreeNodeTypes,
-    E: BlockExecutorProvider,
-{
-    fn find_pending_state_provider(
-        &self,
-        block_hash: BlockHash,
-    ) -> Option> {
-        trace!(target: "blockchain_tree", ?block_hash, "Finding pending state provider");
-        let provider = self.tree.read().post_state_data(block_hash)?;
-        Some(Box::new(provider))
-    }
-}
-
-impl NodePrimitivesProvider for ShareableBlockchainTree
-where
-    N: ProviderNodeTypes,
-    E: Send + Sync,
-{
-    type Primitives = N::Primitives;
-}
-
-impl CanonStateSubscriptions for ShareableBlockchainTree
-where
-    N: TreeNodeTypes,
-    E: Send + Sync,
-{
-    fn subscribe_to_canonical_state(&self) -> CanonStateNotifications {
-        trace!(target: "blockchain_tree", "Registered subscriber for canonical state");
-        self.tree.read().subscribe_canon_state()
-    }
-}
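
Note: ShareableBlockchainTree, also removed here, wrapped the tree in Arc<RwLock<_>> so a single tree instance could be cloned cheaply and mutated through many handles; each write path took the exclusive lock, applied the mutation, refreshed metrics, and returned. A minimal sketch of the same wrapper shape (a generic illustration, not the reth type):

use parking_lot::RwLock;
use std::sync::Arc;

/// Cheaply cloneable handle to shared mutable state, mirroring the
/// Arc<RwLock<_>> layout of the removed ShareableBlockchainTree.
#[derive(Debug, Default)]
struct Shared<T>(Arc<RwLock<T>>);

impl<T> Clone for Shared<T> {
    // Clone the Arc, not the inner value, so all handles share one state.
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}

impl<T> Shared<T> {
    /// Writers take the exclusive lock; keeping the closure small keeps the
    /// critical section short.
    fn update(&self, f: impl FnOnce(&mut T)) {
        f(&mut self.0.write())
    }

    /// Readers take the shared lock and may run concurrently.
    fn read<R>(&self, f: impl FnOnce(&T) -> R) -> R {
        f(&self.0.read())
    }
}
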
diff --git a/crates/blockchain-tree/src/state.rs b/crates/blockchain-tree/src/state.rs
deleted file mode 100644
index 762ced6bf486b..0000000000000
--- a/crates/blockchain-tree/src/state.rs
+++ /dev/null
@@ -1,430 +0,0 @@
-//! Blockchain tree state.
-
-use crate::{AppendableChain, BlockBuffer, BlockIndices};
-use alloy_primitives::{BlockHash, BlockNumber};
-use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders};
-use std::collections::{BTreeMap, HashMap};
-
-/// Container to hold the state of the blockchain tree.
-#[derive(Debug)]
-pub(crate) struct TreeState {
-    /// Keeps track of new unique identifiers for chains
-    block_chain_id_generator: u64,
-    /// The tracked chains and their current data.
-    pub(crate) chains: HashMap,
-    /// Indices to block and their connection to the canonical chain.
-    ///
-    /// This gets modified by the tree itself and is read from engine API/RPC to access the pending
-    /// block for example.
-    pub(crate) block_indices: BlockIndices,
-    /// Unconnected block buffer.
-    pub(crate) buffered_blocks: BlockBuffer,
-}
-
-impl TreeState {
-    /// Initializes the tree state with the given last finalized block number and last canonical
-    /// hashes.
-    pub(crate) fn new(
-        last_finalized_block_number: BlockNumber,
-        last_canonical_hashes: impl IntoIterator,
-        buffer_limit: u32,
-    ) -> Self {
-        Self {
-            block_chain_id_generator: 0,
-            chains: Default::default(),
-            block_indices: BlockIndices::new(
-                last_finalized_block_number,
-                BTreeMap::from_iter(last_canonical_hashes),
-            ),
-            buffered_blocks: BlockBuffer::new(buffer_limit),
-        }
-    }
-
-    /// Issues a new unique identifier for a new sidechain.
-    #[inline]
-    fn next_id(&mut self) -> SidechainId {
-        let id = self.block_chain_id_generator;
-        self.block_chain_id_generator += 1;
-        SidechainId(id)
-    }
-
-    /// Expose internal indices of the `BlockchainTree`.
-    #[inline]
-    pub(crate) const fn block_indices(&self) -> &BlockIndices {
-        &self.block_indices
-    }
-
-    /// Returns the block with matching hash from any side-chain.
-    ///
-    /// Caution: This will not return blocks from the canonical chain.
-    #[inline]
-    pub(crate) fn block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> {
-        self.block_with_senders_by_hash(block_hash).map(|block| &block.block)
-    }
-
-    /// Returns the block with matching hash from any side-chain.
-    ///
-    /// Caution: This will not return blocks from the canonical chain.
-    #[inline]
-    pub(crate) fn block_with_senders_by_hash(
-        &self,
-        block_hash: BlockHash,
-    ) -> Option<&SealedBlockWithSenders> {
-        let id = self.block_indices.get_side_chain_id(&block_hash)?;
-        let chain = self.chains.get(&id)?;
-        chain.block_with_senders(block_hash)
-    }
-
-    /// Returns the block's receipts with matching hash from any side-chain.
-    ///
-    /// Caution: This will not return blocks from the canonical chain.
-    pub(crate) fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> {
-        let id = self.block_indices.get_side_chain_id(&block_hash)?;
-        let chain = self.chains.get(&id)?;
-        chain.receipts_by_block_hash(block_hash)
-    }
-
-    /// Insert a chain into the tree.
-    ///
-    /// Inserts a chain into the tree and builds the block indices.
-    pub(crate) fn insert_chain(&mut self, chain: AppendableChain) -> Option {
-        if chain.is_empty() {
-            return None
-        }
-        let chain_id = self.next_id();
-
-        self.block_indices.insert_chain(chain_id, &chain);
-        // add chain_id -> chain index
-        self.chains.insert(chain_id, chain);
-        Some(chain_id)
-    }
-
-    /// Checks the block buffer for the given block.
-    pub(crate) fn get_buffered_block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> {
-        self.buffered_blocks.block(hash)
-    }
-
-    /// Gets the lowest ancestor for the given block in the block buffer.
-    pub(crate) fn lowest_buffered_ancestor(
-        &self,
-        hash: &BlockHash,
-    ) -> Option<&SealedBlockWithSenders> {
-        self.buffered_blocks.lowest_ancestor(hash)
-    }
-}
-
-/// The ID of a sidechain internally in a [`BlockchainTree`][super::BlockchainTree].
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)]
-pub(crate) struct SidechainId(u64);
-
-impl From for u64 {
-    fn from(value: SidechainId) -> Self {
-        value.0
-    }
-}
-
-#[cfg(test)]
-impl From for SidechainId {
-    fn from(value: u64) -> Self {
-        Self(value)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::canonical_chain::CanonicalChain;
-    use alloy_primitives::B256;
-    use reth_execution_types::Chain;
-    use reth_provider::ExecutionOutcome;
-
-    #[test]
-    fn test_tree_state_initialization() {
-        // Set up some dummy data for initialization
-        let last_finalized_block_number = 10u64;
-        let last_canonical_hashes = vec![(9u64, B256::random()), (10u64, B256::random())];
-        let buffer_limit = 5;
-
-        // Initialize the tree state
-        let tree_state = TreeState::new(
-            last_finalized_block_number,
-            last_canonical_hashes.clone(),
-            buffer_limit,
-        );
-
-        // Verify the tree state after initialization
-        assert_eq!(tree_state.block_chain_id_generator, 0);
-        assert_eq!(tree_state.block_indices().last_finalized_block(), last_finalized_block_number);
-        assert_eq!(
-            *tree_state.block_indices.canonical_chain().inner(),
-            *CanonicalChain::new(last_canonical_hashes.into_iter().collect()).inner()
-        );
-        assert!(tree_state.chains.is_empty());
-        assert!(tree_state.buffered_blocks.lru.is_empty());
-    }
-
-    #[test]
-    fn test_tree_state_next_id() {
-        // Initialize the tree state
-        let mut tree_state = TreeState::new(0, vec![], 5);
-
-        // Generate a few sidechain IDs
-        let first_id = tree_state.next_id();
-        let second_id = tree_state.next_id();
-
-        // Verify the generated sidechain IDs and the updated generator state
-        assert_eq!(first_id, SidechainId(0));
-        assert_eq!(second_id, SidechainId(1));
-        assert_eq!(tree_state.block_chain_id_generator, 2);
-    }
-
-    #[test]
-    fn test_tree_state_insert_chain() {
-        // Initialize tree state
-        let mut tree_state = TreeState::new(0, vec![], 5);
-
-        // Create a chain with two blocks
-        let block: SealedBlockWithSenders = Default::default();
-        let block1_hash = B256::random();
-        let block2_hash = B256::random();
-
-        let mut block1 = block.clone();
-        let mut block2 = block;
-
-        block1.block.set_hash(block1_hash);
-        block1.block.set_block_number(9);
-        block2.block.set_hash(block2_hash);
-        block2.block.set_block_number(10);
-
-        let chain = AppendableChain::new(Chain::new(
-            [block1, block2],
-            Default::default(),
-            Default::default(),
-        ));
-
-        // Insert the chain into the TreeState
-        let chain_id = tree_state.insert_chain(chain).unwrap();
-
-        // Verify the chain ID and that it was added to the chains collection
-        assert_eq!(chain_id, SidechainId(0));
-        assert!(tree_state.chains.contains_key(&chain_id));
-
-        // Ensure that the block indices are updated
-        assert_eq!(
-            tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(),
-            SidechainId(0)
-        );
-        assert_eq!(
-            tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(),
-            SidechainId(0)
-        );
-
-        // Ensure that the block chain ID generator was updated
-        assert_eq!(tree_state.block_chain_id_generator, 1);
-
-        // Create an empty chain
-        let chain_empty = AppendableChain::new(Chain::default());
-
-        // Insert the empty chain into the tree state
-        let chain_id = tree_state.insert_chain(chain_empty);
-
-        // Ensure that the empty chain was not inserted
-        assert!(chain_id.is_none());
-
-        // Nothing should have changed and no new chain should have been added
-        assert!(tree_state.chains.contains_key(&SidechainId(0)));
-        assert!(!tree_state.chains.contains_key(&SidechainId(1)));
-        assert_eq!(
-            tree_state.block_indices.get_side_chain_id(&block1_hash).unwrap(),
-            SidechainId(0)
-        );
-        assert_eq!(
-            tree_state.block_indices.get_side_chain_id(&block2_hash).unwrap(),
-            SidechainId(0)
-        );
-        assert_eq!(tree_state.block_chain_id_generator, 1);
-    }
-
-    #[test]
-    fn test_block_by_hash_side_chain() {
-        // Initialize a tree state with some dummy data
-        let mut tree_state = TreeState::new(0, vec![], 5);
-
-        // Create two side-chain blocks with random hashes
-        let block1_hash = B256::random();
-        let block2_hash = B256::random();
-
-        let mut block1: SealedBlockWithSenders = Default::default();
-        let mut block2: SealedBlockWithSenders = Default::default();
-
-        block1.block.set_hash(block1_hash);
-        block1.block.set_block_number(9);
-        block2.block.set_hash(block2_hash);
-        block2.block.set_block_number(10);
-
-        // Create an chain with these blocks
-        let chain = AppendableChain::new(Chain::new(
-            vec![block1.clone(), block2.clone()],
-            Default::default(),
-            Default::default(),
-        ));
-
-        // Insert the side chain into the TreeState
-        tree_state.insert_chain(chain).unwrap();
-
-        // Retrieve the blocks by their hashes
-        let retrieved_block1 = tree_state.block_by_hash(block1_hash);
-        assert_eq!(*retrieved_block1.unwrap(), block1.block);
-
-        let retrieved_block2 = tree_state.block_by_hash(block2_hash);
-        assert_eq!(*retrieved_block2.unwrap(), block2.block);
-
-        // Test block_by_hash with a random hash that doesn't exist
-        let non_existent_hash = B256::random();
-        let result = tree_state.block_by_hash(non_existent_hash);
-
-        // Ensure that no block is found
-        assert!(result.is_none());
-    }
-
-    #[test]
-    fn test_block_with_senders_by_hash() {
-        // Initialize a tree state with some dummy data
-        let mut tree_state = TreeState::new(0, vec![], 5);
-
-        // Create two side-chain blocks with random hashes
-        let block1_hash = B256::random();
-        let block2_hash = B256::random();
-
-        let mut block1: SealedBlockWithSenders = Default::default();
-        let mut block2: SealedBlockWithSenders = Default::default();
-
-        block1.block.set_hash(block1_hash);
-        block1.block.set_block_number(9);
-        block2.block.set_hash(block2_hash);
-        block2.block.set_block_number(10);
-
-        // Create a chain with these blocks
-        let chain = AppendableChain::new(Chain::new(
-            vec![block1.clone(), block2.clone()],
-            Default::default(),
-            Default::default(),
-        ));
-
-        // Insert the side chain into the TreeState
-        tree_state.insert_chain(chain).unwrap();
-
-        // Test to retrieve the blocks with senders by their hashes
-        let retrieved_block1 = tree_state.block_with_senders_by_hash(block1_hash);
-        assert_eq!(*retrieved_block1.unwrap(), block1);
-
-        let retrieved_block2 = tree_state.block_with_senders_by_hash(block2_hash);
-        assert_eq!(*retrieved_block2.unwrap(), block2);
-
-        // Test block_with_senders_by_hash with a random hash that doesn't exist
-        let non_existent_hash = B256::random();
-        let result = tree_state.block_with_senders_by_hash(non_existent_hash);
-
-        // Ensure that no block is found
-        assert!(result.is_none());
-    }
-
-    #[test]
-    fn test_get_buffered_block() {
-        // Initialize a tree state with some dummy data
-        let mut tree_state = TreeState::new(0, vec![], 5);
-
-        // Create a block with a random hash and add it to the buffer
-        let block_hash = B256::random();
-        let mut block: SealedBlockWithSenders = Default::default();
-        block.block.set_hash(block_hash);
-
-        // Add the block to the buffered blocks in the TreeState
-        tree_state.buffered_blocks.insert_block(block.clone());
-
-        // Test get_buffered_block to retrieve the block by its hash
-        let retrieved_block = tree_state.get_buffered_block(&block_hash);
-        assert_eq!(*retrieved_block.unwrap(), block);
-
-        // Test get_buffered_block with a non-existent hash
-        let non_existent_hash = B256::random();
-        let result = tree_state.get_buffered_block(&non_existent_hash);
-
-        // Ensure that no block is found
-        assert!(result.is_none());
-    }
-
-    #[test]
-    fn test_lowest_buffered_ancestor() {
-        // Initialize a tree state with some dummy data
-        let mut tree_state = TreeState::new(0, vec![], 5);
-
-        // Create blocks with random hashes and set up parent-child relationships
-        let ancestor_hash = B256::random();
-        let descendant_hash = B256::random();
-
-        let mut ancestor_block: SealedBlockWithSenders = Default::default();
-        let mut descendant_block: SealedBlockWithSenders = Default::default();
-
-        ancestor_block.block.set_hash(ancestor_hash);
-        descendant_block.block.set_hash(descendant_hash);
-        descendant_block.block.set_parent_hash(ancestor_hash);
-
-        // Insert the blocks into the buffer
-        tree_state.buffered_blocks.insert_block(ancestor_block.clone());
-        tree_state.buffered_blocks.insert_block(descendant_block.clone());
-
-        // Test lowest_buffered_ancestor for the descendant block
-        let lowest_ancestor = tree_state.lowest_buffered_ancestor(&descendant_hash);
-        assert!(lowest_ancestor.is_some());
-        assert_eq!(lowest_ancestor.unwrap().block.hash(), ancestor_hash);
-
-        // Test lowest_buffered_ancestor with a non-existent hash
-        let non_existent_hash = B256::random();
-        let result = tree_state.lowest_buffered_ancestor(&non_existent_hash);
-
-        // Ensure that no ancestor is found
-        assert!(result.is_none());
-    }
-
-    #[test]
-    fn test_receipts_by_block_hash() {
-        // Initialize a tree state with some dummy data
-        let mut tree_state = TreeState::new(0, vec![], 5);
-
-        // Create a block with a random hash and receipts
-        let block_hash = B256::random();
-        let receipt1 = Receipt::default();
-        let receipt2 = Receipt::default();
-
-        let mut block: SealedBlockWithSenders = Default::default();
-        block.block.set_hash(block_hash);
-
-        let receipts = vec![receipt1, receipt2];
-
-        // Create a chain with the block and its receipts
-        let chain = AppendableChain::new(Chain::new(
-            vec![block.clone()],
-            ExecutionOutcome { receipts: receipts.clone().into(), ..Default::default() },
-            Default::default(),
-        ));
-
-        // Insert the chain into the TreeState
-        tree_state.insert_chain(chain).unwrap();
-
-        // Test receipts_by_block_hash for the inserted block
-        let retrieved_receipts = tree_state.receipts_by_block_hash(block_hash);
-        assert!(retrieved_receipts.is_some());
-
-        // Check if the correct receipts are returned
-        let receipts_ref: Vec<&Receipt> = receipts.iter().collect();
-        assert_eq!(retrieved_receipts.unwrap(), receipts_ref);
-
-        // Test receipts_by_block_hash with a non-existent block hash
-        let non_existent_hash = B256::random();
-        let result = tree_state.receipts_by_block_hash(non_existent_hash);
-
-        // Ensure that no receipts are found
-        assert!(result.is_none());
-    }
-}
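
Note: the deleted state.rs shows the indexing scheme the legacy tree relied on: sidechain ids come from a monotonically increasing counter (so an id is never reused, even after a chain is dropped), and block lookups go hash -> sidechain id -> chain. A compact sketch of that two-step lookup, using stand-in types rather than reth's:

use std::collections::HashMap;

type BlockHash = [u8; 32];

struct Tree<Chain> {
    next_id: u64,
    chains: HashMap<u64, Chain>,
    block_to_chain: HashMap<BlockHash, u64>,
}

impl<Chain> Tree<Chain> {
    /// Ids are issued from a monotonic counter, so they stay unique for the
    /// lifetime of the tree even as chains come and go.
    fn insert_chain(&mut self, blocks: &[BlockHash], chain: Chain) -> u64 {
        let id = self.next_id;
        self.next_id += 1;
        for hash in blocks {
            self.block_to_chain.insert(*hash, id);
        }
        self.chains.insert(id, chain);
        id
    }

    /// Lookup is hash -> id via the index, then id -> chain via the map.
    fn chain_by_block(&self, hash: &BlockHash) -> Option<&Chain> {
        self.chains.get(self.block_to_chain.get(hash)?)
    }
}
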
Seitz Date: Wed, 8 Jan 2025 13:52:17 +0100 Subject: [PATCH 022/113] chore!: rename blockchainprovider2 (#13727) --- .../src/commands/debug_cmd/build_block.rs | 4 +- bin/reth/src/main.rs | 4 +- crates/e2e-test-utils/src/lib.rs | 22 ++--- crates/engine/local/src/service.rs | 4 +- crates/engine/service/src/service.rs | 8 +- crates/ethereum/node/tests/e2e/dev.rs | 4 +- crates/ethereum/node/tests/it/builder.rs | 4 +- crates/exex/exex/src/backfill/job.rs | 6 +- crates/exex/exex/src/backfill/stream.rs | 6 +- crates/exex/exex/src/manager.rs | 12 +-- crates/exex/exex/src/notifications.rs | 10 +-- crates/exex/test-utils/src/lib.rs | 6 +- crates/node/builder/src/builder/mod.rs | 4 +- crates/node/builder/src/launch/engine.rs | 6 +- crates/optimism/bin/src/main.rs | 4 +- crates/optimism/node/src/utils.rs | 4 +- crates/optimism/node/tests/it/builder.rs | 4 +- crates/optimism/node/tests/it/priority.rs | 4 +- crates/prune/prune/src/segments/mod.rs | 10 +-- .../src/providers/blockchain_provider.rs | 86 +++++++++---------- .../provider/src/providers/consistent.rs | 8 +- crates/storage/provider/src/providers/mod.rs | 2 +- examples/rpc-db/src/main.rs | 4 +- 23 files changed, 112 insertions(+), 114 deletions(-) diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index f4ab8150eb2d3..7507e8bf11bc3 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -29,7 +29,7 @@ use reth_primitives::{ TransactionSigned, }; use reth_provider::{ - providers::{BlockchainProvider2, ProviderNodeTypes}, + providers::{BlockchainProvider, ProviderNodeTypes}, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, StageCheckpointReader, StateProviderFactory, }; @@ -132,7 +132,7 @@ impl> Command { .lookup_best_block(provider_factory.clone()) .wrap_err("the head block is missing")?; - let blockchain_db = BlockchainProvider2::new(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; let blob_store = InMemoryBlobStore::default(); let validator = diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs index f1f0a7d68cfba..5daaa93ee3bf1 100644 --- a/bin/reth/src/main.rs +++ b/bin/reth/src/main.rs @@ -13,7 +13,7 @@ use reth_node_builder::{ EngineNodeLauncher, }; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use reth_tracing::tracing::warn; use tracing::info; @@ -79,7 +79,7 @@ fn main() { .with_memory_block_buffer_target(engine_args.memory_block_buffer_target) .with_state_root_task(engine_args.state_root_task_enabled); let handle = builder - .with_types_and_provider::>() + .with_types_and_provider::>() .with_components(EthereumNode::components()) .with_add_ons(EthereumAddOns::default()) .launch_with_fn(|builder| { diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 8378cbbd73108..091e871844bd0 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -13,7 +13,7 @@ use reth_node_builder::{ PayloadTypes, }; use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; -use reth_provider::providers::{BlockchainProvider2, NodeTypesForProvider, NodeTypesForTree}; +use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider, NodeTypesForTree}; use reth_rpc_server_types::RpcModuleSelection; use reth_tasks::TaskManager; use std::sync::Arc; @@ -114,24 +114,24 
@@ pub async fn setup_engine( is_dev: bool, attributes_generator: impl Fn(u64) -> <::Engine as PayloadTypes>::PayloadBuilderAttributes + Copy + 'static, ) -> eyre::Result<( - Vec>>>, + Vec>>>, TaskManager, Wallet, )> where N: Default - + Node>>> + + Node>>> + NodeTypesWithEngine + NodeTypesForProvider, N::ComponentsBuilder: NodeComponentsBuilder< - TmpNodeAdapter>>, + TmpNodeAdapter>>, Components: NodeComponents< - TmpNodeAdapter>>, + TmpNodeAdapter>>, Network: PeersHandleProvider, >, >, - N::AddOns: RethRpcAddOns>>> - + EngineValidatorAddOn>>>, + N::AddOns: RethRpcAddOns>>> + + EngineValidatorAddOn>>>, LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Engine as PayloadTypes>::PayloadAttributes, >, @@ -164,7 +164,7 @@ where let node = N::default(); let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) .testing_node(exec.clone()) - .with_types_and_provider::>() + .with_types_and_provider::>() .with_components(node.components_builder()) .with_add_ons(node.add_ons()) .launch_with_fn(|builder| { @@ -204,11 +204,11 @@ where /// Testing database pub type TmpDB = Arc>; -type TmpNodeAdapter>> = +type TmpNodeAdapter>> = FullNodeTypesAdapter; /// Type alias for a `NodeAdapter` -pub type Adapter>> = NodeAdapter< +pub type Adapter>> = NodeAdapter< TmpNodeAdapter, <>>::ComponentsBuilder as NodeComponentsBuilder< TmpNodeAdapter, @@ -216,5 +216,5 @@ pub type Adapter; /// Type alias for a type of `NodeHelper` -pub type NodeHelperType>> = +pub type NodeHelperType>> = NodeTestContext, >>::AddOns>; diff --git a/crates/engine/local/src/service.rs b/crates/engine/local/src/service.rs index 1b2888cee60f3..77b61c8221a9a 100644 --- a/crates/engine/local/src/service.rs +++ b/crates/engine/local/src/service.rs @@ -34,7 +34,7 @@ use reth_node_types::BlockTy; use reth_payload_builder::PayloadBuilderHandle; use reth_payload_primitives::{PayloadAttributesBuilder, PayloadTypes}; use reth_provider::{ - providers::{BlockchainProvider2, EngineNodeTypes}, + providers::{BlockchainProvider, EngineNodeTypes}, ChainSpecProvider, ProviderFactory, }; use reth_prune::PrunerWithFactory; @@ -69,7 +69,7 @@ where consensus: Arc>, executor_factory: impl BlockExecutorProvider, provider: ProviderFactory, - blockchain_db: BlockchainProvider2, + blockchain_db: BlockchainProvider, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, payload_validator: V, diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 9ce69220be80a..b099e56ae07bf 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -20,7 +20,7 @@ use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine} use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::EthPrimitives; use reth_provider::{ - providers::{BlockchainProvider2, EngineNodeTypes}, + providers::{BlockchainProvider, EngineNodeTypes}, ProviderFactory, }; use reth_prune::PrunerWithFactory; @@ -79,7 +79,7 @@ where pipeline: Pipeline, pipeline_task_spawner: Box, provider: ProviderFactory, - blockchain_db: BlockchainProvider2, + blockchain_db: BlockchainProvider, pruner: PrunerWithFactory>, payload_builder: PayloadBuilderHandle, payload_validator: V, @@ -162,7 +162,7 @@ mod tests { use reth_network_p2p::test_utils::TestFullBlockClient; use reth_primitives::SealedHeader; use reth_provider::{ - providers::BlockchainProvider2, test_utils::create_test_provider_factory_with_chain_spec, + providers::BlockchainProvider, 
test_utils::create_test_provider_factory_with_chain_spec, }; use reth_prune::Pruner; use reth_tasks::TokioTaskExecutor; @@ -192,7 +192,7 @@ mod tests { let executor_factory = EthExecutorProvider::ethereum(chain_spec.clone()); let blockchain_db = - BlockchainProvider2::with_latest(provider_factory.clone(), SealedHeader::default()) + BlockchainProvider::with_latest(provider_factory.clone(), SealedHeader::default()) .unwrap(); let engine_payload_validator = EthereumEngineValidator::new(chain_spec.clone()); let (_tx, rx) = watch::channel(FinishedExExHeight::NoExExs); diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index 325575998c26e..cb8eb1556a4c3 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -9,7 +9,7 @@ use reth_node_builder::{ }; use reth_node_core::args::DevArgs; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; -use reth_provider::{providers::BlockchainProvider2, CanonStateSubscriptions}; +use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; use reth_rpc_eth_api::helpers::EthTransactions; use reth_tasks::TaskManager; use std::sync::Arc; @@ -25,7 +25,7 @@ async fn can_run_dev_node() -> eyre::Result<()> { .with_dev(DevArgs { dev: true, ..Default::default() }); let NodeHandle { node, .. } = NodeBuilder::new(node_config.clone()) .testing_node(exec.clone()) - .with_types_and_provider::>() + .with_types_and_provider::>() .with_components(EthereumNode::components()) .with_add_ons(EthereumAddOns::default()) .launch_with_fn(|builder| { diff --git a/crates/ethereum/node/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs index 218839fbe0190..e3d78182ed527 100644 --- a/crates/ethereum/node/tests/it/builder.rs +++ b/crates/ethereum/node/tests/it/builder.rs @@ -9,7 +9,7 @@ use reth_db::{ use reth_node_api::NodeTypesWithDBAdapter; use reth_node_builder::{EngineNodeLauncher, FullNodeComponents, NodeBuilder, NodeConfig}; use reth_node_ethereum::node::{EthereumAddOns, EthereumNode}; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use reth_tasks::TaskManager; #[test] @@ -50,7 +50,7 @@ async fn test_eth_launcher() { let _builder = NodeBuilder::new(config) .with_database(db) - .with_types_and_provider::>>, >>() .with_components(EthereumNode::components()) diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index fca2cb9a2ef71..161253d2b18c3 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -235,7 +235,7 @@ mod tests { use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives_traits::crypto::secp256k1::public_key_to_address; use reth_provider::{ - providers::BlockchainProvider2, test_utils::create_test_provider_factory_with_chain_spec, + providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, }; use reth_testing_utils::generators; use secp256k1::Keypair; @@ -253,7 +253,7 @@ mod tests { let executor = EthExecutorProvider::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; - let blockchain_db = BlockchainProvider2::new(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; let blocks_and_execution_outputs = blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?; @@ -289,7 +289,7 @@ mod tests { let executor = 
EthExecutorProvider::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; - let blockchain_db = BlockchainProvider2::new(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; let blocks_and_execution_outcomes = blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?; diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index 6111ae8fe4f93..2fef2dd57d14a 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -246,7 +246,7 @@ mod tests { use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives_traits::crypto::secp256k1::public_key_to_address; use reth_provider::{ - providers::BlockchainProvider2, test_utils::create_test_provider_factory_with_chain_spec, + providers::BlockchainProvider, test_utils::create_test_provider_factory_with_chain_spec, }; use reth_stages_api::ExecutionStageThresholds; use reth_testing_utils::generators; @@ -265,7 +265,7 @@ mod tests { let executor = EthExecutorProvider::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; - let blockchain_db = BlockchainProvider2::new(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; // Create first 2 blocks let blocks_and_execution_outcomes = @@ -303,7 +303,7 @@ mod tests { let executor = EthExecutorProvider::ethereum(chain_spec.clone()); let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); init_genesis(&provider_factory)?; - let blockchain_db = BlockchainProvider2::new(provider_factory.clone())?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; // Create first 2 blocks let (blocks, execution_outcome) = diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index 32944bd2805f2..165ae8b7b7d69 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -666,7 +666,7 @@ mod tests { use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::SealedBlockWithSenders; use reth_provider::{ - providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockReader, + providers::BlockchainProvider, test_utils::create_test_provider_factory, BlockReader, BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, TransactionVariant, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; @@ -1098,7 +1098,7 @@ mod tests { async fn exex_handle_new() { let provider_factory = create_test_provider_factory(); init_genesis(&provider_factory).unwrap(); - let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let provider = BlockchainProvider::new(provider_factory).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); @@ -1153,7 +1153,7 @@ mod tests { async fn test_notification_if_finished_height_gt_chain_tip() { let provider_factory = create_test_provider_factory(); init_genesis(&provider_factory).unwrap(); - let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let provider = BlockchainProvider::new(provider_factory).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); @@ -1203,7 +1203,7 @@ mod tests { async fn test_sends_chain_reorged_notification() { let 
provider_factory = create_test_provider_factory(); init_genesis(&provider_factory).unwrap(); - let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let provider = BlockchainProvider::new(provider_factory).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); @@ -1246,7 +1246,7 @@ mod tests { async fn test_sends_chain_reverted_notification() { let provider_factory = create_test_provider_factory(); init_genesis(&provider_factory).unwrap(); - let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let provider = BlockchainProvider::new(provider_factory).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); @@ -1306,7 +1306,7 @@ mod tests { provider_rw.insert_block(block.clone(), StorageLocation::Database).unwrap(); provider_rw.commit().unwrap(); - let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let provider = BlockchainProvider::new(provider_factory).unwrap(); let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 05892e2f90d57..0d361de300935 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -446,7 +446,7 @@ mod tests { use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::{Block, BlockExt}; use reth_provider::{ - providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockWriter, + providers::BlockchainProvider, test_utils::create_test_provider_factory, BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, }; use reth_testing_utils::generators::{self, random_block, BlockParams}; @@ -465,7 +465,7 @@ mod tests { .block(genesis_hash.into())? .ok_or_else(|| eyre::eyre!("genesis block not found"))?; - let provider = BlockchainProvider2::new(provider_factory.clone())?; + let provider = BlockchainProvider::new(provider_factory.clone())?; let node_head_block = random_block( &mut rng, @@ -547,7 +547,7 @@ mod tests { .block(genesis_hash.into())? .ok_or_else(|| eyre::eyre!("genesis block not found"))?; - let provider = BlockchainProvider2::new(provider_factory)?; + let provider = BlockchainProvider::new(provider_factory)?; let node_head = Head { number: genesis_block.number, hash: genesis_hash, ..Default::default() }; @@ -604,7 +604,7 @@ mod tests { .block(genesis_hash.into())? .ok_or_else(|| eyre::eyre!("genesis block not found"))?; - let provider = BlockchainProvider2::new(provider_factory)?; + let provider = BlockchainProvider::new(provider_factory)?; let node_head_block = random_block( &mut rng, @@ -704,7 +704,7 @@ mod tests { .block(genesis_hash.into())? 
.ok_or_else(|| eyre::eyre!("genesis block not found"))?; - let provider = BlockchainProvider2::new(provider_factory)?; + let provider = BlockchainProvider::new(provider_factory)?; let exex_head_block = random_block( &mut rng, diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 471b0c5b26fee..bbb8c6710edc2 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -50,7 +50,7 @@ use reth_provider::{providers::StaticFileProvider, BlockReader, EthStorage, Prov use reth_tasks::TaskManager; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use tempfile::TempDir; use thiserror::Error; use tokio::sync::mpsc::{Sender, UnboundedReceiver}; @@ -174,7 +174,7 @@ pub type Adapter = NodeAdapter< FullNodeTypesAdapter< TestNode, TmpDB, - BlockchainProvider2>, + BlockchainProvider>, >, >>::ComponentsBuilder as NodeComponentsBuilder>>::Components, >; @@ -271,7 +271,7 @@ pub async fn test_exex_context_with_chain_spec( ); let genesis_hash = init_genesis(&provider_factory)?; - let provider = BlockchainProvider2::new(provider_factory.clone())?; + let provider = BlockchainProvider::new(provider_factory.clone())?; let network_manager = NetworkManager::new( NetworkConfigBuilder::new(SecretKey::new(&mut rand::thread_rng())) diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index a30797573011a..82e81209a057e 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -33,7 +33,7 @@ use reth_node_core::{ primitives::Head, }; use reth_provider::{ - providers::{BlockchainProvider2, NodeTypesForProvider, NodeTypesForTree}, + providers::{BlockchainProvider, NodeTypesForProvider, NodeTypesForTree}, ChainSpecProvider, FullProvider, }; use reth_tasks::TaskExecutor; @@ -50,7 +50,7 @@ pub use states::*; /// The adapter type for a reth node with the builtin provider type // Note: we need to hardcode this because custom components might depend on it in associated types. pub type RethFullAdapter = - FullNodeTypesAdapter>>; + FullNodeTypesAdapter>>; #[allow(clippy::doc_markdown)] #[cfg_attr(doc, aquamarine::aquamarine)] diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 6a6cab80c26b4..31968197d8185 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -29,7 +29,7 @@ use reth_node_core::{ }; use reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_primitives::EthereumHardforks; -use reth_provider::providers::{BlockchainProvider2, NodeTypesForProvider}; +use reth_provider::providers::{BlockchainProvider, NodeTypesForProvider}; use reth_tasks::TaskExecutor; use reth_tokio_util::EventSender; use reth_tracing::tracing::{debug, error, info}; @@ -75,7 +75,7 @@ where T: FullNodeTypes< Types = Types, DB = DB, - Provider = BlockchainProvider2>, + Provider = BlockchainProvider>, >, CB: NodeComponentsBuilder, AO: RethRpcAddOns> @@ -127,7 +127,7 @@ where // passing FullNodeTypes as type parameter here so that we can build // later the components. .with_blockchain_db::(move |provider_factory| { - Ok(BlockchainProvider2::new(provider_factory)?) + Ok(BlockchainProvider::new(provider_factory)?) })? 
.with_components(components_builder, on_component_initialized).await?; diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index db4fd9ec01f26..55eb923470c4e 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -6,7 +6,7 @@ use clap::Parser; use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher, Node}; use reth_optimism_cli::{chainspec::OpChainSpecParser, Cli}; use reth_optimism_node::{args::RollupArgs, OpNode}; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use tracing as _; @@ -29,7 +29,7 @@ fn main() { let op_node = OpNode::new(rollup_args.clone()); let handle = builder - .with_types_and_provider::>() + .with_types_and_provider::>() .with_components(op_node.components()) .with_add_ons(op_node.add_ons()) .launch_with_fn(|builder| { diff --git a/crates/optimism/node/src/utils.rs b/crates/optimism/node/src/utils.rs index c7482288f3743..95875a7673669 100644 --- a/crates/optimism/node/src/utils.rs +++ b/crates/optimism/node/src/utils.rs @@ -8,14 +8,14 @@ use reth_e2e_test_utils::{ use reth_node_api::NodeTypesWithDBAdapter; use reth_optimism_chainspec::OpChainSpecBuilder; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use reth_tasks::TaskManager; use std::sync::Arc; use tokio::sync::Mutex; /// Optimism Node Helper type pub(crate) type OpNode = - NodeHelperType>>; + NodeHelperType>>; /// Creates the initial setup with `num_nodes` of the node config, started and connected. pub async fn setup(num_nodes: usize) -> eyre::Result<(Vec, TaskManager, Wallet)> { diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index fc0016fbcaf7f..eba2aed422dc0 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -5,7 +5,7 @@ use reth_node_api::{FullNodeComponents, NodeTypesWithDBAdapter}; use reth_node_builder::{Node, NodeBuilder, NodeConfig}; use reth_optimism_chainspec::BASE_MAINNET; use reth_optimism_node::{args::RollupArgs, OpNode}; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; #[test] fn test_basic_setup() { @@ -16,7 +16,7 @@ fn test_basic_setup() { let op_node = OpNode::new(args); let _builder = NodeBuilder::new(config) .with_database(db) - .with_types_and_provider::>>() + .with_types_and_provider::>>() .with_components(op_node.components()) .with_add_ons(op_node.add_ons()) .on_component_initialized(move |ctx| { diff --git a/crates/optimism/node/tests/it/priority.rs b/crates/optimism/node/tests/it/priority.rs index d34dac4836728..defce4466267a 100644 --- a/crates/optimism/node/tests/it/priority.rs +++ b/crates/optimism/node/tests/it/priority.rs @@ -29,7 +29,7 @@ use reth_optimism_payload_builder::builder::OpPayloadTransactions; use reth_optimism_primitives::{OpPrimitives, OpTransactionSigned}; use reth_payload_util::{PayloadTransactions, PayloadTransactionsChain, PayloadTransactionsFixed}; use reth_primitives::RecoveredTx; -use reth_provider::providers::BlockchainProvider2; +use reth_provider::providers::BlockchainProvider; use reth_tasks::TaskManager; use reth_transaction_pool::{pool::BestPayloadTransactions, PoolTransaction}; use std::sync::Arc; @@ -148,7 +148,7 @@ async fn test_custom_block_priority_config() { let tasks = TaskManager::current(); let node_handle = NodeBuilder::new(config.clone()) 
.with_database(db) - .with_types_and_provider::>() + .with_types_and_provider::>() .with_components(build_components(config.chain.chain_id())) .with_add_ons(OpAddOns::default()) .launch_with_fn(|builder| { diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index 9f9e989dc06a7..c1e23063fe6f6 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -147,7 +147,7 @@ mod tests { use super::*; use alloy_primitives::B256; use reth_provider::{ - providers::BlockchainProvider2, + providers::BlockchainProvider, test_utils::{create_test_provider_factory, MockEthProvider}, }; use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams}; @@ -198,7 +198,7 @@ mod tests { provider_rw.commit().expect("failed to commit"); // Create a new provider - let provider = BlockchainProvider2::new(factory).unwrap(); + let provider = BlockchainProvider::new(factory).unwrap(); // Since there are no transactions, expected None let range = input.get_next_tx_num_range(&provider).expect("Expected range"); @@ -236,7 +236,7 @@ mod tests { provider_rw.commit().expect("failed to commit"); // Create a new provider - let provider = BlockchainProvider2::new(factory).unwrap(); + let provider = BlockchainProvider::new(factory).unwrap(); // Get the next tx number range let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); @@ -282,7 +282,7 @@ mod tests { provider_rw.commit().expect("failed to commit"); // Create a new provider - let provider = BlockchainProvider2::new(factory).unwrap(); + let provider = BlockchainProvider::new(factory).unwrap(); // Fetch the range and check if it is correct let range = input.get_next_tx_num_range(&provider).expect("Expected range").unwrap(); @@ -318,7 +318,7 @@ mod tests { provider_rw.commit().expect("failed to commit"); // Create a new provider - let provider = BlockchainProvider2::new(factory).unwrap(); + let provider = BlockchainProvider::new(factory).unwrap(); // Get the last tx number // Calculate the total number of transactions diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 818e224d6f870..9d1d7abc01027 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1,6 +1,6 @@ #![allow(unused)] use crate::{ - providers::{ConsistentProvider, StaticFileProvider}, + providers::{ConsistentProvider, ProviderNodeTypes, StaticFileProvider}, AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, CanonChainTracker, CanonStateNotifications, CanonStateSubscriptions, ChainSpecProvider, ChainStateBlockReader, ChangeSetReader, DatabaseProvider, @@ -51,15 +51,13 @@ use std::{ }; use tracing::trace; -use crate::providers::ProviderNodeTypes; - /// The main type for interacting with the blockchain. /// /// This type serves as the main entry point for interacting with the blockchain and provides data /// from database storage and from the blockchain tree (pending state etc.) It is a simple wrapper /// type that holds an instance of the database and the blockchain tree. #[derive(Debug)] -pub struct BlockchainProvider2 { +pub struct BlockchainProvider { /// Provider factory used to access the database. 
pub(crate) database: ProviderFactory, /// Tracks the chain info wrt forkchoice updates and in memory canonical @@ -67,7 +65,7 @@ pub struct BlockchainProvider2 { pub(crate) canonical_in_memory_state: CanonicalInMemoryState, } -impl Clone for BlockchainProvider2 { +impl Clone for BlockchainProvider { fn clone(&self) -> Self { Self { database: self.database.clone(), @@ -76,8 +74,8 @@ impl Clone for BlockchainProvider2 { } } -impl BlockchainProvider2 { - /// Create a new [`BlockchainProvider2`] using only the storage, fetching the latest +impl BlockchainProvider { + /// Create a new [`BlockchainProvider`] using only the storage, fetching the latest /// header from the database to initialize the provider. pub fn new(storage: ProviderFactory) -> ProviderResult { let provider = storage.provider()?; @@ -160,11 +158,11 @@ impl BlockchainProvider2 { } } -impl NodePrimitivesProvider for BlockchainProvider2 { +impl NodePrimitivesProvider for BlockchainProvider { type Primitives = N::Primitives; } -impl DatabaseProviderFactory for BlockchainProvider2 { +impl DatabaseProviderFactory for BlockchainProvider { type DB = N::DB; type Provider = as DatabaseProviderFactory>::Provider; type ProviderRW = as DatabaseProviderFactory>::ProviderRW; @@ -178,17 +176,17 @@ impl DatabaseProviderFactory for BlockchainProvider2 { } } -impl StateCommitmentProvider for BlockchainProvider2 { +impl StateCommitmentProvider for BlockchainProvider { type StateCommitment = N::StateCommitment; } -impl StaticFileProviderFactory for BlockchainProvider2 { +impl StaticFileProviderFactory for BlockchainProvider { fn static_file_provider(&self) -> StaticFileProvider { self.database.static_file_provider() } } -impl HeaderProvider for BlockchainProvider2 { +impl HeaderProvider for BlockchainProvider { type Header = HeaderTy; fn header(&self, block_hash: &BlockHash) -> ProviderResult> { @@ -237,7 +235,7 @@ impl HeaderProvider for BlockchainProvider2 { } } -impl BlockHashReader for BlockchainProvider2 { +impl BlockHashReader for BlockchainProvider { fn block_hash(&self, number: u64) -> ProviderResult> { self.consistent_provider()?.block_hash(number) } @@ -251,7 +249,7 @@ impl BlockHashReader for BlockchainProvider2 { } } -impl BlockNumReader for BlockchainProvider2 { +impl BlockNumReader for BlockchainProvider { fn chain_info(&self) -> ProviderResult { Ok(self.canonical_in_memory_state.chain_info()) } @@ -269,7 +267,7 @@ impl BlockNumReader for BlockchainProvider2 { } } -impl BlockIdReader for BlockchainProvider2 { +impl BlockIdReader for BlockchainProvider { fn pending_block_num_hash(&self) -> ProviderResult> { Ok(self.canonical_in_memory_state.pending_block_num_hash()) } @@ -283,7 +281,7 @@ impl BlockIdReader for BlockchainProvider2 { } } -impl BlockReader for BlockchainProvider2 { +impl BlockReader for BlockchainProvider { type Block = BlockTy; fn find_block_by_hash( @@ -355,7 +353,7 @@ impl BlockReader for BlockchainProvider2 { } } -impl TransactionsProvider for BlockchainProvider2 { +impl TransactionsProvider for BlockchainProvider { type Transaction = TxTy; fn transaction_id(&self, tx_hash: TxHash) -> ProviderResult> { @@ -421,7 +419,7 @@ impl TransactionsProvider for BlockchainProvider2 { } } -impl ReceiptProvider for BlockchainProvider2 { +impl ReceiptProvider for BlockchainProvider { type Receipt = ReceiptTy; fn receipt(&self, id: TxNumber) -> ProviderResult> { @@ -447,13 +445,13 @@ impl ReceiptProvider for BlockchainProvider2 { } } -impl ReceiptProviderIdExt for BlockchainProvider2 { +impl ReceiptProviderIdExt for 
BlockchainProvider { fn receipts_by_block_id(&self, block: BlockId) -> ProviderResult>> { self.consistent_provider()?.receipts_by_block_id(block) } } -impl WithdrawalsProvider for BlockchainProvider2 { +impl WithdrawalsProvider for BlockchainProvider { fn withdrawals_by_block( &self, id: BlockHashOrNumber, @@ -463,13 +461,13 @@ impl WithdrawalsProvider for BlockchainProvider2 { } } -impl OmmersProvider for BlockchainProvider2 { +impl OmmersProvider for BlockchainProvider { fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { self.consistent_provider()?.ommers(id) } } -impl BlockBodyIndicesProvider for BlockchainProvider2 { +impl BlockBodyIndicesProvider for BlockchainProvider { fn block_body_indices( &self, number: BlockNumber, @@ -478,7 +476,7 @@ impl BlockBodyIndicesProvider for BlockchainProvider2 { } } -impl StageCheckpointReader for BlockchainProvider2 { +impl StageCheckpointReader for BlockchainProvider { fn get_stage_checkpoint(&self, id: StageId) -> ProviderResult> { self.consistent_provider()?.get_stage_checkpoint(id) } @@ -492,7 +490,7 @@ impl StageCheckpointReader for BlockchainProvider2 { } } -impl PruneCheckpointReader for BlockchainProvider2 { +impl PruneCheckpointReader for BlockchainProvider { fn get_prune_checkpoint( &self, segment: PruneSegment, @@ -505,7 +503,7 @@ impl PruneCheckpointReader for BlockchainProvider2 { } } -impl ChainSpecProvider for BlockchainProvider2 { +impl ChainSpecProvider for BlockchainProvider { type ChainSpec = N::ChainSpec; fn chain_spec(&self) -> Arc { @@ -513,7 +511,7 @@ impl ChainSpecProvider for BlockchainProvider2 { } } -impl StateProviderFactory for BlockchainProvider2 { +impl StateProviderFactory for BlockchainProvider { /// Storage provider for latest block fn latest(&self) -> ProviderResult { trace!(target: "providers::blockchain", "Getting latest block state provider"); @@ -622,7 +620,7 @@ impl StateProviderFactory for BlockchainProvider2 { } } -impl HashedPostStateProvider for BlockchainProvider2 { +impl HashedPostStateProvider for BlockchainProvider { fn hashed_post_state(&self, bundle_state: &BundleState) -> HashedPostState { HashedPostState::from_bundle_state::<::KeyHasher>( bundle_state.state(), @@ -630,7 +628,7 @@ impl HashedPostStateProvider for BlockchainProvider2 { } } -impl CanonChainTracker for BlockchainProvider2 { +impl CanonChainTracker for BlockchainProvider { type Header = HeaderTy; fn on_forkchoice_update_received(&self, _update: &ForkchoiceState) { @@ -663,7 +661,7 @@ impl CanonChainTracker for BlockchainProvider2 { } } -impl BlockReaderIdExt for BlockchainProvider2 +impl BlockReaderIdExt for BlockchainProvider where Self: ReceiptProviderIdExt, { @@ -701,13 +699,13 @@ where } } -impl CanonStateSubscriptions for BlockchainProvider2 { +impl CanonStateSubscriptions for BlockchainProvider { fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { self.canonical_in_memory_state.subscribe_canon_state() } } -impl ForkChoiceSubscriptions for BlockchainProvider2 { +impl ForkChoiceSubscriptions for BlockchainProvider { type Header = HeaderTy; fn subscribe_safe_block(&self) -> ForkChoiceNotifications { @@ -721,7 +719,7 @@ impl ForkChoiceSubscriptions for BlockchainProvider2 { } } -impl StorageChangeSetReader for BlockchainProvider2 { +impl StorageChangeSetReader for BlockchainProvider { fn storage_changeset( &self, block_number: BlockNumber, @@ -730,7 +728,7 @@ impl StorageChangeSetReader for BlockchainProvider2 { } } -impl ChangeSetReader for BlockchainProvider2 { +impl ChangeSetReader for BlockchainProvider { 
fn account_block_changeset( &self, block_number: BlockNumber, @@ -739,14 +737,14 @@ impl ChangeSetReader for BlockchainProvider2 { } } -impl AccountReader for BlockchainProvider2 { +impl AccountReader for BlockchainProvider { /// Get basic account information. fn basic_account(&self, address: &Address) -> ProviderResult> { self.consistent_provider()?.basic_account(address) } } -impl StateReader for BlockchainProvider2 { +impl StateReader for BlockchainProvider { type Receipt = ReceiptTy; /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. @@ -769,7 +767,7 @@ impl StateReader for BlockchainProvider2 { #[cfg(test)] mod tests { use crate::{ - providers::BlockchainProvider2, + providers::BlockchainProvider, test_utils::{ create_test_provider_factory, create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB, @@ -860,7 +858,7 @@ mod tests { in_memory_blocks: usize, block_range_params: BlockRangeParams, ) -> eyre::Result<( - BlockchainProvider2, + BlockchainProvider, Vec, Vec, Vec>, @@ -916,7 +914,7 @@ mod tests { // Commit to both storages: database and static files UnifiedStorageWriter::commit(provider_rw)?; - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; // Insert the rest of the blocks and receipts into the in-memory state let chain = NewCanonicalChain::Commit { @@ -962,7 +960,7 @@ mod tests { in_memory_blocks: usize, block_range_params: BlockRangeParams, ) -> eyre::Result<( - BlockchainProvider2, + BlockchainProvider, Vec, Vec, Vec>, @@ -982,7 +980,7 @@ mod tests { /// This simulates a RPC method having a different view than when its database transaction was /// created. fn persist_block_after_db_tx_creation( - provider: BlockchainProvider2, + provider: BlockchainProvider, block_number: BlockNumber, ) { let hook_provider = provider.clone(); @@ -1035,7 +1033,7 @@ mod tests { provider_rw.commit()?; // Create a new provider - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; // Useful blocks let first_db_block = database_blocks.first().unwrap(); @@ -1133,7 +1131,7 @@ mod tests { provider_rw.commit()?; // Create a new provider - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; // First in memory block let first_in_mem_block = in_memory_blocks.first().unwrap(); @@ -1397,7 +1395,7 @@ mod tests { provider_rw.insert_historical_block(block_1)?; provider_rw.commit()?; - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; // Subscribe twice for canonical state updates. 
let in_memory_state = provider.canonical_in_memory_state(); @@ -1833,7 +1831,7 @@ mod tests { )?; provider_rw.commit()?; - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; let in_memory_changesets = in_memory_changesets.into_iter().next().unwrap(); let chain = NewCanonicalChain::Commit { diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index e0022a61d2f04..9ac33e3476e7a 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1444,7 +1444,7 @@ impl StateReader for ConsistentProvider { #[cfg(test)] mod tests { use crate::{ - providers::blockchain_provider::BlockchainProvider2, + providers::blockchain_provider::BlockchainProvider, test_utils::create_test_provider_factory, BlockWriter, }; use alloy_eips::BlockHashOrNumber; @@ -1524,7 +1524,7 @@ mod tests { provider_rw.commit()?; // Create a new provider - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; let consistent_provider = provider.consistent_provider()?; // Useful blocks @@ -1635,7 +1635,7 @@ mod tests { provider_rw.commit()?; // Create a new provider - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; let consistent_provider = provider.consistent_provider()?; // First in memory block @@ -1753,7 +1753,7 @@ mod tests { )?; provider_rw.commit()?; - let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; let in_memory_changesets = in_memory_changesets.into_iter().next().unwrap(); let chain = NewCanonicalChain::Commit { diff --git a/crates/storage/provider/src/providers/mod.rs b/crates/storage/provider/src/providers/mod.rs index 6ff53e4afeae6..7ac5bde40741c 100644 --- a/crates/storage/provider/src/providers/mod.rs +++ b/crates/storage/provider/src/providers/mod.rs @@ -27,7 +27,7 @@ mod consistent_view; pub use consistent_view::{ConsistentDbView, ConsistentViewError}; mod blockchain_provider; -pub use blockchain_provider::BlockchainProvider2; +pub use blockchain_provider::BlockchainProvider; mod consistent; pub use consistent::ConsistentProvider; diff --git a/examples/rpc-db/src/main.rs b/examples/rpc-db/src/main.rs index 1af1d3e5e5ffb..19c108e5e7de0 100644 --- a/examples/rpc-db/src/main.rs +++ b/examples/rpc-db/src/main.rs @@ -18,7 +18,7 @@ use reth::{ api::NodeTypesWithDBAdapter, beacon_consensus::EthBeaconConsensus, providers::{ - providers::{BlockchainProvider2, StaticFileProvider}, + providers::{BlockchainProvider, StaticFileProvider}, ProviderFactory, }, rpc::eth::EthApi, @@ -61,7 +61,7 @@ async fn main() -> eyre::Result<()> { // 2. Setup the blockchain provider using only the database provider and a noop for the tree to // satisfy trait bounds. Tree is not used in this example since we are only operating on the // disk and don't handle new blocks/live sync etc, which is done by the blockchain tree. 
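For orientation, once the hunk below lands, the example's wiring reduces to the following (condensed from the example itself; only the type's name changes in this commit):

    // The provider wraps the ProviderFactory built above and is handed to
    // the RPC builder, cloned per consumer.
    let provider = BlockchainProvider::new(factory)?;
    let rpc_builder = RpcModuleBuilder::default().with_provider(provider.clone());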
- let provider = BlockchainProvider2::new(factory)?; + let provider = BlockchainProvider::new(factory)?; let rpc_builder = RpcModuleBuilder::default() .with_provider(provider.clone()) From bd4947112d1d8f4048d0e67e30364acb16025606 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 8 Jan 2025 13:49:39 +0100 Subject: [PATCH 023/113] perf(trie): set trie mask bits directly (#13724) --- crates/trie/sparse/src/trie.rs | 51 +++++++++++++--------------------- 1 file changed, 19 insertions(+), 32 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 8f54d9454022e..f472578e3d9d8 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -700,9 +700,8 @@ impl

<P> RevealedSparseTrie<P>
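    // Context for the hunks below: instead of buffering one boolean per child
    // and replaying the buffers later, the branch-node walk now sets bits on
    // the `TrieMask` values directly, keyed by each child's last path nibble.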

{ .resize(buffers.branch_child_buf.len(), Default::default()); let mut added_children = false; - // TODO(alexey): set the `TrieMask` bits directly - let mut tree_mask_values = Vec::new(); - let mut hash_mask_values = Vec::new(); + let mut tree_mask = TrieMask::default(); + let mut hash_mask = TrieMask::default(); let mut hashes = Vec::new(); for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { if buffers.rlp_node_stack.last().is_some_and(|e| &e.0 == child_path) { @@ -711,18 +710,21 @@ impl

<P> RevealedSparseTrie<P>

{ // Update the masks only if we need to retain trie updates if retain_updates { - // Set the trie mask - let tree_mask_value = if node_type.store_in_db_trie() { + // SAFETY: it's a child, so it's never empty + let last_child_nibble = child_path.last().unwrap(); + + // Determine whether we need to set trie mask bit. + let should_set_tree_mask_bit = // A branch or an extension node explicitly set the // `store_in_db_trie` flag - true - } else { + node_type.store_in_db_trie() || // Set the flag according to whether a child node was // pre-calculated (`calculated = false`), meaning that it wasn't // in the database - !calculated - }; - tree_mask_values.push(tree_mask_value); + !calculated; + if should_set_tree_mask_bit { + tree_mask.set_bit(last_child_nibble); + } // Set the hash mask. If a child node is a revealed branch node OR // is a blinded node that has its hash mask bit set according to the @@ -733,12 +735,11 @@ impl

<P> RevealedSparseTrie<P>

{ self.branch_node_hash_masks .get(&path) .is_some_and(|mask| { - mask.is_bit_set(child_path.last().unwrap()) + mask.is_bit_set(last_child_nibble) })) }); - let hash_mask_value = hash.is_some(); - hash_mask_values.push(hash_mask_value); if let Some(hash) = hash { + hash_mask.set_bit(last_child_nibble); hashes.push(hash); } @@ -746,16 +747,17 @@ impl

<P> RevealedSparseTrie<P>

{ target: "trie::sparse", ?path, ?child_path, - ?tree_mask_value, - ?hash_mask_value, + tree_mask_bit_set = should_set_tree_mask_bit, + hash_mask_bit_set = hash.is_some(), "Updating branch node child masks" ); } // Insert children in the resulting buffer in a normal order, // because initially we iterated in reverse. - buffers.branch_value_stack_buf - [buffers.branch_child_buf.len() - i - 1] = child; + // SAFETY: i < len and len is never 0 + let original_idx = buffers.branch_child_buf.len() - i - 1; + buffers.branch_value_stack_buf[original_idx] = child; added_children = true; } else { debug_assert!(!added_children); @@ -778,21 +780,6 @@ impl

<P> RevealedSparseTrie<P>
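    // Context for the hunk below: the deleted lines replayed the buffered
    // `tree_mask_values`/`hash_mask_values` booleans in reverse child order to
    // rebuild the masks; with bits now set via `TrieMask::set_bit(last_child_nibble)`
    // as each child is processed, only the aggregate `is_empty()` checks remain.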

{ let store_in_db_trie_value = if let Some(updates) = self.updates.as_mut().filter(|_| retain_updates && !path.is_empty()) { - let mut tree_mask_values = tree_mask_values.into_iter().rev(); - let mut hash_mask_values = hash_mask_values.into_iter().rev(); - let mut tree_mask = TrieMask::default(); - let mut hash_mask = TrieMask::default(); - for (i, child) in branch_node_ref.children() { - if child.is_some() { - if hash_mask_values.next().unwrap() { - hash_mask.set_bit(i); - } - if tree_mask_values.next().unwrap() { - tree_mask.set_bit(i); - } - } - } - // Store in DB trie if there are either any children that are stored in the // DB trie, or any children represent hashed values let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty(); From cb22b17b3e6da030ac74cf4ae83913f8af1714b3 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 14:34:28 +0100 Subject: [PATCH 024/113] chore: rm blockchaintree dep from engine-tree (#13729) --- Cargo.lock | 1 - crates/engine/tree/Cargo.toml | 1 - crates/engine/tree/src/lib.rs | 3 - crates/engine/tree/src/tree/error.rs | 199 +++++++++++++++++++++++++++ crates/engine/tree/src/tree/mod.rs | 49 +++++-- 5 files changed, 233 insertions(+), 20 deletions(-) create mode 100644 crates/engine/tree/src/tree/error.rs diff --git a/Cargo.lock b/Cargo.lock index 0e3c08dc5eeb2..23c3eabb2bd22 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7195,7 +7195,6 @@ dependencies = [ "proptest", "rand 0.8.5", "rayon", - "reth-blockchain-tree-api", "reth-chain-state", "reth-chainspec", "reth-consensus", diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 7376bf238f2f8..bd5e70319a6aa 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -12,7 +12,6 @@ workspace = true [dependencies] # reth -reth-blockchain-tree-api.workspace = true reth-chain-state.workspace = true reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true diff --git a/crates/engine/tree/src/lib.rs b/crates/engine/tree/src/lib.rs index 19eecf8d6c888..f197dd764aab7 100644 --- a/crates/engine/tree/src/lib.rs +++ b/crates/engine/tree/src/lib.rs @@ -92,9 +92,6 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![cfg_attr(not(test), warn(unused_crate_dependencies))] -/// Re-export of the blockchain tree API. -pub use reth_blockchain_tree_api::*; - /// Support for backfill sync mode. pub mod backfill; /// The type that drives the chain forward. diff --git a/crates/engine/tree/src/tree/error.rs b/crates/engine/tree/src/tree/error.rs new file mode 100644 index 0000000000000..025655315f1f7 --- /dev/null +++ b/crates/engine/tree/src/tree/error.rs @@ -0,0 +1,199 @@ +//! Internal errors for the tree module. + +use alloy_consensus::BlockHeader; +use reth_consensus::ConsensusError; +use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError}; +use reth_evm::execute::InternalBlockExecutionError; +use reth_primitives::SealedBlockFor; +use reth_primitives_traits::{Block, BlockBody}; +use tokio::sync::oneshot::error::TryRecvError; + +/// This is an error that can come from advancing persistence. 
Either this can be a +/// [`TryRecvError`], or this can be a [`ProviderError`] +#[derive(Debug, thiserror::Error)] +pub enum AdvancePersistenceError { + /// An error that can be from failing to receive a value from persistence + #[error(transparent)] + RecvError(#[from] TryRecvError), + /// A provider error + #[error(transparent)] + Provider(#[from] ProviderError), +} + +#[derive(thiserror::Error)] +#[error("Failed to insert block (hash={}, number={}, parent_hash={}): {}", + .block.hash(), + .block.number(), + .block.parent_hash(), + .kind)] +struct InsertBlockErrorDataTwo { + block: SealedBlockFor, + #[source] + kind: InsertBlockErrorKindTwo, +} + +impl std::fmt::Debug for InsertBlockErrorDataTwo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("InsertBlockError") + .field("error", &self.kind) + .field("hash", &self.block.hash()) + .field("number", &self.block.number()) + .field("parent_hash", &self.block.parent_hash()) + .field("num_txs", &self.block.body().transactions().len()) + .finish_non_exhaustive() + } +} + +impl InsertBlockErrorDataTwo { + const fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { + Self { block, kind } + } + + fn boxed(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Box { + Box::new(Self::new(block, kind)) + } +} + +/// Error thrown when inserting a block failed because the block is considered invalid. +#[derive(thiserror::Error)] +#[error(transparent)] +pub struct InsertBlockErrorTwo { + inner: Box>, +} + +// === impl InsertBlockErrorTwo === + +impl InsertBlockErrorTwo { + /// Create a new `InsertInvalidBlockErrorTwo` + pub fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { + Self { inner: InsertBlockErrorDataTwo::boxed(block, kind) } + } + + /// Create a new `InsertInvalidBlockError` from a consensus error + pub fn consensus_error(error: ConsensusError, block: SealedBlockFor) -> Self { + Self::new(block, InsertBlockErrorKindTwo::Consensus(error)) + } + + /// Create a new `InsertInvalidBlockError` from a consensus error + pub fn sender_recovery_error(block: SealedBlockFor) -> Self { + Self::new(block, InsertBlockErrorKindTwo::SenderRecovery) + } + + /// Consumes the error and returns the block that resulted in the error + #[inline] + pub fn into_block(self) -> SealedBlockFor { + self.inner.block + } + + /// Returns the error kind + #[inline] + pub const fn kind(&self) -> &InsertBlockErrorKindTwo { + &self.inner.kind + } + + /// Returns the block that resulted in the error + #[inline] + pub const fn block(&self) -> &SealedBlockFor { + &self.inner.block + } + + /// Consumes the type and returns the block and error kind. + #[inline] + pub fn split(self) -> (SealedBlockFor, InsertBlockErrorKindTwo) { + let inner = *self.inner; + (inner.block, inner.kind) + } +} + +impl std::fmt::Debug for InsertBlockErrorTwo { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Debug::fmt(&self.inner, f) + } +} + +/// All error variants possible when inserting a block +#[derive(Debug, thiserror::Error)] +pub enum InsertBlockErrorKindTwo { + /// Failed to recover senders for the block + #[error("failed to recover senders for block")] + SenderRecovery, + /// Block violated consensus rules. + #[error(transparent)] + Consensus(#[from] ConsensusError), + /// Block execution failed. + #[error(transparent)] + Execution(#[from] BlockExecutionError), + /// Provider error. + #[error(transparent)] + Provider(#[from] ProviderError), + /// Other errors. 
+ #[error(transparent)] + Other(#[from] Box), +} + +impl InsertBlockErrorKindTwo { + /// Returns an [`InsertBlockValidationError`] if the error is caused by an invalid block. + /// + /// Returns an [`InsertBlockFatalError`] if the error is caused by an error that is not + /// validation related or is otherwise fatal. + /// + /// This is intended to be used to determine if we should respond `INVALID` as a response when + /// processing a new block. + pub fn ensure_validation_error( + self, + ) -> Result { + match self { + Self::SenderRecovery => Ok(InsertBlockValidationError::SenderRecovery), + Self::Consensus(err) => Ok(InsertBlockValidationError::Consensus(err)), + // other execution errors that are considered internal errors + Self::Execution(err) => { + match err { + BlockExecutionError::Validation(err) => { + Ok(InsertBlockValidationError::Validation(err)) + } + BlockExecutionError::Consensus(err) => { + Ok(InsertBlockValidationError::Consensus(err)) + } + // these are internal errors, not caused by an invalid block + BlockExecutionError::Internal(error) => { + Err(InsertBlockFatalError::BlockExecutionError(error)) + } + } + } + Self::Provider(err) => Err(InsertBlockFatalError::Provider(err)), + Self::Other(err) => Err(InternalBlockExecutionError::Other(err).into()), + } + } +} + +/// Error variants that are not caused by invalid blocks +#[derive(Debug, thiserror::Error)] +pub enum InsertBlockFatalError { + /// A provider error + #[error(transparent)] + Provider(#[from] ProviderError), + /// An internal / fatal block execution error + #[error(transparent)] + BlockExecutionError(#[from] InternalBlockExecutionError), +} + +/// Error variants that are caused by invalid blocks +#[derive(Debug, thiserror::Error)] +pub enum InsertBlockValidationError { + /// Failed to recover senders for the block + #[error("failed to recover senders for block")] + SenderRecovery, + /// Block violated consensus rules. + #[error(transparent)] + Consensus(#[from] ConsensusError), + /// Validation error, transparently wrapping [`BlockValidationError`] + #[error(transparent)] + Validation(#[from] BlockValidationError), +} + +impl InsertBlockValidationError { + /// Returns true if this is a block pre merge error. + pub const fn is_block_pre_merge(&self) -> bool { + matches!(self, Self::Validation(BlockValidationError::BlockPreMerge { .. 
})) + } +} diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 1db3e4a70d980..c678e290fe981 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -16,10 +16,7 @@ use alloy_rpc_types_engine::{ PayloadValidationError, }; use block_buffer::BlockBuffer; -use reth_blockchain_tree_api::{ - error::{InsertBlockErrorKindTwo, InsertBlockErrorTwo, InsertBlockFatalError}, - BlockStatus2, InsertPayloadOk2, -}; +use error::{InsertBlockErrorKindTwo, InsertBlockErrorTwo, InsertBlockFatalError}; use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, }; @@ -78,6 +75,7 @@ use tracing::*; mod block_buffer; pub mod config; +pub mod error; mod invalid_block_hook; mod invalid_headers; mod metrics; @@ -85,7 +83,10 @@ mod persistence_state; pub mod root; mod trie_updates; -use crate::tree::{config::MIN_BLOCKS_FOR_PIPELINE_RUN, invalid_headers::InvalidHeaderCache}; +use crate::tree::{ + config::MIN_BLOCKS_FOR_PIPELINE_RUN, error::AdvancePersistenceError, + invalid_headers::InvalidHeaderCache, +}; pub use config::TreeConfig; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use persistence_state::PersistenceState; @@ -2764,16 +2765,34 @@ where } } -/// This is an error that can come from advancing persistence. Either this can be a -/// [`TryRecvError`], or this can be a [`ProviderError`] -#[derive(Debug, thiserror::Error)] -pub enum AdvancePersistenceError { - /// An error that can be from failing to receive a value from persistence - #[error(transparent)] - RecvError(#[from] TryRecvError), - /// A provider error - #[error(transparent)] - Provider(#[from] ProviderError), +/// Block inclusion can be valid, accepted, or invalid. Invalid blocks are returned as an error +/// variant. +/// +/// If we don't know the block's parent, we return `Disconnected`, as we can't claim that the block +/// is valid or not. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum BlockStatus2 { + /// The block is valid and block extends canonical chain. + Valid, + /// The block may be valid and has an unknown missing ancestor. + Disconnected { + /// Current canonical head. + head: BlockNumHash, + /// The lowest ancestor block that is not connected to the canonical chain. + missing_ancestor: BlockNumHash, + }, +} + +/// How a payload was inserted if it was valid. +/// +/// If the payload was valid, but has already been seen, [`InsertPayloadOk2::AlreadySeen(_)`] is +/// returned, otherwise [`InsertPayloadOk2::Inserted(_)`] is returned. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum InsertPayloadOk2 { + /// The payload was valid, but we have already seen it. + AlreadySeen(BlockStatus2), + /// The payload was valid and inserted into the tree. 
+ Inserted(BlockStatus2), } #[cfg(test)] From 4d3cd2f2485d3be66c9253ee292d8f68a61c785b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 14:40:18 +0100 Subject: [PATCH 025/113] chore: rm blockchaintree dep from errors (#13730) --- Cargo.lock | 1 - crates/errors/Cargo.toml | 1 - crates/errors/src/error.rs | 14 +------------- crates/errors/src/lib.rs | 1 - 4 files changed, 1 insertion(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 23c3eabb2bd22..4eb6c84ece021 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7271,7 +7271,6 @@ dependencies = [ name = "reth-errors" version = "1.1.5" dependencies = [ - "reth-blockchain-tree-api", "reth-consensus", "reth-execution-errors", "reth-fs-util", diff --git a/crates/errors/Cargo.toml b/crates/errors/Cargo.toml index bb56a8bace560..11ecd708449d6 100644 --- a/crates/errors/Cargo.toml +++ b/crates/errors/Cargo.toml @@ -11,7 +11,6 @@ repository.workspace = true workspace = true [dependencies] -reth-blockchain-tree-api.workspace = true reth-consensus.workspace = true reth-execution-errors.workspace = true reth-fs-util.workspace = true diff --git a/crates/errors/src/error.rs b/crates/errors/src/error.rs index 2d97572f529a6..5141a7457f459 100644 --- a/crates/errors/src/error.rs +++ b/crates/errors/src/error.rs @@ -1,4 +1,3 @@ -use reth_blockchain_tree_api::error::{BlockchainTreeError, CanonicalError}; use reth_consensus::ConsensusError; use reth_execution_errors::BlockExecutionError; use reth_fs_util::FsPathError; @@ -31,10 +30,6 @@ pub enum RethError { #[error(transparent)] Provider(#[from] ProviderError), - /// Canonical errors encountered. - #[error(transparent)] - Canonical(#[from] CanonicalError), - /// Any other error. #[error(transparent)] Other(Box), @@ -55,12 +50,6 @@ impl RethError { } } -impl From for RethError { - fn from(error: BlockchainTreeError) -> Self { - Self::Canonical(CanonicalError::BlockchainTree(error)) - } -} - impl From for RethError { fn from(err: FsPathError) -> Self { Self::other(err) @@ -78,10 +67,9 @@ mod size_asserts { }; } - static_assert_size!(RethError, 64); + static_assert_size!(RethError, 56); static_assert_size!(BlockExecutionError, 56); static_assert_size!(ConsensusError, 48); static_assert_size!(DatabaseError, 32); static_assert_size!(ProviderError, 48); - static_assert_size!(CanonicalError, 56); } diff --git a/crates/errors/src/lib.rs b/crates/errors/src/lib.rs index 9dc0ce0ca5bc4..fc464eb98cbd4 100644 --- a/crates/errors/src/lib.rs +++ b/crates/errors/src/lib.rs @@ -15,7 +15,6 @@ mod error; pub use error::{RethError, RethResult}; -pub use reth_blockchain_tree_api::error::{BlockchainTreeError, CanonicalError}; pub use reth_consensus::ConsensusError; pub use reth_execution_errors::{BlockExecutionError, BlockValidationError}; pub use reth_storage_errors::{ From 53ccb5d46577426a41a4d451a77e6f01c6949939 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 14:56:15 +0100 Subject: [PATCH 026/113] chore: rm blockchaintree api crate (#13731) --- Cargo.lock | 15 - Cargo.toml | 2 - crates/blockchain-tree-api/Cargo.toml | 26 -- crates/blockchain-tree-api/src/error.rs | 530 ------------------------ crates/blockchain-tree-api/src/lib.rs | 372 ----------------- 5 files changed, 945 deletions(-) delete mode 100644 crates/blockchain-tree-api/Cargo.toml delete mode 100644 crates/blockchain-tree-api/src/error.rs delete mode 100644 crates/blockchain-tree-api/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 4eb6c84ece021..f83bda895de68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6517,21 
+6517,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-blockchain-tree-api" -version = "1.1.5" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-primitives", - "reth-consensus", - "reth-execution-errors", - "reth-primitives", - "reth-primitives-traits", - "reth-storage-errors", - "thiserror 2.0.9", -] - [[package]] name = "reth-chain-state" version = "1.1.5" diff --git a/Cargo.toml b/Cargo.toml index 890b79a14198b..16933fc7db1b0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,6 @@ exclude = [".github/"] members = [ "bin/reth-bench/", "bin/reth/", - "crates/blockchain-tree-api/", "crates/chain-state/", "crates/chainspec/", "crates/cli/cli/", @@ -303,7 +302,6 @@ op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-bench = { path = "bin/reth-bench" } -reth-blockchain-tree-api = { path = "crates/blockchain-tree-api" } reth-chain-state = { path = "crates/chain-state" } reth-chainspec = { path = "crates/chainspec", default-features = false } reth-cli = { path = "crates/cli/cli" } diff --git a/crates/blockchain-tree-api/Cargo.toml b/crates/blockchain-tree-api/Cargo.toml deleted file mode 100644 index 83ae378090b60..0000000000000 --- a/crates/blockchain-tree-api/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "reth-blockchain-tree-api" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true - -[lints] -workspace = true - -[dependencies] -reth-consensus.workspace = true -reth-execution-errors.workspace = true -reth-primitives.workspace = true -reth-primitives-traits.workspace = true -reth-storage-errors.workspace = true - -# alloy -alloy-consensus.workspace = true -alloy-primitives.workspace = true -alloy-eips.workspace = true - -# misc -thiserror.workspace = true diff --git a/crates/blockchain-tree-api/src/error.rs b/crates/blockchain-tree-api/src/error.rs deleted file mode 100644 index ddd7cea7993c8..0000000000000 --- a/crates/blockchain-tree-api/src/error.rs +++ /dev/null @@ -1,530 +0,0 @@ -//! Error handling for the blockchain tree - -use alloy_consensus::BlockHeader; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_consensus::ConsensusError; -use reth_execution_errors::{ - BlockExecutionError, BlockValidationError, InternalBlockExecutionError, -}; -use reth_primitives::{SealedBlock, SealedBlockFor}; -use reth_primitives_traits::{Block, BlockBody}; -pub use reth_storage_errors::provider::ProviderError; - -/// Various error cases that can occur when a block violates tree assumptions. -#[derive(Debug, Clone, Copy, thiserror::Error, Eq, PartialEq)] -pub enum BlockchainTreeError { - /// Thrown if the block number is lower than the last finalized block number. - #[error("block number is lower than the last finalized block number #{last_finalized}")] - PendingBlockIsFinalized { - /// The block number of the last finalized block. - last_finalized: BlockNumber, - }, - /// Thrown if no side chain could be found for the block. - #[error("chainId can't be found in BlockchainTree with internal index {chain_id}")] - BlockSideChainIdConsistency { - /// The internal identifier for the side chain. - chain_id: u64, - }, - /// Thrown if a canonical chain header cannot be found. - #[error("canonical chain header {block_hash} can't be found")] - CanonicalChain { - /// The block hash of the missing canonical chain header. 
- block_hash: BlockHash, - }, - /// Thrown if a block number cannot be found in the blockchain tree chain. - #[error("block number #{block_number} not found in blockchain tree chain")] - BlockNumberNotFoundInChain { - /// The block number that could not be found. - block_number: BlockNumber, - }, - /// Thrown if a block hash cannot be found in the blockchain tree chain. - #[error("block hash {block_hash} not found in blockchain tree chain")] - BlockHashNotFoundInChain { - /// The block hash that could not be found. - block_hash: BlockHash, - }, - /// Thrown if the block failed to buffer - #[error("block with hash {block_hash} failed to buffer")] - BlockBufferingFailed { - /// The block hash of the block that failed to buffer. - block_hash: BlockHash, - }, - /// Thrown when trying to access genesis parent. - #[error("genesis block has no parent")] - GenesisBlockHasNoParent, -} - -/// Canonical Errors -#[derive(thiserror::Error, Debug, Clone)] -pub enum CanonicalError { - /// Error originating from validation operations. - #[error(transparent)] - Validation(#[from] BlockValidationError), - /// Error originating from blockchain tree operations. - #[error(transparent)] - BlockchainTree(#[from] BlockchainTreeError), - /// Error originating from a provider operation. - #[error(transparent)] - Provider(#[from] ProviderError), - /// Error indicating a transaction reverted during execution. - #[error("transaction error on revert: {0}")] - CanonicalRevert(String), - /// Error indicating a transaction failed to commit during execution. - #[error("transaction error on commit: {0}")] - CanonicalCommit(String), - /// Error indicating that a previous optimistic sync target was re-orged - #[error("optimistic sync target was re-orged at block: {0}")] - OptimisticTargetRevert(BlockNumber), -} - -impl CanonicalError { - /// Returns `true` if the error is fatal. - pub const fn is_fatal(&self) -> bool { - matches!(self, Self::CanonicalCommit(_) | Self::CanonicalRevert(_)) - } - - /// Returns `true` if the underlying error matches - /// [`BlockchainTreeError::BlockHashNotFoundInChain`]. - pub const fn is_block_hash_not_found(&self) -> bool { - matches!(self, Self::BlockchainTree(BlockchainTreeError::BlockHashNotFoundInChain { .. })) - } - - /// Returns `Some(BlockNumber)` if the underlying error matches - /// [`CanonicalError::OptimisticTargetRevert`]. - pub const fn optimistic_revert_block_number(&self) -> Option { - match self { - Self::OptimisticTargetRevert(block_number) => Some(*block_number), - _ => None, - } - } -} - -/// Error thrown when inserting a block failed because the block is considered invalid. 
-#[derive(thiserror::Error)] -#[error(transparent)] -pub struct InsertBlockError { - inner: Box, -} - -// === impl InsertBlockError === - -impl InsertBlockError { - /// Create a new `InsertInvalidBlockError` - pub fn new(block: SealedBlock, kind: InsertBlockErrorKind) -> Self { - Self { inner: InsertBlockErrorData::boxed(block, kind) } - } - - /// Create a new `InsertInvalidBlockError` from a tree error - pub fn tree_error(error: BlockchainTreeError, block: SealedBlock) -> Self { - Self::new(block, InsertBlockErrorKind::Tree(error)) - } - - /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn consensus_error(error: ConsensusError, block: SealedBlock) -> Self { - Self::new(block, InsertBlockErrorKind::Consensus(error)) - } - - /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn sender_recovery_error(block: SealedBlock) -> Self { - Self::new(block, InsertBlockErrorKind::SenderRecovery) - } - - /// Create a new `InsertInvalidBlockError` from an execution error - pub fn execution_error(error: BlockExecutionError, block: SealedBlock) -> Self { - Self::new(block, InsertBlockErrorKind::Execution(error)) - } - - /// Consumes the error and returns the block that resulted in the error - #[inline] - pub fn into_block(self) -> SealedBlock { - self.inner.block - } - - /// Returns the error kind - #[inline] - pub const fn kind(&self) -> &InsertBlockErrorKind { - &self.inner.kind - } - - /// Returns the block that resulted in the error - #[inline] - pub const fn block(&self) -> &SealedBlock { - &self.inner.block - } - - /// Consumes the type and returns the block and error kind. - #[inline] - pub fn split(self) -> (SealedBlock, InsertBlockErrorKind) { - let inner = *self.inner; - (inner.block, inner.kind) - } -} - -impl std::fmt::Debug for InsertBlockError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - std::fmt::Debug::fmt(&self.inner, f) - } -} - -#[derive(thiserror::Error, Debug)] -#[error("Failed to insert block (hash={}, number={}, parent_hash={}): {kind}", - .block.hash(), - .block.number, - .block.parent_hash)] -struct InsertBlockErrorData { - block: SealedBlock, - #[source] - kind: InsertBlockErrorKind, -} - -impl InsertBlockErrorData { - const fn new(block: SealedBlock, kind: InsertBlockErrorKind) -> Self { - Self { block, kind } - } - - fn boxed(block: SealedBlock, kind: InsertBlockErrorKind) -> Box { - Box::new(Self::new(block, kind)) - } -} - -#[derive(thiserror::Error)] -#[error("Failed to insert block (hash={}, number={}, parent_hash={}): {}", - .block.hash(), - .block.number(), - .block.parent_hash(), - .kind)] -struct InsertBlockErrorDataTwo { - block: SealedBlockFor, - #[source] - kind: InsertBlockErrorKindTwo, -} - -impl std::fmt::Debug for InsertBlockErrorDataTwo { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("InsertBlockError") - .field("error", &self.kind) - .field("hash", &self.block.hash()) - .field("number", &self.block.number()) - .field("parent_hash", &self.block.parent_hash()) - .field("num_txs", &self.block.body().transactions().len()) - .finish_non_exhaustive() - } -} - -impl InsertBlockErrorDataTwo { - const fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { - Self { block, kind } - } - - fn boxed(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Box { - Box::new(Self::new(block, kind)) - } -} - -/// Error thrown when inserting a block failed because the block is considered invalid. 
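The `InsertBlockErrorTwo` deleted just below is the copy re-homed into `tree/error.rs` earlier in this series; its design keeps the offending block inside the error so callers can reclaim it without cloning. A hedged sketch of the intended round-trip, where `validate` is a hypothetical stand-in for consensus checks and the constructor and `split` are the methods shown in the diff:

    // Hypothetical: fn validate<B: Block>(block: &SealedBlockFor<B>) -> Result<(), ConsensusError>
    fn try_insert<B: Block>(block: SealedBlockFor<B>) -> Result<(), InsertBlockErrorTwo<B>> {
        if let Err(consensus_err) = validate(&block) {
            // Pair the failing block with its error kind instead of dropping it.
            return Err(InsertBlockErrorTwo::consensus_error(consensus_err, block));
        }
        Ok(())
    }

    // On failure the caller takes both pieces back out:
    // let (block, kind) = err.split();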
-#[derive(thiserror::Error)] -#[error(transparent)] -pub struct InsertBlockErrorTwo { - inner: Box>, -} - -// === impl InsertBlockErrorTwo === - -impl InsertBlockErrorTwo { - /// Create a new `InsertInvalidBlockErrorTwo` - pub fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { - Self { inner: InsertBlockErrorDataTwo::boxed(block, kind) } - } - - /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn consensus_error(error: ConsensusError, block: SealedBlockFor) -> Self { - Self::new(block, InsertBlockErrorKindTwo::Consensus(error)) - } - - /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn sender_recovery_error(block: SealedBlockFor) -> Self { - Self::new(block, InsertBlockErrorKindTwo::SenderRecovery) - } - - /// Create a new `InsertInvalidBlockError` from an execution error - pub fn execution_error(error: BlockExecutionError, block: SealedBlockFor) -> Self { - Self::new(block, InsertBlockErrorKindTwo::Execution(error)) - } - - /// Consumes the error and returns the block that resulted in the error - #[inline] - pub fn into_block(self) -> SealedBlockFor { - self.inner.block - } - - /// Returns the error kind - #[inline] - pub const fn kind(&self) -> &InsertBlockErrorKindTwo { - &self.inner.kind - } - - /// Returns the block that resulted in the error - #[inline] - pub const fn block(&self) -> &SealedBlockFor { - &self.inner.block - } - - /// Consumes the type and returns the block and error kind. - #[inline] - pub fn split(self) -> (SealedBlockFor, InsertBlockErrorKindTwo) { - let inner = *self.inner; - (inner.block, inner.kind) - } -} - -impl std::fmt::Debug for InsertBlockErrorTwo { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - std::fmt::Debug::fmt(&self.inner, f) - } -} - -/// All error variants possible when inserting a block -#[derive(Debug, thiserror::Error)] -pub enum InsertBlockErrorKindTwo { - /// Failed to recover senders for the block - #[error("failed to recover senders for block")] - SenderRecovery, - /// Block violated consensus rules. - #[error(transparent)] - Consensus(#[from] ConsensusError), - /// Block execution failed. - #[error(transparent)] - Execution(#[from] BlockExecutionError), - /// Provider error. - #[error(transparent)] - Provider(#[from] ProviderError), - /// Other errors. - #[error(transparent)] - Other(#[from] Box), -} - -impl InsertBlockErrorKindTwo { - /// Returns an [`InsertBlockValidationError`] if the error is caused by an invalid block. - /// - /// Returns an [`InsertBlockFatalError`] if the error is caused by an error that is not - /// validation related or is otherwise fatal. - /// - /// This is intended to be used to determine if we should respond `INVALID` as a response when - /// processing a new block. 
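Concretely, this split lets the engine answer `INVALID` only for block-local failures while bubbling internal ones up. A hedged sketch of a caller, where `classify` is a hypothetical helper and `PayloadStatusEnum` is alloy's engine type:

    use alloy_rpc_types_engine::PayloadStatusEnum;

    // Only validation/consensus failures become an INVALID payload status;
    // provider and internal execution errors surface as fatal instead.
    fn classify(
        kind: InsertBlockErrorKindTwo,
    ) -> Result<PayloadStatusEnum, InsertBlockFatalError> {
        match kind.ensure_validation_error() {
            Ok(validation_err) => Ok(PayloadStatusEnum::Invalid {
                validation_error: validation_err.to_string(),
            }),
            Err(fatal) => Err(fatal),
        }
    }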
- pub fn ensure_validation_error( - self, - ) -> Result { - match self { - Self::SenderRecovery => Ok(InsertBlockValidationError::SenderRecovery), - Self::Consensus(err) => Ok(InsertBlockValidationError::Consensus(err)), - // other execution errors that are considered internal errors - Self::Execution(err) => { - match err { - BlockExecutionError::Validation(err) => { - Ok(InsertBlockValidationError::Validation(err)) - } - BlockExecutionError::Consensus(err) => { - Ok(InsertBlockValidationError::Consensus(err)) - } - // these are internal errors, not caused by an invalid block - BlockExecutionError::Internal(error) => { - Err(InsertBlockFatalError::BlockExecutionError(error)) - } - } - } - Self::Provider(err) => Err(InsertBlockFatalError::Provider(err)), - Self::Other(err) => Err(InternalBlockExecutionError::Other(err).into()), - } - } -} - -/// Error variants that are not caused by invalid blocks -#[derive(Debug, thiserror::Error)] -pub enum InsertBlockFatalError { - /// A provider error - #[error(transparent)] - Provider(#[from] ProviderError), - /// An internal / fatal block execution error - #[error(transparent)] - BlockExecutionError(#[from] InternalBlockExecutionError), -} - -/// Error variants that are caused by invalid blocks -#[derive(Debug, thiserror::Error)] -pub enum InsertBlockValidationError { - /// Failed to recover senders for the block - #[error("failed to recover senders for block")] - SenderRecovery, - /// Block violated consensus rules. - #[error(transparent)] - Consensus(#[from] ConsensusError), - /// Validation error, transparently wrapping [`BlockValidationError`] - #[error(transparent)] - Validation(#[from] BlockValidationError), -} - -impl InsertBlockValidationError { - /// Returns true if this is a block pre merge error. - pub const fn is_block_pre_merge(&self) -> bool { - matches!(self, Self::Validation(BlockValidationError::BlockPreMerge { .. })) - } -} - -/// All error variants possible when inserting a block -#[derive(Debug, thiserror::Error)] -pub enum InsertBlockErrorKind { - /// Failed to recover senders for the block - #[error("failed to recover senders for block")] - SenderRecovery, - /// Block violated consensus rules. - #[error(transparent)] - Consensus(#[from] ConsensusError), - /// Block execution failed. - #[error(transparent)] - Execution(#[from] BlockExecutionError), - /// Block violated tree invariants. - #[error(transparent)] - Tree(#[from] BlockchainTreeError), - /// Provider error. - #[error(transparent)] - Provider(#[from] ProviderError), - /// An internal error occurred, like interacting with the database. - #[error(transparent)] - Internal(#[from] Box), - /// Canonical error. - #[error(transparent)] - Canonical(#[from] CanonicalError), -} - -impl InsertBlockErrorKind { - /// Returns true if the error is a tree error - pub const fn is_tree_error(&self) -> bool { - matches!(self, Self::Tree(_)) - } - - /// Returns true if the error is a consensus error - pub const fn is_consensus_error(&self) -> bool { - matches!(self, Self::Consensus(_)) - } - - /// Returns true if this error is a state root error - pub const fn is_state_root_error(&self) -> bool { - // we need to get the state root errors inside of the different variant branches - match self { - Self::Execution(err) => { - matches!( - err, - BlockExecutionError::Validation(BlockValidationError::StateRoot { .. }) - ) - } - Self::Canonical(err) => { - matches!( - err, - CanonicalError::Validation(BlockValidationError::StateRoot { .. 
}) | - CanonicalError::Provider( - ProviderError::StateRootMismatch(_) | - ProviderError::UnwindStateRootMismatch(_) - ) - ) - } - Self::Provider(err) => { - matches!( - err, - ProviderError::StateRootMismatch(_) | ProviderError::UnwindStateRootMismatch(_) - ) - } - _ => false, - } - } - - /// Returns true if the error is caused by an invalid block - /// - /// This is intended to be used to determine if the block should be marked as invalid. - #[allow(clippy::match_same_arms)] - pub const fn is_invalid_block(&self) -> bool { - match self { - Self::SenderRecovery | Self::Consensus(_) => true, - // other execution errors that are considered internal errors - Self::Execution(err) => { - match err { - BlockExecutionError::Validation(_) | BlockExecutionError::Consensus(_) => { - // this is caused by an invalid block - true - } - // these are internal errors, not caused by an invalid block - BlockExecutionError::Internal(_) => false, - } - } - Self::Tree(err) => { - match err { - BlockchainTreeError::PendingBlockIsFinalized { .. } => { - // the block's number is lower than the finalized block's number - true - } - BlockchainTreeError::BlockSideChainIdConsistency { .. } | - BlockchainTreeError::CanonicalChain { .. } | - BlockchainTreeError::BlockNumberNotFoundInChain { .. } | - BlockchainTreeError::BlockHashNotFoundInChain { .. } | - BlockchainTreeError::BlockBufferingFailed { .. } | - BlockchainTreeError::GenesisBlockHasNoParent => false, - } - } - Self::Provider(_) | Self::Internal(_) => { - // any other error, such as database errors, are considered internal errors - false - } - Self::Canonical(err) => match err { - CanonicalError::BlockchainTree(_) | - CanonicalError::CanonicalCommit(_) | - CanonicalError::CanonicalRevert(_) | - CanonicalError::OptimisticTargetRevert(_) | - CanonicalError::Provider(_) => false, - CanonicalError::Validation(_) => true, - }, - } - } - - /// Returns true if this is a block pre merge error. - pub const fn is_block_pre_merge(&self) -> bool { - matches!( - self, - Self::Execution(BlockExecutionError::Validation( - BlockValidationError::BlockPreMerge { .. } - )) - ) - } - - /// Returns true if the error is an execution error - pub const fn is_execution_error(&self) -> bool { - matches!(self, Self::Execution(_)) - } - - /// Returns true if the error is an internal error - pub const fn is_internal(&self) -> bool { - matches!(self, Self::Internal(_)) - } - - /// Returns the error if it is a tree error - pub const fn as_tree_error(&self) -> Option { - match self { - Self::Tree(err) => Some(*err), - _ => None, - } - } - - /// Returns the error if it is a consensus error - pub const fn as_consensus_error(&self) -> Option<&ConsensusError> { - match self { - Self::Consensus(err) => Some(err), - _ => None, - } - } - - /// Returns the error if it is an execution error - pub const fn as_execution_error(&self) -> Option<&BlockExecutionError> { - match self { - Self::Execution(err) => Some(err), - _ => None, - } - } -} diff --git a/crates/blockchain-tree-api/src/lib.rs b/crates/blockchain-tree-api/src/lib.rs deleted file mode 100644 index 7e1d0d714c149..0000000000000 --- a/crates/blockchain-tree-api/src/lib.rs +++ /dev/null @@ -1,372 +0,0 @@ -//! Interfaces and types for interacting with the blockchain tree. 
-#![doc( - html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", - html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", - issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -use self::error::CanonicalError; -use crate::error::InsertBlockError; -use alloy_eips::BlockNumHash; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; -use reth_storage_errors::provider::{ProviderError, ProviderResult}; -use std::collections::BTreeMap; - -pub mod error; - -/// * [`BlockchainTreeEngine::insert_block`]: Connect block to chain, execute it and if valid insert -/// block inside tree. -/// * [`BlockchainTreeEngine::finalize_block`]: Remove chains that join to now finalized block, as -/// chain becomes invalid. -/// * [`BlockchainTreeEngine::make_canonical`]: Check if we have the hash of block that we want to -/// finalize and commit it to db. If we don't have the block, syncing should start to fetch the -/// blocks from p2p. Do reorg in tables if canonical chain if needed. -pub trait BlockchainTreeEngine: BlockchainTreeViewer + Send + Sync { - /// Recover senders and call [`BlockchainTreeEngine::insert_block`]. - /// - /// This will recover all senders of the transactions in the block first, and then try to insert - /// the block. - fn insert_block_without_senders( - &self, - block: SealedBlock, - validation_kind: BlockValidationKind, - ) -> Result { - match block.try_seal_with_senders() { - Ok(block) => self.insert_block(block, validation_kind), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), - } - } - - /// Recover senders and call [`BlockchainTreeEngine::buffer_block`]. - /// - /// This will recover all senders of the transactions in the block first, and then try to buffer - /// the block. - fn buffer_block_without_senders(&self, block: SealedBlock) -> Result<(), InsertBlockError> { - match block.try_seal_with_senders() { - Ok(block) => self.buffer_block(block), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), - } - } - - /// Buffer block with senders - fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError>; - - /// Inserts block with senders - /// - /// The `validation_kind` parameter controls which validation checks are performed. - /// - /// Caution: If the block was received from the consensus layer, this should always be called - /// with [`BlockValidationKind::Exhaustive`] to validate the state root, if possible to adhere - /// to the engine API spec. - fn insert_block( - &self, - block: SealedBlockWithSenders, - validation_kind: BlockValidationKind, - ) -> Result; - - /// Finalize blocks up until and including `finalized_block`, and remove them from the tree. - fn finalize_block(&self, finalized_block: BlockNumber) -> ProviderResult<()>; - - /// Reads the last `N` canonical hashes from the database and updates the block indices of the - /// tree by attempting to connect the buffered blocks to canonical hashes. - /// - /// - /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the - /// `BLOCKHASH` opcode in the EVM. - /// - /// # Note - /// - /// This finalizes `last_finalized_block` prior to reading the canonical hashes (using - /// [`BlockchainTreeEngine::finalize_block`]). 
- fn connect_buffered_blocks_to_canonical_hashes_and_finalize( - &self, - last_finalized_block: BlockNumber, - ) -> Result<(), CanonicalError>; - - /// Update all block hashes. iterate over present and new list of canonical hashes and compare - /// them. Remove all mismatches, disconnect them, removes all chains and clears all buffered - /// blocks before the tip. - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError>; - - /// Reads the last `N` canonical hashes from the database and updates the block indices of the - /// tree by attempting to connect the buffered blocks to canonical hashes. - /// - /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the - /// `BLOCKHASH` opcode in the EVM. - fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError>; - - /// Make a block and its parent chain part of the canonical chain by committing it to the - /// database. - /// - /// # Note - /// - /// This unwinds the database if necessary, i.e. if parts of the canonical chain have been - /// re-orged. - /// - /// # Returns - /// - /// Returns `Ok` if the blocks were canonicalized, or if the blocks were already canonical. - fn make_canonical(&self, block_hash: BlockHash) -> Result; -} - -/// Represents the kind of validation that should be performed when inserting a block. -/// -/// The motivation for this is that the engine API spec requires that block's state root is -/// validated when received from the CL. -/// -/// This step is very expensive due to how changesets are stored in the database, so we want to -/// avoid doing it if not necessary. Blocks can also originate from the network where this step is -/// not required. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] -pub enum BlockValidationKind { - /// All validation checks that can be performed. - /// - /// This includes validating the state root, if possible. - /// - /// Note: This should always be used when inserting blocks that originate from the consensus - /// layer. - #[default] - Exhaustive, - /// Perform all validation checks except for state root validation. - SkipStateRootValidation, -} - -impl BlockValidationKind { - /// Returns true if the state root should be validated if possible. - pub const fn is_exhaustive(&self) -> bool { - matches!(self, Self::Exhaustive) - } -} - -impl std::fmt::Display for BlockValidationKind { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Exhaustive => { - write!(f, "Exhaustive") - } - Self::SkipStateRootValidation => { - write!(f, "SkipStateRootValidation") - } - } - } -} - -/// All possible outcomes of a canonicalization attempt of [`BlockchainTreeEngine::make_canonical`]. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum CanonicalOutcome { - /// The block is already canonical. - AlreadyCanonical { - /// Block number and hash of current head. - head: BlockNumHash, - /// The corresponding [`SealedHeader`] that was attempted to be made a current head and - /// is already canonical. - header: SealedHeader, - }, - /// Committed the block to the database. - Committed { - /// The new corresponding canonical head - head: SealedHeader, - }, -} - -impl CanonicalOutcome { - /// Returns the header of the block that was made canonical. - pub const fn header(&self) -> &SealedHeader { - match self { - Self::AlreadyCanonical { header, .. 
} => header, - Self::Committed { head } => head, - } - } - - /// Consumes the outcome and returns the header of the block that was made canonical. - pub fn into_header(self) -> SealedHeader { - match self { - Self::AlreadyCanonical { header, .. } => header, - Self::Committed { head } => head, - } - } - - /// Returns true if the block was already canonical. - pub const fn is_already_canonical(&self) -> bool { - matches!(self, Self::AlreadyCanonical { .. }) - } -} - -/// Block inclusion can be valid, accepted, or invalid. Invalid blocks are returned as an error -/// variant. -/// -/// If we don't know the block's parent, we return `Disconnected`, as we can't claim that the block -/// is valid or not. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum BlockStatus2 { - /// The block is valid and block extends canonical chain. - Valid, - /// The block may be valid and has an unknown missing ancestor. - Disconnected { - /// Current canonical head. - head: BlockNumHash, - /// The lowest ancestor block that is not connected to the canonical chain. - missing_ancestor: BlockNumHash, - }, -} - -/// How a payload was inserted if it was valid. -/// -/// If the payload was valid, but has already been seen, [`InsertPayloadOk2::AlreadySeen(_)`] is -/// returned, otherwise [`InsertPayloadOk2::Inserted(_)`] is returned. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum InsertPayloadOk2 { - /// The payload was valid, but we have already seen it. - AlreadySeen(BlockStatus2), - /// The payload was valid and inserted into the tree. - Inserted(BlockStatus2), -} - -/// From Engine API spec, block inclusion can be valid, accepted or invalid. -/// Invalid case is already covered by error, but we need to make distinction -/// between valid blocks that extend canonical chain and the ones that fork off -/// into side chains (see [`BlockAttachment`]). If we don't know the block -/// parent we are returning Disconnected status as we can't make a claim if -/// block is valid or not. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum BlockStatus { - /// If block is valid and block extends canonical chain. - /// In `BlockchainTree` terms, it forks off canonical tip. - Valid(BlockAttachment), - /// If block is valid and block forks off canonical chain. - /// If blocks is not connected to canonical chain. - Disconnected { - /// Current canonical head. - head: BlockNumHash, - /// The lowest ancestor block that is not connected to the canonical chain. - missing_ancestor: BlockNumHash, - }, -} - -/// Represents what kind of block is being executed and validated. -/// -/// This is required to: -/// - differentiate whether trie state updates should be cached. -/// - inform other -/// -/// This is required because the state root check can only be performed if the targeted block can be -/// traced back to the canonical __head__. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum BlockAttachment { - /// The `block` is canonical or a descendant of the canonical head. - /// ([`head..(block.parent)*,block`]) - Canonical, - /// The block can be traced back to an ancestor of the canonical head: a historical block, but - /// this chain does __not__ include the canonical head. - HistoricalFork, -} - -impl BlockAttachment { - /// Returns `true` if the block is canonical or a descendant of the canonical head. - #[inline] - pub const fn is_canonical(&self) -> bool { - matches!(self, Self::Canonical) - } -} - -/// How a payload was inserted if it was valid. 
-/// -/// If the payload was valid, but has already been seen, [`InsertPayloadOk::AlreadySeen(_)`] is -/// returned, otherwise [`InsertPayloadOk::Inserted(_)`] is returned. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum InsertPayloadOk { - /// The payload was valid, but we have already seen it. - AlreadySeen(BlockStatus), - /// The payload was valid and inserted into the tree. - Inserted(BlockStatus), -} - -/// Allows read only functionality on the blockchain tree. -/// -/// Tree contains all blocks that are not canonical that can potentially be included -/// as canonical chain. For better explanation we can group blocks into four groups: -/// * Canonical chain blocks -/// * Side chain blocks. Side chain are block that forks from canonical chain but not its tip. -/// * Pending blocks that extend the canonical chain but are not yet included. -/// * Future pending blocks that extend the pending blocks. -pub trait BlockchainTreeViewer: Send + Sync { - /// Returns the header with matching hash from the tree, if it exists. - /// - /// Caution: This will not return headers from the canonical chain. - fn header_by_hash(&self, hash: BlockHash) -> Option; - - /// Returns the block with matching hash from the tree, if it exists. - /// - /// Caution: This will not return blocks from the canonical chain or buffered blocks that are - /// disconnected from the canonical chain. - fn block_by_hash(&self, hash: BlockHash) -> Option; - - /// Returns the block with matching hash from the tree, if it exists. - /// - /// Caution: This will not return blocks from the canonical chain or buffered blocks that are - /// disconnected from the canonical chain. - fn block_with_senders_by_hash(&self, hash: BlockHash) -> Option; - - /// Returns the _buffered_ (disconnected) header with matching hash from the internal buffer if - /// it exists. - /// - /// Caution: Unlike [`Self::block_by_hash`] this will only return headers that are currently - /// disconnected from the canonical chain. - fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option; - - /// Returns true if the tree contains the block with matching hash. - fn contains(&self, hash: BlockHash) -> bool { - self.block_by_hash(hash).is_some() - } - - /// Return whether or not the block is known and in the canonical chain. - fn is_canonical(&self, hash: BlockHash) -> Result; - - /// Given the hash of a block, this checks the buffered blocks for the lowest ancestor in the - /// buffer. - /// - /// If there is a buffered block with the given hash, this returns the block itself. - fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option; - - /// Return `BlockchainTree` best known canonical chain tip (`BlockHash`, `BlockNumber`) - fn canonical_tip(&self) -> BlockNumHash; - - /// Return block number and hash that extends the canonical chain tip by one. - /// - /// If there is no such block, this returns `None`. - fn pending_block_num_hash(&self) -> Option; - - /// Returns the pending block if there is one. - fn pending_block(&self) -> Option { - self.block_by_hash(self.pending_block_num_hash()?.hash) - } - - /// Returns the pending block if there is one. - fn pending_block_with_senders(&self) -> Option { - self.block_with_senders_by_hash(self.pending_block_num_hash()?.hash) - } - - /// Returns the pending block and its receipts in one call. - /// - /// This exists to prevent a potential data race if the pending block changes in between - /// [`Self::pending_block`] and [`Self::pending_receipts`] calls. 
- fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)>; - - /// Returns the pending receipts if there is one. - fn pending_receipts(&self) -> Option> { - self.receipts_by_block_hash(self.pending_block_num_hash()?.hash) - } - - /// Returns the pending receipts if there is one. - fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option>; - - /// Returns the pending block if there is one. - fn pending_header(&self) -> Option { - self.header_by_hash(self.pending_block_num_hash()?.hash) - } -} From 9d51260fbcdd8249d6e0855250a27f416ac1f6a2 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 15:00:32 +0100 Subject: [PATCH 027/113] chore: rename error types (#13732) --- crates/engine/tree/src/tree/error.rs | 36 +++++++------- crates/engine/tree/src/tree/mod.rs | 71 +++++++++++++--------------- 2 files changed, 52 insertions(+), 55 deletions(-) diff --git a/crates/engine/tree/src/tree/error.rs b/crates/engine/tree/src/tree/error.rs index 025655315f1f7..54c274abbf68b 100644 --- a/crates/engine/tree/src/tree/error.rs +++ b/crates/engine/tree/src/tree/error.rs @@ -26,13 +26,13 @@ pub enum AdvancePersistenceError { .block.number(), .block.parent_hash(), .kind)] -struct InsertBlockErrorDataTwo { +struct InsertBlockErrorData { block: SealedBlockFor, #[source] - kind: InsertBlockErrorKindTwo, + kind: InsertBlockErrorKind, } -impl std::fmt::Debug for InsertBlockErrorDataTwo { +impl std::fmt::Debug for InsertBlockErrorData { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("InsertBlockError") .field("error", &self.kind) @@ -44,12 +44,12 @@ impl std::fmt::Debug for InsertBlockErrorDataTwo { } } -impl InsertBlockErrorDataTwo { - const fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { +impl InsertBlockErrorData { + const fn new(block: SealedBlockFor, kind: InsertBlockErrorKind) -> Self { Self { block, kind } } - fn boxed(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Box { + fn boxed(block: SealedBlockFor, kind: InsertBlockErrorKind) -> Box { Box::new(Self::new(block, kind)) } } @@ -57,26 +57,26 @@ impl InsertBlockErrorDataTwo { /// Error thrown when inserting a block failed because the block is considered invalid. 
#[derive(thiserror::Error)] #[error(transparent)] -pub struct InsertBlockErrorTwo { - inner: Box>, +pub struct InsertBlockError { + inner: Box>, } -// === impl InsertBlockErrorTwo === +// === impl InsertBlockError === -impl InsertBlockErrorTwo { - /// Create a new `InsertInvalidBlockErrorTwo` - pub fn new(block: SealedBlockFor, kind: InsertBlockErrorKindTwo) -> Self { - Self { inner: InsertBlockErrorDataTwo::boxed(block, kind) } +impl InsertBlockError { + /// Create a new `InsertBlockError` + pub fn new(block: SealedBlockFor, kind: InsertBlockErrorKind) -> Self { + Self { inner: InsertBlockErrorData::boxed(block, kind) } } - /// Create a new `InsertInvalidBlockError` from a consensus error + /// Create a new `InsertBlockError` from a consensus error pub fn consensus_error(error: ConsensusError, block: SealedBlockFor) -> Self { - Self::new(block, InsertBlockErrorKindTwo::Consensus(error)) + Self::new(block, InsertBlockErrorKind::Consensus(error)) } - /// Create a new `InsertInvalidBlockError` from a consensus error + /// Create a new `InsertBlockError` from a sender recovery error pub fn sender_recovery_error(block: SealedBlockFor) -> Self { - Self::new(block, InsertBlockErrorKindTwo::SenderRecovery) + Self::new(block, InsertBlockErrorKind::SenderRecovery) } /// Consumes the error and returns the block that resulted in the error #[inline] pub fn into_block(self) -> SealedBlockFor { self.inner.block } /// Returns the error kind #[inline] - pub const fn kind(&self) -> &InsertBlockErrorKindTwo { + pub const fn kind(&self) -> &InsertBlockErrorKind { &self.inner.kind } /// Consumes the type and returns the block and error kind. #[inline] - pub fn split(self) -> (SealedBlockFor, InsertBlockErrorKindTwo) { + pub fn split(self) -> (SealedBlockFor, InsertBlockErrorKind) { let inner = *self.inner; (inner.block, inner.kind) } } -impl std::fmt::Debug for InsertBlockErrorTwo { +impl std::fmt::Debug for InsertBlockError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.inner, f) } } /// All error variants possible when inserting a block #[derive(Debug, thiserror::Error)] -pub enum InsertBlockErrorKindTwo { +pub enum InsertBlockErrorKind { /// Failed to recover senders for the block #[error("failed to recover senders for block")] SenderRecovery, /// Consensus error #[error(transparent)] Consensus(#[from] ConsensusError), /// Provider error #[error(transparent)] Provider(#[from] ProviderError), /// Other errors #[error(transparent)] Other(#[from] Box), } -impl InsertBlockErrorKindTwo { +impl InsertBlockErrorKind { /// Returns an [`InsertBlockValidationError`] if the error is caused by an invalid block. 
/// /// Returns an [`InsertBlockFatalError`] if the error is caused by an error that is not diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index c678e290fe981..c09cf9b31c247 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -16,7 +16,7 @@ use alloy_rpc_types_engine::{ PayloadValidationError, }; use block_buffer::BlockBuffer; -use error::{InsertBlockErrorKindTwo, InsertBlockErrorTwo, InsertBlockFatalError}; +use error::{InsertBlockError, InsertBlockErrorKind, InsertBlockFatalError}; use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, MemoryOverlayStateProvider, NewCanonicalChain, }; @@ -846,17 +846,17 @@ where match self.insert_block_without_senders(block) { Ok(status) => { let status = match status { - InsertPayloadOk2::Inserted(BlockStatus2::Valid) => { + InsertPayloadOk::Inserted(BlockStatus::Valid) => { latest_valid_hash = Some(block_hash); self.try_connect_buffered_blocks(num_hash)?; PayloadStatusEnum::Valid } - InsertPayloadOk2::AlreadySeen(BlockStatus2::Valid) => { + InsertPayloadOk::AlreadySeen(BlockStatus::Valid) => { latest_valid_hash = Some(block_hash); PayloadStatusEnum::Valid } - InsertPayloadOk2::Inserted(BlockStatus2::Disconnected { .. }) | - InsertPayloadOk2::AlreadySeen(BlockStatus2::Disconnected { .. }) => { + InsertPayloadOk::Inserted(BlockStatus::Disconnected { .. }) | + InsertPayloadOk::AlreadySeen(BlockStatus::Disconnected { .. }) => { // not known to be invalid, but we don't know anything else PayloadStatusEnum::Syncing } @@ -1839,7 +1839,7 @@ where Ok(res) => { debug!(target: "engine::tree", child =?child_num_hash, ?res, "connected buffered block"); if self.is_sync_target_head(child_num_hash.hash) && - matches!(res, InsertPayloadOk2::Inserted(BlockStatus2::Valid)) + matches!(res, InsertPayloadOk::Inserted(BlockStatus::Valid)) { self.make_canonical(child_num_hash.hash)?; } @@ -1864,10 +1864,10 @@ where fn buffer_block_without_senders( &mut self, block: SealedBlockFor, - ) -> Result<(), InsertBlockErrorTwo> { + ) -> Result<(), InsertBlockError> { match block.try_seal_with_senders() { Ok(block) => self.buffer_block(block), - Err(block) => Err(InsertBlockErrorTwo::sender_recovery_error(block)), + Err(block) => Err(InsertBlockError::sender_recovery_error(block)), } } @@ -1875,9 +1875,9 @@ where fn buffer_block( &mut self, block: SealedBlockWithSenders, - ) -> Result<(), InsertBlockErrorTwo> { + ) -> Result<(), InsertBlockError> { if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockErrorTwo::consensus_error(err, block.block)) + return Err(InsertBlockError::consensus_error(err, block.block)) } self.state.buffer.insert_block(block); Ok(()) @@ -2149,7 +2149,7 @@ where // try to append the block match self.insert_block(block) { - Ok(InsertPayloadOk2::Inserted(BlockStatus2::Valid)) => { + Ok(InsertPayloadOk::Inserted(BlockStatus::Valid)) => { if self.is_sync_target_head(block_num_hash.hash) { trace!(target: "engine::tree", "appended downloaded sync target block"); @@ -2162,10 +2162,7 @@ where trace!(target: "engine::tree", "appended downloaded block"); self.try_connect_buffered_blocks(block_num_hash)?; } - Ok(InsertPayloadOk2::Inserted(BlockStatus2::Disconnected { - head, - missing_ancestor, - })) => { + Ok(InsertPayloadOk::Inserted(BlockStatus::Disconnected { head, missing_ancestor })) => { // block is not connected to the canonical head, we need to download // its missing branch first return Ok(self.on_disconnected_downloaded_block( @@ -2174,7 +2171,7 @@ where head, 
)) } - Ok(InsertPayloadOk2::AlreadySeen(_)) => { + Ok(InsertPayloadOk::AlreadySeen(_)) => { trace!(target: "engine::tree", "downloaded block already executed"); } Err(err) => { @@ -2191,29 +2188,29 @@ where fn insert_block_without_senders( &mut self, block: SealedBlockFor, - ) -> Result> { + ) -> Result> { match block.try_seal_with_senders() { Ok(block) => self.insert_block(block), - Err(block) => Err(InsertBlockErrorTwo::sender_recovery_error(block)), + Err(block) => Err(InsertBlockError::sender_recovery_error(block)), } } fn insert_block( &mut self, block: SealedBlockWithSenders, - ) -> Result> { + ) -> Result> { self.insert_block_inner(block.clone()) - .map_err(|kind| InsertBlockErrorTwo::new(block.block, kind)) + .map_err(|kind| InsertBlockError::new(block.block, kind)) } fn insert_block_inner( &mut self, block: SealedBlockWithSenders, - ) -> Result { + ) -> Result { debug!(target: "engine::tree", block=?block.num_hash(), parent = ?block.parent_hash(), state_root = ?block.state_root(), "Inserting new block into tree"); if self.block_by_hash(block.hash())?.is_some() { - return Ok(InsertPayloadOk2::AlreadySeen(BlockStatus2::Valid)) + return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid)) } let start = Instant::now(); @@ -2235,7 +2232,7 @@ where self.state.buffer.insert_block(block); - return Ok(InsertPayloadOk2::Inserted(BlockStatus2::Disconnected { + return Ok(InsertPayloadOk::Inserted(BlockStatus::Disconnected { head: self.state.tree_state.current_canonical_head, missing_ancestor, })) @@ -2243,7 +2240,7 @@ where // now validate against the parent let parent_block = self.sealed_header_by_hash(block.parent_hash())?.ok_or_else(|| { - InsertBlockErrorKindTwo::Provider(ProviderError::HeaderNotFound( + InsertBlockErrorKind::Provider(ProviderError::HeaderNotFound( block.parent_hash().into(), )) })?; @@ -2271,7 +2268,7 @@ where let state_root_config = StateRootConfig::new_from_input( consistent_view.clone(), self.compute_trie_input(consistent_view.clone(), block.header().parent_hash()) - .map_err(|e| InsertBlockErrorKindTwo::Other(Box::new(e)))?, + .map_err(|e| InsertBlockErrorKind::Other(Box::new(e)))?, ); let provider_ro = consistent_view.provider_ro()?; @@ -2402,7 +2399,7 @@ where state_provider.state_root_with_updates(hashed_state.clone())?; (root, updates, root_time.elapsed()) } - Err(error) => return Err(InsertBlockErrorKindTwo::Other(Box::new(error))), + Err(error) => return Err(InsertBlockErrorKind::Other(Box::new(error))), } } } else { @@ -2412,7 +2409,7 @@ where (root, updates, root_time.elapsed()) }; - Result::<_, InsertBlockErrorKindTwo>::Ok(( + Result::<_, InsertBlockErrorKind>::Ok(( state_root, trie_updates, hashed_state, @@ -2467,7 +2464,7 @@ where self.emit_event(EngineApiEvent::BeaconConsensus(engine_event)); debug!(target: "engine::tree", block=?BlockNumHash::new(block_number, block_hash), "Finished inserting block"); - Ok(InsertPayloadOk2::Inserted(BlockStatus2::Valid)) + Ok(InsertPayloadOk::Inserted(BlockStatus::Valid)) } /// Compute state root for the given hashed post state in parallel. @@ -2527,7 +2524,7 @@ where /// Returns the proper payload status response if the block is invalid. fn on_insert_block_error( &mut self, - error: InsertBlockErrorTwo, + error: InsertBlockError, ) -> Result { let (block, error) = error.split(); @@ -2771,7 +2768,7 @@ where /// If we don't know the block's parent, we return `Disconnected`, as we can't claim that the block /// is valid or not. 
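// A sketch of how a caller can act on the `Disconnected` status documented
// above (`request_headers_range` is hypothetical; the variant fields come
// from the enum below):
//
// if let InsertPayloadOk::Inserted(BlockStatus::Disconnected { head, missing_ancestor }) = status {
//     // backfill from the missing ancestor until the chain reconnects to the
//     // current canonical head
//     request_headers_range(missing_ancestor, head);
// }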
#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum BlockStatus2 { +pub enum BlockStatus { /// The block is valid and block extends canonical chain. Valid, /// The block may be valid and has an unknown missing ancestor. @@ -2785,14 +2782,14 @@ pub enum BlockStatus2 { /// How a payload was inserted if it was valid. /// -/// If the payload was valid, but has already been seen, [`InsertPayloadOk2::AlreadySeen(_)`] is -/// returned, otherwise [`InsertPayloadOk2::Inserted(_)`] is returned. +/// If the payload was valid, but has already been seen, [`InsertPayloadOk::AlreadySeen(_)`] is +/// returned, otherwise [`InsertPayloadOk::Inserted(_)`] is returned. #[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub enum InsertPayloadOk2 { +pub enum InsertPayloadOk { /// The payload was valid, but we have already seen it. - AlreadySeen(BlockStatus2), + AlreadySeen(BlockStatus), /// The payload was valid and inserted into the tree. - Inserted(BlockStatus2), + Inserted(BlockStatus), } #[cfg(test)] @@ -3017,7 +3014,7 @@ mod tests { fn insert_block( &mut self, block: SealedBlockWithSenders, - ) -> Result> { + ) -> Result> { let execution_outcome = self.block_builder.get_execution_outcome(block.clone()); self.extend_execution_outcome([execution_outcome]); self.tree.provider.add_state_root(block.state_root); @@ -3374,7 +3371,7 @@ mod tests { let outcome = test_harness.tree.insert_block_without_senders(sealed.clone()).unwrap(); assert_eq!( outcome, - InsertPayloadOk2::Inserted(BlockStatus2::Disconnected { + InsertPayloadOk::Inserted(BlockStatus::Disconnected { head: test_harness.tree.state.tree_state.current_canonical_head, missing_ancestor: sealed.parent_num_hash() }) From 8f2ecc44e8aee6c6a4bf9b137acaf203efd6afd8 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 8 Jan 2025 17:02:49 +0300 Subject: [PATCH 028/113] refactor: reduce `Hardforks` trait usage (#13728) --- Cargo.lock | 1 + crates/chainspec/src/spec.rs | 4 ++ crates/cli/commands/src/p2p/mod.rs | 4 +- crates/cli/commands/src/stage/mod.rs | 4 +- crates/cli/commands/src/stage/run.rs | 4 +- crates/consensus/common/src/calc.rs | 11 +++-- crates/consensus/common/src/validation.rs | 35 +++++++------- .../ethereum-forks/src/hardforks/ethereum.rs | 46 +++++++++++++++---- crates/ethereum/consensus/src/lib.rs | 30 ++++++------ crates/optimism/chainspec/src/lib.rs | 10 +++- crates/optimism/consensus/src/proof.rs | 8 ++-- crates/optimism/hardforks/Cargo.toml | 1 + crates/optimism/hardforks/src/lib.rs | 23 ++++++---- crates/optimism/rpc/src/eth/pending_block.rs | 3 +- crates/rpc/rpc-engine-api/Cargo.toml | 4 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 7 ++- crates/rpc/rpc/src/admin.rs | 7 ++- 17 files changed, 126 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f83bda895de68..9cfd8d9a1c0a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8259,6 +8259,7 @@ version = "1.1.5" dependencies = [ "alloy-chains", "alloy-primitives", + "auto_impl", "once_cell", "reth-ethereum-forks", "serde", diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 4307b0c6bee57..c49082e7ce993 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -765,6 +765,10 @@ impl Hardforks for ChainSpec { } impl EthereumHardforks for ChainSpec { + fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition { + self.fork(fork) + } + fn get_final_paris_total_difficulty(&self) -> Option { self.get_final_paris_total_difficulty() } diff --git a/crates/cli/commands/src/p2p/mod.rs 
b/crates/cli/commands/src/p2p/mod.rs index d1821ded826ec..40708714d38bf 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -5,7 +5,7 @@ use std::{path::PathBuf, sync::Arc}; use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::{Parser, Subcommand}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_util::{get_secret_key, hash_or_num_value_parser}; use reth_config::Config; @@ -73,7 +73,7 @@ pub enum Subcommands { Rlpx(rlpx::Command), } -impl> Command { +impl> Command { /// Execute `p2p` command pub async fn execute(self) -> eyre::Result<()> { let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); diff --git a/crates/cli/commands/src/stage/mod.rs b/crates/cli/commands/src/stage/mod.rs index 38edcc9ac5bc8..c7423930c8b2c 100644 --- a/crates/cli/commands/src/stage/mod.rs +++ b/crates/cli/commands/src/stage/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use crate::common::CliNodeTypes; use clap::{Parser, Subcommand}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_eth_wire::NetPrimitivesFor; @@ -40,7 +40,7 @@ pub enum Subcommands { Unwind(unwind::Command), } -impl> Command { +impl> Command { /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index c7655f0acb750..1fb2e2886ce95 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -6,7 +6,7 @@ use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::Sealable; use clap::Parser; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_cli_util::get_secret_key; @@ -104,7 +104,7 @@ pub struct Command { network: NetworkArgs, } -impl> Command { +impl> Command { /// Execute `stage` command pub async fn execute(self, ctx: CliContext, executor: F) -> eyre::Result<()> where diff --git a/crates/consensus/common/src/calc.rs b/crates/consensus/common/src/calc.rs index 584e90f04d938..2f3ad6560c3e1 100644 --- a/crates/consensus/common/src/calc.rs +++ b/crates/consensus/common/src/calc.rs @@ -1,6 +1,6 @@ use alloy_consensus::constants::ETH_TO_WEI; use alloy_primitives::BlockNumber; -use reth_chainspec::{EthereumHardfork, EthereumHardforks, Hardforks}; +use reth_chainspec::EthereumHardforks; /// Calculates the base block reward. /// @@ -35,10 +35,13 @@ pub fn base_block_reward( /// Calculates the base block reward __before__ the merge (Paris hardfork). /// /// Caution: The caller must ensure that the block number is before the merge. 
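// For context (facts from EIP-649 and EIP-1234, not stated in this diff): the
// pre-merge base reward is 5 ETH from Frontier, lowered to 3 ETH at Byzantium
// and to 2 ETH at Constantinople — exactly the ladder the rewritten function
// below encodes as `ETH_TO_WEI * {2, 3, 5}`, checked from the newest fork down.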
-pub fn base_block_reward_pre_merge(chain_spec: impl Hardforks, block_number: BlockNumber) -> u128 { - if chain_spec.fork(EthereumHardfork::Constantinople).active_at_block(block_number) { +pub fn base_block_reward_pre_merge( + chain_spec: impl EthereumHardforks, + block_number: BlockNumber, +) -> u128 { + if chain_spec.is_constantinople_active_at_block(block_number) { ETH_TO_WEI * 2 - } else if chain_spec.fork(EthereumHardfork::Byzantium).active_at_block(block_number) { + } else if chain_spec.is_byzantium_active_at_block(block_number) { ETH_TO_WEI * 3 } else { ETH_TO_WEI * 5 diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 67ecc886ea672..61251b80e5f8b 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -25,8 +25,7 @@ pub fn validate_header_base_fee( header: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.is_fork_active_at_block(EthereumHardfork::London, header.number()) && - header.base_fee_per_gas().is_none() + if chain_spec.is_london_active_at_block(header.number()) && header.base_fee_per_gas().is_none() { return Err(ConsensusError::BaseFeeMissing) } @@ -253,23 +252,25 @@ pub fn validate_against_parent_eip1559_base_fee< parent: &H, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { - if chain_spec.fork(EthereumHardfork::London).active_at_block(header.number()) { + if chain_spec.is_london_active_at_block(header.number()) { let base_fee = header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; - let expected_base_fee = - if chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number()) { - alloy_eips::eip1559::INITIAL_BASE_FEE - } else { - // This BaseFeeMissing will not happen as previous blocks are checked to have - // them. - let base_fee = parent.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; - calc_next_block_base_fee( - parent.gas_used(), - parent.gas_limit(), - base_fee, - chain_spec.base_fee_params_at_timestamp(header.timestamp()), - ) - }; + let expected_base_fee = if chain_spec + .ethereum_fork_activation(EthereumHardfork::London) + .transitions_at_block(header.number()) + { + alloy_eips::eip1559::INITIAL_BASE_FEE + } else { + // This BaseFeeMissing will not happen as previous blocks are checked to have + // them. + let base_fee = parent.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; + calc_next_block_base_fee( + parent.gas_used(), + parent.gas_limit(), + base_fee, + chain_spec.base_fee_params_at_timestamp(header.timestamp()), + ) + }; if expected_base_fee != base_fee { return Err(ConsensusError::BaseFeeDiff(GotExpected { expected: expected_base_fee, diff --git a/crates/ethereum-forks/src/hardforks/ethereum.rs b/crates/ethereum-forks/src/hardforks/ethereum.rs index c62c6a91a355f..c9b1a115d23b8 100644 --- a/crates/ethereum-forks/src/hardforks/ethereum.rs +++ b/crates/ethereum-forks/src/hardforks/ethereum.rs @@ -1,54 +1,80 @@ use alloy_primitives::U256; -use crate::{hardforks::Hardforks, EthereumHardfork, ForkCondition}; +use crate::{EthereumHardfork, ForkCondition}; /// Helper methods for Ethereum forks. #[auto_impl::auto_impl(&, Arc)] -pub trait EthereumHardforks: Hardforks { +pub trait EthereumHardforks: Clone { + /// Retrieves [`ForkCondition`] by an [`EthereumHardfork`]. If `fork` is not present, returns + /// [`ForkCondition::Never`]. 
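// A sketch of what implementors now provide (`MyChainSpec` is hypothetical;
// the `ChainSpec` and `OpChainSpec` impls elsewhere in this patch follow the
// same shape):
//
// impl EthereumHardforks for MyChainSpec {
//     fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition {
//         self.hardforks.fork(fork)
//     }
// }
//
// Every `is_*_active_at_*` helper below is then derived from this one lookup.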
+ fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition; + + /// Convenience method to check if an [`EthereumHardfork`] is active at a given timestamp. + fn is_ethereum_fork_active_at_timestamp(&self, fork: EthereumHardfork, timestamp: u64) -> bool { + self.ethereum_fork_activation(fork).active_at_timestamp(timestamp) + } + + /// Convenience method to check if an [`EthereumHardfork`] is active at a given block number. + fn is_ethereum_fork_active_at_block(&self, fork: EthereumHardfork, block_number: u64) -> bool { + self.ethereum_fork_activation(fork).active_at_block(block_number) + } + /// Convenience method to check if [`EthereumHardfork::Shanghai`] is active at a given /// timestamp. fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(EthereumHardfork::Shanghai, timestamp) + self.is_ethereum_fork_active_at_timestamp(EthereumHardfork::Shanghai, timestamp) } /// Convenience method to check if [`EthereumHardfork::Cancun`] is active at a given timestamp. fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(EthereumHardfork::Cancun, timestamp) + self.is_ethereum_fork_active_at_timestamp(EthereumHardfork::Cancun, timestamp) } /// Convenience method to check if [`EthereumHardfork::Prague`] is active at a given timestamp. fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(EthereumHardfork::Prague, timestamp) + self.is_ethereum_fork_active_at_timestamp(EthereumHardfork::Prague, timestamp) } /// Convenience method to check if [`EthereumHardfork::Osaka`] is active at a given timestamp. fn is_osaka_active_at_timestamp(&self, timestamp: u64) -> bool { - self.is_fork_active_at_timestamp(EthereumHardfork::Osaka, timestamp) + self.is_ethereum_fork_active_at_timestamp(EthereumHardfork::Osaka, timestamp) } /// Convenience method to check if [`EthereumHardfork::Byzantium`] is active at a given block /// number. fn is_byzantium_active_at_block(&self, block_number: u64) -> bool { - self.fork(EthereumHardfork::Byzantium).active_at_block(block_number) + self.is_ethereum_fork_active_at_block(EthereumHardfork::Byzantium, block_number) } /// Convenience method to check if [`EthereumHardfork::SpuriousDragon`] is active at a given /// block number. fn is_spurious_dragon_active_at_block(&self, block_number: u64) -> bool { - self.fork(EthereumHardfork::SpuriousDragon).active_at_block(block_number) + self.is_ethereum_fork_active_at_block(EthereumHardfork::SpuriousDragon, block_number) } /// Convenience method to check if [`EthereumHardfork::Homestead`] is active at a given block /// number. fn is_homestead_active_at_block(&self, block_number: u64) -> bool { - self.fork(EthereumHardfork::Homestead).active_at_block(block_number) + self.is_ethereum_fork_active_at_block(EthereumHardfork::Homestead, block_number) + } + + /// Convenience method to check if [`EthereumHardfork::London`] is active at a given block + /// number. + fn is_london_active_at_block(&self, block_number: u64) -> bool { + self.is_ethereum_fork_active_at_block(EthereumHardfork::London, block_number) + } + + /// Convenience method to check if [`EthereumHardfork::Constantinople`] is active at a given + /// block number. + fn is_constantinople_active_at_block(&self, block_number: u64) -> bool { + self.is_ethereum_fork_active_at_block(EthereumHardfork::Constantinople, block_number) } /// The Paris hardfork (merge) is activated via block number. 
If we have knowledge of the block, /// this function will return true if the block number is greater than or equal to the Paris /// (merge) block. fn is_paris_active_at_block(&self, block_number: u64) -> Option { - match self.fork(EthereumHardfork::Paris) { + match self.ethereum_fork_activation(EthereumHardfork::Paris) { ForkCondition::TTD { activation_block_number, .. } => { Some(block_number >= activation_block_number) } diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index c31be45e22103..b81ee1d5c4484 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -11,7 +11,7 @@ use alloy_consensus::{BlockHeader, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip7840::BlobParams, merge::ALLOWED_FUTURE_BLOCK_TIME_SECONDS}; use alloy_primitives::U256; -use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_consensus::{ Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput, }; @@ -56,16 +56,16 @@ impl EthBeaconConsensus parent: &SealedHeader, ) -> Result<(), ConsensusError> { // Determine the parent gas limit, considering elasticity multiplier on the London fork. - let parent_gas_limit = - if self.chain_spec.fork(EthereumHardfork::London).transitions_at_block(header.number()) - { - parent.gas_limit() * - self.chain_spec - .base_fee_params_at_timestamp(header.timestamp()) - .elasticity_multiplier as u64 - } else { - parent.gas_limit() - }; + let parent_gas_limit = if !self.chain_spec.is_london_active_at_block(parent.number()) && + self.chain_spec.is_london_active_at_block(header.number()) + { + parent.gas_limit() * + self.chain_spec + .base_fee_params_at_timestamp(header.timestamp()) + .elasticity_multiplier as u64 + } else { + parent.gas_limit() + }; // Check for an increase in gas limit beyond the allowed threshold. 
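// (The "allowed threshold" here is Ethereum's gas-limit bound: a block may
// change its gas limit by less than parent_gas_limit / 1024 in either
// direction; at the London transition block the parent limit is first scaled
// by the EIP-1559 elasticity multiplier, which is what the branch above
// accounts for.)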
if header.gas_limit() > parent_gas_limit { @@ -209,12 +209,10 @@ where fn validate_header_with_total_difficulty( &self, header: &H, - total_difficulty: U256, + _total_difficulty: U256, ) -> Result<(), ConsensusError> { - let is_post_merge = self - .chain_spec - .fork(EthereumHardfork::Paris) - .active_at_ttd(total_difficulty, header.difficulty()); + let is_post_merge = + self.chain_spec.is_paris_active_at_block(header.number()).is_some_and(|active| active); if is_post_merge { // TODO: add `is_zero_difficulty` to `alloy_consensus::BlockHeader` trait diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 6e24bdd50dae6..e8f8a084e3c2a 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -313,6 +313,10 @@ impl Hardforks for OpChainSpec { } impl EthereumHardforks for OpChainSpec { + fn ethereum_fork_activation(&self, fork: EthereumHardfork) -> ForkCondition { + self.fork(fork) + } + fn get_final_paris_total_difficulty(&self) -> Option { self.inner.get_final_paris_total_difficulty() } @@ -322,7 +326,11 @@ impl EthereumHardforks for OpChainSpec { } } -impl OpHardforks for OpChainSpec {} +impl OpHardforks for OpChainSpec { + fn op_fork_activation(&self, fork: OpHardfork) -> ForkCondition { + self.fork(fork) + } +} impl From for OpChainSpec { fn from(genesis: Genesis) -> Self { diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs index d539f5739f9b6..e83990bdaba68 100644 --- a/crates/optimism/consensus/src/proof.rs +++ b/crates/optimism/consensus/src/proof.rs @@ -6,7 +6,7 @@ use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; use alloy_trie::root::ordered_trie_root_with_encoder; use reth_chainspec::ChainSpec; -use reth_optimism_forks::OpHardfork; +use reth_optimism_forks::{OpHardfork, OpHardforks}; use reth_optimism_primitives::OpReceipt; use reth_primitives::ReceiptWithBloom; @@ -46,7 +46,7 @@ pub(crate) fn calculate_receipt_root_optimism( /// NOTE: Prefer calculate receipt root optimism if you have log blooms memoized. pub fn calculate_receipt_root_no_memo_optimism( receipts: &[&OpReceipt], - chain_spec: impl reth_chainspec::Hardforks, + chain_spec: impl OpHardforks, timestamp: u64, ) -> B256 { // There is a minor bug in op-geth and op-erigon where in the Regolith hardfork, @@ -54,8 +54,8 @@ pub fn calculate_receipt_root_no_memo_optimism( // encoding. In the Regolith Hardfork, we must strip the deposit nonce from the // receipts before calculating the receipt root. This was corrected in the Canyon // hardfork. 
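// In other words, the deposit nonce must be stripped only inside the window
// Regolith <= timestamp < Canyon; the rewritten condition below expresses
// that same window through the typed `OpHardforks` helpers instead of raw
// `is_fork_active_at_timestamp` calls.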
- if chain_spec.is_fork_active_at_timestamp(OpHardfork::Regolith, timestamp) && - !chain_spec.is_fork_active_at_timestamp(OpHardfork::Canyon, timestamp) + if chain_spec.is_regolith_active_at_timestamp(timestamp) && + !chain_spec.is_canyon_active_at_timestamp(timestamp) { let receipts = receipts .iter() diff --git a/crates/optimism/hardforks/Cargo.toml b/crates/optimism/hardforks/Cargo.toml index 1ea23069a6854..5ac5f6fe6ce46 100644 --- a/crates/optimism/hardforks/Cargo.toml +++ b/crates/optimism/hardforks/Cargo.toml @@ -23,6 +23,7 @@ alloy-primitives.workspace = true serde = { workspace = true, optional = true } # misc +auto_impl.workspace = true once_cell.workspace = true [features] diff --git a/crates/optimism/hardforks/src/lib.rs b/crates/optimism/hardforks/src/lib.rs index 36f42155e9428..fbe77aa20a7a5 100644 --- a/crates/optimism/hardforks/src/lib.rs +++ b/crates/optimism/hardforks/src/lib.rs @@ -17,51 +17,56 @@ mod dev; pub use dev::DEV_HARDFORKS; pub use hardfork::OpHardfork; -use reth_ethereum_forks::EthereumHardforks; +use reth_ethereum_forks::{EthereumHardforks, ForkCondition}; /// Extends [`EthereumHardforks`] with optimism helper methods. +#[auto_impl::auto_impl(&, Arc)] pub trait OpHardforks: EthereumHardforks { + /// Retrieves [`ForkCondition`] by an [`OpHardfork`]. If `fork` is not present, returns + /// [`ForkCondition::Never`]. + fn op_fork_activation(&self, fork: OpHardfork) -> ForkCondition; + /// Convenience method to check if [`OpHardfork::Bedrock`] is active at a given block /// number. fn is_bedrock_active_at_block(&self, block_number: u64) -> bool { - self.fork(OpHardfork::Bedrock).active_at_block(block_number) + self.op_fork_activation(OpHardfork::Bedrock).active_at_block(block_number) } /// Returns `true` if [`Regolith`](OpHardfork::Regolith) is active at given block /// timestamp. fn is_regolith_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Regolith).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Regolith).active_at_timestamp(timestamp) } /// Returns `true` if [`Canyon`](OpHardfork::Canyon) is active at given block timestamp. fn is_canyon_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Canyon).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Canyon).active_at_timestamp(timestamp) } /// Returns `true` if [`Ecotone`](OpHardfork::Ecotone) is active at given block timestamp. fn is_ecotone_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Ecotone).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Ecotone).active_at_timestamp(timestamp) } /// Returns `true` if [`Fjord`](OpHardfork::Fjord) is active at given block timestamp. fn is_fjord_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Fjord).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Fjord).active_at_timestamp(timestamp) } /// Returns `true` if [`Granite`](OpHardfork::Granite) is active at given block timestamp. fn is_granite_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Granite).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Granite).active_at_timestamp(timestamp) } /// Returns `true` if [`Holocene`](OpHardfork::Holocene) is active at given block /// timestamp. 
fn is_holocene_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Holocene).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Holocene).active_at_timestamp(timestamp) } /// Returns `true` if [`Isthmus`](OpHardfork::Isthmus) is active at given block /// timestamp. fn is_isthmus_active_at_timestamp(&self, timestamp: u64) -> bool { - self.fork(OpHardfork::Isthmus).active_at_timestamp(timestamp) + self.op_fork_activation(OpHardfork::Isthmus).active_at_timestamp(timestamp) } } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 3ba5df6968a80..5361d00be0d79 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -12,6 +12,7 @@ use op_alloy_network::Network; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; +use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{OpBlock, OpReceipt, OpTransactionSigned}; use reth_primitives::{logs_bloom, BlockBody, SealedBlockWithSenders}; use reth_provider::{ @@ -40,7 +41,7 @@ where Block = OpBlock, Receipt = OpReceipt, Header = reth_primitives::Header, - > + ChainSpecProvider + > + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool>>, Evm: ConfigureEvm< diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 9bf9ae70710b4..2b4560028db56 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-primitives.workspace = true reth-rpc-api.workspace = true reth-storage-api.workspace = true reth-payload-builder.workspace = true @@ -49,9 +48,10 @@ parking_lot.workspace = true [dev-dependencies] reth-ethereum-engine-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } +reth-primitives.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-tokio-util.workspace = true reth-testing-utils.workspace = true alloy-rlp.workspace = true -assert_matches.workspace = true \ No newline at end of file +assert_matches.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 9e1f68072849c..fa3fba285745e 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -16,14 +16,13 @@ use alloy_rpc_types_engine::{ use async_trait::async_trait; use jsonrpsee_core::RpcResult; use parking_lot::Mutex; -use reth_chainspec::{EthereumHardforks, Hardforks}; +use reth_chainspec::{EthereumHardfork, EthereumHardforks}; use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes, EngineValidator}; use reth_payload_builder::PayloadStore; use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, PayloadBuilderAttributes, PayloadOrAttributes, }; -use reth_primitives::EthereumHardfork; use reth_rpc_api::EngineApiServer; use reth_rpc_types_compat::engine::payload::convert_to_payload_body_v1; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; @@ -615,7 +614,7 @@ where let merge_terminal_td = self .inner .chain_spec - .fork(EthereumHardfork::Paris) + .ethereum_fork_activation(EthereumHardfork::Paris) .ttd() .expect("the engine API should not be running for chains w/o paris"); @@ -1024,7 +1023,7 @@ mod tests { 
use super::*; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use assert_matches::assert_matches; - use reth_chainspec::{ChainSpec, MAINNET}; + use reth_chainspec::{ChainSpec, EthereumHardfork, MAINNET}; use reth_engine_primitives::BeaconEngineMessage; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_payload_builder::test_utils::spawn_test_payload_service; diff --git a/crates/rpc/rpc/src/admin.rs b/crates/rpc/rpc/src/admin.rs index 75cfece9d5643..8ee6d5c861a45 100644 --- a/crates/rpc/rpc/src/admin.rs +++ b/crates/rpc/rpc/src/admin.rs @@ -114,7 +114,10 @@ where .chain_spec .get_final_paris_total_difficulty() .is_some(), - terminal_total_difficulty: self.chain_spec.fork(EthereumHardfork::Paris).ttd(), + terminal_total_difficulty: self + .chain_spec + .ethereum_fork_activation(EthereumHardfork::Paris) + .ttd(), deposit_contract_address: self.chain_spec.deposit_contract().map(|dc| dc.address), ..self.chain_spec.genesis().config.clone() }; @@ -125,7 +128,7 @@ where $( // don't overwrite if already set if $config.$field.is_none() { - $config.$field = match self.chain_spec.fork(EthereumHardfork::$fork) { + $config.$field = match self.chain_spec.ethereum_fork_activation(EthereumHardfork::$fork) { ForkCondition::Block(block) => Some(block), ForkCondition::TTD { fork_block, .. } => fork_block, ForkCondition::Timestamp(ts) => Some(ts), From 6bfb3bb6f8765f83190c49f49b2f7037a6507dd4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 15:30:24 +0100 Subject: [PATCH 029/113] chore: rm redundant std cfgs (#13733) --- crates/evm/execution-errors/src/lib.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index f49fa693f241b..cc723fa110ff7 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -11,7 +11,10 @@ extern crate alloc; -use alloc::{boxed::Box, string::String}; +use alloc::{ + boxed::Box, + string::{String, ToString}, +}; use alloy_eips::BlockNumHash; use alloy_primitives::B256; use reth_consensus::ConsensusError; @@ -134,7 +137,6 @@ pub enum BlockExecutionError { impl BlockExecutionError { /// Create a new [`BlockExecutionError::Internal`] variant, containing a /// [`InternalBlockExecutionError::Other`] error. - #[cfg(feature = "std")] pub fn other(error: E) -> Self where E: core::error::Error + Send + Sync + 'static, @@ -144,8 +146,7 @@ impl BlockExecutionError { /// Create a new [`BlockExecutionError::Internal`] variant, containing a /// [`InternalBlockExecutionError::Other`] error with the given message. - #[cfg(feature = "std")] - pub fn msg(msg: impl std::fmt::Display) -> Self { + pub fn msg(msg: impl core::fmt::Display) -> Self { Self::Internal(InternalBlockExecutionError::msg(msg)) } @@ -195,7 +196,6 @@ pub enum InternalBlockExecutionError { impl InternalBlockExecutionError { /// Create a new [`InternalBlockExecutionError::Other`] variant. - #[cfg(feature = "std")] pub fn other(error: E) -> Self where E: core::error::Error + Send + Sync + 'static, @@ -204,8 +204,7 @@ impl InternalBlockExecutionError { } /// Create a new [`InternalBlockExecutionError::Other`] from a given message. 
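// With the `#[cfg(feature = "std")]` gates dropped below, both constructors
// compile under no_std; an illustrative (not from this diff) call:
//
// return Err(BlockExecutionError::msg("mismatched state root"));
//
// This works because `msg` now bounds on `core::fmt::Display` and builds the
// string via `alloc::string::ToString` rather than their `std` counterparts.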
- #[cfg(feature = "std")] - pub fn msg(msg: impl std::fmt::Display) -> Self { + pub fn msg(msg: impl core::fmt::Display) -> Self { Self::Other(msg.to_string().into()) } } From b5734bfb656baacd9b6e90e67204eee712f248e6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 15:40:30 +0100 Subject: [PATCH 030/113] chore: rm last mentions of deprecated crates (#13734) --- .github/assets/check_wasm.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 2c6b97101b379..1370e689cdb30 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -10,9 +10,7 @@ crates=($(cargo metadata --format-version=1 --no-deps | jq -r '.packages[].name' exclude_crates=( # The following require investigation if they can be fixed reth-basic-payload-builder - reth-beacon-consensus reth-bench - reth-blockchain-tree reth-cli reth-cli-commands reth-cli-runner From f2521b54de9fa8cc76a0874879b725752e568b09 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 16:07:52 +0100 Subject: [PATCH 031/113] chore: misc SealedBlock prep (#13736) --- crates/chain-state/src/in_memory.rs | 2 +- crates/exex/exex/src/backfill/job.rs | 2 +- crates/net/p2p/src/full_block.rs | 2 +- crates/primitives/src/block.rs | 30 ++----------------- crates/stages/stages/src/stages/merkle.rs | 2 +- .../storage/provider/src/test_utils/blocks.rs | 10 +++---- 6 files changed, 12 insertions(+), 36 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 49c20bacdc2ba..8b5c8140acd7a 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -640,7 +640,7 @@ impl BlockState { pub fn block_with_senders(&self) -> BlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); - let (header, body) = block.split_header_body(); + let (header, body) = block.split(); BlockWithSenders::new_unchecked(N::Block::new(header.unseal(), body), senders) } diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 161253d2b18c3..3bb0e04ec25f9 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -108,7 +108,7 @@ where // Unseal the block for execution let (block, senders) = block.into_components(); - let (header, body) = block.split_header_body(); + let (header, body) = block.split(); let (unsealed_header, hash) = header.split(); let block = P::Block::new(unsealed_header, body).with_senders_unchecked(senders); diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index fdee01ab99889..309252bb8f26a 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -429,7 +429,7 @@ where // put response hashes back into bodies map since we aren't returning them as a // response for block in valid_responses { - let (header, body) = block.split_header_body(); + let (header, body) = block.split(); self.bodies.insert(header, BodyResponse::Validated(body)); } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 2891d1285faa7..24e38997fc5e4 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -5,7 +5,7 @@ use crate::{ use alloc::vec::Vec; use alloy_consensus::Header; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; -use alloy_primitives::{Address, Bytes, B256}; +use alloy_primitives::{Address, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, 
RlpEncodable}; use derive_more::{Deref, DerefMut}; #[cfg(any(test, feature = "arbitrary"))] @@ -212,25 +212,11 @@ impl SealedBlock { /// Splits the [`BlockBody`] and [`SealedHeader`] into separate components #[inline] - pub fn split_header_body(self) -> (SealedHeader, B) { + pub fn split(self) -> (SealedHeader, B) { (self.header, self.body) } } -impl SealedBlock { - /// Returns whether or not the block contains any blob transactions. - #[inline] - pub fn has_eip4844_transactions(&self) -> bool { - self.body.has_eip4844_transactions() - } - - /// Returns whether or not the block contains any eip-7702 transactions. - #[inline] - pub fn has_eip7702_transactions(&self) -> bool { - self.body.has_eip7702_transactions() - } -} - impl SealedBlock where B: reth_primitives_traits::BlockBody, @@ -369,16 +355,6 @@ where { Block::new(self.header.unseal(), self.body) } - - /// Returns a vector of encoded 2718 transactions. - /// - /// This is also known as `raw transactions`. - /// - /// See also [`Encodable2718`]. - #[doc(alias = "raw_transactions")] - pub fn encoded_2718_transactions(&self) -> Vec { - self.body.encoded_2718_transactions() - } } impl InMemorySize for SealedBlock { @@ -457,7 +433,7 @@ impl SealedBlockWithSenders { #[inline] pub fn unseal(self) -> BlockWithSenders { let (block, senders) = self.into_components(); - let (header, body) = block.split_header_body(); + let (header, body) = block.split(); let header = header.unseal(); BlockWithSenders::new_unchecked(B::new(header, body), senders) } diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index 4c163d8042acf..a2b4655835cc1 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -525,7 +525,7 @@ mod tests { stage_progress, BlockParams { parent: preblocks.last().map(|b| b.hash()), ..Default::default() }, ) - .split_header_body(); + .split(); let mut header = header.unseal(); header.state_root = state_root( diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 2f46ef5c1e70b..9924375ecb997 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -232,7 +232,7 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { b256!("5d035ccb3e75a9057452ff060b773b213ec1fc353426174068edfc3971a0b6bd") ); - let (header, mut body) = TEST_BLOCK.clone().split_header_body(); + let (header, mut body) = TEST_BLOCK.clone().split(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); let mut header = header.unseal(); header.number = number; @@ -294,7 +294,7 @@ fn block2( b256!("90101a13dd059fa5cca99ed93d1dc23657f63626c5b8f993a2ccbdf7446b64f8") ); - let (header, mut body) = TEST_BLOCK.clone().split_header_body(); + let (header, mut body) = TEST_BLOCK.clone().split(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); let mut header = header.unseal(); @@ -359,7 +359,7 @@ fn block3( extended.extend(execution_outcome.clone()); let state_root = bundle_state_root(&extended); - let (header, mut body) = TEST_BLOCK.clone().split_header_body(); + let (header, mut body) = TEST_BLOCK.clone().split(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); let mut header = header.unseal(); header.number = number; @@ -448,7 +448,7 @@ fn block4( extended.extend(execution_outcome.clone()); let state_root = bundle_state_root(&extended); - let (header, mut body) = 
TEST_BLOCK.clone().split_header_body(); + let (header, mut body) = TEST_BLOCK.clone().split(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); let mut header = header.unseal(); header.number = number; @@ -534,7 +534,7 @@ fn block5( extended.extend(execution_outcome.clone()); let state_root = bundle_state_root(&extended); - let (header, mut body) = TEST_BLOCK.clone().split_header_body(); + let (header, mut body) = TEST_BLOCK.clone().split(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); let mut header = header.unseal(); header.number = number; From 73ed3ea440fac21728b34c31b993b55c5c5a8367 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 8 Jan 2025 18:32:12 +0300 Subject: [PATCH 032/113] chore: add `Hardfork::boxed` (#13737) --- crates/ethereum-forks/src/hardfork/dev.rs | 2 +- crates/ethereum-forks/src/hardfork/macros.rs | 5 ----- crates/ethereum-forks/src/hardfork/mod.rs | 6 ++++++ crates/optimism/chainspec/src/base.rs | 2 +- crates/optimism/chainspec/src/base_sepolia.rs | 2 +- crates/optimism/chainspec/src/op.rs | 2 +- crates/optimism/chainspec/src/op_sepolia.rs | 2 +- crates/optimism/hardforks/src/dev.rs | 2 +- crates/optimism/hardforks/src/hardfork.rs | 2 +- examples/bsc-p2p/src/chainspec.rs | 3 ++- examples/polygon-p2p/src/chain_cfg.rs | 3 ++- 11 files changed, 17 insertions(+), 14 deletions(-) diff --git a/crates/ethereum-forks/src/hardfork/dev.rs b/crates/ethereum-forks/src/hardfork/dev.rs index 8a0510a979858..225263ffed606 100644 --- a/crates/ethereum-forks/src/hardfork/dev.rs +++ b/crates/ethereum-forks/src/hardfork/dev.rs @@ -8,7 +8,7 @@ use once_cell::sync::Lazy as LazyLock; #[cfg(feature = "std")] use std::sync::LazyLock; -use crate::{ChainHardforks, EthereumHardfork, ForkCondition}; +use crate::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; /// Dev hardforks pub static DEV_HARDFORKS: LazyLock = LazyLock::new(|| { diff --git a/crates/ethereum-forks/src/hardfork/macros.rs b/crates/ethereum-forks/src/hardfork/macros.rs index 780c15f6e6b95..dae98248bf13f 100644 --- a/crates/ethereum-forks/src/hardfork/macros.rs +++ b/crates/ethereum-forks/src/hardfork/macros.rs @@ -17,11 +17,6 @@ macro_rules! hardfork { $( $enum::$variant => stringify!($variant), )* } } - - /// Boxes `self` and returns it as `Box`. - pub fn boxed(self) -> Box { - Box::new(self) - } } impl FromStr for $enum { diff --git a/crates/ethereum-forks/src/hardfork/mod.rs b/crates/ethereum-forks/src/hardfork/mod.rs index f77d06cbf768b..c939e2912c1dc 100644 --- a/crates/ethereum-forks/src/hardfork/mod.rs +++ b/crates/ethereum-forks/src/hardfork/mod.rs @@ -6,6 +6,7 @@ pub use ethereum::EthereumHardfork; mod dev; pub use dev::DEV_HARDFORKS; +use alloc::boxed::Box; use core::{ any::Any, hash::{Hash, Hasher}, @@ -17,6 +18,11 @@ use dyn_clone::DynClone; pub trait Hardfork: Any + DynClone + Send + Sync + 'static { /// Fork name. fn name(&self) -> &'static str; + + /// Returns boxed value. 
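// (Intent note, inferred from the callers rather than stated by this patch:
// `boxed` erases the concrete hardfork type so heterogeneous forks can share
// one `ChainHardforks` list, e.g. `EthereumHardfork::Frontier.boxed()` paired
// with a `ForkCondition` — which is why the hardfork tables in this patch now
// import the `Hardfork` trait to keep `.boxed()` in scope.)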
+ fn boxed(&self) -> Box { + Box::new(self) + } } dyn_clone::clone_trait_object!(Hardfork); diff --git a/crates/optimism/chainspec/src/base.rs b/crates/optimism/chainspec/src/base.rs index ab24ecf16c409..8282c58f6551b 100644 --- a/crates/optimism/chainspec/src/base.rs +++ b/crates/optimism/chainspec/src/base.rs @@ -5,7 +5,7 @@ use alloc::{sync::Arc, vec}; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; -use reth_ethereum_forks::EthereumHardfork; +use reth_ethereum_forks::{EthereumHardfork, Hardfork}; use reth_optimism_forks::OpHardfork; use crate::{LazyLock, OpChainSpec}; diff --git a/crates/optimism/chainspec/src/base_sepolia.rs b/crates/optimism/chainspec/src/base_sepolia.rs index 4ebf4d9a81e74..2b5434754501f 100644 --- a/crates/optimism/chainspec/src/base_sepolia.rs +++ b/crates/optimism/chainspec/src/base_sepolia.rs @@ -4,7 +4,7 @@ use alloc::{sync::Arc, vec}; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; -use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork}; use reth_ethereum_forks::EthereumHardfork; use reth_optimism_forks::OpHardfork; diff --git a/crates/optimism/chainspec/src/op.rs b/crates/optimism/chainspec/src/op.rs index 20a2ac60e220b..9b2c98e618082 100644 --- a/crates/optimism/chainspec/src/op.rs +++ b/crates/optimism/chainspec/src/op.rs @@ -4,7 +4,7 @@ use crate::{LazyLock, OpChainSpec}; use alloc::{sync::Arc, vec}; use alloy_chains::Chain; use alloy_primitives::{b256, U256}; -use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork}; use reth_ethereum_forks::EthereumHardfork; use reth_optimism_forks::OpHardfork; diff --git a/crates/optimism/chainspec/src/op_sepolia.rs b/crates/optimism/chainspec/src/op_sepolia.rs index 3a60d49ed1201..99702e6a17ac7 100644 --- a/crates/optimism/chainspec/src/op_sepolia.rs +++ b/crates/optimism/chainspec/src/op_sepolia.rs @@ -4,7 +4,7 @@ use crate::{LazyLock, OpChainSpec}; use alloc::{sync::Arc, vec}; use alloy_chains::{Chain, NamedChain}; use alloy_primitives::{b256, U256}; -use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; +use reth_chainspec::{once_cell_set, BaseFeeParams, BaseFeeParamsKind, ChainSpec, Hardfork}; use reth_ethereum_forks::EthereumHardfork; use reth_optimism_forks::OpHardfork; diff --git a/crates/optimism/hardforks/src/dev.rs b/crates/optimism/hardforks/src/dev.rs index 33877301c7d43..897ce510f170e 100644 --- a/crates/optimism/hardforks/src/dev.rs +++ b/crates/optimism/hardforks/src/dev.rs @@ -1,6 +1,6 @@ use alloc::vec; use alloy_primitives::U256; -use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition}; +use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition, Hardfork}; #[cfg(not(feature = "std"))] use once_cell::sync::Lazy as LazyLock; diff --git a/crates/optimism/hardforks/src/hardfork.rs b/crates/optimism/hardforks/src/hardfork.rs index 75d294c9b3e5a..313fd67dde6f2 100644 --- a/crates/optimism/hardforks/src/hardfork.rs +++ b/crates/optimism/hardforks/src/hardfork.rs @@ -1,6 +1,6 @@ //! Hard forks of optimism protocol. 
-use alloc::{boxed::Box, format, string::String, vec}; +use alloc::{format, string::String, vec}; use core::{ any::Any, fmt::{self, Display, Formatter}, diff --git a/examples/bsc-p2p/src/chainspec.rs b/examples/bsc-p2p/src/chainspec.rs index acf9f4dff062b..106d96b560be4 100644 --- a/examples/bsc-p2p/src/chainspec.rs +++ b/examples/bsc-p2p/src/chainspec.rs @@ -1,6 +1,7 @@ use alloy_primitives::{b256, B256}; use reth_chainspec::{ - once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, + once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, + ForkCondition, Hardfork, }; use reth_network_peers::NodeRecord; diff --git a/examples/polygon-p2p/src/chain_cfg.rs b/examples/polygon-p2p/src/chain_cfg.rs index d87bbccb2836b..586d755861f90 100644 --- a/examples/polygon-p2p/src/chain_cfg.rs +++ b/examples/polygon-p2p/src/chain_cfg.rs @@ -1,6 +1,7 @@ use alloy_primitives::{b256, B256}; use reth_chainspec::{ - once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, ForkCondition, + once_cell_set, BaseFeeParams, Chain, ChainHardforks, ChainSpec, EthereumHardfork, + ForkCondition, Hardfork, }; use reth_discv4::NodeRecord; use reth_primitives::Head; From d336ceb27e85f480cf7c38a3103495e877bf0392 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 8 Jan 2025 11:11:20 -0500 Subject: [PATCH 033/113] perf: introduce moka cached state provider (#12214) --- Cargo.lock | 269 +++++++++--------- Cargo.toml | 3 +- crates/engine/tree/Cargo.toml | 1 + crates/engine/tree/src/tree/cached_state.rs | 288 ++++++++++++++++++++ crates/engine/tree/src/tree/mod.rs | 13 +- 5 files changed, 444 insertions(+), 130 deletions(-) create mode 100644 crates/engine/tree/src/tree/cached_state.rs diff --git a/Cargo.lock b/Cargo.lock index 9cfd8d9a1c0a3..1211d0466f9d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.1.52" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f15afc5993458b42739ab3b69bdb6b4c8112acd3997dbea9bc092c9517137c" +checksum = "da226340862e036ab26336dc99ca85311c6b662267c1440e1733890fd688802c" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -460,7 +460,7 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -700,7 +700,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -716,7 +716,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "syn-solidity", "tiny-keccak", ] @@ -732,7 +732,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "syn-solidity", ] @@ -938,7 +938,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1136,7 +1136,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1172,18 +1172,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] name = "async-trait" -version = "0.1.84" +version = "0.1.85" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1244b10dcd56c92219da4e14caa97e312079e185f04ba3eea25061561dc0a0" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1221,7 +1221,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1327,7 +1327,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1510,7 +1510,7 @@ checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "synstructure", ] @@ -1632,7 +1632,7 @@ checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1821,9 +1821,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "9560b07a799281c7e0958b9296854d6fafd4c5f31444a7e5bb1ad6dde5ccf1bd" dependencies = [ "clap_builder", "clap_derive", @@ -1831,9 +1831,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.23" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "874e0dd3eb68bf99058751ac9712f622e61e6f393a94f7128fa26e3f02f5c7cd" dependencies = [ "anstream", "anstyle", @@ -1843,14 +1843,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2317,7 +2317,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2341,7 +2341,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2352,7 +2352,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2461,7 +2461,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2482,7 +2482,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "unicode-xid", ] @@ -2596,7 +2596,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2742,7 +2742,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2753,7 +2753,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2773,7 +2773,7 @@ checksum = 
"3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2807,9 +2807,9 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "036c84bd29bff35e29bbee3c8fc0e2fb95db12b6f2f3cae82a827fbc97256f3a" +checksum = "862e41ea8eea7508f70cfd8cd560f0c34bb0af37c719a8e06c2672f0f031d8e5" dependencies = [ "alloy-primitives", "ethereum_serde_utils", @@ -2822,14 +2822,14 @@ dependencies = [ [[package]] name = "ethereum_ssz_derive" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dc8e67e1f770f5aa4c2c2069aaaf9daee7ac21bed357a71b911b37a58966cfb" +checksum = "d31ecf6640112f61dc34b4d8359c081102969af0edd18381fed2052f6db6a410" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -3399,7 +3399,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -4081,7 +4081,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -4138,7 +4138,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -4259,7 +4259,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -4508,7 +4508,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -4903,14 +4903,14 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] name = "metrics-exporter-prometheus" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" +checksum = "12779523996a67c13c84906a876ac6fe4d07a6e1adb54978378e13f199251a62" dependencies = [ "base64 0.22.1", "indexmap 2.7.0", @@ -4938,9 +4938,9 @@ dependencies = [ [[package]] name = "metrics-util" -version = "0.18.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7" +checksum = "dbd4884b1dd24f7d6628274a2f5ae22465c337c5ba065ec9b6edccddf8acc673" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -4949,6 +4949,8 @@ dependencies = [ "metrics", "ordered-float", "quanta", + "rand 0.8.5", + "rand_xoshiro", "sketches-ddsketch", ] @@ -5050,16 +5052,16 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.9" +version = "0.12.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23db87a7f248211f6a7c8644a1b750541f8a4c68ae7de0f908860e44c0c201f6" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" dependencies = [ "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", "loom", "parking_lot", - "quanta", + "portable-atomic", "rustc_version 0.4.1", "smallvec", "tagptr", @@ -5291,7 +5293,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -5305,9 +5307,9 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.3.3" +version = "0.3.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3409fc85ac27b27d971ea7cd1aabafd2eefa6de7e481c8d4f707225c117e81a" +checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" dependencies = [ "alloy-rlp", "arbitrary", @@ -5344,9 +5346,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0adb232ec805af3aa35606c19329aa7dc44c4457ae318ed0b8fc7f799dd7dbfe" +checksum = "250244eadaf1a25e0e2ad263110ad2d1b43c2e57ddf4c025e71552d98196a8d3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5362,9 +5364,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c272cfd65317538f5815c2b7059445230b050d48ebe2d0bab3e861d419a785" +checksum = "98334a9cdccc5878e9d5c48afc9cc1b84da58dbc68d41f9488d8f71688b495d3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5377,9 +5379,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19872a58b7acceeffb8e88ea048bee1690e7cde53068bd652976435d61fcd1de" +checksum = "1dd588157ac14db601d6497b81ae738b2581c60886fc592976fab6c282619604" dependencies = [ "alloy-consensus", "alloy-network", @@ -5392,9 +5394,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad65d040648e0963ed378e88489f5805e24fb56b7e6611362299cd4c24debeb2" +checksum = "753762429c31f838b59c886b31456c9bf02fd38fb890621665523a9087ae06ae" dependencies = [ "alloc-no-stdlib", "alloy-consensus", @@ -5402,9 +5404,10 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-serde", + "alloy-sol-types", "async-trait", "brotli", - "cfg-if", + "derive_more", "miniz_oxide", "op-alloy-consensus", "op-alloy-genesis", @@ -5416,9 +5419,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36b1f2547067c5b60f3144ae1033a54ce1d11341d8327fa8f203b048d51465e9" +checksum = "1f483fb052ef807682ae5b5729c3a61a092ee4f7334e6e6055de67e9f28ef880" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5429,9 +5432,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e68d1a51fe3ee143f102b82f54fa237f21d12635da363276901e6d3ef6c65b7b" +checksum = "37b1d3872021aa28b10fc6cf8252e792e802d89e8b2cdaa57dcb9243c461b286" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5448,9 +5451,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f8833ef149ceb74f8f25a79801d110d88ec2db32e700fa10db6c5f5b5cbb71a" +checksum = "c43f00d4060a6a38f5bf0a8182b4cc4c7071e2bc96942f414619251b522169eb" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5650,9 +5653,9 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = 
"1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ "phf_macros", "phf_shared", @@ -5660,9 +5663,9 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ "phf_shared", "rand 0.8.5", @@ -5670,51 +5673,51 @@ dependencies = [ [[package]] name = "phf_macros" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" +checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" dependencies = [ "phf_generator", "phf_shared", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ "siphasher", ] [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -5849,12 +5852,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.25" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +checksum = "483f8c21f64f3ea09fe0f30f5d48c3e8eefe5dac9129f0075f76593b4c1da705" dependencies = [ "proc-macro2", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -5905,7 +5908,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -6003,7 +6006,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -6193,6 +6196,15 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_xoshiro" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "ratatui" version = "0.28.1" @@ -6704,7 +6716,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -7177,6 +7189,7 @@ 
dependencies = [ "derive_more", "futures", "metrics", + "moka", "proptest", "rand 0.8.5", "rayon", @@ -9687,7 +9700,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.94", + "syn 2.0.95", "unicode-ident", ] @@ -9771,9 +9784,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.42" +version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ "bitflags 2.6.0", "errno", @@ -9819,7 +9832,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.1.0", + "security-framework 3.2.0", ] [[package]] @@ -10015,9 +10028,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81d3f8c9bfcc3cbb6b0179eb57042d75b1582bdc65c3cb95f3fa999509c03cbc" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ "bitflags 2.6.0", "core-foundation 0.10.0", @@ -10028,9 +10041,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.13.0" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -10092,14 +10105,14 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] name = "serde_json" -version = "1.0.134" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" +checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" dependencies = [ "indexmap 2.7.0", "itoa", @@ -10127,7 +10140,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10178,7 +10191,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10211,7 +10224,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10368,9 +10381,9 @@ dependencies = [ [[package]] name = "siphasher" -version = "0.3.11" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "sketches-ddsketch" @@ -10494,7 +10507,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10552,9 +10565,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.94" +version = "2.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "987bc0be1cdea8b10216bd06e2ca407d40b9543468fafd3ddfb02f36e77f71f3" +checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" dependencies = [ "proc-macro2", "quote", @@ -10570,7 +10583,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.94", + 
"syn 2.0.95", ] [[package]] @@ -10590,7 +10603,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10668,7 +10681,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10716,7 +10729,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10727,7 +10740,7 @@ checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -10884,7 +10897,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11085,7 +11098,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11421,7 +11434,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11491,7 +11504,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "wasm-bindgen-shared", ] @@ -11526,7 +11539,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11692,7 +11705,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11703,7 +11716,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11714,7 +11727,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -11725,7 +11738,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -12000,7 +12013,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "synstructure", ] @@ -12022,7 +12035,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -12042,7 +12055,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "synstructure", ] @@ -12063,7 +12076,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -12085,7 +12098,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 16933fc7db1b0..a49d3052a5578 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -527,13 +527,14 @@ tracing-appender = "0.2" url = { version = "2.3", default-features = 
false } zstd = "0.13" byteorder = "1" +moka = "0.12" # metrics metrics = "0.24.0" metrics-derive = "0.1" metrics-exporter-prometheus = { version = "0.16.0", default-features = false } metrics-process = "2.1.0" -metrics-util = { default-features = false, version = "0.18.0" } +metrics-util = { default-features = false, version = "0.19.0" } # proc-macros proc-macro2 = "1.0" diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index bd5e70319a6aa..822780657d8f1 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -49,6 +49,7 @@ revm-primitives.workspace = true futures.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["macros", "sync"] } +moka = { workspace = true, features = ["sync"] } # metrics metrics.workspace = true diff --git a/crates/engine/tree/src/tree/cached_state.rs b/crates/engine/tree/src/tree/cached_state.rs new file mode 100644 index 0000000000000..84d2c8a092252 --- /dev/null +++ b/crates/engine/tree/src/tree/cached_state.rs @@ -0,0 +1,288 @@ +//! Implements a state provider that has a shared cache in front of it. +use alloy_primitives::{map::B256HashMap, Address, StorageKey, StorageValue, B256}; +use metrics::Gauge; +use moka::sync::CacheBuilder; +use reth_errors::ProviderResult; +use reth_metrics::Metrics; +use reth_primitives::{Account, Bytecode}; +use reth_provider::{ + AccountReader, BlockHashReader, HashedPostStateProvider, StateProofProvider, StateProvider, + StateRootProvider, StorageRootProvider, +}; +use reth_trie::{ + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, + MultiProofTargets, StorageMultiProof, StorageProof, TrieInput, +}; +use revm_primitives::map::DefaultHashBuilder; + +type Cache = moka::sync::Cache; + +/// A wrapper of a state provider and a shared cache. +pub(crate) struct CachedStateProvider { + /// The state provider + state_provider: S, + + /// The caches used for the provider + caches: ProviderCaches, + + /// Metrics for the cached state provider + metrics: CachedStateMetrics, +} + +impl CachedStateProvider +where + S: StateProvider, +{ + /// Creates a new [`CachedStateProvider`] from a [`ProviderCaches`], state provider, and + /// [`CachedStateMetrics`]. + pub(crate) const fn new_with_caches( + state_provider: S, + caches: ProviderCaches, + metrics: CachedStateMetrics, + ) -> Self { + Self { state_provider, caches, metrics } + } +} + +/// Metrics for the cached state provider, showing hits / misses for each cache +#[derive(Metrics, Clone)] +#[metrics(scope = "sync.caching")] +pub(crate) struct CachedStateMetrics { + /// Code cache hits + code_cache_hits: Gauge, + + /// Code cache misses + code_cache_misses: Gauge, + + /// Storage cache hits + storage_cache_hits: Gauge, + + /// Storage cache misses + storage_cache_misses: Gauge, + + /// Account cache hits + account_cache_hits: Gauge, + + /// Account cache misses + account_cache_misses: Gauge, +} + +impl CachedStateMetrics { + /// Sets all values to zero, indicating that a new block is being executed. + pub(crate) fn reset(&self) { + // code cache + self.code_cache_hits.set(0); + self.code_cache_misses.set(0); + + // storage cache + self.storage_cache_hits.set(0); + self.storage_cache_misses.set(0); + + // account cache + self.account_cache_hits.set(0); + self.account_cache_misses.set(0); + } + + /// Returns a new zeroed-out instance of [`CachedStateMetrics`]. 
+ pub(crate) fn zeroed() -> Self { + let zeroed = Self::default(); + zeroed.reset(); + zeroed + } +} + +impl<S: AccountReader> AccountReader for CachedStateProvider<S> { + fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>> { + if let Some(res) = self.caches.account_cache.get(address) { + self.metrics.account_cache_hits.increment(1); + return Ok(res) + } + + self.metrics.account_cache_misses.increment(1); + + let res = self.state_provider.basic_account(address)?; + self.caches.account_cache.insert(*address, res); + Ok(res) + } +} + +impl<S: StateProvider> StateProvider for CachedStateProvider<S> { + fn storage( + &self, + account: Address, + storage_key: StorageKey, + ) -> ProviderResult<Option<StorageValue>> { + if let Some(res) = self.caches.storage_cache.get(&(account, storage_key)) { + self.metrics.storage_cache_hits.increment(1); + return Ok(res) + } + + self.metrics.storage_cache_misses.increment(1); + + let final_res = self.state_provider.storage(account, storage_key)?; + self.caches.storage_cache.insert((account, storage_key), final_res); + Ok(final_res) + } + + fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult<Option<Bytecode>> { + if let Some(res) = self.caches.code_cache.get(code_hash) { + self.metrics.code_cache_hits.increment(1); + return Ok(res) + } + + self.metrics.code_cache_misses.increment(1); + + let final_res = self.state_provider.bytecode_by_hash(code_hash)?; + self.caches.code_cache.insert(*code_hash, final_res.clone()); + Ok(final_res) + } +} + +impl<S: StateRootProvider> StateRootProvider for CachedStateProvider<S> { + fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult<B256> { + self.state_provider.state_root(hashed_state) + } + + fn state_root_from_nodes(&self, input: TrieInput) -> ProviderResult<B256> { + self.state_provider.state_root_from_nodes(input) + } + + fn state_root_from_nodes_with_updates( + &self, + input: TrieInput, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.state_provider.state_root_from_nodes_with_updates(input) + } + + fn state_root_with_updates( + &self, + hashed_state: HashedPostState, + ) -> ProviderResult<(B256, TrieUpdates)> { + self.state_provider.state_root_with_updates(hashed_state) + } +} + +impl<S: StateProofProvider> StateProofProvider for CachedStateProvider<S> { + fn proof( + &self, + input: TrieInput, + address: Address, + slots: &[B256], + ) -> ProviderResult<AccountProof> { + self.state_provider.proof(input, address, slots) + } + + fn multiproof( + &self, + input: TrieInput, + targets: MultiProofTargets, + ) -> ProviderResult<MultiProof> { + self.state_provider.multiproof(input, targets) + } + + fn witness( + &self, + input: TrieInput, + target: HashedPostState, + ) -> ProviderResult<B256HashMap<alloy_primitives::Bytes>> { + self.state_provider.witness(input, target) + } +} + +impl<S: StorageRootProvider> StorageRootProvider for CachedStateProvider<S> { + fn storage_root( + &self, + address: Address, + hashed_storage: HashedStorage, + ) -> ProviderResult<B256> { + self.state_provider.storage_root(address, hashed_storage) + } + + fn storage_proof( + &self, + address: Address, + slot: B256, + hashed_storage: HashedStorage, + ) -> ProviderResult<StorageProof> { + self.state_provider.storage_proof(address, slot, hashed_storage) + } + + fn storage_multiproof( + &self, + address: Address, + slots: &[B256], + hashed_storage: HashedStorage, + ) -> ProviderResult<StorageMultiProof> { + self.state_provider.storage_multiproof(address, slots, hashed_storage) + } +} + +impl<S: BlockHashReader> BlockHashReader for CachedStateProvider<S> { + fn block_hash(&self, number: alloy_primitives::BlockNumber) -> ProviderResult<Option<B256>> { + self.state_provider.block_hash(number) + } + + fn canonical_hashes_range( + &self, + start: alloy_primitives::BlockNumber, + end: alloy_primitives::BlockNumber, + ) ->
ProviderResult<Vec<B256>> { + self.state_provider.canonical_hashes_range(start, end) + } +} + +impl<S: HashedPostStateProvider> HashedPostStateProvider for CachedStateProvider<S> { + fn hashed_post_state(&self, bundle_state: &reth_revm::db::BundleState) -> HashedPostState { + self.state_provider.hashed_post_state(bundle_state) + } +} + +/// The set of caches that are used in the [`CachedStateProvider`]. +#[derive(Debug, Clone)] +pub(crate) struct ProviderCaches { + /// The cache for bytecode + code_cache: Cache<B256, Option<Bytecode>>, + + /// The cache for storage + storage_cache: Cache<(Address, StorageKey), Option<StorageValue>>, + + /// The cache for basic accounts + account_cache: Cache<Address, Option<Account>>, +} + +/// A builder for [`ProviderCaches`]. +#[derive(Debug)] +pub(crate) struct ProviderCacheBuilder { + /// Code cache size + code_cache_size: u64, + + /// Storage cache size + storage_cache_size: u64, + + /// Account cache size + account_cache_size: u64, +} + +impl ProviderCacheBuilder { + /// Build a [`ProviderCaches`] struct, so that provider caches can be easily cloned. + pub(crate) fn build_caches(self) -> ProviderCaches { + ProviderCaches { + code_cache: CacheBuilder::new(self.code_cache_size) + .build_with_hasher(DefaultHashBuilder::default()), + storage_cache: CacheBuilder::new(self.storage_cache_size) + .build_with_hasher(DefaultHashBuilder::default()), + account_cache: CacheBuilder::new(self.account_cache_size) + .build_with_hasher(DefaultHashBuilder::default()), + } + } +} + +impl Default for ProviderCacheBuilder { + fn default() -> Self { + // moka caches have been benchmarked up to 800k entries, so we just use 1M, optimizing for + // hitrate over memory consumption. + // + // See: https://github.com/moka-rs/moka/wiki#admission-and-eviction-policies + Self { code_cache_size: 1000000, storage_cache_size: 1000000, account_cache_size: 1000000 } + } +} diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index c09cf9b31c247..38e72c55fd21a 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -3,7 +3,10 @@ use crate::{ chain::FromOrchestrator, engine::{DownloadRequest, EngineApiEvent, EngineApiKind, EngineApiRequest, FromEngine}, persistence::PersistenceHandle, - tree::metrics::EngineApiMetrics, + tree::{ + cached_state::{CachedStateMetrics, CachedStateProvider, ProviderCacheBuilder}, + metrics::EngineApiMetrics, + }, }; use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; @@ -74,6 +77,7 @@ use tokio::sync::{ use tracing::*; mod block_buffer; +mod cached_state; pub mod config; pub mod error; mod invalid_block_hook; @@ -2249,6 +2253,13 @@ where return Err(e.into()) } + // Use cached state provider before executing, this does nothing currently, will be used in + // prewarming + let caches = ProviderCacheBuilder::default().build_caches(); + let cache_metrics = CachedStateMetrics::zeroed(); + let state_provider = + CachedStateProvider::new_with_caches(state_provider, caches, cache_metrics); + trace!(target: "engine::tree", block=?block.num_hash(), "Executing block"); let executor = self.executor_provider.executor(StateProviderDatabase::new(&state_provider)); From 28d52312acd46be2bfc46661a7b392feaa2bd4c5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 8 Jan 2025 18:33:56 +0100 Subject: [PATCH 034/113] feat: add SealedBlock::clone_sealed_header (#13739) --- crates/chain-state/src/in_memory.rs | 8 ++-- crates/engine/tree/src/tree/mod.rs | 6 +-- crates/evm/execution-types/src/chain.rs | 2 +- crates/exex/exex/src/manager.rs | 8 ++-- crates/net/downloaders/src/bodies/bodies.rs | 2 +-
 crates/net/downloaders/src/test_utils/mod.rs | 2 +- crates/primitives/src/block.rs | 8 ++++ .../src/providers/blockchain_provider.rs | 38 +++++++++---------- .../provider/src/providers/consistent.rs | 4 +- .../src/providers/database/provider.rs | 2 +- 10 files changed, 44 insertions(+), 36 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 8b5c8140acd7a..d0aafbd57e2b8 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -182,7 +182,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> { ) -> Self { let in_memory_state = InMemoryState::new(blocks, numbers, pending); let header = in_memory_state.head_state().map_or_else(SealedHeader::default, |state| { - state.block_ref().block().sealed_header().clone() + state.block_ref().block().clone_sealed_header() }); let chain_info_tracker = ChainInfoTracker::new(header, finalized, safe); let (canon_state_notification_sender, _) = @@ -229,7 +229,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> { /// Returns the header corresponding to the given hash. pub fn header_by_hash(&self, hash: B256) -> Option<SealedHeader<N::BlockHeader>> { - self.state_by_hash(hash).map(|block| block.block_ref().block.sealed_header().clone()) + self.state_by_hash(hash).map(|block| block.block_ref().block.clone_sealed_header()) } /// Clears all entries in the in memory state. @@ -462,7 +462,7 @@ impl<N: NodePrimitives> CanonicalInMemoryState<N> { /// Returns the `SealedHeader` corresponding to the pending state. pub fn pending_sealed_header(&self) -> Option<SealedHeader<N::BlockHeader>> { - self.pending_state().map(|h| h.block_ref().block().sealed_header().clone()) + self.pending_state().map(|h| h.block_ref().block().clone_sealed_header()) } /// Returns the `Header` corresponding to the pending state. @@ -1321,7 +1321,7 @@ mod tests { assert_eq!(state.pending_header().unwrap(), block2.block().header().clone()); // Check the pending sealed header - assert_eq!(state.pending_sealed_header().unwrap(), block2.block().sealed_header().clone()); + assert_eq!(state.pending_sealed_header().unwrap(), block2.block().clone_sealed_header()); // Check the pending block with senders assert_eq!( diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 38e72c55fd21a..00e314cf5916b 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1087,7 +1087,7 @@ where // 2. ensure we can apply a new chain update for the head block if let Some(chain_update) = self.on_new_head(state.head_block_hash)?
{ - let tip = chain_update.tip().sealed_header().clone(); + let tip = chain_update.tip().clone_sealed_header(); self.on_canonical_chain_update(chain_update); // update the safe and finalized blocks and ensure their values are valid @@ -1626,7 +1626,7 @@ where .state .tree_state .block_by_hash(hash) - .map(|block| block.as_ref().sealed_header().clone()); + .map(|block| block.as_ref().clone_sealed_header()); if block.is_some() { Ok(block) @@ -2039,7 +2039,7 @@ where // update the tracked canonical head self.state.tree_state.set_canonical_head(chain_update.tip().num_hash()); - let tip = chain_update.tip().sealed_header().clone(); + let tip = chain_update.tip().clone_sealed_header(); let notification = chain_update.to_chain_notification(); // reinsert any missing reorged blocks diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 7e6ba2046043a..43b5269bef3bb 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -91,7 +91,7 @@ impl Chain { /// Returns an iterator over all headers in the block with increasing block numbers. pub fn headers(&self) -> impl Iterator> + '_ { - self.blocks.values().map(|block| block.sealed_header().clone()) + self.blocks.values().map(|block| block.clone_sealed_header()) } /// Get cached trie updates for this chain. diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index 165ae8b7b7d69..b2817582760d9 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -1327,7 +1327,7 @@ mod tests { }; let (finalized_headers_tx, rx) = watch::channel(None); - finalized_headers_tx.send(Some(genesis_block.sealed_header().clone()))?; + finalized_headers_tx.send(Some(genesis_block.clone_sealed_header()))?; let finalized_header_stream = ForkChoiceStream::new(rx); let mut exex_manager = std::pin::pin!(ExExManager::new( @@ -1361,7 +1361,7 @@ mod tests { [notification.clone()] ); - finalized_headers_tx.send(Some(block.sealed_header().clone()))?; + finalized_headers_tx.send(Some(block.clone_sealed_header()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx didn't emit the `FinishedHeight` event assert_eq!( @@ -1374,7 +1374,7 @@ mod tests { .send(ExExEvent::FinishedHeight((rng.gen::(), rng.gen::()).into())) .unwrap(); - finalized_headers_tx.send(Some(block.sealed_header().clone()))?; + finalized_headers_tx.send(Some(block.clone_sealed_header()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx emitted a `FinishedHeight` event with a // non-canonical block @@ -1386,7 +1386,7 @@ mod tests { // Send a `FinishedHeight` event with a canonical block events_tx.send(ExExEvent::FinishedHeight(block.num_hash())).unwrap(); - finalized_headers_tx.send(Some(block.sealed_header().clone()))?; + finalized_headers_tx.send(Some(block.clone_sealed_header()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL is finalized assert_eq!(exex_manager.wal.iter_notifications()?.next().transpose()?, None); diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index a7be903f23687..9aed7d3b698ab 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -677,7 +677,7 @@ mod tests { BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..2, ..Default::default() }, ); - let headers = blocks.iter().map(|block| block.sealed_header().clone()).collect::>(); + 
let headers = blocks.iter().map(|block| block.clone_sealed_header()).collect::>(); let bodies = blocks .into_iter() .map(|block| (block.hash(), block.into_body())) diff --git a/crates/net/downloaders/src/test_utils/mod.rs b/crates/net/downloaders/src/test_utils/mod.rs index 2e8d5365c0dbe..698f30faee5be 100644 --- a/crates/net/downloaders/src/test_utils/mod.rs +++ b/crates/net/downloaders/src/test_utils/mod.rs @@ -28,7 +28,7 @@ pub(crate) fn generate_bodies( BlockRangeParams { parent: Some(B256::ZERO), tx_count: 0..2, ..Default::default() }, ); - let headers = blocks.iter().map(|block| block.sealed_header().clone()).collect(); + let headers = blocks.iter().map(|block| block.clone_sealed_header()).collect(); let bodies = blocks.into_iter().map(|block| (block.hash(), block.into_body())).collect(); (headers, bodies) diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 24e38997fc5e4..9908c06677fbf 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -195,6 +195,14 @@ impl SealedBlock { &self.header } + /// Clones the wrapped header and returns a [`SealedHeader`] sealed with the hash. + pub fn clone_sealed_header(&self) -> SealedHeader + where + H: Clone, + { + self.header.clone() + } + /// Consumes the block and returns the sealed header. pub fn into_sealed_header(self) -> SealedHeader { self.header diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 9d1d7abc01027..d5d704001e54e 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -946,9 +946,9 @@ mod tests { let finalized_block = blocks.get(block_count - 3).unwrap(); // Set the canonical head, safe, and finalized blocks - provider.set_canonical_head(canonical_block.sealed_header().clone()); - provider.set_safe(safe_block.sealed_header().clone()); - provider.set_finalized(finalized_block.sealed_header().clone()); + provider.set_canonical_head(canonical_block.clone_sealed_header()); + provider.set_safe(safe_block.clone_sealed_header()); + provider.set_finalized(finalized_block.clone_sealed_header()); Ok((provider, database_blocks.clone(), in_memory_blocks.clone(), receipts)) } @@ -1355,7 +1355,7 @@ mod tests { let in_memory_block = in_memory_blocks.last().unwrap().clone(); // make sure that the finalized block is on db let finalized_block = database_blocks.get(database_blocks.len() - 3).unwrap(); - provider.set_finalized(finalized_block.sealed_header().clone()); + provider.set_finalized(finalized_block.clone_sealed_header()); let blocks = [database_blocks, in_memory_blocks].concat(); @@ -1374,7 +1374,7 @@ mod tests { blocks .iter() .take_while(|header| header.number <= 8) - .map(|b| b.sealed_header().clone()) + .map(|b| b.clone_sealed_header()) .collect::>() ); @@ -1550,7 +1550,7 @@ mod tests { ); assert_eq!( provider.sealed_header_by_number_or_tag(block_number.into())?, - Some(database_block.sealed_header().clone()) + Some(database_block.clone_sealed_header()) ); assert_eq!( @@ -1559,7 +1559,7 @@ mod tests { ); assert_eq!( provider.sealed_header_by_number_or_tag(BlockNumberOrTag::Latest).unwrap(), - Some(canonical_block.sealed_header().clone()) + Some(canonical_block.clone_sealed_header()) ); assert_eq!( @@ -1568,7 +1568,7 @@ mod tests { ); assert_eq!( provider.sealed_header_by_number_or_tag(BlockNumberOrTag::Safe).unwrap(), - Some(safe_block.sealed_header().clone()) + 
Some(safe_block.clone_sealed_header()) ); assert_eq!( @@ -1577,7 +1577,7 @@ mod tests { ); assert_eq!( provider.sealed_header_by_number_or_tag(BlockNumberOrTag::Finalized).unwrap(), - Some(finalized_block.sealed_header().clone()) + Some(finalized_block.clone_sealed_header()) ); Ok(()) @@ -1605,7 +1605,7 @@ mod tests { ); assert_eq!( provider.sealed_header_by_id(block_number.into()).unwrap(), - Some(database_block.sealed_header().clone()) + Some(database_block.clone_sealed_header()) ); assert_eq!( @@ -1614,7 +1614,7 @@ mod tests { ); assert_eq!( provider.sealed_header_by_id(block_hash.into()).unwrap(), - Some(database_block.sealed_header().clone()) + Some(database_block.clone_sealed_header()) ); let block_number = in_memory_block.number; @@ -1626,7 +1626,7 @@ mod tests { ); assert_eq!( provider.sealed_header_by_id(block_number.into()).unwrap(), - Some(in_memory_block.sealed_header().clone()) + Some(in_memory_block.clone_sealed_header()) ); assert_eq!( @@ -1635,7 +1635,7 @@ mod tests { ); assert_eq!( provider.sealed_header_by_id(block_hash.into()).unwrap(), - Some(in_memory_block.sealed_header().clone()) + Some(in_memory_block.clone_sealed_header()) ); Ok(()) @@ -2021,7 +2021,7 @@ mod tests { ); // test state by block tag for safe block let safe_block = in_memory_blocks[in_memory_blocks.len() - 2].clone(); - in_memory_provider.canonical_in_memory_state.set_safe(safe_block.sealed_header().clone()); + in_memory_provider.canonical_in_memory_state.set_safe(safe_block.clone_sealed_header()); assert_eq!( safe_block.hash(), in_memory_provider @@ -2033,7 +2033,7 @@ mod tests { let finalized_block = in_memory_blocks[in_memory_blocks.len() - 3].clone(); in_memory_provider .canonical_in_memory_state - .set_finalized(finalized_block.sealed_header().clone()); + .set_finalized(finalized_block.clone_sealed_header()); assert_eq!( finalized_block.hash(), in_memory_provider @@ -2106,11 +2106,11 @@ mod tests { // Set the safe block in memory let safe_block = in_memory_blocks[in_memory_blocks.len() - 2].clone(); - provider.canonical_in_memory_state.set_safe(safe_block.sealed_header().clone()); + provider.canonical_in_memory_state.set_safe(safe_block.clone_sealed_header()); // Set the finalized block in memory let finalized_block = in_memory_blocks[in_memory_blocks.len() - 3].clone(); - provider.canonical_in_memory_state.set_finalized(finalized_block.sealed_header().clone()); + provider.canonical_in_memory_state.set_finalized(finalized_block.clone_sealed_header()); // Verify the pending block number and hash assert_eq!( @@ -2325,7 +2325,7 @@ mod tests { // instead start end test_by_block_range!([ (headers_range, |block: &SealedBlock| block.header().clone()), - (sealed_headers_range, |block: &SealedBlock| block.sealed_header().clone()), + (sealed_headers_range, |block: &SealedBlock| block.clone_sealed_header()), (block_range, |block: &SealedBlock| block.clone().unseal()), (block_with_senders_range, |block: &SealedBlock| block .clone() @@ -2467,7 +2467,7 @@ mod tests { sealed_header, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( block.number, - Some(block.sealed_header().clone()) + Some(block.clone_sealed_header()) ), u64::MAX ), diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 9ac33e3476e7a..098c27c3c7538 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -693,7 +693,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( 
number.into(), |db_provider| db_provider.sealed_header(number), - |block_state| Ok(Some(block_state.block_ref().block().sealed_header().clone())), + |block_state| Ok(Some(block_state.block_ref().block().clone_sealed_header())), ) } @@ -704,7 +704,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_headers_range(range), - |block_state, _| Some(block_state.block_ref().block().sealed_header().clone()), + |block_state, _| Some(block_state.block_ref().block().clone_sealed_header()), |_| true, ) } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index faf35d6416bb9..f2886d664f660 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -440,7 +440,7 @@ impl< let segment_header = writer.user_header(); if segment_header.block_end().is_none() && segment_header.expected_block_start() == 0 { for block_number in 0..block.number() { - let mut prev = block.sealed_header().clone().unseal(); + let mut prev = block.clone_sealed_header().unseal(); prev.number = block_number; writer.append_header(&prev, U256::ZERO, &B256::ZERO)?; } From 875c45bc312e48fb97ff52717a8db51a1838ad8e Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 9 Jan 2025 10:42:37 +0700 Subject: [PATCH 035/113] fix(ci): install missing deps for wasm build checks (#13745) --- .github/workflows/lint.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 418fd4cc4e688..e0ae216dd38b7 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -73,7 +73,9 @@ jobs: cache-on-failure: true - uses: dcarbone/install-jq-action@v3 - name: Run Wasm checks - run: .github/assets/check_wasm.sh + run: | + sudo apt update && sudo apt install gcc-multilib + .github/assets/check_wasm.sh riscv: runs-on: ubuntu-latest From 99932e43773dee0a24f474461bbdaf34ba61f23c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 9 Jan 2025 05:16:41 +0100 Subject: [PATCH 036/113] feat: add Test traits for Header and Block (#13741) --- crates/primitives-traits/src/block/mod.rs | 26 +++++++++++++ crates/primitives-traits/src/header/sealed.rs | 36 ++++++++--------- .../src/header/test_utils.rs | 39 ++++++++++++++++++- crates/primitives-traits/src/lib.rs | 10 ++++- 4 files changed, 89 insertions(+), 22 deletions(-) diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 4c98a94b318af..f161ced8258f8 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -88,3 +88,29 @@ where (self.header, self.body) } } + +/// An extension trait for [`Block`]s that allows for mutable access to the block's internals. +/// +/// This allows for modifying the block's header and body for testing purposes. +#[cfg(any(test, feature = "test-utils"))] +pub trait TestBlock: Block { + /// Returns mutable reference to block body. + fn body_mut(&mut self) -> &mut Self::Body; + + /// Returns mutable reference to block header. 
+ fn header_mut(&mut self) -> &mut Self::Header; +} + +#[cfg(any(test, feature = "test-utils"))] +impl TestBlock for alloy_consensus::Block +where + T: SignedTransaction, +{ + fn body_mut(&mut self) -> &mut Self::Body { + &mut self.body + } + + fn header_mut(&mut self) -> &mut Self::Header { + &mut self.header + } +} diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index ef8b5fde5e910..4291735a6aa52 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -119,6 +119,24 @@ impl Decodable for SealedHeader { } } +impl From> for Sealed { + fn from(value: SealedHeader) -> Self { + Self::new_unchecked(value.header, value.hash) + } +} + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a, H> arbitrary::Arbitrary<'a> for SealedHeader +where + H: for<'b> arbitrary::Arbitrary<'b> + Sealable, +{ + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + let header = H::arbitrary(u)?; + + Ok(Self::seal(header)) + } +} + #[cfg(any(test, feature = "test-utils"))] impl SealedHeader { /// Updates the block header. @@ -152,24 +170,6 @@ impl SealedHeader { } } -impl From> for Sealed { - fn from(value: SealedHeader) -> Self { - Self::new_unchecked(value.header, value.hash) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a, H> arbitrary::Arbitrary<'a> for SealedHeader -where - H: for<'b> arbitrary::Arbitrary<'b> + Sealable, -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let header = H::arbitrary(u)?; - - Ok(Self::seal(header)) - } -} - /// Bincode-compatible [`SealedHeader`] serde implementation. #[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { diff --git a/crates/primitives-traits/src/header/test_utils.rs b/crates/primitives-traits/src/header/test_utils.rs index 0e79f6cb462fb..58237fbca105b 100644 --- a/crates/primitives-traits/src/header/test_utils.rs +++ b/crates/primitives-traits/src/header/test_utils.rs @@ -1,10 +1,45 @@ -//! Test utilities to generate random valid headers. +//! Test utilities for the block header. use alloy_consensus::Header; -use alloy_primitives::B256; +use alloy_primitives::{BlockHash, BlockNumber, B256, U256}; use proptest::{arbitrary::any, prop_compose}; use proptest_arbitrary_interop::arb; +/// A helper trait for [`Header`]s that allows for mutable access to the headers values. +/// +/// This allows for modifying the header for testing purposes. +pub trait TestHeader { + /// Updates the parent block hash. + fn set_parent_hash(&mut self, hash: BlockHash); + + /// Updates the block number. + fn set_block_number(&mut self, number: BlockNumber); + + /// Updates the block state root. + fn set_state_root(&mut self, state_root: B256); + + /// Updates the block difficulty. + fn set_difficulty(&mut self, difficulty: U256); +} + +impl TestHeader for Header { + fn set_parent_hash(&mut self, hash: BlockHash) { + self.parent_hash = hash + } + + fn set_block_number(&mut self, number: BlockNumber) { + self.number = number; + } + + fn set_state_root(&mut self, state_root: B256) { + self.state_root = state_root; + } + + fn set_difficulty(&mut self, difficulty: U256) { + self.difficulty = difficulty; + } +} + /// Generates a header which is valid __with respect to past and future forks__. This means, for /// example, that if the withdrawals root is present, the base fee per gas is also present. 
/// diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index c5d9b710c1f0d..bad587e0f67d9 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -74,8 +74,6 @@ pub mod sync; /// Common header types pub mod header; -#[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] -pub use header::test_utils; pub use header::{Header, HeaderError, SealedHeader}; /// Bincode-compatible serde implementations for common abstracted types in Reth. @@ -135,3 +133,11 @@ pub trait MaybeSerdeBincodeCompat {} impl<T> MaybeSerdeBincodeCompat for T where T: crate::serde_bincode_compat::SerdeBincodeCompat {} #[cfg(not(feature = "serde-bincode-compat"))] impl<T> MaybeSerdeBincodeCompat for T {} + +/// Utilities for testing. +#[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] +pub mod test_utils { + pub use crate::header::test_utils::{generate_valid_header, valid_header_strategy}; + #[cfg(feature = "test-utils")] + pub use crate::{block::TestBlock, header::test_utils::TestHeader}; +} From ceaa3d37052389f38dc0d67f4a9fa3ae9b7ff843 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Thu, 9 Jan 2025 11:31:53 +0700 Subject: [PATCH 037/113] perf(persistence): reuse cursor for updating history indices (#13622) --- .../src/providers/database/provider.rs | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index f2886d664f660..8f7dbbc2177b0 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -801,15 +801,17 @@ impl DatabaseProvider { /// Load shard and remove it. If list is empty, last shard was full or /// there are no shards at all. - fn take_shard<T>(&self, key: T::Key) -> ProviderResult<Vec<u64>> + fn take_shard<T>( + &self, + cursor: &mut <TX as DbTxMut>::CursorMut<T>, + key: T::Key, + ) -> ProviderResult<Vec<u64>> where T: Table<Value = BlockNumberList>, { - let mut cursor = self.tx.cursor_read::<T>()?; - let shard = cursor.seek_exact(key)?; - if let Some((shard_key, list)) = shard { + if let Some((_, list)) = cursor.seek_exact(key)? { // delete old shard so new one can be inserted. - self.tx.delete::<T>(shard_key, None)?; + cursor.delete_current()?; let list = list.iter().collect::<Vec<_>>(); return Ok(list) } @@ -832,13 +834,13 @@ impl DatabaseProvider { P: Copy, T: Table<Value = BlockNumberList>, { + let mut cursor = self.tx.cursor_write::<T>()?; for (partial_key, indices) in index_updates { let mut last_shard = - self.take_shard::<T>(sharded_key_factory(partial_key, u64::MAX))?; + self.take_shard::<T>(&mut cursor, sharded_key_factory(partial_key, u64::MAX))?; last_shard.extend(indices); // Chunk indices and insert them in shards of N size. - let indices = last_shard; - let mut chunks = indices.chunks(sharded_key::NUM_OF_INDICES_IN_SHARD).peekable(); + let mut chunks = last_shard.chunks(sharded_key::NUM_OF_INDICES_IN_SHARD).peekable(); while let Some(list) = chunks.next() { let highest_block_number = if chunks.peek().is_some() { *list.last().expect("`chunks` does not return empty list") } else { // Insert last list with `u64::MAX`.
u64::MAX }; - self.tx.put::( + cursor.insert( sharded_key_factory(partial_key, highest_block_number), - BlockNumberList::new_pre_sorted(list.iter().copied()), + &BlockNumberList::new_pre_sorted(list.iter().copied()), )?; } } From fd092a267ed7e429d087cbeb008163acdeea0c1f Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 8 Jan 2025 23:32:22 -0500 Subject: [PATCH 038/113] chore: remove Block generic from apply_pre_execution_changes (#13743) --- .../engine/invalid-block-hooks/src/witness.rs | 2 +- crates/ethereum/evm/src/execute.rs | 8 ++++---- crates/evm/src/system_calls/mod.rs | 17 ++++++++--------- 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 2b634ae5ce71e..a0c986e4384ab 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -95,7 +95,7 @@ where SystemCaller::new(self.evm_config.clone(), self.provider.chain_spec()); // Apply pre-block system contract calls. - system_caller.apply_pre_execution_changes(&block.clone().unseal().block, &mut evm)?; + system_caller.apply_pre_execution_changes(block.header(), &mut evm)?; // Re-execute all of the transactions in the block to load all touched accounts into // the cache DB. diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 0470a283ed6e5..3d8fed3b5cc01 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -5,7 +5,7 @@ use crate::{ EthEvmConfig, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use alloy_consensus::Transaction as _; +use alloy_consensus::Transaction; use alloy_eips::{eip6110, eip7685::Requests}; use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; @@ -116,14 +116,14 @@ where impl EthExecutionStrategy where DB: Database + Display>, - EvmConfig: ConfigureEvm

, + EvmConfig: ConfigureEvm, { /// Configures a new evm configuration and block environment for the given block. /// /// # Caution /// /// This does not initialize the tx environment. - fn evm_env_for_block(&self, header: &alloy_consensus::Header) -> EnvWithHandlerCfg { + fn evm_env_for_block(&self, header: &EvmConfig::Header) -> EnvWithHandlerCfg { let EvmEnv { cfg_env_with_handler_cfg, block_env } = self.evm_config.cfg_and_block_env(header); EnvWithHandlerCfg::new_with_cfg_env(cfg_env_with_handler_cfg, block_env, Default::default()) @@ -156,7 +156,7 @@ where let env = self.evm_env_for_block(&block.header); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - self.system_caller.apply_pre_execution_changes(&block.block, &mut evm)?; + self.system_caller.apply_pre_execution_changes(&block.header, &mut evm)?; Ok(()) } diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 5758bdd5855e1..4d0fc8041d457 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -94,26 +94,25 @@ where Chainspec: EthereumHardforks, { /// Apply pre execution changes. - pub fn apply_pre_execution_changes( + pub fn apply_pre_execution_changes( &mut self, - block: &Block, + header: &EvmConfig::Header, evm: &mut Evm<'_, Ext, DB>, ) -> Result<(), BlockExecutionError> where DB: Database + DatabaseCommit, DB::Error: Display, - Block: reth_primitives_traits::Block
, { self.apply_blockhashes_contract_call( - block.header().timestamp(), - block.header().number(), - block.header().parent_hash(), + header.timestamp(), + header.number(), + header.parent_hash(), evm, )?; self.apply_beacon_root_contract_call( - block.header().timestamp(), - block.header().number(), - block.header().parent_beacon_block_root(), + header.timestamp(), + header.number(), + header.parent_beacon_block_root(), evm, )?; From d0684cf8bb934c0e27eaee7c6183b86f49aad331 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Wed, 8 Jan 2025 23:32:51 -0500 Subject: [PATCH 039/113] chore: remove outdated `as _;` imports (#13744) --- crates/evm/src/execute.rs | 2 +- crates/storage/provider/src/providers/blockchain_provider.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 9a9f653759183..8bf40d38caa0e 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -6,7 +6,6 @@ pub use reth_execution_errors::{ BlockExecutionError, BlockValidationError, InternalBlockExecutionError, }; pub use reth_execution_types::{BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives_traits::Block as _; pub use reth_storage_errors::provider::ProviderError; use crate::{system_calls::OnStateHook, TxEnvOverrides}; @@ -19,6 +18,7 @@ use alloy_primitives::{ use core::fmt::Display; use reth_consensus::ConsensusError; use reth_primitives::{BlockWithSenders, NodePrimitives, Receipt}; +use reth_primitives_traits::Block; use reth_prune_types::PruneModes; use reth_revm::batch::BlockBatchRecord; use revm::{ diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index d5d704001e54e..1c19e8260b8d9 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -30,7 +30,7 @@ use reth_primitives::{ Account, Block, BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, SealedBlock, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionSigned, }; -use reth_primitives_traits::BlockBody as _; +use reth_primitives_traits::BlockBody; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ @@ -795,7 +795,7 @@ mod tests { use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{BlockExt, EthPrimitives, Receipt, SealedBlock, StaticFileSegment}; - use reth_primitives_traits::{BlockBody as _, SignedTransaction}; + use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, ChangeSetReader, DatabaseProviderFactory, HeaderProvider, From 383eb2331c41cfc6d7523c691552264e7894fd25 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 9 Jan 2025 09:06:28 +0100 Subject: [PATCH 040/113] chore: integrate Test trait for sealed types (#13746) --- .../src/commands/debug_cmd/build_block.rs | 2 +- crates/consensus/common/src/validation.rs | 3 +- crates/engine/tree/src/tree/mod.rs | 6 +- crates/primitives-traits/src/block/mod.rs | 29 ++++++- crates/primitives-traits/src/header/sealed.rs | 17 ++-- crates/primitives/Cargo.toml | 2 +- crates/primitives/src/block.rs | 77 ++++++++++++++++++- 7 files changed, 119 insertions(+), 17 deletions(-) diff --git 
a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 7507e8bf11bc3..40110fe849888 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -238,7 +238,7 @@ impl> Command { debug!(target: "reth::cli", ?block, "Built new payload"); consensus.validate_header_with_total_difficulty(block, U256::MAX)?; - consensus.validate_header(block)?; + consensus.validate_header(block.sealed_header())?; consensus.validate_block_pre_execution(block)?; let senders = block.senders().expect("sender recovery failed"); diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 61251b80e5f8b..9e6a2ad90173e 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -64,8 +64,7 @@ pub fn validate_cancun_gas( ) -> Result<(), ConsensusError> { // Check that the blob gas used in the header matches the sum of the blob gas used by each // blob tx - let header_blob_gas_used = - block.header().blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; + let header_blob_gas_used = block.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; let total_blob_gas = block.body().blob_gas_used(); if total_blob_gas != header_blob_gas_used { return Err(ConsensusError::BlobGasUsedDiff(GotExpected { diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 00e314cf5916b..03b9f0ab50b66 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1809,7 +1809,7 @@ where return Err(e) } - if let Err(e) = self.consensus.validate_header(block) { + if let Err(e) = self.consensus.validate_header(block.sealed_header()) { error!(target: "engine::tree", ?block, "Failed to validate header {}: {e}", block.hash()); return Err(e) } @@ -2248,7 +2248,9 @@ where block.parent_hash().into(), )) })?; - if let Err(e) = self.consensus.validate_header_against_parent(&block, &parent_block) { + if let Err(e) = + self.consensus.validate_header_against_parent(block.sealed_header(), &parent_block) + { warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.hash()); return Err(e.into()) } diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index f161ced8258f8..85e29995f4b61 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -93,12 +93,35 @@ where /// /// This allows for modifying the block's header and body for testing purposes. #[cfg(any(test, feature = "test-utils"))] -pub trait TestBlock: Block { +pub trait TestBlock: Block { /// Returns mutable reference to block body. fn body_mut(&mut self) -> &mut Self::Body; /// Returns mutable reference to block header. fn header_mut(&mut self) -> &mut Self::Header; + + /// Updates the block header. + fn set_header(&mut self, header: Self::Header); + + /// Updates the parent block hash. + fn set_parent_hash(&mut self, hash: alloy_primitives::BlockHash) { + crate::header::test_utils::TestHeader::set_parent_hash(self.header_mut(), hash); + } + + /// Updates the block number. + fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { + crate::header::test_utils::TestHeader::set_block_number(self.header_mut(), number); + } + + /// Updates the block state root. 
+ fn set_state_root(&mut self, state_root: alloy_primitives::B256) { + crate::header::test_utils::TestHeader::set_state_root(self.header_mut(), state_root); + } + + /// Updates the block difficulty. + fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { + crate::header::test_utils::TestHeader::set_difficulty(self.header_mut(), difficulty); + } } #[cfg(any(test, feature = "test-utils"))] @@ -113,4 +136,8 @@ where fn header_mut(&mut self) -> &mut Self::Header { &mut self.header } + + fn set_header(&mut self, header: Self::Header) { + self.header = header + } } diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 4291735a6aa52..4b1a83fb50d77 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -138,9 +138,9 @@ where } #[cfg(any(test, feature = "test-utils"))] -impl SealedHeader { +impl SealedHeader { /// Updates the block header. - pub fn set_header(&mut self, header: Header) { + pub fn set_header(&mut self, header: H) { self.header = header } @@ -149,24 +149,29 @@ impl SealedHeader { self.hash = hash } + /// Returns a mutable reference to the header. + pub fn header_mut(&mut self) -> &mut H { + &mut self.header + } + /// Updates the parent block hash. pub fn set_parent_hash(&mut self, hash: BlockHash) { - self.header.parent_hash = hash + self.header.set_parent_hash(hash); } /// Updates the block number. pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { - self.header.number = number; + self.header.set_block_number(number); } /// Updates the block state root. pub fn set_state_root(&mut self, state_root: alloy_primitives::B256) { - self.header.state_root = state_root; + self.header.set_state_root(state_root); } /// Updates the block difficulty. pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { - self.header.difficulty = difficulty; + self.header.set_difficulty(difficulty); } } diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 6937ec2859abb..2ccaf4b0d0676 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -63,7 +63,7 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } # eth reth-chainspec = { workspace = true, features = ["arbitrary"] } reth-codecs = { workspace = true, features = ["test-utils"] } -reth-primitives-traits = { workspace = true, features = ["arbitrary"] } +reth-primitives-traits = { workspace = true, features = ["arbitrary", "test-utils"] } reth-testing-utils.workspace = true reth-trie-common = { workspace = true, features = ["arbitrary"] } revm-primitives = { workspace = true, features = ["arbitrary"] } diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 9908c06677fbf..fd0dc0cee40a1 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -4,7 +4,9 @@ use crate::{ }; use alloc::vec::Vec; use alloy_consensus::Header; -use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; +use alloy_eips::{ + eip1898::BlockWithParent, eip2718::Encodable2718, eip4895::Withdrawals, BlockNumHash, +}; use alloy_primitives::{Address, B256}; use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable}; use derive_more::{Deref, DerefMut}; @@ -162,11 +164,9 @@ impl BlockWithSenders { /// Sealed Ethereum full block. /// /// Withdrawals can be optionally included at the end of the RLP encoded message. 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct SealedBlock { /// Locked block header. - #[deref] - #[deref_mut] header: SealedHeader, /// Block body. body: B, @@ -185,6 +185,11 @@ impl SealedBlock { self.header.hash() } + /// Returns reference to block header. + pub const fn header(&self) -> &H { + self.header.header() + } + /// Returns reference to block body. pub const fn body(&self) -> &B { &self.body @@ -252,6 +257,16 @@ where H: alloy_consensus::BlockHeader, B: reth_primitives_traits::BlockBody, { + /// Return the number hash tuple. + pub fn num_hash(&self) -> BlockNumHash { + BlockNumHash::new(self.number(), self.hash()) + } + + /// Return a [`BlockWithParent`] for this header. + pub fn block_with_parent(&self) -> BlockWithParent { + BlockWithParent { parent: self.parent_hash(), block: self.num_hash() } + } + /// Ensures that the transaction root in the block header is valid. /// /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure @@ -388,6 +403,14 @@ where } } +impl Deref for SealedBlock { + type Target = H; + + fn deref(&self) -> &Self::Target { + self.header.header() + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a, H, B> arbitrary::Arbitrary<'a> for SealedBlock where @@ -399,6 +422,52 @@ where } } +#[cfg(any(test, feature = "test-utils"))] +impl SealedBlock +where + H: reth_primitives_traits::test_utils::TestHeader, +{ + /// Returns a mutable reference to the header. + pub fn header_mut(&mut self) -> &mut H { + self.header.header_mut() + } + + /// Returns a mutable reference to the header. + pub fn body_mut(&mut self) -> &mut B { + &mut self.body + } + + /// Updates the block header. + pub fn set_header(&mut self, header: H) { + self.header.set_header(header) + } + + /// Updates the block hash. + pub fn set_hash(&mut self, hash: alloy_primitives::BlockHash) { + self.header.set_hash(hash); + } + + /// Updates the parent block hash. + pub fn set_parent_hash(&mut self, hash: alloy_primitives::BlockHash) { + self.header.set_parent_hash(hash); + } + + /// Updates the block number. + pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { + self.header.set_block_number(number); + } + + /// Updates the block state root. + pub fn set_state_root(&mut self, state_root: B256) { + self.header.set_state_root(state_root); + } + + /// Updates the block difficulty. + pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { + self.header.set_difficulty(difficulty); + } +} + /// A helepr trait to construct [`SealedBlock`] from a [`reth_primitives_traits::Block`]. pub type SealedBlockFor = SealedBlock< ::Header, From 017217f3eb244affa3fe8ecef34bdc958da9ee0a Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 9 Jan 2025 12:40:45 +0100 Subject: [PATCH 041/113] feat(trie): add leaf value retrieval methods to `SparseStateTrie` (#13750) --- crates/trie/sparse/src/state.rs | 10 ++++++++++ crates/trie/sparse/src/trie.rs | 9 +++++++++ 2 files changed, 19 insertions(+) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index fecb3c5fb40e4..953198a36794f 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -97,6 +97,16 @@ impl SparseStateTrie { self.revealed.get(account).is_some_and(|slots| slots.contains(slot)) } + /// Returns reference to bytes representing leaf value for the target account. 
+ pub fn get_account_value(&self, account: &B256) -> Option<&Vec<u8>> { + self.state.as_revealed_ref()?.get_leaf_value(&Nibbles::unpack(account)) + } + + /// Returns reference to bytes representing leaf value for the target account and storage slot. + pub fn get_storage_slot_value(&self, account: &B256, slot: &B256) -> Option<&Vec<u8>> { + self.storages.get(account)?.as_revealed_ref()?.get_leaf_value(&Nibbles::unpack(slot)) + } + /// Returns mutable reference to storage sparse trie if it was revealed. pub fn storage_trie_mut( &mut self, diff --git a/crates/trie/sparse/src/trie.rs index f472578e3d9d8..c41fe5ee420d0 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -72,6 +72,15 @@ impl
<P> SparseTrie<P>
{ matches!(self, Self::Blind) } + /// Returns reference to revealed sparse trie if the trie is not blind. + pub fn as_revealed_ref(&self) -> Option<&RevealedSparseTrie
<P>
> { + if let Self::Revealed(revealed) = self { + Some(revealed) + } else { + None + } + } + /// Returns mutable reference to revealed sparse trie if the trie is not blind. pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie
<P>
> { if let Self::Revealed(revealed) = self { From 66f934b8d0ee80920b03c6754dcc0c2da0587e7b Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 9 Jan 2025 14:23:31 +0100 Subject: [PATCH 042/113] chore(trie): simplify blinded provider (#13753) --- crates/engine/tree/src/tree/root.rs | 26 ++++++++++++-------------- crates/trie/sparse/src/blinded.rs | 9 ++------- crates/trie/sparse/src/state.rs | 13 +++---------- crates/trie/sparse/src/trie.rs | 16 ++++------------ crates/trie/trie/src/proof/blinded.rs | 8 ++------ crates/trie/trie/src/witness.rs | 13 ++++--------- 6 files changed, 27 insertions(+), 58 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 660c304f75e56..b41de299a1250 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -17,7 +17,7 @@ use reth_trie::{ use reth_trie_parallel::{proof::ParallelProof, root::ParallelStateRootError}; use reth_trie_sparse::{ blinded::{BlindedProvider, BlindedProviderFactory}, - errors::{SparseStateTrieError, SparseStateTrieResult, SparseTrieError, SparseTrieErrorKind}, + errors::{SparseStateTrieError, SparseStateTrieResult, SparseTrieErrorKind}, SparseStateTrie, }; use revm_primitives::{keccak256, EvmState, B256}; @@ -278,7 +278,7 @@ pub struct StateRootTask<'env, Factory, BPF: BlindedProviderFactory> { thread_pool: &'env rayon::ThreadPool, } -impl<'env, Factory, ABP, SBP, BPF> StateRootTask<'env, Factory, BPF> +impl<'env, Factory, BPF> StateRootTask<'env, Factory, BPF> where Factory: DatabaseProviderFactory + StateCommitmentProvider @@ -286,12 +286,9 @@ where + Send + Sync + 'static, - ABP: BlindedProvider + Send + Sync + 'env, - SBP: BlindedProvider + Send + Sync + 'env, - BPF: BlindedProviderFactory - + Send - + Sync - + 'env, + BPF: BlindedProviderFactory + Send + Sync + 'env, + BPF::AccountNodeProvider: BlindedProvider + Send + Sync + 'env, + BPF::StorageNodeProvider: BlindedProvider + Send + Sync + 'env, { /// Creates a new state root task with the unified message channel pub fn new( @@ -759,16 +756,17 @@ where /// Updates the sparse trie with the given proofs and state, and returns the updated trie and the /// time it took. -fn update_sparse_trie< - ABP: BlindedProvider + Send + Sync, - SBP: BlindedProvider + Send + Sync, - BPF: BlindedProviderFactory + Send + Sync, ->( +fn update_sparse_trie( mut trie: Box>, multiproof: MultiProof, targets: MultiProofTargets, state: HashedPostState, -) -> SparseStateTrieResult<(Box>, Duration)> { +) -> SparseStateTrieResult<(Box>, Duration)> +where + BPF: BlindedProviderFactory + Send + Sync, + BPF::AccountNodeProvider: BlindedProvider + Send + Sync, + BPF::StorageNodeProvider: BlindedProvider + Send + Sync, +{ trace!(target: "engine::root::sparse", "Updating sparse trie"); let started_at = Instant::now(); diff --git a/crates/trie/sparse/src/blinded.rs b/crates/trie/sparse/src/blinded.rs index a9f0e89c29c1f..28a41ba11fa64 100644 --- a/crates/trie/sparse/src/blinded.rs +++ b/crates/trie/sparse/src/blinded.rs @@ -20,11 +20,8 @@ pub trait BlindedProviderFactory { /// Trie node provider for retrieving blinded nodes. pub trait BlindedProvider { - /// The error type for the provider. - type Error: Into; - /// Retrieve blinded node by path. - fn blinded_node(&mut self, path: &Nibbles) -> Result, Self::Error>; + fn blinded_node(&mut self, path: &Nibbles) -> Result, SparseTrieError>; } /// Default blinded node provider factory that creates [`DefaultBlindedProvider`]. 
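To make the simplification concrete: after this change, implementing the trait no longer requires naming an associated error type. A minimal sketch, not part of the patch — the `StaticNodeProvider` type and its backing map are hypothetical:

    use alloy_primitives::{map::HashMap, Bytes};
    use reth_execution_errors::SparseTrieError;
    use reth_trie::Nibbles;
    use reth_trie_sparse::blinded::BlindedProvider;

    /// Hypothetical provider that serves blinded nodes from a prebuilt map.
    struct StaticNodeProvider(HashMap<Nibbles, Bytes>);

    impl BlindedProvider for StaticNodeProvider {
        // The error type is now fixed to `SparseTrieError` by the trait itself.
        fn blinded_node(&mut self, path: &Nibbles) -> Result<Option<Bytes>, SparseTrieError> {
            Ok(self.0.get(path).cloned())
        }
    }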
@@ -49,9 +46,7 @@ impl BlindedProviderFactory for DefaultBlindedProviderFactory { pub struct DefaultBlindedProvider; impl BlindedProvider for DefaultBlindedProvider { - type Error = SparseTrieError; - - fn blinded_node(&mut self, _path: &Nibbles) -> Result, Self::Error> { + fn blinded_node(&mut self, _path: &Nibbles) -> Result, SparseTrieError> { Ok(None) } } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 953198a36794f..13718f87ac022 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,5 +1,5 @@ use crate::{ - blinded::{BlindedProvider, BlindedProviderFactory, DefaultBlindedProviderFactory}, + blinded::{BlindedProviderFactory, DefaultBlindedProviderFactory}, RevealedSparseTrie, SparseTrie, }; use alloy_primitives::{ @@ -8,9 +8,7 @@ use alloy_primitives::{ Bytes, B256, }; use alloy_rlp::{Decodable, Encodable}; -use reth_execution_errors::{ - SparseStateTrieErrorKind, SparseStateTrieResult, SparseTrieError, SparseTrieErrorKind, -}; +use reth_execution_errors::{SparseStateTrieErrorKind, SparseStateTrieResult, SparseTrieErrorKind}; use reth_primitives_traits::Account; use reth_tracing::tracing::trace; use reth_trie_common::{ @@ -452,12 +450,7 @@ impl SparseStateTrie { }) } } -impl SparseStateTrie -where - F: BlindedProviderFactory, - SparseTrieError: From<::Error> - + From<::Error>, -{ +impl SparseStateTrie { /// Update the account leaf node. pub fn update_account_leaf( &mut self, diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index c41fe5ee420d0..7ff0e40e1a21c 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -5,7 +5,7 @@ use alloy_primitives::{ B256, }; use alloy_rlp::Decodable; -use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind, SparseTrieResult}; +use reth_execution_errors::{SparseTrieErrorKind, SparseTrieResult}; use reth_tracing::tracing::trace; use reth_trie_common::{ prefix_set::{PrefixSet, PrefixSetMut}, @@ -73,7 +73,7 @@ impl
<P> SparseTrie<P>
{ } /// Returns reference to revealed sparse trie if the trie is not blind. - pub fn as_revealed_ref(&self) -> Option<&RevealedSparseTrie
<P>
> { + pub const fn as_revealed_ref(&self) -> Option<&RevealedSparseTrie
<P>
> { if let Self::Revealed(revealed) = self { Some(revealed) } else { @@ -131,11 +131,7 @@ impl
<P> SparseTrie<P>
{ } } -impl
<P> SparseTrie<P>
-where - P: BlindedProvider, - SparseTrieError: From<P::Error>, -{ +impl<P> SparseTrie<P>
{ /// Update the leaf node. pub fn update_leaf(&mut self, path: Nibbles, value: Vec<u8>) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; @@ -825,11 +821,7 @@ impl
<P> RevealedSparseTrie<P>
{ } } -impl
<P> RevealedSparseTrie<P>
-where - P: BlindedProvider, - SparseTrieError: From<P::Error>, -{ +impl<P> RevealedSparseTrie<P>
{ /// Update the leaf node with provided value. pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { self.prefix_set.insert(path.clone()); diff --git a/crates/trie/trie/src/proof/blinded.rs b/crates/trie/trie/src/proof/blinded.rs index 33a1a43b579b3..c8d0f3bb5a22c 100644 --- a/crates/trie/trie/src/proof/blinded.rs +++ b/crates/trie/trie/src/proof/blinded.rs @@ -84,9 +84,7 @@ where T: TrieCursorFactory + Clone + Send + Sync, H: HashedCursorFactory + Clone + Send + Sync, { - type Error = SparseTrieError; - - fn blinded_node(&mut self, path: &Nibbles) -> Result, Self::Error> { + fn blinded_node(&mut self, path: &Nibbles) -> Result, SparseTrieError> { let targets = HashMap::from_iter([(pad_path_to_key(path), HashSet::default())]); let proof = Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone()) @@ -128,9 +126,7 @@ where T: TrieCursorFactory + Clone + Send + Sync, H: HashedCursorFactory + Clone + Send + Sync, { - type Error = SparseTrieError; - - fn blinded_node(&mut self, path: &Nibbles) -> Result, Self::Error> { + fn blinded_node(&mut self, path: &Nibbles) -> Result, SparseTrieError> { let targets = HashSet::from_iter([pad_path_to_key(path)]); let storage_prefix_set = self.prefix_sets.storage_prefix_sets.get(&self.account).cloned().unwrap_or_default(); diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index e29792146856b..5d43c0ea145f1 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -212,8 +212,8 @@ impl WitnessBlindedProviderFactory { impl BlindedProviderFactory for WitnessBlindedProviderFactory where F: BlindedProviderFactory, - F::AccountNodeProvider: BlindedProvider, - F::StorageNodeProvider: BlindedProvider, + F::AccountNodeProvider: BlindedProvider, + F::StorageNodeProvider: BlindedProvider, { type AccountNodeProvider = WitnessBlindedProvider; type StorageNodeProvider = WitnessBlindedProvider; @@ -243,13 +243,8 @@ impl
<P> WitnessBlindedProvider<P>
{ } } -impl
<P> BlindedProvider for WitnessBlindedProvider<P>
-where - P: BlindedProvider, -{ - type Error = P::Error; - - fn blinded_node(&mut self, path: &Nibbles) -> Result<Option<Bytes>, Self::Error> { +impl<P: BlindedProvider> BlindedProvider for WitnessBlindedProvider<P>
{ + fn blinded_node(&mut self, path: &Nibbles) -> Result, SparseTrieError> { let maybe_node = self.provider.blinded_node(path)?; if let Some(node) = &maybe_node { self.tx From bf65ed45c5c7e0ea4a8ea5228cb813fa5b9a3cac Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 9 Jan 2025 14:58:09 +0100 Subject: [PATCH 043/113] chore!: make senders fields private (#13752) --- crates/chain-state/src/in_memory.rs | 4 +- crates/chain-state/src/test_utils.rs | 5 +- crates/engine/tree/src/download.rs | 12 ++--- crates/engine/tree/src/tree/mod.rs | 19 ++++--- crates/evm/execution-types/src/chain.rs | 4 +- crates/exex/exex/src/backfill/job.rs | 2 +- crates/optimism/evm/src/execute.rs | 16 +++--- crates/primitives/src/block.rs | 54 +++++++++++++++++-- .../rpc-eth-api/src/helpers/pending_block.rs | 2 +- crates/rpc/rpc-types-compat/src/block.rs | 2 +- crates/rpc/rpc/src/validation.rs | 2 +- .../src/providers/blockchain_provider.rs | 8 +-- .../src/providers/database/provider.rs | 2 +- .../storage/provider/src/test_utils/blocks.rs | 25 +++++++-- .../transaction-pool/src/blobstore/tracker.rs | 16 +++--- 15 files changed, 117 insertions(+), 56 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index d0aafbd57e2b8..fac148d4a716d 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -648,7 +648,7 @@ impl BlockState { pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { let block = self.block.block().clone(); let senders = self.block.senders().clone(); - SealedBlockWithSenders { block, senders } + SealedBlockWithSenders::new_unchecked(block, senders) } /// Returns the hash of executed block that determines the state. @@ -840,7 +840,7 @@ impl ExecutedBlock { /// /// Note: this clones the block and senders. 
pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { - SealedBlockWithSenders { block: (*self.block).clone(), senders: (*self.senders).clone() } + SealedBlockWithSenders::new_unchecked((*self.block).clone(), (*self.senders).clone()) } /// Returns a reference to the block's execution outcome diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 2ce18ea6d74c3..a0ddeb8d0fe2c 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -207,9 +207,10 @@ impl TestBlockBuilder { ) -> ExecutedBlock { let block_with_senders = self.generate_random_block(block_number, parent_hash); + let (block, senders) = block_with_senders.split(); ExecutedBlock::new( - Arc::new(block_with_senders.block.clone()), - Arc::new(block_with_senders.senders), + Arc::new(block), + Arc::new(senders), Arc::new(ExecutionOutcome::new( BundleState::default(), receipts, diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs index 26c5b405de064..1359843c0a354 100644 --- a/crates/engine/tree/src/download.rs +++ b/crates/engine/tree/src/download.rs @@ -233,10 +233,9 @@ where .into_iter() .map(|b| { let senders = b.senders().unwrap_or_default(); - OrderedSealedBlockWithSenders(SealedBlockWithSenders { - block: b, - senders, - }) + OrderedSealedBlockWithSenders(SealedBlockWithSenders::new_unchecked( + b, senders, + )) }) .map(Reverse), ); @@ -290,14 +289,13 @@ impl Ord for OrderedSealedBlockWithSenders { impl From> for OrderedSealedBlockWithSenders { fn from(block: SealedBlockFor) -> Self { let senders = block.senders().unwrap_or_default(); - Self(SealedBlockWithSenders { block, senders }) + Self(SealedBlockWithSenders::new_unchecked(block, senders)) } } impl From> for SealedBlockWithSenders { fn from(value: OrderedSealedBlockWithSenders) -> Self { - let senders = value.0.senders; - Self { block: value.0.block, senders } + value.0 } } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 03b9f0ab50b66..1103c569f34c7 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1597,10 +1597,11 @@ where return Ok(None) }; - let SealedBlockWithSenders { block, senders } = self + let (block, senders) = self .provider .sealed_block_with_senders(hash.into(), TransactionVariant::WithHash)? - .ok_or_else(|| ProviderError::HeaderNotFound(hash.into()))?; + .ok_or_else(|| ProviderError::HeaderNotFound(hash.into()))? + .split(); let execution_output = self .provider .get_state(block.number())? 
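Since the `senders` field is no longer public, construction and destructuring go through the new accessors. A minimal sketch of the call-site pattern this patch migrates to, not part of the patch; `sealed_block` and `senders` are assumed inputs:

    use alloy_primitives::Address;
    use reth_primitives::{SealedBlock, SealedBlockWithSenders};

    fn example(sealed_block: SealedBlock, senders: Vec<Address>) {
        // `new` checks that the sender count matches the transaction count;
        // `new_unchecked` skips that validation for callers that know it holds.
        let block = SealedBlockWithSenders::new_unchecked(sealed_block, senders);

        // Struct literals and direct field reads are replaced by accessors...
        let _first = block.senders().first().copied();
        let _count = block.senders_iter().count();

        // ...and `into_components` is renamed to `split`.
        let (_sealed, _senders) = block.split();
    }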
@@ -2452,7 +2453,7 @@ where let executed: ExecutedBlock = ExecutedBlock { block: sealed_block.clone(), - senders: Arc::new(block.senders), + senders: Arc::new(block.senders().to_vec()), execution_output: Arc::new(ExecutionOutcome::from((output, block_number))), hashed_state: Arc::new(hashed_state), trie: Arc::new(trie_output), @@ -3002,9 +3003,11 @@ mod tests { self.persist_blocks( blocks .into_iter() - .map(|b| SealedBlockWithSenders { - block: (*b.block).clone(), - senders: b.senders.to_vec(), + .map(|b| { + SealedBlockWithSenders::new_unchecked( + (*b.block).clone(), + b.senders().clone(), + ) }) .collect(), ); @@ -3710,7 +3713,7 @@ mod tests { for block in &chain_a { test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { block: Arc::new(block.block.clone()), - senders: Arc::new(block.senders.clone()), + senders: Arc::new(block.senders().to_vec()), execution_output: Arc::new(ExecutionOutcome::default()), hashed_state: Arc::new(HashedPostState::default()), trie: Arc::new(TrieUpdates::default()), @@ -3721,7 +3724,7 @@ mod tests { for block in &chain_b { test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { block: Arc::new(block.block.clone()), - senders: Arc::new(block.senders.clone()), + senders: Arc::new(block.senders().to_vec()), execution_output: Arc::new(ExecutionOutcome::default()), hashed_state: Arc::new(HashedPostState::default()), trie: Arc::new(TrieUpdates::default()), diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 43b5269bef3bb..e5f7270bfef71 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -784,13 +784,13 @@ mod tests { let block1_hash = B256::new([15; 32]); block1.set_block_number(1); block1.set_hash(block1_hash); - block1.senders.push(Address::new([4; 20])); + block1.push_sender(Address::new([4; 20])); let mut block2: SealedBlockWithSenders = Default::default(); let block2_hash = B256::new([16; 32]); block2.set_block_number(2); block2.set_hash(block2_hash); - block2.senders.push(Address::new([4; 20])); + block2.push_sender(Address::new([4; 20])); let mut block_state_extended = execution_outcome1; block_state_extended.extend(execution_outcome2); diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 3bb0e04ec25f9..126a2562f7085 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -107,7 +107,7 @@ where let execute_start = Instant::now(); // Unseal the block for execution - let (block, senders) = block.into_components(); + let (block, senders) = block.split(); let (header, body) = block.split(); let (unsealed_header, hash) = header.split(); let block = P::Block::new(unsealed_header, body).with_senders_unchecked(senders); diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 402f0ab16f7ac..ac5d750626567 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -420,13 +420,13 @@ mod tests { // Attempt to execute a block with one deposit and one non-deposit transaction executor - .execute_and_verify_one(&BlockWithSenders { - block: Block { + .execute_and_verify_one(&BlockWithSenders::new_unchecked( + Block { header, body: BlockBody { transactions: vec![tx, tx_deposit], ..Default::default() }, }, - senders: vec![addr, addr], - }) + vec![addr, addr], + )) .unwrap(); let receipts = executor.receipts(); @@ -496,13 +496,13 @@ mod tests { // attempt to execute an empty block with parent beacon block 
root, this should not fail executor - .execute_and_verify_one(&BlockWithSenders { - block: Block { + .execute_and_verify_one(&BlockWithSenders::new_unchecked( + Block { header, body: BlockBody { transactions: vec![tx, tx_deposit], ..Default::default() }, }, - senders: vec![addr, addr], - }) + vec![addr, addr], + )) .expect("Executing a block while canyon is active should not fail"); let receipts = executor.receipts(); diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index fd0dc0cee40a1..8c6c8a870d612 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -91,7 +91,7 @@ pub struct BlockWithSenders { #[deref_mut] pub block: B, /// List of senders that match the transactions in the block - pub senders: Vec
<Address>, + senders: Vec<Address>
, } impl BlockWithSenders { @@ -105,6 +105,16 @@ impl BlockWithSenders { (block.body().transactions().len() == senders.len()).then_some(Self { block, senders }) } + /// Returns all senders of the transactions in the block. + pub fn senders(&self) -> &[Address] { + &self.senders + } + + /// Returns an iterator over all senders in the block. + pub fn senders_iter(&self) -> impl Iterator { + self.senders.iter() + } + /// Seal the block with a known hash. /// /// WARNING: This method does not perform validation whether the hash is correct. @@ -122,7 +132,7 @@ impl BlockWithSenders { /// Split Structure to its components #[inline] - pub fn into_components(self) -> (B, Vec
<Address>) { + pub fn split(self) -> (B, Vec<Address>
) { (self.block, self.senders) } @@ -483,7 +493,7 @@ pub struct SealedBlockWithSenders { #[serde(bound = "SealedBlock: Serialize + serde::de::DeserializeOwned")] pub block: SealedBlock, /// List of senders that match transactions from block. - pub senders: Vec
<Address>, + senders: Vec<Address>
, } impl Default for SealedBlockWithSenders { @@ -493,6 +503,14 @@ impl Default for SealedBlockWithSenders { } impl SealedBlockWithSenders { + /// New sealed block with sender + pub const fn new_unchecked( + block: SealedBlock, + senders: Vec
<Address>, + ) -> Self { + Self { block, senders } + } + /// New sealed block with sender. Return none if len of tx and senders does not match pub fn new(block: SealedBlock<B::Header, B::Body>, senders: Vec<Address>
) -> Option<Self> { (block.body.transactions().len() == senders.len()).then_some(Self { block, senders }) @@ -500,16 +518,26 @@ impl SealedBlockWithSenders { + /// Returns all senders of the transactions in the block. + pub fn senders(&self) -> &[Address] { + &self.senders + } + + /// Returns an iterator over all senders in the block. + pub fn senders_iter(&self) -> impl Iterator<Item = &Address> { + self.senders.iter() + } + /// Split Structure to its components #[inline] - pub fn into_components(self) -> (SealedBlock<B::Header, B::Body>, Vec<Address>
) { + pub fn split(self) -> (SealedBlock<B::Header, B::Body>, Vec<Address>
) { (self.block, self.senders) } /// Returns the unsealed [`BlockWithSenders`] #[inline] pub fn unseal(self) -> BlockWithSenders { - let (block, senders) = self.into_components(); + let (block, senders) = self.split(); let (header, body) = block.split(); let header = header.unseal(); BlockWithSenders::new_unchecked(B::new(header, body), senders) @@ -555,6 +583,22 @@ impl SealedBlockWithSenders { } } +#[cfg(any(test, feature = "test-utils"))] +impl SealedBlockWithSenders +where + B: reth_primitives_traits::Block, +{ + /// Returns a mutable reference to the recovered senders. + pub fn senders_mut(&mut self) -> &mut Vec
{ + &mut self.senders + } + + /// Appends the sender to the list of senders. + pub fn push_sender(&mut self, sender: Address) { + self.senders.push(sender); + } +} + #[cfg(any(test, feature = "arbitrary"))] impl<'a, B> arbitrary::Arbitrary<'a> for SealedBlockWithSenders where diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index fd69422da68b6..9b52b94a4db86 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -426,6 +426,6 @@ pub trait LoadPendingBlock: results, ); - Ok((SealedBlockWithSenders { block: block.seal_slow(), senders }, receipts)) + Ok((SealedBlockWithSenders::new_unchecked(block.seal_slow(), senders), receipts)) } } diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index 5eac699e12a51..ed97c7f5b40bc 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -80,7 +80,7 @@ where // `from_block_with_transactions`, however we need to compute the length before let block_length = block.block.length(); let transactions = block.block.body().transactions().to_vec(); - let transactions_with_senders = transactions.into_iter().zip(block.senders); + let transactions_with_senders = transactions.into_iter().zip(block.senders_iter().copied()); let transactions = transactions_with_senders .enumerate() .map(|(idx, (tx, sender))| { diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 1c40004f8bf39..d2faf0dd52e9d 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -115,7 +115,7 @@ where if self.disallow.contains(&message.proposer_fee_recipient) { return Err(ValidationApiError::Blacklist(message.proposer_fee_recipient)) } - for (sender, tx) in block.senders.iter().zip(block.transactions()) { + for (sender, tx) in block.senders_iter().zip(block.transactions()) { if self.disallow.contains(sender) { return Err(ValidationApiError::Blacklist(*sender)) } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 1c19e8260b8d9..046d000c72115 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1211,10 +1211,10 @@ mod tests { assert_eq!( provider.pending_block_with_senders()?, - Some(reth_primitives::SealedBlockWithSenders { - block: block.clone(), - senders: block.senders().unwrap() - }) + Some(reth_primitives::SealedBlockWithSenders::new_unchecked( + block.clone(), + block.senders().unwrap() + )) ); assert_eq!(provider.pending_block_and_receipts()?, Some((block, vec![]))); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8f7dbbc2177b0..34713d108ba22 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -2800,7 +2800,7 @@ impl BlockWrite // Ensures we have all the senders for the block's transactions. 
for (transaction, sender) in - block.block.body().transactions().iter().zip(block.senders.iter()) + block.block.body().transactions().iter().zip(block.senders_iter()) { let hash = transaction.tx_hash(); diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 9924375ecb997..44773402450dc 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -240,7 +240,10 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { header.parent_hash = B256::ZERO; let block = SealedBlock::new(SealedHeader::seal(header), body); - (SealedBlockWithSenders { block, senders: vec![Address::new([0x30; 20])] }, execution_outcome) + ( + SealedBlockWithSenders::new_unchecked(block, vec![Address::new([0x30; 20])]), + execution_outcome, + ) } /// Block two that points to block 1 @@ -304,7 +307,10 @@ fn block2( header.parent_hash = parent_hash; let block = SealedBlock::new(SealedHeader::seal(header), body); - (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) + ( + SealedBlockWithSenders::new_unchecked(block, vec![Address::new([0x31; 20])]), + execution_outcome, + ) } /// Block three that points to block 2 @@ -368,7 +374,10 @@ fn block3( header.parent_hash = parent_hash; let block = SealedBlock::new(SealedHeader::seal(header), body); - (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) + ( + SealedBlockWithSenders::new_unchecked(block, vec![Address::new([0x31; 20])]), + execution_outcome, + ) } /// Block four that points to block 3 @@ -457,7 +466,10 @@ fn block4( header.parent_hash = parent_hash; let block = SealedBlock::new(SealedHeader::seal(header), body); - (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) + ( + SealedBlockWithSenders::new_unchecked(block, vec![Address::new([0x31; 20])]), + execution_outcome, + ) } /// Block five that points to block 4 @@ -543,5 +555,8 @@ fn block5( header.parent_hash = parent_hash; let block = SealedBlock::new(SealedHeader::seal(header), body); - (SealedBlockWithSenders { block, senders: vec![Address::new([0x31; 20])] }, execution_outcome) + ( + SealedBlockWithSenders::new_unchecked(block, vec![Address::new([0x31; 20])]), + execution_outcome, + ) } diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index 88c8faa7872a7..de51b87c825fc 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -127,8 +127,8 @@ mod tests { let tx3_hash = B256::random(); // Non-EIP-4844 transaction // Creating a first block with EIP-4844 transactions - let block1 = SealedBlockWithSenders { - block: SealedBlock::new( + let block1 = SealedBlockWithSenders::new_unchecked( + SealedBlock::new( SealedHeader::new(Header { number: 10, ..Default::default() }, B256::random()), BlockBody { transactions: vec![ @@ -152,13 +152,13 @@ mod tests { ..Default::default() }, ), - ..Default::default() - }; + Default::default(), + ); // Creating a second block with EIP-1559 and EIP-2930 transactions // Note: This block does not contain any EIP-4844 transactions - let block2 = SealedBlockWithSenders { - block: SealedBlock::new( + let block2 = SealedBlockWithSenders::new_unchecked( + SealedBlock::new( SealedHeader::new(Header { number: 11, ..Default::default() }, B256::random()), BlockBody { transactions: vec![ @@ -176,8 +176,8 
@@ mod tests { ..Default::default() }, ), - ..Default::default() - }; + Default::default(), + ); // Extract blocks from the chain let chain: Chain = Chain::new(vec![block1, block2], Default::default(), None); From 4a8c88f4d0e319d2ec61cffbbfb014ab0886a5a4 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Thu, 9 Jan 2025 15:25:00 +0100 Subject: [PATCH 044/113] chore(engine): use Arc for StateRootTask (#13755) --- crates/engine/tree/benches/state_root_task.rs | 16 +++++++----- crates/engine/tree/src/tree/mod.rs | 16 +++++++----- crates/engine/tree/src/tree/root.rs | 26 +++++++++++-------- crates/trie/parallel/src/proof.rs | 12 ++++----- 4 files changed, 39 insertions(+), 31 deletions(-) diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index 9958cf0cacb8d..8c5b871385abb 100644 --- a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -23,7 +23,7 @@ use revm_primitives::{ Account as RevmAccount, AccountInfo, AccountStatus, Address, EvmState, EvmStorageSlot, HashMap, B256, KECCAK_EMPTY, U256, }; -use std::hint::black_box; +use std::{hint::black_box, sync::Arc}; #[derive(Debug, Clone)] struct BenchParams { @@ -217,11 +217,13 @@ fn bench_state_root(c: &mut Criterion) { let num_threads = std::thread::available_parallelism() .map_or(1, |num| (num.get() / 2).max(1)); - let state_root_task_pool = rayon::ThreadPoolBuilder::new() - .num_threads(num_threads) - .thread_name(|i| format!("proof-worker-{}", i)) - .build() - .expect("Failed to create proof worker thread pool"); + let state_root_task_pool = Arc::new( + rayon::ThreadPoolBuilder::new() + .num_threads(num_threads) + .thread_name(|i| format!("proof-worker-{}", i)) + .build() + .expect("Failed to create proof worker thread pool"), + ); ( config, @@ -258,7 +260,7 @@ fn bench_state_root(c: &mut Criterion) { let task = StateRootTask::new( config, blinded_provider_factory, - &state_root_task_pool, + state_root_task_pool, ); let mut hook = task.state_hook(); let handle = task.spawn(scope); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 1103c569f34c7..429dda7283cb3 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -539,7 +539,7 @@ where /// The engine API variant of this handler engine_kind: EngineApiKind, /// state root task thread pool - state_root_task_pool: rayon::ThreadPool, + state_root_task_pool: Arc, } impl std::fmt::Debug @@ -606,11 +606,13 @@ where let num_threads = std::thread::available_parallelism().map_or(1, |num| (num.get() / 2).max(1)); - let state_root_task_pool = rayon::ThreadPoolBuilder::new() - .num_threads(num_threads) - .thread_name(|i| format!("srt-worker-{}", i)) - .build() - .expect("Failed to create proof worker thread pool"); + let state_root_task_pool = Arc::new( + rayon::ThreadPoolBuilder::new() + .num_threads(num_threads) + .thread_name(|i| format!("srt-worker-{}", i)) + .build() + .expect("Failed to create proof worker thread pool"), + ); Self { provider, @@ -2313,7 +2315,7 @@ where let state_root_task = StateRootTask::new( state_root_config, blinded_provider_factory, - &self.state_root_task_pool, + self.state_root_task_pool.clone(), ); let state_hook = state_root_task.state_hook(); (Some(state_root_task.spawn(scope)), Box::new(state_hook) as Box) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index b41de299a1250..6bb213bce4029 100644 --- a/crates/engine/tree/src/tree/root.rs +++ 
b/crates/engine/tree/src/tree/root.rs @@ -260,7 +260,7 @@ fn evm_state_to_hashed_post_state(update: EvmState) -> HashedPostState { /// to the tree. /// Then it updates relevant leaves according to the result of the transaction. #[derive(Debug)] -pub struct StateRootTask<'env, Factory, BPF: BlindedProviderFactory> { +pub struct StateRootTask { /// Task configuration. config: StateRootConfig, /// Receiver for state root related messages. @@ -275,10 +275,10 @@ pub struct StateRootTask<'env, Factory, BPF: BlindedProviderFactory> { /// progress. sparse_trie: Option>>, /// Reference to the shared thread pool for parallel proof generation - thread_pool: &'env rayon::ThreadPool, + thread_pool: Arc, } -impl<'env, Factory, BPF> StateRootTask<'env, Factory, BPF> +impl<'env, Factory, BPF> StateRootTask where Factory: DatabaseProviderFactory + StateCommitmentProvider @@ -294,7 +294,7 @@ where pub fn new( config: StateRootConfig, blinded_provider: BPF, - thread_pool: &'env rayon::ThreadPool, + thread_pool: Arc, ) -> Self { let (tx, rx) = channel(); @@ -344,7 +344,7 @@ where fetched_proof_targets: &mut MultiProofTargets, proof_sequence_number: u64, state_root_message_sender: Sender>, - thread_pool: &'env rayon::ThreadPool, + thread_pool: Arc, ) { let proof_targets = targets.into_iter().map(|address| (keccak256(address), Default::default())).collect(); @@ -371,7 +371,7 @@ where fetched_proof_targets: &mut MultiProofTargets, proof_sequence_number: u64, state_root_message_sender: Sender>, - thread_pool: &'env rayon::ThreadPool, + thread_pool: Arc, ) { let hashed_state_update = evm_state_to_hashed_post_state(update); @@ -396,7 +396,7 @@ where proof_targets: MultiProofTargets, proof_sequence_number: u64, state_root_message_sender: Sender>, - thread_pool: &'env rayon::ThreadPool, + thread_pool: Arc, ) { // Dispatch proof gathering for this state update scope.spawn(move |_| { @@ -533,7 +533,7 @@ where &mut self.fetched_proof_targets, self.proof_sequencer.next_sequence(), self.tx.clone(), - self.thread_pool, + self.thread_pool.clone(), ); } StateRootMessage::StateUpdate(update) => { @@ -557,7 +557,7 @@ where &mut self.fetched_proof_targets, self.proof_sequencer.next_sequence(), self.tx.clone(), - self.thread_pool, + self.thread_pool.clone(), ); } StateRootMessage::FinishedStateUpdates => { @@ -735,7 +735,7 @@ fn get_proof_targets( /// Calculate multiproof for the targets. #[inline] fn calculate_multiproof( - thread_pool: &rayon::ThreadPool, + thread_pool: Arc, config: StateRootConfig, proof_targets: MultiProofTargets, ) -> ProviderResult @@ -993,7 +993,11 @@ mod tests { .expect("Failed to create proof worker thread pool"); let (root_from_task, _) = std::thread::scope(|std_scope| { - let task = StateRootTask::new(config, blinded_provider_factory, &state_root_task_pool); + let task = StateRootTask::new( + config, + blinded_provider_factory, + Arc::new(state_root_task_pool), + ); let mut state_hook = task.state_hook(); let handle = task.spawn(std_scope); diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index ce0c185e1aa36..31df5f2328794 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -32,7 +32,7 @@ use crate::metrics::ParallelStateRootMetrics; /// TODO: #[derive(Debug)] -pub struct ParallelProof<'env, Factory> { +pub struct ParallelProof { /// Consistent view of the database. 
view: ConsistentDbView, /// The sorted collection of cached in-memory intermediate trie nodes that @@ -47,20 +47,20 @@ pub struct ParallelProof<'env, Factory> { /// Flag indicating whether to include branch node hash masks in the proof. collect_branch_node_hash_masks: bool, /// Thread pool for local tasks - thread_pool: &'env rayon::ThreadPool, + thread_pool: Arc, /// Parallel state root metrics. #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics, } -impl<'env, Factory> ParallelProof<'env, Factory> { +impl ParallelProof { /// Create new state proof generator. pub fn new( view: ConsistentDbView, nodes_sorted: Arc, state_sorted: Arc, prefix_sets: Arc, - thread_pool: &'env rayon::ThreadPool, + thread_pool: Arc, ) -> Self { Self { view, @@ -81,7 +81,7 @@ impl<'env, Factory> ParallelProof<'env, Factory> { } } -impl ParallelProof<'_, Factory> +impl ParallelProof where Factory: DatabaseProviderFactory + StateCommitmentProvider @@ -407,7 +407,7 @@ mod tests { Default::default(), Default::default(), Default::default(), - &state_root_task_pool + Arc::new(state_root_task_pool) ) .multiproof(targets.clone()) .unwrap(), From 1f78b9e7e48e98ddda7ffdfa62ebdb66b67db2d0 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 9 Jan 2025 16:34:57 +0100 Subject: [PATCH 045/113] chore(trie): reveal witness by ref (#13751) --- crates/trie/sparse/src/state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 13718f87ac022..b95cb62c7e652 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -285,7 +285,7 @@ impl SparseStateTrie { pub fn reveal_witness( &mut self, state_root: B256, - witness: B256HashMap, + witness: &B256HashMap, ) -> SparseStateTrieResult<()> { // Create a `(hash, path, maybe_account)` queue for traversing witness trie nodes // starting from the root node. From 986c75434a92ca34fdd37d0fa1d9cf53d7e74477 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 9 Jan 2025 15:27:50 -0500 Subject: [PATCH 046/113] chore(tree): use MultiProofTargets for PrefetchProofs (#13717) --- crates/engine/tree/src/tree/root.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 6bb213bce4029..790d24e948d74 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -1,6 +1,6 @@ //! State root task related functionality. -use alloy_primitives::{map::HashSet, Address}; +use alloy_primitives::map::HashSet; use derive_more::derive::Deref; use rayon::iter::{ParallelBridge, ParallelIterator}; use reth_errors::{ProviderError, ProviderResult}; @@ -101,7 +101,7 @@ impl StateRootConfig { #[derive(Debug)] pub enum StateRootMessage { /// Prefetch proof targets - PrefetchProofs(HashSet
), + PrefetchProofs(MultiProofTargets), /// New state update from transaction execution StateUpdate(EvmState), /// Proof calculation completed for a specific state update @@ -340,21 +340,19 @@ where fn on_prefetch_proof( scope: &rayon::Scope<'env>, config: StateRootConfig, - targets: HashSet
, + targets: MultiProofTargets, fetched_proof_targets: &mut MultiProofTargets, proof_sequence_number: u64, state_root_message_sender: Sender>, thread_pool: Arc, ) { - let proof_targets = - targets.into_iter().map(|address| (keccak256(address), Default::default())).collect(); - extend_multi_proof_targets_ref(fetched_proof_targets, &proof_targets); + extend_multi_proof_targets_ref(fetched_proof_targets, &targets); Self::spawn_multiproof( scope, config, Default::default(), - proof_targets, + targets, proof_sequence_number, state_root_message_sender, thread_pool, From 69f9e1628a33dfeab1b66b4d28015269207e4907 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 10 Jan 2025 11:28:54 +0000 Subject: [PATCH 047/113] fix(trie): sparse trie tree masks (#13760) --- crates/engine/tree/src/tree/root.rs | 2 +- crates/engine/tree/src/tree/trie_updates.rs | 2 +- crates/trie/common/src/proofs.rs | 9 + crates/trie/parallel/src/proof.rs | 41 ++-- crates/trie/sparse/src/state.rs | 43 ++-- crates/trie/sparse/src/trie.rs | 221 ++++++++++++-------- crates/trie/trie/src/proof/mod.rs | 84 ++++---- 7 files changed, 245 insertions(+), 157 deletions(-) diff --git a/crates/engine/tree/src/tree/root.rs b/crates/engine/tree/src/tree/root.rs index 790d24e948d74..93cac7b435ed8 100644 --- a/crates/engine/tree/src/tree/root.rs +++ b/crates/engine/tree/src/tree/root.rs @@ -748,7 +748,7 @@ where config.prefix_sets, thread_pool, ) - .with_branch_node_hash_masks(true) + .with_branch_node_masks(true) .multiproof(proof_targets)?) } diff --git a/crates/engine/tree/src/tree/trie_updates.rs b/crates/engine/tree/src/tree/trie_updates.rs index ea78aca13b87a..576f0c7426476 100644 --- a/crates/engine/tree/src/tree/trie_updates.rs +++ b/crates/engine/tree/src/tree/trie_updates.rs @@ -198,7 +198,7 @@ fn branch_nodes_equal( ) -> bool { if let (Some(task), Some(regular)) = (task.as_ref(), regular.as_ref()) { task.state_mask == regular.state_mask && - // We do not compare the tree mask because it is known to be mismatching + task.tree_mask == regular.tree_mask && task.hash_mask == regular.hash_mask && task.hashes == regular.hashes && task.root_hash == regular.root_hash diff --git a/crates/trie/common/src/proofs.rs b/crates/trie/common/src/proofs.rs index 54171710761e1..2e64ef39728c4 100644 --- a/crates/trie/common/src/proofs.rs +++ b/crates/trie/common/src/proofs.rs @@ -29,6 +29,8 @@ pub struct MultiProof { pub account_subtree: ProofNodes, /// The hash masks of the branch nodes in the account proof. pub branch_node_hash_masks: HashMap, + /// The tree masks of the branch nodes in the account proof. + pub branch_node_tree_masks: HashMap, /// Storage trie multiproofs. pub storages: B256HashMap, } @@ -115,6 +117,7 @@ impl MultiProof { self.account_subtree.extend_from(other.account_subtree); self.branch_node_hash_masks.extend(other.branch_node_hash_masks); + self.branch_node_tree_masks.extend(other.branch_node_tree_masks); for (hashed_address, storage) in other.storages { match self.storages.entry(hashed_address) { @@ -123,6 +126,7 @@ impl MultiProof { let entry = entry.get_mut(); entry.subtree.extend_from(storage.subtree); entry.branch_node_hash_masks.extend(storage.branch_node_hash_masks); + entry.branch_node_tree_masks.extend(storage.branch_node_tree_masks); } hash_map::Entry::Vacant(entry) => { entry.insert(storage); @@ -141,6 +145,8 @@ pub struct StorageMultiProof { pub subtree: ProofNodes, /// The hash masks of the branch nodes in the storage proof. 
pub branch_node_hash_masks: HashMap, + /// The tree masks of the branch nodes in the storage proof. + pub branch_node_tree_masks: HashMap, } impl StorageMultiProof { @@ -153,6 +159,7 @@ impl StorageMultiProof { Bytes::from([EMPTY_STRING_CODE]), )]), branch_node_hash_masks: HashMap::default(), + branch_node_tree_masks: HashMap::default(), } } @@ -398,6 +405,7 @@ mod tests { root, subtree: subtree1, branch_node_hash_masks: HashMap::default(), + branch_node_tree_masks: HashMap::default(), }, ); @@ -412,6 +420,7 @@ mod tests { root, subtree: subtree2, branch_node_hash_masks: HashMap::default(), + branch_node_tree_masks: HashMap::default(), }, ); diff --git a/crates/trie/parallel/src/proof.rs b/crates/trie/parallel/src/proof.rs index 31df5f2328794..f7716ee131612 100644 --- a/crates/trie/parallel/src/proof.rs +++ b/crates/trie/parallel/src/proof.rs @@ -44,8 +44,8 @@ pub struct ParallelProof { /// invalidate the in-memory nodes, not all keys from `state_sorted` might be present here, /// if we have cached nodes for them. pub prefix_sets: Arc, - /// Flag indicating whether to include branch node hash masks in the proof. - collect_branch_node_hash_masks: bool, + /// Flag indicating whether to include branch node masks in the proof. + collect_branch_node_masks: bool, /// Thread pool for local tasks thread_pool: Arc, /// Parallel state root metrics. @@ -67,16 +67,16 @@ impl ParallelProof { nodes_sorted, state_sorted, prefix_sets, - collect_branch_node_hash_masks: false, + collect_branch_node_masks: false, thread_pool, #[cfg(feature = "metrics")] metrics: ParallelStateRootMetrics::default(), } } - /// Set the flag indicating whether to include branch node hash masks in the proof. - pub const fn with_branch_node_hash_masks(mut self, branch_node_hash_masks: bool) -> Self { - self.collect_branch_node_hash_masks = branch_node_hash_masks; + /// Set the flag indicating whether to include branch node masks in the proof. + pub const fn with_branch_node_masks(mut self, branch_node_masks: bool) -> Self { + self.collect_branch_node_masks = branch_node_masks; self } } @@ -137,7 +137,7 @@ where let target_slots = targets.get(&hashed_address).cloned().unwrap_or_default(); let trie_nodes_sorted = self.nodes_sorted.clone(); let hashed_state_sorted = self.state_sorted.clone(); - let collect_masks = self.collect_branch_node_hash_masks; + let collect_masks = self.collect_branch_node_masks; let (tx, rx) = std::sync::mpsc::sync_channel(1); @@ -182,7 +182,7 @@ where hashed_address, ) .with_prefix_set_mut(PrefixSetMut::from(prefix_set.iter().cloned())) - .with_branch_node_hash_masks(collect_masks) + .with_branch_node_masks(collect_masks) .storage_multiproof(target_slots) .map_err(|e| ParallelStateRootError::Other(e.to_string())); @@ -233,7 +233,7 @@ where let retainer: ProofRetainer = targets.keys().map(Nibbles::unpack).collect(); let mut hash_builder = HashBuilder::default() .with_proof_retainer(retainer) - .with_updates(self.collect_branch_node_hash_masks); + .with_updates(self.collect_branch_node_masks); // Initialize all storage multiproofs as empty. // Storage multiproofs for non empty tries will be overwritten if necessary. 
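The hunk below replaces the single hash-mask collection with a combined extraction of both masks from the hash builder's updated branch nodes. The same two-pass shape in isolation — a minimal sketch, not part of the patch, with `split_masks` as a stand-in name and imports assumed from `reth_trie`:

    use alloy_primitives::map::HashMap;
    use reth_trie::{BranchNodeCompact, Nibbles, TrieMask};

    /// Borrow the map once for the hash masks, then consume it for the tree masks.
    fn split_masks(
        updated: HashMap<Nibbles, BranchNodeCompact>,
    ) -> (HashMap<Nibbles, TrieMask>, HashMap<Nibbles, TrieMask>) {
        let hash_masks = updated
            .iter()
            .map(|(path, node)| (path.clone(), node.hash_mask))
            .collect();
        let tree_masks = updated
            .into_iter()
            .map(|(path, node)| (path, node.tree_mask))
            .collect();
        (hash_masks, tree_masks)
    }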
@@ -301,18 +301,23 @@ where self.metrics.record_state_trie(tracker.finish()); let account_subtree = hash_builder.take_proof_nodes(); - let branch_node_hash_masks = if self.collect_branch_node_hash_masks { - hash_builder - .updated_branch_nodes - .unwrap_or_default() - .into_iter() - .map(|(path, node)| (path, node.hash_mask)) - .collect() + let (branch_node_hash_masks, branch_node_tree_masks) = if self.collect_branch_node_masks { + let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); + ( + updated_branch_nodes + .iter() + .map(|(path, node)| (path.clone(), node.hash_mask)) + .collect(), + updated_branch_nodes + .into_iter() + .map(|(path, node)| (path, node.tree_mask)) + .collect(), + ) } else { - HashMap::default() + (HashMap::default(), HashMap::default()) }; - Ok(MultiProof { account_subtree, branch_node_hash_masks, storages }) + Ok(MultiProof { account_subtree, branch_node_hash_masks, branch_node_tree_masks, storages }) } } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index b95cb62c7e652..505a326c0bf45 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -155,13 +155,14 @@ impl SparseStateTrie { self.provider_factory.account_node_provider(), root_node, None, + None, self.retain_updates, )?; // Reveal the remaining proof nodes. for (path, bytes) in proof { let node = TrieNode::decode(&mut &bytes[..])?; - trie.reveal_node(path, node, None)?; + trie.reveal_node(path, node, None, None)?; } // Mark leaf path as revealed. @@ -196,13 +197,14 @@ impl SparseStateTrie { self.provider_factory.storage_node_provider(account), root_node, None, + None, self.retain_updates, )?; // Reveal the remaining proof nodes. for (path, bytes) in proof { let node = TrieNode::decode(&mut &bytes[..])?; - trie.reveal_node(path, node, None)?; + trie.reveal_node(path, node, None, None)?; } // Mark leaf path as revealed. @@ -227,20 +229,24 @@ impl SparseStateTrie { self.provider_factory.account_node_provider(), root_node, multiproof.branch_node_hash_masks.get(&Nibbles::default()).copied(), + multiproof.branch_node_tree_masks.get(&Nibbles::default()).copied(), self.retain_updates, )?; // Reveal the remaining proof nodes. for (path, bytes) in account_nodes { let node = TrieNode::decode(&mut &bytes[..])?; - let hash_mask = if let TrieNode::Branch(_) = node { - multiproof.branch_node_hash_masks.get(&path).copied() + let (hash_mask, tree_mask) = if let TrieNode::Branch(_) = node { + ( + multiproof.branch_node_hash_masks.get(&path).copied(), + multiproof.branch_node_tree_masks.get(&path).copied(), + ) } else { - None + (None, None) }; - trace!(target: "trie::sparse", ?path, ?node, ?hash_mask, "Revealing account node"); - trie.reveal_node(path, node, hash_mask)?; + trace!(target: "trie::sparse", ?path, ?node, ?hash_mask, ?tree_mask, "Revealing account node"); + trie.reveal_node(path, node, hash_mask, tree_mask)?; } } @@ -254,20 +260,24 @@ impl SparseStateTrie { self.provider_factory.storage_node_provider(account), root_node, storage_subtree.branch_node_hash_masks.get(&Nibbles::default()).copied(), + storage_subtree.branch_node_tree_masks.get(&Nibbles::default()).copied(), self.retain_updates, )?; // Reveal the remaining proof nodes. 
for (path, bytes) in nodes { let node = TrieNode::decode(&mut &bytes[..])?; - let hash_mask = if let TrieNode::Branch(_) = node { - storage_subtree.branch_node_hash_masks.get(&path).copied() + let (hash_mask, tree_mask) = if let TrieNode::Branch(_) = node { + ( + storage_subtree.branch_node_hash_masks.get(&path).copied(), + storage_subtree.branch_node_tree_masks.get(&path).copied(), + ) } else { - None + (None, None) }; - trace!(target: "trie::sparse", ?account, ?path, ?node, ?hash_mask, "Revealing storage node"); - trie.reveal_node(path, node, hash_mask)?; + trace!(target: "trie::sparse", ?account, ?path, ?node, ?hash_mask, ?tree_mask, "Revealing storage node"); + trie.reveal_node(path, node, hash_mask, tree_mask)?; } } } @@ -348,6 +358,7 @@ impl SparseStateTrie { self.provider_factory.storage_node_provider(account), trie_node, None, + None, self.retain_updates, )?; } else { @@ -355,7 +366,7 @@ impl SparseStateTrie { storage_trie_entry .as_revealed_mut() .ok_or(SparseTrieErrorKind::Blind)? - .reveal_node(path, trie_node, None)?; + .reveal_node(path, trie_node, None, None)?; } } else if path.is_empty() { // Handle special state root node case. @@ -363,6 +374,7 @@ impl SparseStateTrie { self.provider_factory.account_node_provider(), trie_node, None, + None, self.retain_updates, )?; } else { @@ -370,7 +382,7 @@ impl SparseStateTrie { self.state .as_revealed_mut() .ok_or(SparseTrieErrorKind::Blind)? - .reveal_node(path, trie_node, None)?; + .reveal_node(path, trie_node, None, None)?; } } @@ -668,6 +680,7 @@ mod tests { Nibbles::from_nibbles([0x1]), TrieMask::new(0b00), )]), + branch_node_tree_masks: HashMap::default(), storages: HashMap::from_iter([ ( address_1, @@ -675,6 +688,7 @@ mod tests { root, subtree: storage_proof_nodes.clone(), branch_node_hash_masks: storage_branch_node_hash_masks.clone(), + branch_node_tree_masks: HashMap::default(), }, ), ( @@ -683,6 +697,7 @@ mod tests { root, subtree: storage_proof_nodes, branch_node_hash_masks: storage_branch_node_hash_masks, + branch_node_tree_masks: HashMap::default(), }, ), ]), diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 7ff0e40e1a21c..b7cba834567f9 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -60,9 +60,16 @@ impl SparseTrie { &mut self, root: TrieNode, hash_mask: Option, + tree_mask: Option, retain_updates: bool, ) -> SparseTrieResult<&mut RevealedSparseTrie> { - self.reveal_root_with_provider(Default::default(), root, hash_mask, retain_updates) + self.reveal_root_with_provider( + Default::default(), + root, + hash_mask, + tree_mask, + retain_updates, + ) } } @@ -100,6 +107,7 @@ impl

<P> SparseTrie<P> { provider: P, root: TrieNode, hash_mask: Option<TrieMask>, + tree_mask: Option<TrieMask>, retain_updates: bool, ) -> SparseTrieResult<&mut RevealedSparseTrie<P>> { if self.is_blind() { @@ -107,6 +115,7 @@ impl

<P> SparseTrie<P> { provider, root, hash_mask, + tree_mask, retain_updates, )?)) } @@ -163,6 +172,8 @@ pub struct RevealedSparseTrie<P = DefaultBlindedProvider> { nodes: HashMap<Nibbles, SparseNode>, /// All branch node hash masks. branch_node_hash_masks: HashMap<Nibbles, TrieMask>, + /// All branch node tree masks. + branch_node_tree_masks: HashMap<Nibbles, TrieMask>, /// All leaf values. values: HashMap<Nibbles, Vec<u8>>, /// Prefix set. @@ -178,6 +189,7 @@ impl

<P> fmt::Debug for RevealedSparseTrie<P> { f.debug_struct("RevealedSparseTrie") .field("nodes", &self.nodes) .field("branch_hash_masks", &self.branch_node_hash_masks) + .field("branch_tree_masks", &self.branch_node_tree_masks) .field("values", &self.values) .field("prefix_set", &self.prefix_set) .field("updates", &self.updates) @@ -192,6 +204,7 @@ impl Default for RevealedSparseTrie { provider: Default::default(), nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), branch_node_hash_masks: HashMap::default(), + branch_node_tree_masks: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), updates: None, @@ -205,19 +218,21 @@ impl RevealedSparseTrie { pub fn from_root( node: TrieNode, hash_mask: Option<TrieMask>, + tree_mask: Option<TrieMask>, retain_updates: bool, ) -> SparseTrieResult<Self> { let mut this = Self { provider: Default::default(), nodes: HashMap::default(), branch_node_hash_masks: HashMap::default(), + branch_node_tree_masks: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), rlp_buf: Vec::new(), updates: None, } .with_updates(retain_updates); - this.reveal_node(Nibbles::default(), node, hash_mask)?; + this.reveal_node(Nibbles::default(), node, hash_mask, tree_mask)?; Ok(this) } } @@ -228,19 +243,21 @@ impl

<P> RevealedSparseTrie<P> { provider: P, node: TrieNode, hash_mask: Option<TrieMask>, + tree_mask: Option<TrieMask>, retain_updates: bool, ) -> SparseTrieResult<Self> { let mut this = Self { provider, nodes: HashMap::default(), branch_node_hash_masks: HashMap::default(), + branch_node_tree_masks: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), rlp_buf: Vec::new(), updates: None, } .with_updates(retain_updates); - this.reveal_node(Nibbles::default(), node, hash_mask)?; + this.reveal_node(Nibbles::default(), node, hash_mask, tree_mask)?; Ok(this) } @@ -250,6 +267,7 @@ impl

<P> RevealedSparseTrie<P>
{ provider, nodes: self.nodes, branch_node_hash_masks: self.branch_node_hash_masks, + branch_node_tree_masks: self.branch_node_tree_masks, values: self.values, prefix_set: self.prefix_set, updates: self.updates, @@ -286,6 +304,7 @@ impl

<P> RevealedSparseTrie<P> { path: Nibbles, node: TrieNode, hash_mask: Option<TrieMask>, + tree_mask: Option<TrieMask>, ) -> SparseTrieResult<()> { // If the node is already revealed and it's not a hash node, do nothing. if self.nodes.get(&path).is_some_and(|node| !node.is_hash()) { @@ -295,6 +314,9 @@ impl

<P> RevealedSparseTrie<P>
{ if let Some(hash_mask) = hash_mask { self.branch_node_hash_masks.insert(path.clone(), hash_mask); } + if let Some(tree_mask) = tree_mask { + self.branch_node_tree_masks.insert(path.clone(), tree_mask); + } match node { TrieNode::EmptyRoot => { @@ -321,7 +343,10 @@ impl

<P> RevealedSparseTrie<P>
{ // Memoize the hash of a previously blinded node in a new branch // node. hash: Some(*hash), - store_in_db_trie: None, + store_in_db_trie: Some( + hash_mask.is_some_and(|mask| !mask.is_empty()) || + tree_mask.is_some_and(|mask| !mask.is_empty()), + ), }); } // Branch node already exists, or an extension node was placed where a @@ -433,7 +458,7 @@ impl

<P> RevealedSparseTrie<P>
{ return Ok(()) } - self.reveal_node(path, TrieNode::decode(&mut &child[..])?, None) + self.reveal_node(path, TrieNode::decode(&mut &child[..])?, None, None) } /// Traverse trie nodes down to the leaf node and collect all nodes along the path. @@ -627,22 +652,20 @@ impl

<P> RevealedSparseTrie<P>
{ let mut prefix_set_contains = |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)); - let (rlp_node, calculated, node_type) = match self.nodes.get_mut(&path).unwrap() { - SparseNode::Empty => { - (RlpNode::word_rlp(&EMPTY_ROOT_HASH), false, SparseNodeType::Empty) - } - SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), false, SparseNodeType::Hash), + let (rlp_node, node_type) = match self.nodes.get_mut(&path).unwrap() { + SparseNode::Empty => (RlpNode::word_rlp(&EMPTY_ROOT_HASH), SparseNodeType::Empty), + SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), SparseNodeType::Hash), SparseNode::Leaf { key, hash } => { let mut path = path.clone(); path.extend_from_slice_unchecked(key); if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - (RlpNode::word_rlp(&hash), false, SparseNodeType::Leaf) + (RlpNode::word_rlp(&hash), SparseNodeType::Leaf) } else { let value = self.values.get(&path).unwrap(); self.rlp_buf.clear(); let rlp_node = LeafNodeRef { key, value }.rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); - (rlp_node, true, SparseNodeType::Leaf) + (rlp_node, SparseNodeType::Leaf) } } SparseNode::Extension { key, hash } => { @@ -651,22 +674,20 @@ impl

<P> RevealedSparseTrie<P>
{ if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { ( RlpNode::word_rlp(&hash), - false, SparseNodeType::Extension { store_in_db_trie: true }, ) } else if buffers.rlp_node_stack.last().is_some_and(|e| e.0 == child_path) { - let (_, child, _, node_type) = buffers.rlp_node_stack.pop().unwrap(); + let (_, child, child_node_type) = buffers.rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); ( rlp_node, - true, SparseNodeType::Extension { // Inherit the `store_in_db_trie` flag from the child node, which is // always the branch node - store_in_db_trie: node_type.store_in_db_trie(), + store_in_db_trie: child_node_type.store_in_db_trie(), }, ) } else { @@ -682,7 +703,6 @@ impl

<P> RevealedSparseTrie<P>
{ buffers.rlp_node_stack.push(( path, RlpNode::word_rlp(&hash), - false, SparseNodeType::Branch { store_in_db_trie }, )); continue @@ -710,8 +730,7 @@ impl

<P> RevealedSparseTrie<P>
{ let mut hashes = Vec::new(); for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { if buffers.rlp_node_stack.last().is_some_and(|e| &e.0 == child_path) { - let (_, child, calculated, node_type) = - buffers.rlp_node_stack.pop().unwrap(); + let (_, child, child_node_type) = buffers.rlp_node_stack.pop().unwrap(); // Update the masks only if we need to retain trie updates if retain_updates { @@ -720,13 +739,16 @@ impl

<P> RevealedSparseTrie<P>
{ // Determine whether we need to set trie mask bit. let should_set_tree_mask_bit = + // A blinded node has the tree mask bit set + ( + child_node_type.is_hash() && + self.branch_node_tree_masks + .get(&path) + .is_some_and(|mask| mask.is_bit_set(last_child_nibble)) + ) || // A branch or an extension node explicitly set the // `store_in_db_trie` flag - node_type.store_in_db_trie() || - // Set the flag according to whether a child node was - // pre-calculated (`calculated = false`), meaning that it wasn't - // in the database - !calculated; + child_node_type.store_in_db_trie(); if should_set_tree_mask_bit { tree_mask.set_bit(last_child_nibble); } @@ -735,8 +757,8 @@ impl

<P> RevealedSparseTrie<P>
{ // is a blinded node that has its hash mask bit set according to the // database, set the hash mask bit and save the hash. let hash = child.as_hash().filter(|_| { - node_type.is_branch() || - (node_type.is_hash() && + child_node_type.is_branch() || + (child_node_type.is_hash() && self.branch_node_hash_masks .get(&path) .is_some_and(|mask| { @@ -806,14 +828,10 @@ impl

<P> RevealedSparseTrie<P>
{ }; *store_in_db_trie = Some(store_in_db_trie_value); - ( - rlp_node, - true, - SparseNodeType::Branch { store_in_db_trie: store_in_db_trie_value }, - ) + (rlp_node, SparseNodeType::Branch { store_in_db_trie: store_in_db_trie_value }) } }; - buffers.rlp_node_stack.push((path, rlp_node, calculated, node_type)); + buffers.rlp_node_stack.push((path, rlp_node, node_type)); } debug_assert_eq!(buffers.rlp_node_stack.len(), 1); @@ -894,7 +912,7 @@ impl RevealedSparseTrie

<P> { // remove or do nothing, so // we can safely ignore the hash mask here and // pass `None`. - self.reveal_node(current.clone(), decoded, None)?; + self.reveal_node(current.clone(), decoded, None, None)?; } } } @@ -1046,7 +1064,7 @@ impl RevealedSparseTrie<P>
{ // We'll never have to update the revealed branch node, only remove // or do nothing, so we can safely ignore the hash mask here and // pass `None`. - self.reveal_node(child_path.clone(), decoded, None)?; + self.reveal_node(child_path.clone(), decoded, None, None)?; } } @@ -1251,7 +1269,7 @@ struct RlpNodeBuffers { /// Stack of paths we need rlp nodes for and whether the path is in the prefix set. path_stack: Vec<(Nibbles, Option)>, /// Stack of rlp nodes - rlp_node_stack: Vec<(Nibbles, RlpNode, bool, SparseNodeType)>, + rlp_node_stack: Vec<(Nibbles, RlpNode, SparseNodeType)>, /// Reusable branch child path branch_child_buf: SmallVec<[Nibbles; 16]>, /// Reusable branch value stack @@ -1336,7 +1354,8 @@ mod tests { state: impl IntoIterator + Clone, destroyed_accounts: B256HashSet, proof_targets: impl IntoIterator, - ) -> (B256, TrieUpdates, ProofNodes, HashMap) { + ) -> (B256, TrieUpdates, ProofNodes, HashMap, HashMap) + { let mut account_rlp = Vec::new(); let mut hash_builder = HashBuilder::default() @@ -1383,12 +1402,19 @@ mod tests { .iter() .map(|(path, node)| (path.clone(), node.hash_mask)) .collect(); + let branch_node_tree_masks = hash_builder + .updated_branch_nodes + .clone() + .unwrap_or_default() + .iter() + .map(|(path, node)| (path.clone(), node.tree_mask)) + .collect(); let mut trie_updates = TrieUpdates::default(); let removed_keys = node_iter.walker.take_removed_keys(); trie_updates.finalize(hash_builder, removed_keys, destroyed_accounts); - (root, trie_updates, proof_nodes, branch_node_hash_masks) + (root, trie_updates, proof_nodes, branch_node_hash_masks, branch_node_tree_masks) } /// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal. @@ -1450,7 +1476,7 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder([(key.clone(), value())], Default::default(), [key.clone()]); let mut sparse = RevealedSparseTrie::default().with_updates(true); @@ -1475,7 +1501,7 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(value)), Default::default(), @@ -1504,7 +1530,7 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(value)), Default::default(), @@ -1541,7 +1567,7 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( paths.iter().sorted_unstable().cloned().zip(std::iter::repeat_with(value)), Default::default(), @@ -1579,7 +1605,7 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(|| old_value)), Default::default(), @@ -1597,7 +1623,7 @@ mod tests { assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); assert_eq_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); - let (hash_builder_root, hash_builder_updates, 
hash_builder_proof_nodes, _) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( paths.iter().cloned().zip(std::iter::repeat_with(|| new_value)), Default::default(), @@ -1871,7 +1897,7 @@ mod tests { )); let mut sparse = - RevealedSparseTrie::from_root(branch.clone(), Some(TrieMask::new(0b01)), false) + RevealedSparseTrie::from_root(branch.clone(), Some(TrieMask::new(0b01)), None, false) .unwrap(); // Reveal a branch node and one of its children @@ -1879,8 +1905,8 @@ mod tests { // Branch (Mask = 11) // ├── 0 -> Hash (Path = 0) // └── 1 -> Leaf (Path = 1) - sparse.reveal_node(Nibbles::default(), branch, Some(TrieMask::new(0b01))).unwrap(); - sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf), None).unwrap(); + sparse.reveal_node(Nibbles::default(), branch, Some(TrieMask::new(0b01)), None).unwrap(); + sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf), None, None).unwrap(); // Removing a blinded leaf should result in an error assert_matches!( @@ -1904,7 +1930,7 @@ mod tests { )); let mut sparse = - RevealedSparseTrie::from_root(branch.clone(), Some(TrieMask::new(0b01)), false) + RevealedSparseTrie::from_root(branch.clone(), Some(TrieMask::new(0b01)), None, false) .unwrap(); // Reveal a branch node and one of its children @@ -1912,8 +1938,8 @@ mod tests { // Branch (Mask = 11) // ├── 0 -> Hash (Path = 0) // └── 1 -> Leaf (Path = 1) - sparse.reveal_node(Nibbles::default(), branch, Some(TrieMask::new(0b01))).unwrap(); - sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf), None).unwrap(); + sparse.reveal_node(Nibbles::default(), branch, Some(TrieMask::new(0b01)), None).unwrap(); + sparse.reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf), None, None).unwrap(); // Removing a non-existent leaf should be a noop let sparse_old = sparse.clone(); @@ -1951,7 +1977,7 @@ mod tests { // Insert state updates into the hash builder and calculate the root state.extend(update); - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( state.clone(), Default::default(), @@ -1982,7 +2008,7 @@ mod tests { let sparse_root = updated_sparse.root(); let sparse_updates = updated_sparse.take_updates(); - let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( state.clone(), Default::default(), @@ -2063,24 +2089,29 @@ mod tests { }; // Generate the proof for the root node and initialize the sparse trie with it - let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( - [(key1(), value()), (key3(), value())], - Default::default(), - [Nibbles::default()], - ); + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key3(), value())], + Default::default(), + [Nibbles::default()], + ); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), branch_node_hash_masks.get(&Nibbles::default()).copied(), + branch_node_tree_masks.get(&Nibbles::default()).copied(), false, ) .unwrap(); // Generate the proof for the first key and reveal it in the sparse trie - let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) 
= run_hash_builder([(key1(), value()), (key3(), value())], Default::default(), [key1()]); for (path, node) in hash_builder_proof_nodes.nodes_sorted() { let hash_mask = branch_node_hash_masks.get(&path).copied(); - sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + sparse + .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask, tree_mask) + .unwrap(); } // Check that the branch node exists with only two nibbles set @@ -2099,11 +2130,14 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = run_hash_builder([(key1(), value()), (key3(), value())], Default::default(), [key3()]); for (path, node) in hash_builder_proof_nodes.nodes_sorted() { let hash_mask = branch_node_hash_masks.get(&path).copied(); - sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + sparse + .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask, tree_mask) + .unwrap(); } // Check that nothing changed in the branch node @@ -2114,7 +2148,7 @@ mod tests { // Generate the nodes for the full trie with all three key using the hash builder, and // compare them to the sparse trie - let (_, _, hash_builder_proof_nodes, _) = run_hash_builder( + let (_, _, hash_builder_proof_nodes, _, _) = run_hash_builder( [(key1(), value()), (key2(), value()), (key3(), value())], Default::default(), [key1(), key2(), key3()], @@ -2141,28 +2175,34 @@ mod tests { let value = || Account::default(); // Generate the proof for the root node and initialize the sparse trie with it - let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( - [(key1(), value()), (key2(), value()), (key3(), value())], - Default::default(), - [Nibbles::default()], - ); + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), + [Nibbles::default()], + ); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), branch_node_hash_masks.get(&Nibbles::default()).copied(), + branch_node_tree_masks.get(&Nibbles::default()).copied(), false, ) .unwrap(); // Generate the proof for the children of the root branch node and reveal it in the sparse // trie - let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( - [(key1(), value()), (key2(), value()), (key3(), value())], - Default::default(), - [key1(), Nibbles::from_nibbles_unchecked([0x01])], - ); + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), + [key1(), Nibbles::from_nibbles_unchecked([0x01])], + ); for (path, node) in hash_builder_proof_nodes.nodes_sorted() { let hash_mask = branch_node_hash_masks.get(&path).copied(); - sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + sparse + .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask, tree_mask) + .unwrap(); } // Check that the branch node exists @@ 
-2181,14 +2221,18 @@ mod tests { ); // Generate the proof for the third key and reveal it in the sparse trie - let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( - [(key1(), value()), (key2(), value()), (key3(), value())], - Default::default(), - [key2()], - ); + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + Default::default(), + [key2()], + ); for (path, node) in hash_builder_proof_nodes.nodes_sorted() { let hash_mask = branch_node_hash_masks.get(&path).copied(); - sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + sparse + .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask, tree_mask) + .unwrap(); } // Check that nothing changed in the extension node @@ -2219,14 +2263,16 @@ mod tests { }; // Generate the proof for the root node and initialize the sparse trie with it - let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = run_hash_builder( - [(key1(), value()), (key2(), value())], - Default::default(), - [Nibbles::default()], - ); + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value())], + Default::default(), + [Nibbles::default()], + ); let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), branch_node_hash_masks.get(&Nibbles::default()).copied(), + branch_node_tree_masks.get(&Nibbles::default()).copied(), false, ) .unwrap(); @@ -2247,11 +2293,14 @@ mod tests { ); // Generate the proof for the first key and reveal it in the sparse trie - let (_, _, hash_builder_proof_nodes, branch_node_hash_masks) = + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = run_hash_builder([(key1(), value()), (key2(), value())], Default::default(), [key1()]); for (path, node) in hash_builder_proof_nodes.nodes_sorted() { let hash_mask = branch_node_hash_masks.get(&path).copied(); - sparse.reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask).unwrap(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + sparse + .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask, tree_mask) + .unwrap(); } // Check that the branch node wasn't overwritten by the extension node in the proof @@ -2345,7 +2394,7 @@ mod tests { account_rlp }; - let (hash_builder_root, hash_builder_updates, _, _) = run_hash_builder( + let (hash_builder_root, hash_builder_updates, _, _, _) = run_hash_builder( [(key1(), value()), (key2(), value())], Default::default(), [Nibbles::default()], diff --git a/crates/trie/trie/src/proof/mod.rs b/crates/trie/trie/src/proof/mod.rs index 165c27c82e2e2..5c632b5cecaa5 100644 --- a/crates/trie/trie/src/proof/mod.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -33,8 +33,8 @@ pub struct Proof { hashed_cursor_factory: H, /// A set of prefix sets that have changes. prefix_sets: TriePrefixSetsMut, - /// Flag indicating whether to include branch node hash masks in the proof. - collect_branch_node_hash_masks: bool, + /// Flag indicating whether to include branch node masks in the proof. 
+ collect_branch_node_masks: bool, } impl Proof { @@ -44,7 +44,7 @@ impl Proof { trie_cursor_factory: t, hashed_cursor_factory: h, prefix_sets: TriePrefixSetsMut::default(), - collect_branch_node_hash_masks: false, + collect_branch_node_masks: false, } } @@ -54,7 +54,7 @@ impl Proof { trie_cursor_factory, hashed_cursor_factory: self.hashed_cursor_factory, prefix_sets: self.prefix_sets, - collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, + collect_branch_node_masks: self.collect_branch_node_masks, } } @@ -64,7 +64,7 @@ impl Proof { trie_cursor_factory: self.trie_cursor_factory, hashed_cursor_factory, prefix_sets: self.prefix_sets, - collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, + collect_branch_node_masks: self.collect_branch_node_masks, } } @@ -74,9 +74,9 @@ impl Proof { self } - /// Set the flag indicating whether to include branch node hash masks in the proof. - pub const fn with_branch_node_hash_masks(mut self, branch_node_hash_masks: bool) -> Self { - self.collect_branch_node_hash_masks = branch_node_hash_masks; + /// Set the flag indicating whether to include branch node masks in the proof. + pub const fn with_branch_node_masks(mut self, branch_node_masks: bool) -> Self { + self.collect_branch_node_masks = branch_node_masks; self } } @@ -117,7 +117,7 @@ where let retainer = targets.keys().map(Nibbles::unpack).collect(); let mut hash_builder = HashBuilder::default() .with_proof_retainer(retainer) - .with_updates(self.collect_branch_node_hash_masks); + .with_updates(self.collect_branch_node_masks); // Initialize all storage multiproofs as empty. // Storage multiproofs for non empty tries will be overwritten if necessary. @@ -144,7 +144,7 @@ where hashed_address, ) .with_prefix_set_mut(storage_prefix_set) - .with_branch_node_hash_masks(self.collect_branch_node_hash_masks) + .with_branch_node_masks(self.collect_branch_node_masks) .storage_multiproof(proof_targets.unwrap_or_default())?; // Encode account @@ -164,18 +164,23 @@ where } let _ = hash_builder.root(); let account_subtree = hash_builder.take_proof_nodes(); - let branch_node_hash_masks = if self.collect_branch_node_hash_masks { - hash_builder - .updated_branch_nodes - .unwrap_or_default() - .into_iter() - .map(|(path, node)| (path, node.hash_mask)) - .collect() + let (branch_node_hash_masks, branch_node_tree_masks) = if self.collect_branch_node_masks { + let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); + ( + updated_branch_nodes + .iter() + .map(|(path, node)| (path.clone(), node.hash_mask)) + .collect(), + updated_branch_nodes + .into_iter() + .map(|(path, node)| (path, node.tree_mask)) + .collect(), + ) } else { - HashMap::default() + (HashMap::default(), HashMap::default()) }; - Ok(MultiProof { account_subtree, branch_node_hash_masks, storages }) + Ok(MultiProof { account_subtree, branch_node_hash_masks, branch_node_tree_masks, storages }) } } @@ -190,8 +195,8 @@ pub struct StorageProof { hashed_address: B256, /// The set of storage slot prefixes that have changed. prefix_set: PrefixSetMut, - /// Flag indicating whether to include branch node hash masks in the proof. - collect_branch_node_hash_masks: bool, + /// Flag indicating whether to include branch node masks in the proof. 
+ collect_branch_node_masks: bool, } impl StorageProof { @@ -207,7 +212,7 @@ impl StorageProof { hashed_cursor_factory: h, hashed_address, prefix_set: PrefixSetMut::default(), - collect_branch_node_hash_masks: false, + collect_branch_node_masks: false, } } @@ -218,7 +223,7 @@ impl StorageProof { hashed_cursor_factory: self.hashed_cursor_factory, hashed_address: self.hashed_address, prefix_set: self.prefix_set, - collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, + collect_branch_node_masks: self.collect_branch_node_masks, } } @@ -229,7 +234,7 @@ impl StorageProof { hashed_cursor_factory, hashed_address: self.hashed_address, prefix_set: self.prefix_set, - collect_branch_node_hash_masks: self.collect_branch_node_hash_masks, + collect_branch_node_masks: self.collect_branch_node_masks, } } @@ -239,9 +244,9 @@ impl StorageProof { self } - /// Set the flag indicating whether to include branch node hash masks in the proof. - pub const fn with_branch_node_hash_masks(mut self, branch_node_hash_masks: bool) -> Self { - self.collect_branch_node_hash_masks = branch_node_hash_masks; + /// Set the flag indicating whether to include branch node masks in the proof. + pub const fn with_branch_node_masks(mut self, branch_node_masks: bool) -> Self { + self.collect_branch_node_masks = branch_node_masks; self } } @@ -282,7 +287,7 @@ where let retainer = ProofRetainer::from_iter(target_nibbles); let mut hash_builder = HashBuilder::default() .with_proof_retainer(retainer) - .with_updates(self.collect_branch_node_hash_masks); + .with_updates(self.collect_branch_node_masks); let mut storage_node_iter = TrieNodeIter::new(walker, hashed_storage_cursor); while let Some(node) = storage_node_iter.try_next()? { match node { @@ -300,17 +305,22 @@ where let root = hash_builder.root(); let subtree = hash_builder.take_proof_nodes(); - let branch_node_hash_masks = if self.collect_branch_node_hash_masks { - hash_builder - .updated_branch_nodes - .unwrap_or_default() - .into_iter() - .map(|(path, node)| (path, node.hash_mask)) - .collect() + let (branch_node_hash_masks, branch_node_tree_masks) = if self.collect_branch_node_masks { + let updated_branch_nodes = hash_builder.updated_branch_nodes.unwrap_or_default(); + ( + updated_branch_nodes + .iter() + .map(|(path, node)| (path.clone(), node.hash_mask)) + .collect(), + updated_branch_nodes + .into_iter() + .map(|(path, node)| (path, node.tree_mask)) + .collect(), + ) } else { - HashMap::default() + (HashMap::default(), HashMap::default()) }; - Ok(StorageMultiProof { root, subtree, branch_node_hash_masks }) + Ok(StorageMultiProof { root, subtree, branch_node_hash_masks, branch_node_tree_masks }) } } From c601712147e8e03e227e332c51f9b2454946fe35 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 10 Jan 2025 12:30:50 +0000 Subject: [PATCH 048/113] feat(tree): --engine.state-root-task-compare-updates (#13763) --- bin/reth/src/main.rs | 9 ++++++++- book/cli/reth/node.md | 3 +++ crates/engine/tree/src/tree/config.rs | 22 ++++++++++++++++++++++ crates/engine/tree/src/tree/mod.rs | 9 +++++++-- 4 files changed, 40 insertions(+), 3 deletions(-) diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs index 5daaa93ee3bf1..b263f03a3f794 100644 --- a/bin/reth/src/main.rs +++ b/bin/reth/src/main.rs @@ -43,6 +43,11 @@ pub struct EngineArgs { /// Enable state root task #[arg(long = "engine.state-root-task", conflicts_with = "legacy")] pub state_root_task_enabled: bool, + + /// Enable comparing trie updates from the state 
root task to the trie updates from the regular + /// state root calculation. + #[arg(long = "engine.state-root-task-compare-updates", conflicts_with = "legacy")] + pub state_root_task_compare_updates: bool, } impl Default for EngineArgs { @@ -53,6 +58,7 @@ impl Default for EngineArgs { persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, state_root_task_enabled: false, + state_root_task_compare_updates: false, } } } @@ -77,7 +83,8 @@ fn main() { let engine_tree_config = TreeConfig::default() .with_persistence_threshold(engine_args.persistence_threshold) .with_memory_block_buffer_target(engine_args.memory_block_buffer_target) - .with_state_root_task(engine_args.state_root_task_enabled); + .with_state_root_task(engine_args.state_root_task_enabled) + .with_always_compare_trie_updates(engine_args.state_root_task_compare_updates); let handle = builder .with_types_and_provider::>() .with_components(EthereumNode::components()) diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index c07585c447256..66a04a1dbf898 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -703,6 +703,9 @@ Engine: --engine.state-root-task Enable state root task + --engine.state-root-task-compare-updates + Enable comparing trie updates from the state root task to the trie updates from the regular state root calculation + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/crates/engine/tree/src/tree/config.rs b/crates/engine/tree/src/tree/config.rs index c0c68799aee0a..cec8bb4e8f063 100644 --- a/crates/engine/tree/src/tree/config.rs +++ b/crates/engine/tree/src/tree/config.rs @@ -43,6 +43,9 @@ pub struct TreeConfig { max_execute_block_batch_size: usize, /// Whether to use the new state root task calculation method instead of parallel calculation use_state_root_task: bool, + /// Whether to always compare trie updates from the state root task to the trie updates from + /// the regular state root calculation. + always_compare_trie_updates: bool, } impl Default for TreeConfig { @@ -54,6 +57,7 @@ impl Default for TreeConfig { max_invalid_header_cache_length: DEFAULT_MAX_INVALID_HEADER_CACHE_LENGTH, max_execute_block_batch_size: DEFAULT_MAX_EXECUTE_BLOCK_BATCH_SIZE, use_state_root_task: false, + always_compare_trie_updates: false, } } } @@ -67,6 +71,7 @@ impl TreeConfig { max_invalid_header_cache_length: u32, max_execute_block_batch_size: usize, use_state_root_task: bool, + always_compare_trie_updates: bool, ) -> Self { Self { persistence_threshold, @@ -75,6 +80,7 @@ impl TreeConfig { max_invalid_header_cache_length, max_execute_block_batch_size, use_state_root_task, + always_compare_trie_updates, } } @@ -108,6 +114,12 @@ impl TreeConfig { self.use_state_root_task } + /// Returns whether to always compare trie updates from the state root task to the trie updates + /// from the regular state root calculation. + pub const fn always_compare_trie_updates(&self) -> bool { + self.always_compare_trie_updates + } + /// Setter for persistence threshold. pub const fn with_persistence_threshold(mut self, persistence_threshold: u64) -> Self { self.persistence_threshold = persistence_threshold; @@ -152,4 +164,14 @@ impl TreeConfig { self.use_state_root_task = use_state_root_task; self } + + /// Setter for whether to always compare trie updates from the state root task to the trie + /// updates from the regular state root calculation. 
+ pub const fn with_always_compare_trie_updates( + mut self, + always_compare_trie_updates: bool, + ) -> Self { + self.always_compare_trie_updates = always_compare_trie_updates; + self + } } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 429dda7283cb3..70dd46fb4bdc0 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -2372,8 +2372,13 @@ where "Task state root finished" ); - if task_state_root != block.header().state_root() { - debug!(target: "engine::tree", "Task state root does not match block state root"); + if task_state_root != block.header().state_root() || + self.config.always_compare_trie_updates() + { + if task_state_root != block.header().state_root() { + debug!(target: "engine::tree", "Task state root does not match block state root"); + } + let (regular_root, regular_updates) = state_provider.state_root_with_updates(hashed_state.clone())?; From 1bf8d504616752dc4cc70071bc240c38b7c843e5 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 10 Jan 2025 15:56:20 +0000 Subject: [PATCH 049/113] feat(tree): when comparing trie updates, check the database (#13765) --- crates/engine/tree/Cargo.toml | 4 +- crates/engine/tree/src/tree/mod.rs | 8 +- crates/engine/tree/src/tree/trie_updates.rs | 179 ++++++++++++++------ 3 files changed, 138 insertions(+), 53 deletions(-) diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 822780657d8f1..76ce5a7ac5bbe 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -15,6 +15,7 @@ workspace = true reth-chain-state.workspace = true reth-chainspec = { workspace = true, optional = true } reth-consensus.workspace = true +reth-db.workspace = true reth-engine-primitives.workspace = true reth-errors.workspace = true reth-evm.workspace = true @@ -22,8 +23,8 @@ reth-network-p2p.workspace = true reth-payload-builder-primitives.workspace = true reth-payload-builder.workspace = true reth-payload-primitives.workspace = true -reth-primitives.workspace = true reth-primitives-traits.workspace = true +reth-primitives.workspace = true reth-provider.workspace = true reth-prune.workspace = true reth-revm.workspace = true @@ -71,7 +72,6 @@ reth-tracing = { workspace = true, optional = true } # reth reth-chain-state = { workspace = true, features = ["test-utils"] } reth-chainspec.workspace = true -reth-db = { workspace = true, features = ["test-utils"] } reth-ethereum-engine-primitives.workspace = true reth-ethereum-consensus.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 70dd46fb4bdc0..ea92c0fb38980 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -2383,7 +2383,13 @@ where state_provider.state_root_with_updates(hashed_state.clone())?; if regular_root == block.header().state_root() { - compare_trie_updates(&task_trie_updates, ®ular_updates); + let provider = self.provider.database_provider_ro()?; + compare_trie_updates( + provider.tx_ref(), + task_trie_updates.clone(), + regular_updates, + ) + .map_err(ProviderError::from)?; } else { debug!(target: "engine::tree", "Regular state root does not match block state root"); } diff --git a/crates/engine/tree/src/tree/trie_updates.rs b/crates/engine/tree/src/tree/trie_updates.rs index 576f0c7426476..9ec29f9c8b5ed 100644 --- a/crates/engine/tree/src/tree/trie_updates.rs +++ 
b/crates/engine/tree/src/tree/trie_updates.rs @@ -2,16 +2,26 @@ use alloy_primitives::{ map::{HashMap, HashSet}, B256, }; +use reth_db::{transaction::DbTx, DatabaseError}; use reth_trie::{ + trie_cursor::{TrieCursor, TrieCursorFactory}, updates::{StorageTrieUpdates, TrieUpdates}, BranchNodeCompact, Nibbles, }; +use reth_trie_db::DatabaseTrieCursorFactory; use tracing::debug; +#[derive(Debug)] +struct EntryDiff { + task: T, + regular: T, + database: T, +} + #[derive(Debug, Default)] struct TrieUpdatesDiff { - account_nodes: HashMap, Option)>, - removed_nodes: HashMap, + account_nodes: HashMap>>, + removed_nodes: HashMap>, storage_tries: HashMap, } @@ -24,12 +34,12 @@ impl TrieUpdatesDiff { pub(super) fn log_differences(mut self) { if self.has_differences() { - for (path, (task, regular)) in &mut self.account_nodes { - debug!(target: "engine::tree", ?path, ?task, ?regular, "Difference in account trie updates"); + for (path, EntryDiff { task, regular, database }) in &mut self.account_nodes { + debug!(target: "engine::tree", ?path, ?task, ?regular, ?database, "Difference in account trie updates"); } - for (path, (task, regular)) in &self.removed_nodes { - debug!(target: "engine::tree", ?path, ?task, ?regular, "Difference in removed account trie nodes"); + for (path, EntryDiff { task, regular, database }) in &self.removed_nodes { + debug!(target: "engine::tree", ?path, ?task, ?regular, ?database, "Difference in removed account trie nodes"); } for (address, storage_diff) in self.storage_tries { @@ -55,16 +65,17 @@ impl StorageTrieDiffEntry { debug!(target: "engine::tree", ?address, ?task, ?regular, "Difference in storage trie existence"); } Self::Value(mut storage_diff) => { - if let Some((task, regular)) = storage_diff.is_deleted { - debug!(target: "engine::tree", ?address, ?task, ?regular, "Difference in storage trie deletion"); + if let Some(EntryDiff { task, regular, database }) = storage_diff.is_deleted { + debug!(target: "engine::tree", ?address, ?task, ?regular, ?database, "Difference in storage trie deletion"); } - for (path, (task, regular)) in &mut storage_diff.storage_nodes { - debug!(target: "engine::tree", ?address, ?path, ?task, ?regular, "Difference in storage trie updates"); + for (path, EntryDiff { task, regular, database }) in &mut storage_diff.storage_nodes + { + debug!(target: "engine::tree", ?address, ?path, ?task, ?regular, ?database, "Difference in storage trie updates"); } - for (path, (task, regular)) in &storage_diff.removed_nodes { - debug!(target: "engine::tree", ?address, ?path, ?task, ?regular, "Difference in removed account trie nodes"); + for (path, EntryDiff { task, regular, database }) in &storage_diff.removed_nodes { + debug!(target: "engine::tree", ?address, ?path, ?task, ?regular, ?database, "Difference in removed account trie nodes"); } } } @@ -73,9 +84,9 @@ impl StorageTrieDiffEntry { #[derive(Debug, Default)] struct StorageTrieUpdatesDiff { - is_deleted: Option<(bool, bool)>, - storage_nodes: HashMap, Option)>, - removed_nodes: HashMap, + is_deleted: Option>, + storage_nodes: HashMap>>, + removed_nodes: HashMap>, } impl StorageTrieUpdatesDiff { @@ -88,10 +99,20 @@ impl StorageTrieUpdatesDiff { /// Compares the trie updates from state root task and regular state root calculation, and logs /// the differences if there's any. 
-pub(super) fn compare_trie_updates(task: &TrieUpdates, regular: &TrieUpdates) { +pub(super) fn compare_trie_updates( + tx: &impl DbTx, + task: TrieUpdates, + regular: TrieUpdates, +) -> Result<(), DatabaseError> { + let trie_cursor_factory = DatabaseTrieCursorFactory::new(tx); + + let mut task = adjust_trie_updates(task); + let mut regular = adjust_trie_updates(regular); + let mut diff = TrieUpdatesDiff::default(); // compare account nodes + let mut account_trie_cursor = trie_cursor_factory.account_trie_cursor()?; for key in task .account_nodes .keys() @@ -99,10 +120,11 @@ pub(super) fn compare_trie_updates(task: &TrieUpdates, regular: &TrieUpdates) { .cloned() .collect::>() { - let (left, right) = (task.account_nodes.get(&key), regular.account_nodes.get(&key)); + let (task, regular) = (task.account_nodes.remove(&key), regular.account_nodes.remove(&key)); + let database = account_trie_cursor.seek_exact(key.clone())?.map(|x| x.1); - if !branch_nodes_equal(left, right) { - diff.account_nodes.insert(key, (left.cloned(), right.cloned())); + if !branch_nodes_equal(task.as_ref(), regular.as_ref(), database.as_ref())? { + diff.account_nodes.insert(key, EntryDiff { task, regular, database }); } } @@ -114,10 +136,11 @@ pub(super) fn compare_trie_updates(task: &TrieUpdates, regular: &TrieUpdates) { .cloned() .collect::>() { - let (left, right) = + let (task, regular) = (task.removed_nodes.contains(&key), regular.removed_nodes.contains(&key)); - if left != right { - diff.removed_nodes.insert(key, (left, right)); + let database = account_trie_cursor.seek_exact(key.clone())?.is_none(); + if task != regular { + diff.removed_nodes.insert(key, EntryDiff { task, regular, database }); } } @@ -129,31 +152,43 @@ pub(super) fn compare_trie_updates(task: &TrieUpdates, regular: &TrieUpdates) { .copied() .collect::>() { - let (left, right) = (task.storage_tries.get(&key), regular.storage_tries.get(&key)); - if left != right { - if let Some((left, right)) = left.zip(right) { - let storage_diff = compare_storage_trie_updates(left, right); + let (mut task, mut regular) = + (task.storage_tries.remove(&key), regular.storage_tries.remove(&key)); + if task != regular { + let mut storage_trie_cursor = trie_cursor_factory.storage_trie_cursor(key)?; + if let Some((task, regular)) = task.as_mut().zip(regular.as_mut()) { + let storage_diff = + compare_storage_trie_updates(&mut storage_trie_cursor, task, regular)?; if storage_diff.has_differences() { diff.storage_tries.insert(key, StorageTrieDiffEntry::Value(storage_diff)); } } else { - diff.storage_tries - .insert(key, StorageTrieDiffEntry::Existence(left.is_some(), right.is_some())); + diff.storage_tries.insert( + key, + StorageTrieDiffEntry::Existence(task.is_some(), regular.is_some()), + ); } } } // log differences diff.log_differences(); + + Ok(()) } fn compare_storage_trie_updates( - task: &StorageTrieUpdates, - regular: &StorageTrieUpdates, -) -> StorageTrieUpdatesDiff { + trie_cursor: &mut impl TrieCursor, + task: &mut StorageTrieUpdates, + regular: &mut StorageTrieUpdates, +) -> Result { + let database_deleted = trie_cursor.next()?.is_none(); let mut diff = StorageTrieUpdatesDiff { - is_deleted: (task.is_deleted != regular.is_deleted) - .then_some((task.is_deleted, regular.is_deleted)), + is_deleted: (task.is_deleted != regular.is_deleted).then_some(EntryDiff { + task: task.is_deleted, + regular: regular.is_deleted, + database: database_deleted, + }), ..Default::default() }; @@ -165,9 +200,10 @@ fn compare_storage_trie_updates( .cloned() .collect::>() { - let (left, 
right) = (task.storage_nodes.get(&key), regular.storage_nodes.get(&key)); - if !branch_nodes_equal(left, right) { - diff.storage_nodes.insert(key, (left.cloned(), right.cloned())); + let (task, regular) = (task.storage_nodes.remove(&key), regular.storage_nodes.remove(&key)); + let database = trie_cursor.seek_exact(key.clone())?.map(|x| x.1); + if !branch_nodes_equal(task.as_ref(), regular.as_ref(), database.as_ref())? { + diff.storage_nodes.insert(key, EntryDiff { task, regular, database }); } } @@ -179,30 +215,73 @@ fn compare_storage_trie_updates( .cloned() .collect::>() { - let (left, right) = + let (task, regular) = (task.removed_nodes.contains(&key), regular.removed_nodes.contains(&key)); - if left != right { - diff.removed_nodes.insert(key, (left, right)); + let database = trie_cursor.seek_exact(key.clone())?.map(|x| x.1).is_none(); + if task != regular { + diff.removed_nodes.insert(key, EntryDiff { task, regular, database }); } } - diff + Ok(diff) +} + +/// Filters the removed nodes of both account trie updates and storage trie updates, so that they +/// don't include those nodes that were also updated. +fn adjust_trie_updates(trie_updates: TrieUpdates) -> TrieUpdates { + TrieUpdates { + removed_nodes: trie_updates + .removed_nodes + .into_iter() + .filter(|key| !trie_updates.account_nodes.contains_key(key)) + .collect(), + storage_tries: trie_updates + .storage_tries + .into_iter() + .map(|(address, updates)| { + ( + address, + StorageTrieUpdates { + removed_nodes: updates + .removed_nodes + .into_iter() + .filter(|key| !updates.storage_nodes.contains_key(key)) + .collect(), + ..updates + }, + ) + }) + .collect(), + ..trie_updates + } } /// Compares the branch nodes from state root task and regular state root calculation. /// +/// If one of the branch nodes is [`None`], it means it's not updated and the other is compared to +/// the branch node from the database. +/// /// Returns `true` if they are equal. fn branch_nodes_equal( task: Option<&BranchNodeCompact>, regular: Option<&BranchNodeCompact>, -) -> bool { - if let (Some(task), Some(regular)) = (task.as_ref(), regular.as_ref()) { - task.state_mask == regular.state_mask && - task.tree_mask == regular.tree_mask && - task.hash_mask == regular.hash_mask && - task.hashes == regular.hashes && - task.root_hash == regular.root_hash - } else { - task == regular - } + database: Option<&BranchNodeCompact>, +) -> Result { + Ok(match (task, regular) { + (Some(task), Some(regular)) => { + task.state_mask == regular.state_mask && + task.tree_mask == regular.tree_mask && + task.hash_mask == regular.hash_mask && + task.hashes == regular.hashes && + task.root_hash == regular.root_hash + } + (None, None) => true, + _ => { + if task.is_some() { + task == database + } else { + regular == database + } + } + }) } From ca30702f85768d75722260c37907f3ceec35c577 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 11 Jan 2025 00:04:37 +0100 Subject: [PATCH 050/113] test: add json genesis test (#13770) --- crates/optimism/chainspec/src/lib.rs | 64 ++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index e8f8a084e3c2a..2735b77dd0b6a 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -190,6 +190,11 @@ pub struct OpChainSpec { } impl OpChainSpec { + /// Converts the given [`Genesis`] into a [`OpChainSpec`]. 
+ pub fn from_genesis(genesis: Genesis) -> Self { + genesis.into() + } + /// Extracts the Holocene 1599 parameters from the encoded extra data from the parent header. /// /// Caution: Caller must ensure that holocene is active in the parent header. @@ -1112,4 +1117,63 @@ mod tests { let base_fee = op_chain_spec.next_block_base_fee(&parent, 1735315546).unwrap(); assert_eq!(base_fee, U256::from(507)); } + + #[test] + fn json_genesis() { + let geth_genesis = r#" +{ + "config": { + "chainId": 1301, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "mergeNetsplitBlock": 0, + "shanghaiTime": 0, + "cancunTime": 0, + "bedrockBlock": 0, + "regolithTime": 0, + "canyonTime": 0, + "ecotoneTime": 0, + "fjordTime": 0, + "graniteTime": 0, + "holoceneTime": 1732633200, + "terminalTotalDifficulty": 0, + "terminalTotalDifficultyPassed": true, + "optimism": { + "eip1559Elasticity": 6, + "eip1559Denominator": 50, + "eip1559DenominatorCanyon": 250 + } + }, + "nonce": "0x0", + "timestamp": "0x66edad4c", + "extraData": "0x424544524f434b", + "gasLimit": "0x1c9c380", + "difficulty": "0x0", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x4200000000000000000000000000000000000011", + "alloc": {}, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas": "0x3b9aca00", + "excessBlobGas": "0x0", + "blobGasUsed": "0x0" +} + "#; + + let genesis: Genesis = serde_json::from_str(geth_genesis).unwrap(); + let chainspec = OpChainSpec::from_genesis(genesis); + assert!(chainspec.is_holocene_active_at_timestamp(1732633200)); + } } From 5a23708eb02955c256dd6b2528778a26c50dbf8a Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Sat, 11 Jan 2025 12:15:06 +0100 Subject: [PATCH 051/113] chore: make clippy happy (#13772) --- crates/chain-state/src/notifications.rs | 2 +- crates/chainspec/src/spec.rs | 2 +- crates/e2e-test-utils/src/network.rs | 2 +- crates/engine/tree/src/chain.rs | 1 - crates/ethereum-forks/src/display.rs | 2 +- crates/net/network/src/session/active.rs | 1 - crates/net/network/src/test_utils/testnet.rs | 22 +++++++------------ crates/net/network/src/transactions/mod.rs | 8 +++---- crates/net/p2p/src/test_utils/headers.rs | 1 - .../rpc-eth-api/src/helpers/transaction.rs | 2 +- crates/rpc/rpc-eth-types/src/fee_history.rs | 2 +- crates/rpc/rpc-testing-util/src/debug.rs | 4 ++-- crates/rpc/rpc-testing-util/src/trace.rs | 4 ++-- crates/storage/libmdbx-rs/src/environment.rs | 2 +- .../src/providers/blockchain_provider.rs | 2 +- .../provider/src/providers/consistent.rs | 2 +- crates/tokio-util/src/event_stream.rs | 1 - crates/transaction-pool/src/pool/best.rs | 1 - crates/trie/sparse/src/trie.rs | 2 +- 19 files changed, 26 insertions(+), 37 deletions(-) diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index ab2b88cba1074..808b6b42f8923 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -204,7 +204,7 @@ impl Stream for ForkChoiceStream { loop { match ready!(self.as_mut().project().st.poll_next(cx)) { Some(Some(notification)) => return Poll::Ready(Some(notification)), - Some(None) => continue, + Some(None) => {} None => return Poll::Ready(None), } } 
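The clippy sweep in this patch is mechanical: a `continue` that is already the last thing a loop iteration does is a no-op (clippy's `needless_continue`), so trailing `_ => continue` arms become `_ => {}`, and matches with a single interesting arm collapse into `if let`. A small sketch of the shape being rewritten, using a hypothetical `Event` enum rather than the types from the diff:

    enum Event {
        Ready(u32),
        Ignored,
    }

    fn first_ready(events: Vec<Event>) -> Option<u32> {
        for ev in events {
            // Before: match ev { Event::Ready(n) => return Some(n), _ => continue }
            // The `continue` is the final expression of the iteration, so it
            // changes nothing; `if let` expresses the same control flow directly.
            if let Event::Ready(n) = ev {
                return Some(n);
            }
        }
        None
    }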
diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index c49082e7ce993..3816175c46cb8 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -611,7 +611,7 @@ impl ChainSpec { return Some(block_num); } } - ForkCondition::Block(_) | ForkCondition::Never => continue, + ForkCondition::Block(_) | ForkCondition::Never => {} } } } diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index 8d8ea68aa93a8..dd703b312e1d0 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ -49,7 +49,7 @@ where info!("Session established with peer: {:?}", peer_id); return Some(peer_id) } - _ => continue, + _ => {} } } None diff --git a/crates/engine/tree/src/chain.rs b/crates/engine/tree/src/chain.rs index 9523e67cade55..cc658bf2fa15a 100644 --- a/crates/engine/tree/src/chain.rs +++ b/crates/engine/tree/src/chain.rs @@ -117,7 +117,6 @@ where HandlerEvent::BackfillAction(action) => { // forward action to backfill_sync this.backfill_sync.on_action(action); - continue 'outer } HandlerEvent::Event(ev) => { // bubble up the event diff --git a/crates/ethereum-forks/src/display.rs b/crates/ethereum-forks/src/display.rs index f5c6f47a2ea3b..99e753cbb8126 100644 --- a/crates/ethereum-forks/src/display.rs +++ b/crates/ethereum-forks/src/display.rs @@ -160,7 +160,7 @@ impl DisplayHardforks { ForkCondition::Timestamp(_) => { post_merge.push(display_fork); } - ForkCondition::Never => continue, + ForkCondition::Never => {} } } diff --git a/crates/net/network/src/session/active.rs b/crates/net/network/src/session/active.rs index 042cca69f9dd9..475ede1baf697 100644 --- a/crates/net/network/src/session/active.rs +++ b/crates/net/network/src/session/active.rs @@ -622,7 +622,6 @@ impl Future for ActiveSession { OnIncomingMessageOutcome::NoCapacity(msg) => { // failed to send due to lack of capacity this.pending_message_to_session = Some(msg); - continue 'receive } } } diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index c9934a882b4c1..3477387c56ab0 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -698,11 +698,8 @@ impl NetworkEventStream { /// Awaits the next event for a session to be closed pub async fn next_session_closed(&mut self) -> Option<(PeerId, Option)> { while let Some(ev) = self.inner.next().await { - match ev { - NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason }) => { - return Some((peer_id, reason)) - } - _ => continue, + if let NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason }) = ev { + return Some((peer_id, reason)) } } None @@ -716,7 +713,7 @@ impl NetworkEventStream { NetworkEvent::Peer(PeerEvent::SessionEstablished(info)) => { return Some(info.peer_id) } - _ => continue, + _ => {} } } None @@ -729,15 +726,12 @@ impl NetworkEventStream { } let mut peers = Vec::with_capacity(num); while let Some(ev) = self.inner.next().await { - match ev { - NetworkEvent::ActivePeerSession { info: SessionInfo { peer_id, .. }, .. } => { - peers.push(peer_id); - num -= 1; - if num == 0 { - return peers; - } + if let NetworkEvent::ActivePeerSession { info: SessionInfo { peer_id, .. }, .. 
} = ev { + peers.push(peer_id); + num -= 1; + if num == 0 { + return peers; } - _ => continue, } } peers diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 7c2a690877f08..9cbc8e5b0b067 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -2026,7 +2026,7 @@ mod tests { transactions .on_network_event(NetworkEvent::Peer(PeerEvent::SessionEstablished(info))) } - NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => {} ev => { error!("unexpected event {ev:?}") } @@ -2097,7 +2097,7 @@ mod tests { // to insert a new peer in transactions peerset transactions.on_network_event(ev); } - NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => {} _ => { error!("unexpected event {ev:?}") } @@ -2166,7 +2166,7 @@ mod tests { // to insert a new peer in transactions peerset transactions.on_network_event(ev); } - NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => {} ev => { error!("unexpected event {ev:?}") } @@ -2241,7 +2241,7 @@ mod tests { NetworkEvent::Peer(PeerEvent::SessionEstablished(_)) => { transactions.on_network_event(ev); } - NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => continue, + NetworkEvent::Peer(PeerEvent::PeerAdded(_peer_id)) => {} ev => { error!("unexpected event {ev:?}") } diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index 6e20b335a1078..ee0d95d5004ba 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -169,7 +169,6 @@ impl Stream for TestDownload { headers.sort_unstable_by_key(|h| h.number); headers.into_iter().for_each(|h| this.buffer.push(h)); this.done = true; - continue } Err(err) => { this.done = true; diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 6da59a98b24c7..285b12f856a23 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -253,7 +253,7 @@ pub trait EthTransactions: LoadTransaction { } // Check if the sender is a contract - if self.get_code(sender, None).await?.len() > 0 { + if !self.get_code(sender, None).await?.is_empty() { return Ok(None); } diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 2bf3fc7a1dfa2..ae509dd2fdb44 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -101,7 +101,7 @@ impl FeeHistoryCache { entries.pop_first(); } - if entries.len() == 0 { + if entries.is_empty() { self.inner.upper_bound.store(0, SeqCst); self.inner.lower_bound.store(0, SeqCst); return diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index 36a01fa5903f1..1b389b1644e7a 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -297,7 +297,7 @@ impl DebugTraceTransactionsStream<'_> { pub async fn next_err(&mut self) -> Option<(RpcError, TxHash)> { loop { match self.next().await? { - Ok(_) => continue, + Ok(_) => {} Err(err) => return Some(err), } } @@ -329,7 +329,7 @@ impl DebugTraceBlockStream<'_> { pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { loop { match self.next().await? 
{ - Ok(_) => continue, + Ok(_) => {} Err(err) => return Some(err), } } diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index ee3fce68d3b52..b556a89504591 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -421,7 +421,7 @@ impl TraceBlockStream<'_> { pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { loop { match self.next().await? { - Ok(_) => continue, + Ok(_) => {} Err(err) => return Some(err), } } @@ -453,7 +453,7 @@ impl TraceBlockOpcodeGasStream<'_> { pub async fn next_err(&mut self) -> Option<(RpcError, BlockId)> { loop { match self.next().await? { - Ok(_) => continue, + Ok(_) => {} Err(err) => return Some(err), } } diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index 6a0b210401e45..56eb8bf5c8bd7 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -986,7 +986,7 @@ mod tests { let db = tx.open_db(None).unwrap(); for i in 1_000usize..1_000_000 { match tx.put(db.dbi(), i.to_le_bytes(), b"0", WriteFlags::empty()) { - Ok(_) => continue, + Ok(_) => {} Err(Error::MapFull) => break, result @ Err(_) => result.unwrap(), } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 046d000c72115..4a338d206c43a 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -1867,7 +1867,7 @@ mod tests { provider.account_block_changeset(last_database_block).unwrap(), database_changesets .into_iter() - .last() + .next_back() .unwrap() .into_iter() .sorted_by_key(|(address, _, _)| *address) diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 098c27c3c7538..4d6ce309fd5a0 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1791,7 +1791,7 @@ mod tests { consistent_provider.account_block_changeset(last_database_block).unwrap(), database_changesets .into_iter() - .last() + .next_back() .unwrap() .into_iter() .sorted_by_key(|(address, _, _)| *address) diff --git a/crates/tokio-util/src/event_stream.rs b/crates/tokio-util/src/event_stream.rs index 0e041f8d4c31c..e0ff4efe924b6 100644 --- a/crates/tokio-util/src/event_stream.rs +++ b/crates/tokio-util/src/event_stream.rs @@ -36,7 +36,6 @@ where Poll::Ready(Some(Ok(item))) => return Poll::Ready(Some(item)), Poll::Ready(Some(Err(e))) => { warn!("BroadcastStream lagged: {e:?}"); - continue } Poll::Ready(None) => return Poll::Ready(None), Poll::Pending => return Poll::Pending, diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs index a07df7cd50983..aa9e7ec121dc3 100644 --- a/crates/transaction-pool/src/pool/best.rs +++ b/crates/transaction-pool/src/pool/best.rs @@ -130,7 +130,6 @@ impl BestTransactions { Err(TryRecvError::Lagged(_)) => { // Handle the case where the receiver lagged too far behind. // `num_skipped` indicates the number of messages that were skipped. - continue } // this case is still better than the existing iterator behavior where no new diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index b7cba834567f9..21a64c0d6c210 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -817,7 +817,7 @@ impl

<P> RevealedSparseTrie<P>

{ tree_mask, hash_mask, hashes, - hash.filter(|_| path.len() == 0), + hash.filter(|_| path.is_empty()), ); updates.updated_nodes.insert(path.clone(), branch_node); } From ebf300d236328dffa74c3c6f39588b1cf229d8ba Mon Sep 17 00:00:00 2001 From: Tien Dao <15717476+tiendn@users.noreply.github.com> Date: Sat, 11 Jan 2025 22:36:20 +0700 Subject: [PATCH 052/113] test(validation): add tests for EIP-7702 transaction filtering in EthMessageFilter (#13756) --- .../network/src/transactions/validation.rs | 117 +++++++++++++++++- 1 file changed, 116 insertions(+), 1 deletion(-) diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index 5eb4c905f9f2a..0b5547e2c31c4 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -331,7 +331,6 @@ impl FilterAnnouncement for EthMessageFilter { } } -// TODO(eip7702): update tests as needed #[cfg(test)] mod test { use super::*; @@ -514,6 +513,122 @@ mod test { assert_eq!(expected_data, partially_valid_data.into_data()) } + #[test] + fn eth68_announcement_eip7702_tx() { + let types = vec![TxType::Eip7702 as u8, TxType::Legacy as u8]; + let sizes = vec![MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE]; + let hashes = vec![ + B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") + .unwrap(), + B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefbbbb") + .unwrap(), + ]; + + let announcement = NewPooledTransactionHashes68 { + types: types.clone(), + sizes: sizes.clone(), + hashes: hashes.clone(), + }; + + let filter = EthMessageFilter::default(); + + let (outcome, partially_valid_data) = filter.partially_filter_valid_entries(announcement); + assert_eq!(outcome, FilterOutcome::Ok); + + let (outcome, valid_data) = filter.filter_valid_entries_68(partially_valid_data); + assert_eq!(outcome, FilterOutcome::Ok); + + let mut expected_data = HashMap::default(); + expected_data.insert(hashes[0], Some((types[0], sizes[0]))); + expected_data.insert(hashes[1], Some((types[1], sizes[1]))); + + assert_eq!(expected_data, valid_data.into_data()); + } + + #[test] + fn eth68_announcement_eip7702_tx_size_validation() { + let types = vec![TxType::Eip7702 as u8, TxType::Eip7702 as u8, TxType::Eip7702 as u8]; + // Test with different sizes: too small, reasonable, too large + let sizes = vec![ + 1, // too small + MAX_MESSAGE_SIZE / 2, // reasonable size + MAX_MESSAGE_SIZE + 1, // too large + ]; + let hashes = vec![ + B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") + .unwrap(), + B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefbbbb") + .unwrap(), + B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcccc") + .unwrap(), + ]; + + let announcement = NewPooledTransactionHashes68 { + types: types.clone(), + sizes: sizes.clone(), + hashes: hashes.clone(), + }; + + let filter = EthMessageFilter::default(); + + let (outcome, partially_valid_data) = filter.partially_filter_valid_entries(announcement); + assert_eq!(outcome, FilterOutcome::Ok); + + let (outcome, valid_data) = filter.filter_valid_entries_68(partially_valid_data); + assert_eq!(outcome, FilterOutcome::Ok); + + let mut expected_data = HashMap::default(); + + for i in 0..3 { + expected_data.insert(hashes[i], Some((types[i], sizes[i]))); + } + + assert_eq!(expected_data, valid_data.into_data()); + } + + #[test] + fn eth68_announcement_mixed_tx_types() { + let types = vec![ + 
TxType::Legacy as u8, + TxType::Eip7702 as u8, + TxType::Eip1559 as u8, + TxType::Eip4844 as u8, + ]; + let sizes = vec![MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE]; + let hashes = vec![ + B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafa") + .unwrap(), + B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefbbbb") + .unwrap(), + B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcccc") + .unwrap(), + B256::from_str("0xbeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefcafebeefdddd") + .unwrap(), + ]; + + let announcement = NewPooledTransactionHashes68 { + types: types.clone(), + sizes: sizes.clone(), + hashes: hashes.clone(), + }; + + let filter = EthMessageFilter::default(); + + let (outcome, partially_valid_data) = filter.partially_filter_valid_entries(announcement); + assert_eq!(outcome, FilterOutcome::Ok); + + let (outcome, valid_data) = filter.filter_valid_entries_68(partially_valid_data); + assert_eq!(outcome, FilterOutcome::Ok); + + let mut expected_data = HashMap::default(); + // All transaction types should be included as they are valid + for i in 0..4 { + expected_data.insert(hashes[i], Some((types[i], sizes[i]))); + } + + assert_eq!(expected_data, valid_data.into_data()); + } + #[test] fn test_display_for_zst() { let filter = EthMessageFilter::default(); From 6ef86e9340a481841bfe773f6f09293ec4ed8213 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Sat, 11 Jan 2025 16:35:40 +0100 Subject: [PATCH 053/113] feat(rpc): setters for `TransportRpcModules` (#13773) --- crates/rpc/rpc-builder/src/lib.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 1e758522e4868..e852c39f8fc6a 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -2110,6 +2110,34 @@ pub struct TransportRpcModules { // === impl TransportRpcModules === impl TransportRpcModules { + /// Sets a custom [`TransportRpcModuleConfig`] for the configured modules. + /// This will overwrite current configuration, if any. + pub fn with_config(mut self, config: TransportRpcModuleConfig) -> Self { + self.config = config; + self + } + + /// Sets the [`RpcModule`] for the http transport. + /// This will overwrite current module, if any. + pub fn with_http(mut self, http: RpcModule<()>) -> Self { + self.http = Some(http); + self + } + + /// Sets the [`RpcModule`] for the ws transport. + /// This will overwrite current module, if any. + pub fn with_ws(mut self, ws: RpcModule<()>) -> Self { + self.ws = Some(ws); + self + } + + /// Sets the [`RpcModule`] for the http transport. + /// This will overwrite current module, if any. + pub fn with_ipc(mut self, ipc: RpcModule<()>) -> Self { + self.ipc = Some(ipc); + self + } + /// Returns the [`TransportRpcModuleConfig`] used to configure this instance. 
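// Illustrative sketch (not part of the patch): chaining the setters added
// above. Assumes `TransportRpcModules` implements `Default`; jsonrpsee's
// `RpcModule::new(())` serves as a stand-in empty module.
let modules = TransportRpcModules::default()
    .with_config(TransportRpcModuleConfig::default())
    .with_http(RpcModule::new(()))
    .with_ws(RpcModule::new(()))
    .with_ipc(RpcModule::new(()));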
pub const fn module_config(&self) -> &TransportRpcModuleConfig { &self.config From 8e7768db2aa4416d5dd209040342f4151cba6f9f Mon Sep 17 00:00:00 2001 From: Tien Nguyen <116023870+htiennv@users.noreply.github.com> Date: Sun, 12 Jan 2025 00:27:11 +0700 Subject: [PATCH 054/113] feat: integrate EngineArgs into NodeCommand (#13748) Co-authored-by: Matthias Seitz --- bin/reth/src/main.rs | 120 ++----------------------- book/cli/reth/node.md | 8 -- crates/cli/commands/src/node.rs | 8 +- crates/node/builder/src/builder/mod.rs | 11 ++- crates/node/core/src/args/engine.rs | 58 ++++++++++++ crates/node/core/src/args/mod.rs | 4 + crates/node/core/src/node_config.rs | 14 ++- 7 files changed, 99 insertions(+), 124 deletions(-) create mode 100644 crates/node/core/src/args/engine.rs diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs index b263f03a3f794..645a8e5d587e6 100644 --- a/bin/reth/src/main.rs +++ b/bin/reth/src/main.rs @@ -3,66 +3,12 @@ #[global_allocator] static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); -use clap::{Args, Parser}; +use clap::Parser; use reth::cli::Cli; use reth_ethereum_cli::chainspec::EthereumChainSpecParser; -use reth_node_builder::{ - engine_tree_config::{ - TreeConfig, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, - }, - EngineNodeLauncher, -}; -use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; -use reth_provider::providers::BlockchainProvider; -use reth_tracing::tracing::warn; +use reth_node_ethereum::EthereumNode; use tracing::info; -/// Parameters for configuring the engine -#[derive(Debug, Clone, Args, PartialEq, Eq)] -#[command(next_help_heading = "Engine")] -pub struct EngineArgs { - /// Enable the experimental engine features on reth binary - /// - /// DEPRECATED: experimental engine is default now, use --engine.legacy to enable the legacy - /// functionality - #[arg(long = "engine.experimental", default_value = "false")] - pub experimental: bool, - - /// Enable the legacy engine on reth binary - #[arg(long = "engine.legacy", default_value = "false")] - pub legacy: bool, - - /// Configure persistence threshold for engine experimental. - #[arg(long = "engine.persistence-threshold", conflicts_with = "legacy", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] - pub persistence_threshold: u64, - - /// Configure the target number of blocks to keep in memory. - #[arg(long = "engine.memory-block-buffer-target", conflicts_with = "legacy", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] - pub memory_block_buffer_target: u64, - - /// Enable state root task - #[arg(long = "engine.state-root-task", conflicts_with = "legacy")] - pub state_root_task_enabled: bool, - - /// Enable comparing trie updates from the state root task to the trie updates from the regular - /// state root calculation. 
- #[arg(long = "engine.state-root-task-compare-updates", conflicts_with = "legacy")] - pub state_root_task_compare_updates: bool, -} - -impl Default for EngineArgs { - fn default() -> Self { - Self { - experimental: false, - legacy: false, - persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, - memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, - state_root_task_enabled: false, - state_root_task_compare_updates: false, - } - } -} - fn main() { reth_cli_util::sigsegv_handler::install(); @@ -71,64 +17,12 @@ fn main() { std::env::set_var("RUST_BACKTRACE", "1"); } - if let Err(err) = - Cli::::parse().run(|builder, engine_args| async move { - if engine_args.experimental { - warn!(target: "reth::cli", "Experimental engine is default now, and the --engine.experimental flag is deprecated. To enable the legacy functionality, use --engine.legacy."); - } - - let use_legacy_engine = engine_args.legacy; - match use_legacy_engine { - false => { - let engine_tree_config = TreeConfig::default() - .with_persistence_threshold(engine_args.persistence_threshold) - .with_memory_block_buffer_target(engine_args.memory_block_buffer_target) - .with_state_root_task(engine_args.state_root_task_enabled) - .with_always_compare_trie_updates(engine_args.state_root_task_compare_updates); - let handle = builder - .with_types_and_provider::>() - .with_components(EthereumNode::components()) - .with_add_ons(EthereumAddOns::default()) - .launch_with_fn(|builder| { - let launcher = EngineNodeLauncher::new( - builder.task_executor().clone(), - builder.config().datadir(), - engine_tree_config, - ); - builder.launch_with(launcher) - }) - .await?; - handle.node_exit_future.await - } - true => { - info!(target: "reth::cli", "Running with legacy engine"); - let handle = builder.launch_node(EthereumNode::default()).await?; - handle.node_exit_future.await - } - } - }) - { + if let Err(err) = Cli::::parse().run(|builder, _| async move { + info!(target: "reth::cli", "Launching node"); + let handle = builder.launch_node(EthereumNode::default()).await?; + handle.node_exit_future.await + }) { eprintln!("Error: {err:?}"); std::process::exit(1); } } - -#[cfg(test)] -mod tests { - use super::*; - use clap::Parser; - - /// A helper type to parse Args more easily - #[derive(Parser)] - struct CommandParser { - #[command(flatten)] - args: T, - } - - #[test] - fn test_parse_engine_args() { - let default_args = EngineArgs::default(); - let args = CommandParser::::parse_from(["reth"]).args; - assert_eq!(args, default_args); - } -} diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index 66a04a1dbf898..210a3506e2d72 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -682,14 +682,6 @@ Pruning: Configure receipts log filter. Format: <`address`>:<`prune_mode`>[,<`address`>:<`prune_mode`>...] 
Where <`prune_mode`> can be 'full', 'distance:<`blocks`>', or 'before:<`block_number`>' Engine: - --engine.experimental - Enable the experimental engine features on reth binary - - DEPRECATED: experimental engine is default now, use --engine.legacy to enable the legacy functionality - - --engine.legacy - Enable the legacy engine on reth binary - --engine.persistence-threshold Configure persistence threshold for engine experimental diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index b099a2c052226..189ca6b795b49 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -10,7 +10,7 @@ use reth_ethereum_cli::chainspec::EthereumChainSpecParser; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::{ - DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, + DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, node_config::NodeConfig, @@ -107,6 +107,10 @@ pub struct NodeCommand< #[command(flatten)] pub pruning: PruningArgs, + /// Engine cli arguments + #[command(flatten, next_help_heading = "Engine")] + pub engine: EngineArgs, + /// Additional cli arguments #[command(flatten, next_help_heading = "Extension")] pub ext: Ext, @@ -160,6 +164,7 @@ impl< dev, pruning, ext, + engine, } = self; // set up node config @@ -177,6 +182,7 @@ impl< db, dev, pruning, + engine, }; let data_dir = node_config.datadir(); diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 82e81209a057e..8134f8d56eafa 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -17,6 +17,7 @@ use reth_db_api::{ database::Database, database_metrics::{DatabaseMetadata, DatabaseMetrics}, }; +use reth_engine_tree::tree::TreeConfig; use reth_exex::ExExContext; use reth_network::{ transactions::TransactionsManagerConfig, NetworkBuilder, NetworkConfig, NetworkConfigBuilder, @@ -563,8 +564,16 @@ where > { let Self { builder, task_executor } = self; + let engine_tree_config = TreeConfig::default() + .with_persistence_threshold(builder.config.engine.persistence_threshold) + .with_memory_block_buffer_target(builder.config.engine.memory_block_buffer_target) + .with_state_root_task(builder.config.engine.state_root_task_enabled) + .with_always_compare_trie_updates( + builder.config.engine.state_root_task_compare_updates, + ); + let launcher = - EngineNodeLauncher::new(task_executor, builder.config.datadir(), Default::default()); + EngineNodeLauncher::new(task_executor, builder.config.datadir(), engine_tree_config); builder.launch_with(launcher).await } } diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs new file mode 100644 index 0000000000000..411074d28b121 --- /dev/null +++ b/crates/node/core/src/args/engine.rs @@ -0,0 +1,58 @@ +//! clap [Args](clap::Args) for engine purposes + +use clap::Args; + +use crate::node_config::{DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD}; + +/// Parameters for configuring the engine driver. +#[derive(Debug, Clone, Args, PartialEq, Eq)] +#[command(next_help_heading = "Engine")] +pub struct EngineArgs { + /// Configure persistence threshold for engine experimental. + #[arg(long = "engine.persistence-threshold", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] + pub persistence_threshold: u64, + + /// Configure the target number of blocks to keep in memory. 
+ #[arg(long = "engine.memory-block-buffer-target", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] + pub memory_block_buffer_target: u64, + + /// Enable state root task + #[arg(long = "engine.state-root-task")] + pub state_root_task_enabled: bool, + + /// Enable comparing trie updates from the state root task to the trie updates from the regular + /// state root calculation. + #[arg(long = "engine.state-root-task-compare-updates")] + pub state_root_task_compare_updates: bool, +} + +impl Default for EngineArgs { + fn default() -> Self { + Self { + persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, + memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, + state_root_task_enabled: false, + state_root_task_compare_updates: false, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use clap::Parser; + + /// A helper type to parse Args more easily + #[derive(Parser)] + struct CommandParser { + #[command(flatten)] + args: T, + } + + #[test] + fn test_parse_engine_args() { + let default_args = EngineArgs::default(); + let args = CommandParser::::parse_from(["reth"]).args; + assert_eq!(args, default_args); + } +} diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs index 7f1b643615156..1649b8b56b564 100644 --- a/crates/node/core/src/args/mod.rs +++ b/crates/node/core/src/args/mod.rs @@ -56,5 +56,9 @@ pub use datadir_args::DatadirArgs; mod benchmark_args; pub use benchmark_args::BenchmarkArgs; +/// EngineArgs for configuring the engine +mod engine; +pub use engine::EngineArgs; + mod error; pub mod types; diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index 861e47fc3cf1e..eb8aa2378b87f 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -2,7 +2,7 @@ use crate::{ args::{ - DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, + DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, EngineArgs, NetworkArgs, PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, }, dirs::{ChainPath, DataDirPath}, @@ -31,6 +31,12 @@ use std::{ }; use tracing::*; +/// Triggers persistence when the number of canonical blocks in memory exceeds this threshold. +pub const DEFAULT_PERSISTENCE_THRESHOLD: u64 = 2; + +/// How close to the canonical head we persist blocks. +pub const DEFAULT_MEMORY_BLOCK_BUFFER_TARGET: u64 = 2; + /// This includes all necessary configuration to launch the node. /// The individual configuration options can be overwritten before launching the node. 
/// @@ -133,6 +139,9 @@ pub struct NodeConfig { /// All pruning related arguments pub pruning: PruningArgs, + + /// All engine related arguments + pub engine: EngineArgs, } impl NodeConfig { @@ -161,6 +170,7 @@ impl NodeConfig { dev: DevArgs::default(), pruning: PruningArgs::default(), datadir: DatadirArgs::default(), + engine: EngineArgs::default(), } } @@ -449,6 +459,7 @@ impl NodeConfig { db: self.db, dev: self.dev, pruning: self.pruning, + engine: self.engine, } } } @@ -475,6 +486,7 @@ impl Clone for NodeConfig { dev: self.dev, pruning: self.pruning.clone(), datadir: self.datadir.clone(), + engine: self.engine.clone(), } } } From cc84f83b6c60aa65c1a37ef286635f17dfd7de0f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 11 Jan 2025 19:59:13 +0100 Subject: [PATCH 055/113] fix: apply legacy cleanup to opnode (#13775) --- crates/optimism/bin/src/main.rs | 8 ++++++-- crates/optimism/node/src/args.rs | 15 +-------------- 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index 55eb923470c4e..bba31e250117e 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -24,8 +24,12 @@ fn main() { if let Err(err) = Cli::::parse().run(|builder, rollup_args| async move { let engine_tree_config = TreeConfig::default() - .with_persistence_threshold(rollup_args.persistence_threshold) - .with_memory_block_buffer_target(rollup_args.memory_block_buffer_target); + .with_persistence_threshold(builder.config().engine.persistence_threshold) + .with_memory_block_buffer_target(builder.config().engine.memory_block_buffer_target) + .with_state_root_task(builder.config().engine.state_root_task_enabled) + .with_always_compare_trie_updates( + builder.config().engine.state_root_task_compare_updates, + ); let op_node = OpNode::new(rollup_args.clone()); let handle = builder diff --git a/crates/optimism/node/src/args.rs b/crates/optimism/node/src/args.rs index 658748c9c4411..87c8c1be6450c 100644 --- a/crates/optimism/node/src/args.rs +++ b/crates/optimism/node/src/args.rs @@ -2,10 +2,6 @@ //! clap [Args](clap::Args) for optimism rollup configuration -use reth_node_builder::engine_tree_config::{ - DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, -}; - /// Parameters for rollup configuration #[derive(Debug, Clone, PartialEq, Eq, clap::Args)] #[command(next_help_heading = "Rollup")] @@ -37,16 +33,9 @@ pub struct RollupArgs { /// enables discovery v4 if provided #[arg(long = "rollup.discovery.v4", default_value = "false")] pub discovery_v4: bool, - - /// Configure persistence threshold for engine experimental. - #[arg(long = "engine.persistence-threshold", default_value_t = DEFAULT_PERSISTENCE_THRESHOLD)] - pub persistence_threshold: u64, - - /// Configure the target number of blocks to keep in memory. 
- #[arg(long = "engine.memory-block-buffer-target", default_value_t = DEFAULT_MEMORY_BLOCK_BUFFER_TARGET)] - pub memory_block_buffer_target: u64, } +#[allow(clippy::derivable_impls)] impl Default for RollupArgs { fn default() -> Self { Self { @@ -55,8 +44,6 @@ impl Default for RollupArgs { enable_genesis_walkback: false, compute_pending_block: false, discovery_v4: false, - persistence_threshold: DEFAULT_PERSISTENCE_THRESHOLD, - memory_block_buffer_target: DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, } } } From 567d5c60e6f0c2c50b9f4226d5f691c61e4ece07 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 11 Jan 2025 21:43:24 -0500 Subject: [PATCH 056/113] feat: add snmalloc support (#13771) --- Cargo.lock | 29 +++++++++++++++++++++++++++++ Cargo.toml | 2 ++ bin/reth/Cargo.toml | 6 ++++++ crates/cli/util/Cargo.toml | 11 +++++++++++ crates/cli/util/src/allocator.rs | 15 ++++++++++++++- 5 files changed, 62 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 1211d0466f9d7..be630932f91d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1859,6 +1859,15 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +[[package]] +name = "cmake" +version = "0.1.52" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" +dependencies = [ + "cc", +] + [[package]] name = "codspeed" version = "2.7.2" @@ -6680,6 +6689,7 @@ dependencies = [ "reth-fs-util", "secp256k1", "serde", + "snmalloc-rs", "thiserror 2.0.9", "tikv-jemallocator", "tracy-client", @@ -10416,6 +10426,25 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" +[[package]] +name = "snmalloc-rs" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d43ff92911d7d9705d1c0203300a3edfd00d16c8b8b0c27c56f9407a3f31e7a6" +dependencies = [ + "snmalloc-sys", +] + +[[package]] +name = "snmalloc-sys" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "954e1f984860770475196be81a547ed1517d34fcb8a15cb87bdb37cff3353230" +dependencies = [ + "cc", + "cmake", +] + [[package]] name = "socket2" version = "0.5.8" diff --git a/Cargo.toml b/Cargo.toml index a49d3052a5578..0e6a39084b2e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -604,9 +604,11 @@ tempfile = "3.8" test-fuzz = "6" rstest = "0.23.0" +# allocators tikv-jemalloc-ctl = "0.6" tikv-jemallocator = "0.6" tracy-client = "0.17.3" +snmalloc-rs = { version = "0.3.7", features = ["build_cc"] } # [patch.crates-io] # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "5492e40" } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index f7bdfd8ceed20..8487a08ea9768 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -118,6 +118,12 @@ jemalloc-prof = [ ] tracy-allocator = ["reth-cli-util/tracy-allocator"] +# Because jemalloc is default and preferred over snmalloc when both features are +# enabled, `--no-default-features` should be used when enabling snmalloc or +# snmalloc-native. 
+snmalloc = ["reth-cli-util/snmalloc"] +snmalloc-native = ["reth-cli-util/snmalloc-native"] + min-error-logs = ["tracing/release_max_level_error"] min-warn-logs = ["tracing/release_max_level_warn"] min-info-logs = ["tracing/release_max_level_info"] diff --git a/crates/cli/util/Cargo.toml b/crates/cli/util/Cargo.toml index 70515f83b4b74..b7f0f11ff214f 100644 --- a/crates/cli/util/Cargo.toml +++ b/crates/cli/util/Cargo.toml @@ -30,10 +30,21 @@ tracy-client = { workspace = true, optional = true, features = ["demangle"] } [target.'cfg(unix)'.dependencies] tikv-jemallocator = { workspace = true, optional = true } +snmalloc-rs = { workspace = true, optional = true } libc = "0.2" [features] jemalloc = ["dep:tikv-jemallocator"] + +# Enables jemalloc profiling features jemalloc-prof = ["jemalloc", "tikv-jemallocator?/profiling"] +# Wraps the selected allocator in the tracy profiling allocator tracy-allocator = ["dep:tracy-client"] + +snmalloc = ["dep:snmalloc-rs"] + +# Enables the snmalloc-rs `native-cpu` feature, which optimizes snmalloc for the +# native CPU of the host machine. Not sure why this feature is not derived from +# RUSTFLAGS or enabled when `target-cpu=native`. +snmalloc-native = ["snmalloc", "snmalloc-rs/native-cpu"] diff --git a/crates/cli/util/src/allocator.rs b/crates/cli/util/src/allocator.rs index ee13e7c61cb52..753c987324d72 100644 --- a/crates/cli/util/src/allocator.rs +++ b/crates/cli/util/src/allocator.rs @@ -1,14 +1,27 @@ //! Custom allocator implementation. +//! +//! We provide support for jemalloc and snmalloc on unix systems, and prefer jemalloc if both are +//! enabled. -// We use jemalloc for performance reasons. +// We provide jemalloc allocator support, alongside snmalloc. If both features are enabled, jemalloc +// is prioritized. cfg_if::cfg_if! { if #[cfg(all(feature = "jemalloc", unix))] { type AllocatorInner = tikv_jemallocator::Jemalloc; + } else if #[cfg(all(feature = "snmalloc", unix))] { + type AllocatorInner = snmalloc_rs::SnMalloc; } else { type AllocatorInner = std::alloc::System; } } +// This is to prevent clippy unused warnings when we do `--all-features` +cfg_if::cfg_if! { + if #[cfg(all(feature = "snmalloc", feature = "jemalloc", unix))] { + use snmalloc_rs as _; + } +} + cfg_if::cfg_if! { if #[cfg(feature = "tracy-allocator")] { type AllocatorWrapper = tracy_client::ProfiledAllocator; From 23526646cbe19ed8d743ba74e3bb984a8628d9e2 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Mon, 13 Jan 2025 12:09:02 +0700 Subject: [PATCH 057/113] perf(op-receipts): reuse `l1_block_info` for multiple receipts (#13781) --- crates/optimism/rpc/src/eth/block.rs | 9 +++++++-- crates/optimism/rpc/src/eth/receipt.rs | 16 ++++++++-------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index dc35db1e42a8a..b53fd0730d2b1 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -40,7 +40,7 @@ where let excess_blob_gas = block.excess_blob_gas(); let timestamp = block.timestamp(); - let l1_block_info = + let mut l1_block_info = reth_optimism_evm::extract_l1_info(block.body()).map_err(OpEthApiError::from)?; return block @@ -60,13 +60,18 @@ where timestamp, }; + // We must clear this cache as different L2 transactions can have different + // L1 costs. A potential improvement here is to only clear the cache if the + // new transaction input has changed, since otherwise the L1 cost wouldn't. 
+ l1_block_info.clear_tx_l1_cost(); + Ok(OpReceiptBuilder::new( &self.inner.eth_api.provider().chain_spec(), tx, meta, receipt, &receipts, - l1_block_info.clone(), + &mut l1_block_info, )? .build()) }) diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index d83cb7a5e8448..2bb08e22edf8f 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -40,7 +40,7 @@ where meta.block_hash.into(), )))?; - let l1_block_info = + let mut l1_block_info = reth_optimism_evm::extract_l1_info(block.body()).map_err(OpEthApiError::from)?; Ok(OpReceiptBuilder::new( @@ -49,7 +49,7 @@ where meta, &receipt, &receipts, - l1_block_info, + &mut l1_block_info, )? .build()) } @@ -107,7 +107,7 @@ impl OpReceiptFieldsBuilder { mut self, chain_spec: &OpChainSpec, tx: &OpTransactionSigned, - mut l1_block_info: revm::L1BlockInfo, + l1_block_info: &mut revm::L1BlockInfo, ) -> Result { let raw_tx = tx.encoded_2718(); let timestamp = self.block_timestamp; @@ -199,7 +199,7 @@ impl OpReceiptBuilder { meta: TransactionMeta, receipt: &OpReceipt, all_receipts: &[OpReceipt], - l1_block_info: revm::L1BlockInfo, + l1_block_info: &mut revm::L1BlockInfo, ) -> Result { let timestamp = meta.timestamp; let core_receipt = @@ -298,14 +298,14 @@ mod test { ..Default::default() }; - let l1_block_info = + let mut l1_block_info = reth_optimism_evm::extract_l1_info(&block.body).expect("should extract l1 info"); // test assert!(OP_MAINNET.is_fjord_active_at_timestamp(BLOCK_124665056_TIMESTAMP)); let receipt_meta = OpReceiptFieldsBuilder::new(BLOCK_124665056_TIMESTAMP) - .l1_block_info(&OP_MAINNET, &tx_1, l1_block_info) + .l1_block_info(&OP_MAINNET, &tx_1, &mut l1_block_info) .expect("should parse revm l1 info") .build(); @@ -363,7 +363,7 @@ mod test { body: BlockBody { transactions: vec![tx_0], ..Default::default() }, ..Default::default() }; - let l1_block_info = + let mut l1_block_info = reth_optimism_evm::extract_l1_info(&block.body).expect("should extract l1 info"); // https://basescan.org/tx/0xf9420cbaf66a2dda75a015488d37262cbfd4abd0aad7bb2be8a63e14b1fa7a94 @@ -371,7 +371,7 @@ mod test { let tx_1 = OpTransactionSigned::decode_2718(&mut &tx[..]).unwrap(); let receipt_meta = OpReceiptFieldsBuilder::new(1730216981) - .l1_block_info(&BASE_MAINNET, &tx_1, l1_block_info) + .l1_block_info(&BASE_MAINNET, &tx_1, &mut l1_block_info) .expect("should parse revm l1 info") .build(); From 75ee913f0ba56f155b2cf65b87211ca5a07f7af0 Mon Sep 17 00:00:00 2001 From: Tien Dao <15717476+tiendn@users.noreply.github.com> Date: Mon, 13 Jan 2025 12:21:25 +0700 Subject: [PATCH 058/113] refactor: update receipt codec from `HackReceipt` to `OpGethReceipt` (#13738) --- book/cli/reth/import-receipts-op.md | 2 +- .../cli/src/commands/import_receipts.rs | 6 ++-- crates/optimism/cli/src/lib.rs | 6 ++-- crates/optimism/cli/src/receipt_file_codec.rs | 32 +++++++++---------- 4 files changed, 23 insertions(+), 23 deletions(-) diff --git a/book/cli/reth/import-receipts-op.md b/book/cli/reth/import-receipts-op.md index a5e1863cea207..0b7135e1d7a52 100644 --- a/book/cli/reth/import-receipts-op.md +++ b/book/cli/reth/import-receipts-op.md @@ -49,7 +49,7 @@ Database: [possible values: true, false] - The path to a receipts file for import. File must use `HackReceiptFileCodec` (used for + The path to a receipts file for import. File must use `OpGethReceiptFileCodec` (used for exporting OP chain segment below Bedrock block via testinprod/op-geth). 
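// Illustrative sketch (not part of the patch) of the caching rationale in
// the receipt diffs above: revm's `L1BlockInfo` memoizes the last per-tx
// L1 cost, so one instance reused across a whole block must be reset
// before each transaction. Assumes a fallible context; the transaction
// iterator is approximate.
let mut l1_block_info = reth_optimism_evm::extract_l1_info(block.body())?;
for tx in block.body().transactions() {
    // Drop the cost memoized for the previous transaction before reuse.
    l1_block_info.clear_tx_l1_cost();
    // ... build the RPC receipt for `tx` with `&mut l1_block_info` ...
}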
diff --git a/crates/optimism/cli/src/commands/import_receipts.rs b/crates/optimism/cli/src/commands/import_receipts.rs index e564982cfd574..e6235de92406e 100644 --- a/crates/optimism/cli/src/commands/import_receipts.rs +++ b/crates/optimism/cli/src/commands/import_receipts.rs @@ -26,7 +26,7 @@ use reth_stages::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use tracing::{debug, info, trace, warn}; -use crate::receipt_file_codec::HackReceiptFileCodec; +use crate::receipt_file_codec::OpGethReceiptFileCodec; /// Initializes the database with the genesis block. #[derive(Debug, Parser)] @@ -38,7 +38,7 @@ pub struct ImportReceiptsOpCommand { #[arg(long, value_name = "CHUNK_LEN", verbatim_doc_comment)] chunk_len: Option, - /// The path to a receipts file for import. File must use `HackReceiptFileCodec` (used for + /// The path to a receipts file for import. File must use `OpGethReceiptFileCodec` (used for /// exporting OP chain segment below Bedrock block via testinprod/op-geth). /// /// @@ -161,7 +161,7 @@ where .expect("transaction static files must exist before importing receipts"); while let Some(file_client) = - reader.next_receipts_chunk::>>().await? + reader.next_receipts_chunk::>>().await? { if highest_block_receipts == highest_block_transactions { warn!(target: "reth::cli", highest_block_receipts, highest_block_transactions, "Ignoring all other blocks in the file since we have reached the desired height"); diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 4e1aa9469c148..5c3900a0e4838 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -16,14 +16,14 @@ pub mod chainspec; pub mod commands; /// Module with a codec for reading and encoding receipts in files. /// -/// Enables decoding and encoding `HackReceipt` type. See . +/// Enables decoding and encoding `OpGethReceipt` type. See . /// -/// Currently configured to use codec [`HackReceipt`](receipt_file_codec::HackReceipt) based on +/// Currently configured to use codec [`OpGethReceipt`](receipt_file_codec::OpGethReceipt) based on /// export of below Bedrock data using . Codec can /// be replaced with regular encoding of receipts for export. /// /// NOTE: receipts can be exported using regular op-geth encoding for `Receipt` type, to fit -/// reth's needs for importing. However, this would require patching the diff in to export the `Receipt` and not `HackReceipt` type (originally +/// reth's needs for importing. However, this would require patching the diff in to export the `Receipt` and not `OpGethReceipt` type (originally /// made for op-erigon's import needs). pub mod receipt_file_codec; diff --git a/crates/optimism/cli/src/receipt_file_codec.rs b/crates/optimism/cli/src/receipt_file_codec.rs index 96810543a72c5..e307b10ac07c9 100644 --- a/crates/optimism/cli/src/receipt_file_codec.rs +++ b/crates/optimism/cli/src/receipt_file_codec.rs @@ -26,17 +26,17 @@ use reth_downloaders::{file_client::FileClientError, receipt_file_client::Receip /// It's recommended to use [`with_capacity`](tokio_util::codec::FramedRead::with_capacity) to set /// the capacity of the framed reader to the size of the file. 
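// Illustrative sketch (not part of the patch) of the `with_capacity`
// recommendation above, inside an async fn; `path` is an assumed input and
// decode failures surface as `FileClientError`:
let file = tokio::fs::File::open(path).await?;
let len = file.metadata().await?.len() as usize;
let mut stream = tokio_util::codec::FramedRead::with_capacity(
    file,
    OpGethReceiptFileCodec::<OpReceipt>::default(),
    len,
);
while let Some(frame) = futures::StreamExt::next(&mut stream).await {
    let _receipt = frame?; // `Ok(None)` is an entry that decoded to no receipt
}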
#[derive(Debug)] -pub struct HackReceiptFileCodec(core::marker::PhantomData); +pub struct OpGethReceiptFileCodec(core::marker::PhantomData); -impl Default for HackReceiptFileCodec { +impl Default for OpGethReceiptFileCodec { fn default() -> Self { Self(Default::default()) } } -impl Decoder for HackReceiptFileCodec +impl Decoder for OpGethReceiptFileCodec where - R: TryFrom>, + R: TryFrom>, { type Item = Option>; type Error = FileClientError; @@ -47,7 +47,7 @@ where } let buf_slice = &mut src.as_ref(); - let receipt = HackReceiptContainer::decode(buf_slice) + let receipt = OpGethReceiptContainer::decode(buf_slice) .map_err(|err| Self::Error::Rlp(err, src.to_vec()))? .0; src.advance(src.len() - buf_slice.len()); @@ -68,7 +68,7 @@ where /// See #[derive(Debug, PartialEq, Eq, RlpDecodable)] -pub struct HackReceipt { +pub struct OpGethReceipt { tx_type: u8, post_state: Bytes, status: u64, @@ -90,13 +90,13 @@ pub struct HackReceipt { #[derive(Debug, PartialEq, Eq, RlpDecodable)] #[rlp(trailing)] -struct HackReceiptContainer(Option); +struct OpGethReceiptContainer(Option); -impl TryFrom for Receipt { +impl TryFrom for Receipt { type Error = &'static str; - fn try_from(exported_receipt: HackReceipt) -> Result { - let HackReceipt { tx_type, status, cumulative_gas_used, logs, .. } = exported_receipt; + fn try_from(exported_receipt: OpGethReceipt) -> Result { + let OpGethReceipt { tx_type, status, cumulative_gas_used, logs, .. } = exported_receipt; #[allow(clippy::needless_update)] Ok(Self { @@ -109,10 +109,10 @@ impl TryFrom for Receipt { } } -impl TryFrom for OpReceipt { +impl TryFrom for OpReceipt { type Error = &'static str; - fn try_from(exported_receipt: HackReceipt) -> Result { + fn try_from(exported_receipt: OpGethReceipt) -> Result { let Receipt { tx_type, success, @@ -152,10 +152,10 @@ pub(crate) mod test { pub(crate) const HACK_RECEIPT_ENCODED_BLOCK_3: &[u8] = &hex!("f90271f9026e8080018301c60db9010000000000000000000000000000000000000000400000000000000000008000000000000000000000000000000000004000000000000000000000400004000000100000000000000000000000000000000000000000000000000000000000004000000000000000000000040000000000400080000400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000008100000000000000000000000000000000000004000000000000000000000000008000000000000000000010000000000000000000000000000400000000000000001000000000000000000000000002000f8faf89c948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff884a092e98423f8adac6e64d0608e519fd1cefb861498385c6dee70d58fc926ddc68ca000000000000000000000000000000000000000000000000000000000d101e54ba00000000000000000000000000000000000000000000000000000000000014218a0000000000000000000000000fa011d8d6c26f13abe2cefed38226e401b2b8a9980f85a948ce8c13d816fe6daf12d6fd9e4952e1fc88850aff842a0fe25c73e3b9089fac37d55c4c7efcba6f04af04cebd2fc4d6d7dbb07e1e5234fa000000000000000000000000000000000000000000000007ed8842f062774800080a08fab01dcec1da547e90a77597999e9153ff788fa6451d1cc942064427bd995019400000000000000000000000000000000000000008301c60da0da4509fe0ca03202ddbe4f68692c132d689ee098433691040ece18c3a45d44c50380018212c2821c2383312e35"); - fn hack_receipt_1() -> HackReceipt { + fn hack_receipt_1() -> OpGethReceipt { let receipt = receipt_block_1(); - HackReceipt { + OpGethReceipt { tx_type: receipt.receipt.tx_type as u8, post_state: Bytes::default(), status: receipt.receipt.success as u64, @@ -354,7 +354,7 @@ pub(crate) mod test { fn decode_hack_receipt() { let receipt = hack_receipt_1(); - let decoded = 
HackReceiptContainer::decode(&mut &HACK_RECEIPT_ENCODED_BLOCK_1[..]) + let decoded = OpGethReceiptContainer::decode(&mut &HACK_RECEIPT_ENCODED_BLOCK_1[..]) .unwrap() .0 .unwrap(); @@ -373,7 +373,7 @@ pub(crate) mod test { let encoded = &mut BytesMut::from(&receipt_1_to_3[..]); - let mut codec = HackReceiptFileCodec::default(); + let mut codec = OpGethReceiptFileCodec::default(); // test From 4e3810a17c787f1c75dbff5811146de15f3ed2f9 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 05:21:47 +0000 Subject: [PATCH 059/113] chore(deps): weekly `cargo update` (#13776) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> Co-authored-by: Matthias Seitz --- Cargo.lock | 457 +++++++++++++++++++++++++++-------------------------- 1 file changed, 234 insertions(+), 223 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index be630932f91d8..03fd74c606641 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.1.53" +version = "0.1.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da226340862e036ab26336dc99ca85311c6b662267c1440e1733890fd688802c" +checksum = "1e39f295f876b61a1222d937e1dd31f965e4a1acc3bba98e448dd7e84b1a4566" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -170,7 +170,7 @@ dependencies = [ "alloy-transport", "futures", "futures-util", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -203,7 +203,7 @@ dependencies = [ "crc", "rand 0.8.5", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -291,7 +291,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", "tracing", ] @@ -317,7 +317,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -345,7 +345,7 @@ dependencies = [ "rand 0.8.5", "serde_json", "tempfile", - "thiserror 2.0.9", + "thiserror 2.0.11", "tracing", "url", ] @@ -415,7 +415,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tracing", "url", @@ -460,7 +460,7 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -549,7 +549,7 @@ dependencies = [ "ethereum_ssz_derive", "serde", "serde_with", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -602,7 +602,7 @@ dependencies = [ "jsonrpsee-types", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -630,7 +630,7 @@ dependencies = [ "alloy-serde", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -668,7 +668,7 @@ dependencies = [ "auto_impl", "elliptic-curve", "k256", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -686,7 +686,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -700,7 +700,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -716,7 +716,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "syn-solidity", "tiny-keccak", ] @@ -732,7 +732,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "syn-solidity", ] @@ -771,7 +771,7 @@ 
dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tower 0.5.2", "tracing", @@ -938,7 +938,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1136,7 +1136,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1172,7 +1172,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1183,7 +1183,7 @@ checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1221,7 +1221,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1318,7 +1318,7 @@ version = "0.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "cexpr", "clang-sys", "itertools 0.13.0", @@ -1327,7 +1327,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1353,9 +1353,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "1be3f42a67d6d345ecd59f675f3f012d6974981560836e938c22b424b85ce1be" dependencies = [ "arbitrary", "serde", @@ -1419,7 +1419,7 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c340fe0f0b267787095cbe35240c6786ff19da63ec7b69367ba338eace8169b" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "boa_interner", "boa_macros", "boa_string", @@ -1435,7 +1435,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f620c3f06f51e65c0504ddf04978be1b814ac6586f0b45f6019801ab5efd37f9" dependencies = [ "arrayvec", - "bitflags 2.6.0", + "bitflags 2.7.0", "boa_ast", "boa_gc", "boa_interner", @@ -1469,7 +1469,7 @@ dependencies = [ "static_assertions", "tap", "thin-vec", - "thiserror 2.0.9", + "thiserror 2.0.11", "time", ] @@ -1510,7 +1510,7 @@ checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "synstructure", ] @@ -1520,7 +1520,7 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cc142dac798cdc6e2dbccfddeb50f36d2523bb977a976e19bdb3ae19b740804" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "boa_ast", "boa_interner", "boa_macros", @@ -1632,7 +1632,7 @@ checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1720,9 +1720,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.7" +version = "1.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a012a0df96dd6d06ba9a1b29d6402d1a5d77c6befd2566afdc26e10603dc93d7" +checksum = "ad0cf6e91fde44c773c6ee7ec6bba798504641a8bc2eb7e37a04ffbf4dfaa55a" dependencies = [ "jobserver", 
"libc", @@ -1821,9 +1821,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.24" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9560b07a799281c7e0958b9296854d6fafd4c5f31444a7e5bb1ad6dde5ccf1bd" +checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" dependencies = [ "clap_builder", "clap_derive", @@ -1831,9 +1831,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.24" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "874e0dd3eb68bf99058751ac9712f622e61e6f393a94f7128fa26e3f02f5c7cd" +checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" dependencies = [ "anstream", "anstyle", @@ -1850,7 +1850,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2214,7 +2214,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "crossterm_winapi", "mio 1.0.3", "parking_lot", @@ -2326,7 +2326,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2350,7 +2350,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2361,7 +2361,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2470,7 +2470,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2491,7 +2491,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "unicode-xid", ] @@ -2605,7 +2605,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2686,7 +2686,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", "walkdir", ] @@ -2751,7 +2751,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2762,7 +2762,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2782,7 +2782,7 @@ checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2838,7 +2838,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2862,7 +2862,7 @@ dependencies = [ "reth-node-ethereum", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -2951,7 +2951,7 @@ dependencies = [ "reth-tracing", "reth-trie-db", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", ] @@ -3408,7 +3408,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -3719,7 +3719,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", "tinyvec", "tokio", "tracing", @@ -3743,7 +3743,7 @@ dependencies = [ 
"resolv-conf", "serde", "smallvec", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -4090,7 +4090,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -4147,7 +4147,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -4260,15 +4260,15 @@ dependencies = [ [[package]] name = "instability" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "894813a444908c0c8c0e221b041771d107c4a21de1d317dc49bcc66e3c9e5b3f" +checksum = "0bf9fed6d91cfb734e7476a06bde8300a1b94e217e1b523b6f0cd1a01998c71d" dependencies = [ "darling", "indoc", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -4517,7 +4517,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -4719,7 +4719,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "libc", "redox_syscall", ] @@ -4790,9 +4790,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" @@ -4854,6 +4854,32 @@ dependencies = [ "libc", ] +[[package]] +name = "maili-protocol" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "428caa534dd054a449e64d8007d0fd0a15519d1033b272d37d02b74a29cf69f7" +dependencies = [ + "alloc-no-stdlib", + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "async-trait", + "brotli", + "derive_more", + "miniz_oxide", + "op-alloy-consensus", + "op-alloy-genesis", + "rand 0.8.5", + "serde", + "thiserror 2.0.11", + "tracing", + "unsigned-varint", +] + [[package]] name = "match_cfg" version = "0.1.0" @@ -4912,7 +4938,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -5151,7 +5177,7 @@ version = "6.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "filetime", "fsevent-sys", "inotify", @@ -5302,7 +5328,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -5355,9 +5381,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "250244eadaf1a25e0e2ad263110ad2d1b43c2e57ddf4c025e71552d98196a8d3" +checksum = "442518bf0ef88f4d79409527565b8cdee235c891f2e2a829497caec5ed9d8d1c" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5368,14 +5394,14 @@ dependencies = [ "derive_more", "serde", "serde_with", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] name = "op-alloy-genesis" -version = "0.9.1" +version = "0.9.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "98334a9cdccc5878e9d5c48afc9cc1b84da58dbc68d41f9488d8f71688b495d3" +checksum = "8a2af7fee1fa297569199b524493e50355eab3f1bff75cef492036eb4a3ffb5e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5383,14 +5409,14 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] name = "op-alloy-network" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd588157ac14db601d6497b81ae738b2581c60886fc592976fab6c282619604" +checksum = "d9e7e9fc656dfa8cc3b6e799da23e100b3d47e31ec6b5a4ed9d44e11f0967ad8" dependencies = [ "alloy-consensus", "alloy-network", @@ -5403,47 +5429,32 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "753762429c31f838b59c886b31456c9bf02fd38fb890621665523a9087ae06ae" +checksum = "5a144b1ed079913b11c0640f4eaa3d2ac1bdb6cc35e3658a1640e88b241e0c32" dependencies = [ - "alloc-no-stdlib", - "alloy-consensus", - "alloy-eips", - "alloy-primitives", - "alloy-rlp", - "alloy-serde", - "alloy-sol-types", - "async-trait", - "brotli", - "derive_more", - "miniz_oxide", - "op-alloy-consensus", - "op-alloy-genesis", - "serde", - "thiserror 2.0.9", - "tracing", - "unsigned-varint", + "maili-protocol", ] [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f483fb052ef807682ae5b5729c3a61a092ee4f7334e6e6055de67e9f28ef880" +checksum = "0ff030fa1051bb38a0b526727aec511c0172d6f074a0d63cfedf522b11cc8b09" dependencies = [ "alloy-eips", "alloy-primitives", "jsonrpsee", + "op-alloy-protocol", "op-alloy-rpc-types", "op-alloy-rpc-types-engine", ] [[package]] name = "op-alloy-rpc-types" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1d3872021aa28b10fc6cf8252e792e802d89e8b2cdaa57dcb9243c461b286" +checksum = "50223d61cad040db6721bcc2d489c924c1691ce3f5e674d4d8776131dab786a0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5460,9 +5471,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43f00d4060a6a38f5bf0a8182b4cc4c7071e2bc96942f414619251b522169eb" +checksum = "f5e2419373bae23ea3f6cf5a49c624d9b644061e2e929d4f9629cbcbffa4964d" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5472,7 +5483,7 @@ dependencies = [ "op-alloy-consensus", "op-alloy-protocol", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -5646,7 +5657,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.9", + "thiserror 2.0.11", "ucd-trie", ] @@ -5690,7 +5701,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -5719,7 +5730,7 @@ checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -5866,7 +5877,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "483f8c21f64f3ea09fe0f30f5d48c3e8eefe5dac9129f0075f76593b4c1da705" dependencies = [ "proc-macro2", - "syn 2.0.95", + "syn 
2.0.96", ] [[package]] @@ -5917,14 +5928,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] name = "proc-macro2" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -5935,7 +5946,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "chrono", "flate2", "hex", @@ -5950,7 +5961,7 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc5b72d8145275d844d4b5f6d4e1eef00c8cd889edb6035c21675d1bb1f45c9f" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "hex", "procfs-core 0.17.0", "rustix", @@ -5962,7 +5973,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "chrono", "hex", ] @@ -5973,7 +5984,7 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "239df02d8349b06fc07398a3a1697b06418223b1c7725085e801e7c0fc6a12ec" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "hex", ] @@ -5985,7 +5996,7 @@ checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.6.0", + "bitflags 2.7.0", "lazy_static", "num-traits", "rand 0.8.5", @@ -6015,7 +6026,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -6070,7 +6081,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -6089,7 +6100,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.9", + "thiserror 2.0.11", "tinyvec", "tracing", "web-time", @@ -6220,7 +6231,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdef7f9be5c0122f890d58bdf4d964349ba6a6161f705907526d891efabba57d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "cassowary", "compact_str", "crossterm", @@ -6241,7 +6252,7 @@ version = "11.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", ] [[package]] @@ -6276,7 +6287,7 @@ version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", ] [[package]] @@ -6532,7 +6543,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tracing", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tower 0.4.13", "tracing", @@ -6690,7 +6701,7 @@ dependencies = [ "secp256k1", "serde", "snmalloc-rs", - "thiserror 2.0.9", + "thiserror 2.0.11", "tikv-jemallocator", "tracy-client", ] @@ -6726,7 +6737,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -6832,7 +6843,7 @@ dependencies = [ "sysinfo", "tempfile", 
"test-fuzz", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -6890,7 +6901,7 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", "tracing", ] @@ -6932,7 +6943,7 @@ dependencies = [ "schnellru", "secp256k1", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", "tracing", @@ -6957,7 +6968,7 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -6984,7 +6995,7 @@ dependencies = [ "secp256k1", "serde", "serde_with", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", "tracing", @@ -7022,7 +7033,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", "tokio-util", @@ -7098,7 +7109,7 @@ dependencies = [ "secp256k1", "sha2 0.10.8", "sha3", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", "tokio-util", @@ -7152,7 +7163,7 @@ dependencies = [ "reth-primitives-traits", "reth-trie", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", ] @@ -7179,7 +7190,7 @@ dependencies = [ "reth-prune", "reth-stages-api", "reth-tasks", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", ] @@ -7238,7 +7249,7 @@ dependencies = [ "reth-trie-sparse", "revm-primitives", "schnellru", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -7283,7 +7294,7 @@ dependencies = [ "reth-execution-errors", "reth-fs-util", "reth-storage-errors", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -7316,7 +7327,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", "tokio-util", @@ -7345,7 +7356,7 @@ dependencies = [ "reth-primitives", "reth-primitives-traits", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -7532,7 +7543,7 @@ dependencies = [ "reth-prune-types", "reth-storage-errors", "revm-primitives", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -7626,7 +7637,7 @@ dependencies = [ "reth-transaction-pool", "reth-trie-db", "tempfile", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", ] @@ -7653,7 +7664,7 @@ version = "1.1.5" dependencies = [ "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -7696,7 +7707,7 @@ dependencies = [ "rand 0.8.5", "reth-tracing", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", "tokio-util", @@ -7708,7 +7719,7 @@ dependencies = [ name = "reth-libmdbx" version = "1.1.5" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "byteorder", "codspeed-criterion-compat", "dashmap", @@ -7721,7 +7732,7 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 2.0.9", + "thiserror 2.0.11", "tracing", ] @@ -7760,7 +7771,7 @@ dependencies = [ "reqwest", "reth-tracing", "serde_with", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -7820,7 +7831,7 @@ dependencies = [ "serial_test", "smallvec", "tempfile", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", "tokio-util", @@ -7845,7 +7856,7 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", ] @@ -7883,7 +7894,7 @@ dependencies = [ "secp256k1", "serde_json", "serde_with", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "url", ] @@ -7914,7 +7925,7 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 2.0.9", + "thiserror 
2.0.11", "tracing", "zstd", ] @@ -8044,7 +8055,7 @@ dependencies = [ "serde", "shellexpand", "strum", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "toml", "tracing", @@ -8176,7 +8187,7 @@ dependencies = [ "reth-optimism-forks", "reth-primitives-traits", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -8272,7 +8283,7 @@ dependencies = [ "reth-revm", "revm", "revm-primitives", - "thiserror 2.0.9", + "thiserror 2.0.11", "tracing", ] @@ -8378,7 +8389,7 @@ dependencies = [ "reth-transaction-pool", "revm", "sha2 0.10.8", - "thiserror 2.0.9", + "thiserror 2.0.11", "tracing", ] @@ -8449,7 +8460,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -8515,7 +8526,7 @@ dependencies = [ "reth-primitives", "revm-primitives", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", ] @@ -8614,7 +8625,7 @@ dependencies = [ "serde_json", "serde_with", "test-fuzz", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -8693,7 +8704,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash 2.1.0", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -8713,7 +8724,7 @@ dependencies = [ "serde", "serde_json", "test-fuzz", - "thiserror 2.0.9", + "thiserror 2.0.11", "toml", ] @@ -8798,7 +8809,7 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", "tower 0.4.13", @@ -8890,7 +8901,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-util", "tower 0.4.13", @@ -8929,7 +8940,7 @@ dependencies = [ "reth-tokio-util", "reth-transaction-pool", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -9013,7 +9024,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", "tracing", @@ -9113,7 +9124,7 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tracing", ] @@ -9141,7 +9152,7 @@ dependencies = [ "reth-static-file-types", "reth-testing-utils", "reth-tokio-util", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", "tracing", @@ -9235,7 +9246,7 @@ dependencies = [ "reth-fs-util", "reth-primitives-traits", "reth-static-file-types", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -9249,7 +9260,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tracing", "tracing-futures", @@ -9303,7 +9314,7 @@ dependencies = [ "aquamarine", "assert_matches", "auto_impl", - "bitflags 2.6.0", + "bitflags 2.7.0", "codspeed-criterion-compat", "futures-util", "metrics", @@ -9334,7 +9345,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "tokio-stream", "tracing", @@ -9449,7 +9460,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "reth-trie-db", - "thiserror 2.0.9", + "thiserror 2.0.11", "tracing", ] @@ -9474,7 +9485,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "smallvec", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -9516,7 +9527,7 @@ dependencies = [ "colorchoice", "revm", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] @@ -9559,7 +9570,7 @@ dependencies = [ "alloy-eip7702", "alloy-primitives", "auto_impl", - "bitflags 2.6.0", + "bitflags 2.7.0", "bitvec", "c-kzg", "cfg-if", @@ -9661,9 
+9672,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.9" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41589aba99537475bf697f2118357cad1c31590c5a1b9f6d9fc4ad6d07503661" +checksum = "a652edd001c53df0b3f96a36a8dc93fce6866988efc16808235653c6bcac8bf2" dependencies = [ "bytemuck", "byteorder", @@ -9710,7 +9721,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.95", + "syn 2.0.96", "unicode-ident", ] @@ -9798,7 +9809,7 @@ version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "errno", "libc", "linux-raw-sys", @@ -9807,9 +9818,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.20" +version = "0.23.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" +checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" dependencies = [ "log", "once_cell", @@ -10028,7 +10039,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -10042,7 +10053,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.7.0", "core-foundation 0.10.0", "core-foundation-sys", "libc", @@ -10115,7 +10126,7 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -10150,7 +10161,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -10201,7 +10212,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -10234,7 +10245,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -10536,7 +10547,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -10560,9 +10571,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = "12.12.4" +version = "12.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd33e73f154e36ec223c18013f7064a2c120f1162fc086ac9933542def186b00" +checksum = "bf08b42a6f9469bd8584daee39a1352c8133ccabc5151ccccb15896ef047d99a" dependencies = [ "debugid", "memmap2", @@ -10572,9 +10583,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.12.4" +version = "12.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e51191290147f071777e37fe111800bb82a9059f9c95b19d2dd41bfeddf477" +checksum = "32f73b5a5bd4da72720c45756a2d11edf110116b87f998bda59b97be8c2c7cf1" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -10594,9 +10605,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.95" +version = "2.0.96" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" +checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" dependencies = [ "proc-macro2", "quote", @@ -10612,7 +10623,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -10632,7 +10643,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -10710,7 +10721,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -10743,11 +10754,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.9" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" dependencies = [ - "thiserror-impl 2.0.9", + "thiserror-impl 2.0.11", ] [[package]] @@ -10758,18 +10769,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] name = "thiserror-impl" -version = "2.0.9" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -10902,9 +10913,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", @@ -10920,13 +10931,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -11060,7 +11071,7 @@ checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", "base64 0.22.1", - "bitflags 2.6.0", + "bitflags 2.7.0", "bytes", "futures-core", "futures-util", @@ -11127,7 +11138,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -11422,9 +11433,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +checksum = "b913a3b5fe84142e269d63cc62b64319ccaf89b748fc31fe025177f767a756c4" dependencies = [ "getrandom 0.2.15", ] @@ -11463,7 +11474,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 
2.0.96", ] [[package]] @@ -11533,7 +11544,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "wasm-bindgen-shared", ] @@ -11568,7 +11579,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11734,7 +11745,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -11745,7 +11756,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -11756,7 +11767,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -11767,7 +11778,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -11959,9 +11970,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.22" +version = "0.6.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39281189af81c07ec09db316b302a3e67bf9bd7cbf6c820b50e35fee9c2fa980" +checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" dependencies = [ "memchr", ] @@ -12042,7 +12053,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "synstructure", ] @@ -12064,7 +12075,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -12084,7 +12095,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "synstructure", ] @@ -12105,7 +12116,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -12127,7 +12138,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] From ac25fd8c182f9fe28c8610981d791989ffcd9e9c Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Mon, 13 Jan 2025 11:40:46 +0100 Subject: [PATCH 060/113] feat(engine): validate execution requests (#13685) --- Cargo.lock | 1 + crates/payload/primitives/Cargo.toml | 3 + crates/payload/primitives/src/lib.rs | 77 ++++++++++++++++++++- crates/rpc/rpc-engine-api/src/engine_api.rs | 6 +- 4 files changed, 84 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 03fd74c606641..06bca5a49bbb0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8519,6 +8519,7 @@ dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rpc-types-engine", + "assert_matches", "op-alloy-rpc-types-engine", "reth-chain-state", "reth-chainspec", diff --git a/crates/payload/primitives/Cargo.toml b/crates/payload/primitives/Cargo.toml index d4070b4688e0f..caeb538e1e2d8 100644 --- a/crates/payload/primitives/Cargo.toml +++ b/crates/payload/primitives/Cargo.toml @@ -31,5 +31,8 @@ serde.workspace = true thiserror.workspace = true tokio = { workspace = true, 
default-features = false, features = ["sync"] } +[dev-dependencies] +assert_matches.workspace = true + [features] op = ["dep:op-alloy-rpc-types-engine"] \ No newline at end of file diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 523e6fb057a63..eaafaf9959e67 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -8,6 +8,9 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +use alloy_primitives::Bytes; +use reth_chainspec::EthereumHardforks; + mod error; pub use error::{ EngineObjectValidationError, InvalidPayloadAttributesError, PayloadBuilderError, @@ -24,7 +27,6 @@ pub use traits::{ mod payload; pub use payload::PayloadOrAttributes; -use reth_chainspec::EthereumHardforks; /// The types that are used by the engine API. pub trait PayloadTypes: Send + Sync + Unpin + core::fmt::Debug + Clone + 'static { /// The built payload type. @@ -363,12 +365,85 @@ pub enum PayloadKind { WaitForPending, } +/// Validates that execution requests are valid according to Engine API specification. +/// +/// `executionRequests`: `Array of DATA` - List of execution layer triggered requests. Each list +/// element is a `requests` byte array as defined by [EIP-7685](https://eips.ethereum.org/EIPS/eip-7685). +/// The first byte of each element is the `request_type` and the remaining bytes are the +/// `request_data`. Elements of the list **MUST** be ordered by `request_type` in ascending order. +/// Elements with empty `request_data` **MUST** be excluded from the list. If any element is out of +/// order or has a length of 1-byte or shorter, client software **MUST** return `-32602: Invalid +/// params` error. +pub fn validate_execution_requests(requests: &[Bytes]) -> Result<(), EngineObjectValidationError> { + let mut last_request_type = None; + for request in requests { + if request.len() <= 1 { + return Err(EngineObjectValidationError::InvalidParams( + "empty execution request".to_string().into(), + )) + } + + let request_type = request[0]; + if Some(request_type) < last_request_type { + return Err(EngineObjectValidationError::InvalidParams( + "execution requests out of order".to_string().into(), + )) + } + + last_request_type = Some(request_type); + } + Ok(()) +} + #[cfg(test)] mod tests { use super::*; + use assert_matches::assert_matches; #[test] fn version_ord() { assert!(EngineApiMessageVersion::V4 > EngineApiMessageVersion::V3); } + + #[test] + fn execution_requests_validation() { + assert_matches!(validate_execution_requests(&[]), Ok(())); + + let valid_requests = [ + Bytes::from_iter([1, 2]), + Bytes::from_iter([2, 3]), + Bytes::from_iter([3, 4]), + Bytes::from_iter([4, 5]), + ]; + assert_matches!(validate_execution_requests(&valid_requests), Ok(())); + + let requests_with_empty = [ + Bytes::from_iter([1, 2]), + Bytes::from_iter([2, 3]), + Bytes::new(), + Bytes::from_iter([3, 4]), + ]; + assert_matches!( + validate_execution_requests(&requests_with_empty), + Err(EngineObjectValidationError::InvalidParams(_)) + ); + + let mut requests_valid_reversed = valid_requests; + requests_valid_reversed.reverse(); + assert_matches!( + validate_execution_requests(&requests_with_empty), + Err(EngineObjectValidationError::InvalidParams(_)) + ); + + let requests_out_of_order = [ + Bytes::from_iter([1, 2]), + Bytes::from_iter([2, 3]), + Bytes::from_iter([4, 5]), + Bytes::from_iter([3, 4]), + ]; + assert_matches!( + validate_execution_requests(&requests_out_of_order), + 
Err(EngineObjectValidationError::InvalidParams(_)) + ); + } } diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index fa3fba285745e..c3ed8dc5add99 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -20,8 +20,8 @@ use reth_chainspec::{EthereumHardfork, EthereumHardforks}; use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes, EngineValidator}; use reth_payload_builder::PayloadStore; use reth_payload_primitives::{ - validate_payload_timestamp, EngineApiMessageVersion, PayloadBuilderAttributes, - PayloadOrAttributes, + validate_execution_requests, validate_payload_timestamp, EngineApiMessageVersion, + PayloadBuilderAttributes, PayloadOrAttributes, }; use reth_rpc_api::EngineApiServer; use reth_rpc_types_compat::engine::payload::convert_to_payload_body_v1; @@ -268,6 +268,8 @@ where .validator .validate_version_specific_fields(EngineApiMessageVersion::V4, payload_or_attrs)?; + validate_execution_requests(&execution_requests)?; + Ok(self .inner .beacon_consensus From 749facc477aaf3611f991973eeaaea00a5889fd8 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 13 Jan 2025 19:25:37 +0400 Subject: [PATCH 061/113] feat: add `evm_for_block` helper to simplify EVM setup (#13787) --- .../engine/invalid-block-hooks/src/witness.rs | 20 +---- crates/engine/util/src/reorg.rs | 17 ++-- crates/ethereum/evm/src/execute.rs | 29 +------ crates/evm/src/lib.rs | 80 ++++++++++++++++++- crates/optimism/evm/src/execute.rs | 28 ++----- .../custom-beacon-withdrawals/src/main.rs | 32 ++------ 6 files changed, 103 insertions(+), 103 deletions(-) diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index a0c986e4384ab..b7a06951fb47c 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -6,15 +6,14 @@ use pretty_assertions::Comparison; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_engine_primitives::InvalidBlockHook; use reth_evm::{ - env::EvmEnv, state_change::post_block_balance_increments, system_calls::SystemCaller, - ConfigureEvm, + state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, }; use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; use reth_primitives_traits::SignedTransaction; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ - database::StateProviderDatabase, db::states::bundle_state::BundleRetention, - primitives::EnvWithHandlerCfg, DatabaseCommit, StateBuilder, + database::StateProviderDatabase, db::states::bundle_state::BundleRetention, DatabaseCommit, + StateBuilder, }; use reth_rpc_api::DebugApiClient; use reth_tracing::tracing::warn; @@ -77,19 +76,8 @@ where .with_bundle_update() .build(); - // Setup environment for the execution. 
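// --- Illustrative sketch (editor's addition, not part of the patch): a hedged
// usage example for the `validate_execution_requests` helper added in #13685.
// Request contents are invented; per EIP-7685 the first byte of each element
// is the request_type, request_data must be non-empty, and types must ascend.
fn execution_requests_usage_sketch() {
    use alloy_primitives::Bytes;
    use reth_payload_primitives::validate_execution_requests;

    // Ascending request types (0x01 then 0x02), each carrying request_data.
    let well_formed = [Bytes::from_iter([1, 0xaa]), Bytes::from_iter([2, 0xbb])];
    assert!(validate_execution_requests(&well_formed).is_ok());

    // A lone type byte has no request_data and is rejected as invalid params.
    assert!(validate_execution_requests(&[Bytes::from_iter([1])]).is_err());
}
// --- end sketch ---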
- let EvmEnv { cfg_env_with_handler_cfg, block_env } = - self.evm_config.cfg_and_block_env(block.header()); - // Setup EVM - let mut evm = self.evm_config.evm_with_env( - &mut db, - EnvWithHandlerCfg::new_with_cfg_env( - cfg_env_with_handler_cfg, - block_env, - Default::default(), - ), - ); + let mut evm = self.evm_config.evm_for_block(&mut db, block.header()); let mut system_caller = SystemCaller::new(self.evm_config.clone(), self.provider.chain_spec()); diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 073f83545ab69..fc38f15be2b68 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -14,8 +14,8 @@ use reth_engine_primitives::{ use reth_errors::{BlockExecutionError, BlockValidationError, RethError, RethResult}; use reth_ethereum_forks::EthereumHardforks; use reth_evm::{ - env::EvmEnv, state_change::post_block_withdrawals_balance_increments, - system_calls::SystemCaller, ConfigureEvm, + state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller, + ConfigureEvm, }; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{ @@ -29,7 +29,7 @@ use reth_revm::{ DatabaseCommit, }; use reth_rpc_types_compat::engine::payload::block_to_payload; -use revm_primitives::{EVMError, EnvWithHandlerCfg}; +use revm_primitives::EVMError; use std::{ collections::VecDeque, future::Future, @@ -297,15 +297,8 @@ where .with_bundle_update() .build(); - // Configure environments - let EvmEnv { cfg_env_with_handler_cfg, block_env } = - evm_config.cfg_and_block_env(&reorg_target.header); - let env = EnvWithHandlerCfg::new_with_cfg_env( - cfg_env_with_handler_cfg, - block_env, - Default::default(), - ); - let mut evm = evm_config.evm_with_env(&mut state, env); + // Configure EVM + let mut evm = evm_config.evm_for_block(&mut state, &reorg_target.header); // apply eip-4788 pre block contract call let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 3d8fed3b5cc01..0d793fc8a8b43 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -12,7 +12,6 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; use reth_consensus::ConsensusError; use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ - env::EvmEnv, execute::{ balance_increment_state, BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, @@ -26,7 +25,7 @@ use reth_primitives::{BlockWithSenders, EthPrimitives, Receipt}; use reth_revm::db::State; use revm_primitives::{ db::{Database, DatabaseCommit}, - EnvWithHandlerCfg, ResultAndState, + ResultAndState, }; /// Factory for [`EthExecutionStrategy`]. @@ -113,23 +112,6 @@ where } } -impl EthExecutionStrategy -where - DB: Database + Display>, - EvmConfig: ConfigureEvm, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// # Caution - /// - /// This does not initialize the tx environment. 
- fn evm_env_for_block(&self, header: &EvmConfig::Header) -> EnvWithHandlerCfg { - let EvmEnv { cfg_env_with_handler_cfg, block_env } = - self.evm_config.cfg_and_block_env(header); - EnvWithHandlerCfg::new_with_cfg_env(cfg_env_with_handler_cfg, block_env, Default::default()) - } -} - impl BlockExecutionStrategy for EthExecutionStrategy where DB: Database + Display>, @@ -153,8 +135,7 @@ where (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); self.state.set_state_clear_flag(state_clear_flag); - let env = self.evm_env_for_block(&block.header); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); self.system_caller.apply_pre_execution_changes(&block.header, &mut evm)?; @@ -165,8 +146,7 @@ where &mut self, block: &BlockWithSenders, ) -> Result, Self::Error> { - let env = self.evm_env_for_block(&block.header); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); let mut cumulative_gas_used = 0; let mut receipts = Vec::with_capacity(block.body.transactions.len()); @@ -227,8 +207,7 @@ where block: &BlockWithSenders, receipts: &[Receipt], ) -> Result { - let env = self.evm_env_for_block(&block.header); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); let requests = if self.chain_spec.is_prague_active_at_timestamp(block.timestamp) { // Collect all EIP-6110 deposits diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index eff95cea696c0..2eb5fc04c4132 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -18,6 +18,7 @@ extern crate alloc; use crate::builder::RethEvmBuilder; +use alloc::boxed::Box; use alloy_consensus::BlockHeader as _; use alloy_primitives::{Address, Bytes, B256, U256}; use reth_primitives_traits::{BlockHeader, SignedTransaction}; @@ -41,7 +42,6 @@ pub mod system_calls; pub mod test_utils; /// Trait for configuring the EVM for executing full blocks. -#[auto_impl::auto_impl(&, Arc)] pub trait ConfigureEvm: ConfigureEvmEnv { /// Associated type for the default external context that should be configured for the EVM. type DefaultExternalContext<'a>; @@ -70,6 +70,31 @@ pub trait ConfigureEvm: ConfigureEvmEnv { evm } + /// Returns a new EVM with the given database configured with `cfg` and `block_env` + /// configuration derived from the given header. Relies on + /// [`ConfigureEvmEnv::cfg_and_block_env`]. + /// + /// # Caution + /// + /// This does not initialize the tx environment. + fn evm_for_block( + &self, + db: DB, + header: &Self::Header, + ) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + let EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { cfg_env, handler_cfg }, + block_env, + } = self.cfg_and_block_env(header); + self.evm_with_env( + db, + EnvWithHandlerCfg { + env: Box::new(Env { cfg: cfg_env, block: block_env, tx: Default::default() }), + handler_cfg, + }, + ) + } + /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. /// @@ -109,6 +134,59 @@ pub trait ConfigureEvm: ConfigureEvmEnv { fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a>; } +impl<'b, T> ConfigureEvm for &'b T +where + T: ConfigureEvm, + &'b T: ConfigureEvmEnv
<Header = T::Header, Transaction = T::Transaction>
, +{ + type DefaultExternalContext<'a> = T::DefaultExternalContext<'a>; + + fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a> { + (*self).default_external_context() + } + + fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + (*self).evm(db) + } + + fn evm_for_block( + &self, + db: DB, + header: &Self::Header, + ) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + (*self).evm_for_block(db, header) + } + + fn evm_with_env( + &self, + db: DB, + env: EnvWithHandlerCfg, + ) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + (*self).evm_with_env(db, env) + } + + fn evm_with_env_and_inspector( + &self, + db: DB, + env: EnvWithHandlerCfg, + inspector: I, + ) -> Evm<'_, I, DB> + where + DB: Database, + I: GetInspector, + { + (*self).evm_with_env_and_inspector(db, env, inspector) + } + + fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> + where + DB: Database, + I: GetInspector, + { + (*self).evm_with_inspector(db, inspector) + } +} + /// This represents the set of methods used to configure the EVM's environment before block /// execution. /// diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index ac5d750626567..97aa1592dc8bc 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -2,14 +2,13 @@ use crate::{l1::ensure_create2_deployer, OpBlockExecutionError, OpEvmConfig}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use alloy_consensus::{Eip658Value, Header, Receipt, Transaction as _}; +use alloy_consensus::{Eip658Value, Receipt, Transaction as _}; use alloy_eips::eip7685::Requests; use core::fmt::Display; use op_alloy_consensus::{OpDepositReceipt, OpTxType}; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; use reth_evm::{ - env::EvmEnv, execute::{ balance_increment_state, BasicBlockExecutorProvider, BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, BlockValidationError, ExecuteOutput, @@ -26,7 +25,7 @@ use reth_optimism_primitives::{OpBlock, OpPrimitives, OpReceipt, OpTransactionSi use reth_primitives::BlockWithSenders; use reth_primitives_traits::SignedTransaction; use reth_revm::{Database, State}; -use revm_primitives::{db::DatabaseCommit, EnvWithHandlerCfg, ResultAndState}; +use revm_primitives::{db::DatabaseCommit, ResultAndState}; use tracing::trace; /// Factory for [`OpExecutionStrategy`]. @@ -104,21 +103,6 @@ where } } -impl OpExecutionStrategy -where - DB: Database + Display>, - EvmConfig: ConfigureEvm
, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// Caution: this does not initialize the tx environment. - fn evm_env_for_block(&self, header: &Header) -> EnvWithHandlerCfg { - let evm_env = self.evm_config.cfg_and_block_env(header); - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; - EnvWithHandlerCfg::new_with_cfg_env(cfg_env_with_handler_cfg, block_env, Default::default()) - } -} - impl BlockExecutionStrategy for OpExecutionStrategy where DB: Database + Display>, @@ -141,8 +125,7 @@ where (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); self.state.set_state_clear_flag(state_clear_flag); - let env = self.evm_env_for_block(&block.header); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); self.system_caller.apply_beacon_root_contract_call( block.timestamp, @@ -165,8 +148,7 @@ where &mut self, block: &BlockWithSenders, ) -> Result, Self::Error> { - let env = self.evm_env_for_block(&block.header); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); let is_regolith = self.chain_spec.fork(OpHardfork::Regolith).active_at_timestamp(block.timestamp); @@ -318,7 +300,7 @@ impl OpExecutorProvider { mod tests { use super::*; use crate::OpChainSpec; - use alloy_consensus::TxEip1559; + use alloy_consensus::{Header, TxEip1559}; use alloy_primitives::{ b256, Address, PrimitiveSignature as Signature, StorageKey, StorageValue, U256, }; diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index f484b082be7a3..7d20b298f7caf 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -9,23 +9,20 @@ use alloy_sol_types::SolCall; #[cfg(feature = "optimism")] use reth::revm::primitives::OptimismFields; use reth::{ - api::{ConfigureEvm, ConfigureEvmEnv, NodeTypesWithEngine}, + api::{ConfigureEvm, NodeTypesWithEngine}, builder::{components::ExecutorBuilder, BuilderContext, FullNodeTypes}, cli::Cli, providers::ProviderError, revm::{ interpreter::Host, - primitives::{address, Address, Bytes, Env, EnvWithHandlerCfg, TransactTo, TxEnv, U256}, + primitives::{address, Address, Bytes, Env, TransactTo, TxEnv, U256}, Database, DatabaseCommit, Evm, State, }, }; use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_evm::{ - env::EvmEnv, - execute::{ - BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, ExecuteOutput, - InternalBlockExecutionError, - }, +use reth_evm::execute::{ + BlockExecutionError, BlockExecutionStrategy, BlockExecutionStrategyFactory, ExecuteOutput, + InternalBlockExecutionError, }; use reth_evm_ethereum::EthEvmConfig; use reth_node_ethereum::{node::EthereumAddOns, BasicBlockExecutorProvider, EthereumNode}; @@ -120,22 +117,6 @@ where state: State, } -impl CustomExecutorStrategy -where - DB: Database + Display>, -{ - /// Configures a new evm configuration and block environment for the given block. - /// - /// # Caution - /// - /// This does not initialize the tx environment. 
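// --- Illustrative sketch (editor's addition, not part of the patch): how the
// new `ConfigureEvm::evm_for_block` helper is used in place of the removed
// per-crate `evm_env_for_block` blocks like the one below. A minimal example
// assuming the Ethereum EVM config and revm's `EmptyDB`; the exact import
// paths are assumptions.
fn evm_for_block_usage_sketch() {
    use alloy_consensus::Header;
    use reth_chainspec::MAINNET;
    use reth_evm::ConfigureEvm;
    use reth_evm_ethereum::EthEvmConfig;
    use revm::db::EmptyDB;

    let evm_config = EthEvmConfig::new(MAINNET.clone());
    // cfg and block env are derived from the header in a single call; the tx
    // env is left at its default and must still be set per transaction.
    let _evm = evm_config.evm_for_block(EmptyDB::default(), &Header::default());
}
// --- end sketch ---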
- fn evm_env_for_block(&self, header: &alloy_consensus::Header) -> EnvWithHandlerCfg { - let evm_env = self.evm_config.cfg_and_block_env(header); - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; - EnvWithHandlerCfg::new_with_cfg_env(cfg_env_with_handler_cfg, block_env, Default::default()) - } -} - impl BlockExecutionStrategy for CustomExecutorStrategy where DB: Database + Display>, @@ -165,8 +146,7 @@ where block: &BlockWithSenders, _receipts: &[Receipt], ) -> Result { - let env = self.evm_env_for_block(&block.header); - let mut evm = self.evm_config.evm_with_env(&mut self.state, env); + let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); if let Some(withdrawals) = block.body.withdrawals.as_ref() { apply_withdrawals_contract_call(withdrawals, &mut evm)?; From 6f7c445742b5ad375ddbe485183d4725d4305362 Mon Sep 17 00:00:00 2001 From: int88 <106391185+int88@users.noreply.github.com> Date: Mon, 13 Jan 2025 23:56:19 +0800 Subject: [PATCH 062/113] chore: remove unused p2p error type (#13785) --- crates/net/eth-wire/src/errors/p2p.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/crates/net/eth-wire/src/errors/p2p.rs b/crates/net/eth-wire/src/errors/p2p.rs index f24e2cebc7846..c77816b48b102 100644 --- a/crates/net/eth-wire/src/errors/p2p.rs +++ b/crates/net/eth-wire/src/errors/p2p.rs @@ -63,10 +63,6 @@ pub enum P2PStreamError { #[error("mismatched protocol version in Hello message: {0}")] MismatchedProtocolVersion(GotExpected), - /// Ping started before the handshake completed. - #[error("started ping task before the handshake completed")] - PingBeforeHandshake, - /// Too many messages buffered before sending. #[error("too many messages buffered before sending")] SendBufferFull, From 6ac5785b35537829931cc1298b38dbf6d834acff Mon Sep 17 00:00:00 2001 From: Ocheretovich Date: Mon, 13 Jan 2025 18:29:24 +0200 Subject: [PATCH 063/113] docs: edited the "deny" shield link. (#13779) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8a6b8ddb42fbb..54d9f98e7a351 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ | [Crate Docs](https://reth.rs/docs) [gh-ci]: https://github.com/paradigmxyz/reth/actions/workflows/unit.yml -[gh-deny]: https://github.com/paradigmxyz/reth/actions/workflows/deny.yml +[gh-deny]: https://github.com/paradigmxyz/reth/actions/workflows/lint.yml [tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=chat&url=https%3A%2F%2Ftg.sumanjay.workers.dev%2Fparadigm%5Freth ## What is Reth? From d761ac42f5e15c46c00db90ca1b5b6f7f62a4f7c Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Mon, 13 Jan 2025 17:24:37 +0000 Subject: [PATCH 064/113] feat(trie): blinded providers trace logs (#13786) --- crates/trie/trie/src/proof/blinded.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/trie/trie/src/proof/blinded.rs b/crates/trie/trie/src/proof/blinded.rs index c8d0f3bb5a22c..57d0de97fbe7d 100644 --- a/crates/trie/trie/src/proof/blinded.rs +++ b/crates/trie/trie/src/proof/blinded.rs @@ -8,6 +8,7 @@ use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; use reth_trie_common::{prefix_set::TriePrefixSetsMut, Nibbles}; use reth_trie_sparse::blinded::{pad_path_to_key, BlindedProvider, BlindedProviderFactory}; use std::sync::Arc; +use tracing::trace; /// Factory for instantiating providers capable of retrieving blinded trie nodes via proofs. 
#[derive(Debug)] @@ -91,8 +92,10 @@ where .with_prefix_sets_mut(self.prefix_sets.as_ref().clone()) .multiproof(targets) .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + let node = proof.account_subtree.into_inner().remove(path); - Ok(proof.account_subtree.into_inner().remove(path)) + trace!(target: "trie::proof::blinded", ?path, ?node, "Blinded node for account trie"); + Ok(node) } } @@ -138,7 +141,9 @@ where .with_prefix_set_mut(storage_prefix_set) .storage_multiproof(targets) .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?; + let node = proof.subtree.into_inner().remove(path); - Ok(proof.subtree.into_inner().remove(path)) + trace!(target: "trie::proof::blinded", account = ?self.account, ?path, ?node, "Blinded node for storage trie"); + Ok(node) } } From df00877b708749b5f8fa53ac178f40d330dd04bd Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Mon, 13 Jan 2025 21:54:53 +0000 Subject: [PATCH 065/113] fix(cmd): initialize `StaticFileProducer` with config `PruneModes` unwind command (#13791) --- crates/cli/commands/src/stage/unwind.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index e5ff560559777..7171a45bb23ca 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -17,7 +17,6 @@ use reth_provider::{ providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, ChainStateBlockReader, ChainStateBlockWriter, ProviderFactory, StaticFileProviderFactory, StorageLocation, }; -use reth_prune::PruneModes; use reth_stages::{ sets::{DefaultStages, OfflineStages}, stages::ExecutionStage, @@ -120,7 +119,7 @@ impl> Command let builder = if self.offline { Pipeline::::builder().add_stages( - OfflineStages::new(executor, config.stages, PruneModes::default()) + OfflineStages::new(executor, config.stages, prune_modes.clone()) .builder() .disable(reth_stages::StageId::SenderRecovery), ) @@ -145,7 +144,7 @@ impl> Command max_duration: None, }, stage_conf.execution_external_clean_threshold(), - prune_modes, + prune_modes.clone(), ExExManagerHandle::empty(), )), ) @@ -153,7 +152,7 @@ impl> Command let pipeline = builder.build( provider_factory.clone(), - StaticFileProducer::new(provider_factory, PruneModes::default()), + StaticFileProducer::new(provider_factory, prune_modes), ); Ok(pipeline) } From feccf3595b630463e2a0f2cce41fc011114f5d8b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 14 Jan 2025 14:25:35 +0100 Subject: [PATCH 066/113] chore: bump revm 19.3 (#13793) --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 06bca5a49bbb0..b4d1043148b2b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9498,9 +9498,9 @@ dependencies = [ [[package]] name = "revm" -version = "19.2.0" +version = "19.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b829dc9d6e62c5a540dfdceb0c4d2217e445bf5f6f5ed3866817e7a9637c019" +checksum = "0a5a57589c308880c0f89ebf68d92aeef0d51e1ed88867474f895f6fd0f25c64" dependencies = [ "auto_impl", "cfg-if", @@ -9533,9 +9533,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ff76b50b5a9fa861fbc236fc82ce1afdf58861f65012aea807d679e54630d6" +checksum = "c0f632e761f171fb2f6ace8d1552a5793e0350578d4acec3e79ade1489f4c2a6" 
dependencies = [ "revm-primitives", "serde", diff --git a/Cargo.toml b/Cargo.toml index 0e6a39084b2e7..4be2f7fb73c36 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -418,7 +418,7 @@ reth-trie-sparse = { path = "crates/trie/sparse" } reth-zstd-compressors = { path = "crates/storage/zstd-compressors", default-features = false } # revm -revm = { version = "19.2.0", default-features = false } +revm = { version = "19.3.0", default-features = false } revm-primitives = { version = "15.1.0", default-features = false } revm-interpreter = { version = "15.0.0", default-features = false } revm-inspectors = "0.14.1" From b4610a04e6a1ceecbeacce92da5e14ea43476e57 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 14 Jan 2025 17:11:18 +0400 Subject: [PATCH 067/113] feat: add receipt builder for `OpExecutionStrategy` (#13792) --- Cargo.lock | 1 + crates/optimism/consensus/Cargo.toml | 26 +-- crates/optimism/consensus/src/lib.rs | 2 +- crates/optimism/consensus/src/proof.rs | 8 +- crates/optimism/consensus/src/validation.rs | 28 +-- crates/optimism/evm/src/execute.rs | 197 +++++++++++------- crates/optimism/evm/src/lib.rs | 2 + crates/optimism/evm/src/receipts.rs | 75 +++++++ crates/optimism/node/src/node.rs | 5 +- crates/optimism/primitives/src/lib.rs | 2 +- crates/optimism/primitives/src/receipt.rs | 15 ++ .../primitives/src/transaction/signed.rs | 26 ++- 12 files changed, 279 insertions(+), 108 deletions(-) create mode 100644 crates/optimism/evm/src/receipts.rs diff --git a/Cargo.lock b/Cargo.lock index b4d1043148b2b..8cae6fe6a8c68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8253,6 +8253,7 @@ dependencies = [ "reth-optimism-forks", "reth-optimism-primitives", "reth-primitives", + "reth-primitives-traits", "tracing", ] diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 024bb957f8153..e49ffdce2285b 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -17,6 +17,7 @@ reth-chainspec.workspace = true reth-consensus-common.workspace = true reth-consensus.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true # op-reth reth-optimism-forks.workspace = true @@ -40,17 +41,18 @@ reth-optimism-chainspec.workspace = true [features] default = ["std"] std = [ - "reth-chainspec/std", - "reth-consensus/std", - "reth-consensus-common/std", - "reth-primitives/std", - "reth-optimism-forks/std", - "reth-optimism-chainspec/std", - "reth-optimism-primitives/std", - "alloy-eips/std", - "alloy-primitives/std", - "alloy-consensus/std", - "alloy-trie/std", - "op-alloy-consensus/std", + "reth-chainspec/std", + "reth-consensus/std", + "reth-consensus-common/std", + "reth-primitives/std", + "reth-optimism-forks/std", + "reth-optimism-chainspec/std", + "reth-optimism-primitives/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-consensus/std", + "alloy-trie/std", + "op-alloy-consensus/std", + "reth-primitives-traits/std" ] optimism = ["reth-primitives/optimism", "reth-optimism-primitives/optimism"] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index cedc8c462929f..5f1423211b50f 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -59,7 +59,7 @@ impl FullConsensus for OpBeaconConsensus { block: &BlockWithSenders, input: PostExecutionInput<'_, OpReceipt>, ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec, input.receipts) + validate_block_post_execution(&block.header, 
&self.chain_spec, input.receipts) } } diff --git a/crates/optimism/consensus/src/proof.rs b/crates/optimism/consensus/src/proof.rs index e83990bdaba68..439edf0d39850 100644 --- a/crates/optimism/consensus/src/proof.rs +++ b/crates/optimism/consensus/src/proof.rs @@ -7,12 +7,12 @@ use alloy_primitives::B256; use alloy_trie::root::ordered_trie_root_with_encoder; use reth_chainspec::ChainSpec; use reth_optimism_forks::{OpHardfork, OpHardforks}; -use reth_optimism_primitives::OpReceipt; +use reth_optimism_primitives::{DepositReceipt, OpReceipt}; use reth_primitives::ReceiptWithBloom; /// Calculates the receipt root for a header. -pub(crate) fn calculate_receipt_root_optimism( - receipts: &[ReceiptWithBloom], +pub(crate) fn calculate_receipt_root_optimism( + receipts: &[ReceiptWithBloom], chain_spec: &ChainSpec, timestamp: u64, ) -> B256 { @@ -28,7 +28,7 @@ pub(crate) fn calculate_receipt_root_optimism( .iter() .cloned() .map(|mut r| { - if let OpReceipt::Deposit(receipt) = &mut r.receipt { + if let Some(receipt) = r.receipt.as_deposit_receipt_mut() { receipt.deposit_nonce = None; } r diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs index 8aef0086375b8..51d0745dd4d01 100644 --- a/crates/optimism/consensus/src/validation.rs +++ b/crates/optimism/consensus/src/validation.rs @@ -1,32 +1,32 @@ use crate::proof::calculate_receipt_root_optimism; use alloc::vec::Vec; -use alloy_consensus::TxReceipt; +use alloy_consensus::{BlockHeader, TxReceipt}; use alloy_primitives::{Bloom, B256}; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_optimism_primitives::{OpBlock, OpReceipt}; -use reth_primitives::{gas_spent_by_transactions, BlockWithSenders, GotExpected}; +use reth_optimism_primitives::DepositReceipt; +use reth_primitives::{gas_spent_by_transactions, GotExpected}; /// Validate a block with regard to execution results: /// /// - Compares the receipts root in the block header to the block body /// - Compares the gas used in the block header to the actual gas usage after execution -pub fn validate_block_post_execution( - block: &BlockWithSenders, +pub fn validate_block_post_execution( + header: impl BlockHeader, chain_spec: &ChainSpec, - receipts: &[OpReceipt], + receipts: &[R], ) -> Result<(), ConsensusError> { // Before Byzantium, receipts contained state root that would mean that expensive // operation as hashing that is required for state root got calculated in every // transaction This was replaced with is_success flag. // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if chain_spec.is_byzantium_active_at_block(block.header.number) { + if chain_spec.is_byzantium_active_at_block(header.number()) { if let Err(error) = verify_receipts( - block.header.receipts_root, - block.header.logs_bloom, + header.receipts_root(), + header.logs_bloom(), receipts, chain_spec, - block.header.timestamp, + header.timestamp(), ) { tracing::debug!(%error, ?receipts, "receipts verification failed"); return Err(error) @@ -36,9 +36,9 @@ pub fn validate_block_post_execution( // Check if gas used matches the value set in header. 
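// --- Illustrative sketch (editor's addition, not part of the patch): the
// deposit-nonce handling that `calculate_receipt_root_optimism` above now
// performs through the generic `DepositReceipt` accessor instead of matching
// on `OpReceipt::Deposit`. The helper name is invented; the mutation mirrors
// the patched code.
fn strip_deposit_nonce<R: reth_optimism_primitives::DepositReceipt>(receipt: &mut R) {
    if let Some(deposit) = receipt.as_deposit_receipt_mut() {
        // Cleared before encoding so the computed receipt root matches forks
        // where the nonce is excluded from receipt hashing.
        deposit.deposit_nonce = None;
    }
}
// --- end sketch ---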
let cumulative_gas_used = receipts.last().map(|receipt| receipt.cumulative_gas_used()).unwrap_or(0); - if block.header.gas_used != cumulative_gas_used { + if header.gas_used() != cumulative_gas_used { return Err(ConsensusError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.header.gas_used }, + gas: GotExpected { got: cumulative_gas_used, expected: header.gas_used() }, gas_spent_by_tx: gas_spent_by_transactions(receipts), }) } @@ -47,10 +47,10 @@ pub fn validate_block_post_execution( } /// Verify the calculated receipts root against the expected receipts root. -fn verify_receipts( +fn verify_receipts( expected_receipts_root: B256, expected_logs_bloom: Bloom, - receipts: &[OpReceipt], + receipts: &[R], chain_spec: &ChainSpec, timestamp: u64, ) -> Result<(), ConsensusError> { diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 97aa1592dc8bc..59fd6e0ecac12 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,11 +1,14 @@ //! Optimism block execution strategy. -use crate::{l1::ensure_create2_deployer, OpBlockExecutionError, OpEvmConfig}; +use crate::{ + l1::ensure_create2_deployer, BasicOpReceiptBuilder, OpBlockExecutionError, OpEvmConfig, + OpReceiptBuilder, ReceiptBuilderCtx, +}; use alloc::{boxed::Box, sync::Arc, vec::Vec}; use alloy_consensus::{Eip658Value, Receipt, Transaction as _}; use alloy_eips::eip7685::Requests; use core::fmt::Display; -use op_alloy_consensus::{OpDepositReceipt, OpTxType}; +use op_alloy_consensus::{DepositTransaction, OpDepositReceipt}; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; use reth_evm::{ @@ -21,48 +24,63 @@ use reth_evm::{ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OpHardfork; -use reth_optimism_primitives::{OpBlock, OpPrimitives, OpReceipt, OpTransactionSigned}; -use reth_primitives::BlockWithSenders; -use reth_primitives_traits::SignedTransaction; +use reth_optimism_primitives::{DepositReceipt, OpPrimitives, OpReceipt}; +use reth_primitives::{BlockWithSenders, NodePrimitives}; +use reth_primitives_traits::{Block, BlockBody, SignedTransaction}; use reth_revm::{Database, State}; use revm_primitives::{db::DatabaseCommit, ResultAndState}; use tracing::trace; /// Factory for [`OpExecutionStrategy`]. #[derive(Debug, Clone)] -pub struct OpExecutionStrategyFactory { +pub struct OpExecutionStrategyFactory { /// The chainspec chain_spec: Arc, /// How to create an EVM. evm_config: EvmConfig, + /// Receipt builder. + receipt_builder: Arc>, } -impl OpExecutionStrategyFactory { +impl OpExecutionStrategyFactory { /// Creates a new default optimism executor strategy factory. pub fn optimism(chain_spec: Arc) -> Self { - Self::new(chain_spec.clone(), OpEvmConfig::new(chain_spec)) + Self::new( + chain_spec.clone(), + OpEvmConfig::new(chain_spec), + BasicOpReceiptBuilder::default(), + ) } } -impl OpExecutionStrategyFactory { +impl OpExecutionStrategyFactory { /// Creates a new executor strategy factory. 
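// --- Illustrative sketch (editor's addition, not part of the patch):
// constructing the factory with the receipt builder introduced here, mirroring
// the `optimism()` convenience constructor above. `OpExecutionStrategyFactory`,
// `OpEvmConfig`, and `BasicOpReceiptBuilder` are the items from this file; the
// function name is invented and the struct's generics are assumed to default
// to `OpPrimitives`/`OpEvmConfig`.
fn op_strategy_factory_sketch(
    chain_spec: std::sync::Arc<reth_optimism_chainspec::OpChainSpec>,
) -> OpExecutionStrategyFactory {
    OpExecutionStrategyFactory::new(
        chain_spec.clone(),
        OpEvmConfig::new(chain_spec),
        BasicOpReceiptBuilder::default(),
    )
}
// --- end sketch ---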
- pub const fn new(chain_spec: Arc, evm_config: EvmConfig) -> Self { - Self { chain_spec, evm_config } + pub fn new( + chain_spec: Arc, + evm_config: EvmConfig, + receipt_builder: impl OpReceiptBuilder, + ) -> Self { + Self { chain_spec, evm_config, receipt_builder: Arc::new(receipt_builder) } } } -impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory +impl BlockExecutionStrategyFactory for OpExecutionStrategyFactory where + N: NodePrimitives< + BlockHeader = alloy_consensus::Header, + Receipt = OpReceipt, + SignedTx: DepositTransaction, + >, EvmConfig: Clone + Unpin + Sync + Send + 'static - + ConfigureEvm
, + + ConfigureEvm
, { - type Primitives = OpPrimitives; + type Primitives = N; type Strategy + Display>> = - OpExecutionStrategy; + OpExecutionStrategy; fn create_strategy(&self, db: DB) -> Self::Strategy where @@ -70,13 +88,18 @@ where { let state = State::builder().with_database(db).with_bundle_update().without_state_clear().build(); - OpExecutionStrategy::new(state, self.chain_spec.clone(), self.evm_config.clone()) + OpExecutionStrategy::new( + state, + self.chain_spec.clone(), + self.evm_config.clone(), + self.receipt_builder.clone(), + ) } } /// Block execution strategy for Optimism. #[allow(missing_debug_implementations)] -pub struct OpExecutionStrategy +pub struct OpExecutionStrategy where EvmConfig: Clone, { @@ -90,26 +113,46 @@ where state: State, /// Utility to call system smart contracts. system_caller: SystemCaller, + /// Receipt builder. + receipt_builder: Arc>, } -impl OpExecutionStrategy +impl OpExecutionStrategy where + N: NodePrimitives, EvmConfig: Clone, { /// Creates a new [`OpExecutionStrategy`] - pub fn new(state: State, chain_spec: Arc, evm_config: EvmConfig) -> Self { + pub fn new( + state: State, + chain_spec: Arc, + evm_config: EvmConfig, + receipt_builder: Arc>, + ) -> Self { let system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); - Self { state, chain_spec, evm_config, system_caller, tx_env_overrides: None } + Self { + state, + chain_spec, + evm_config, + system_caller, + tx_env_overrides: None, + receipt_builder, + } } } -impl BlockExecutionStrategy for OpExecutionStrategy +impl BlockExecutionStrategy for OpExecutionStrategy where DB: Database + Display>, - EvmConfig: ConfigureEvm
, + N: NodePrimitives< + BlockHeader = alloy_consensus::Header, + SignedTx: DepositTransaction, + Receipt: DepositReceipt, + >, + EvmConfig: ConfigureEvm
, { type DB = DB; - type Primitives = OpPrimitives; + type Primitives = N; type Error = BlockExecutionError; fn init(&mut self, tx_env_overrides: Box) { @@ -118,19 +161,19 @@ where fn apply_pre_execution_changes( &mut self, - block: &BlockWithSenders, + block: &BlockWithSenders, ) -> Result<(), Self::Error> { // Set state clear flag if the block is after the Spurious Dragon hardfork. let state_clear_flag = - (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + (*self.chain_spec).is_spurious_dragon_active_at_block(block.header().number); self.state.set_state_clear_flag(state_clear_flag); - let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); + let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); self.system_caller.apply_beacon_root_contract_call( - block.timestamp, - block.number, - block.parent_beacon_block_root, + block.header().timestamp, + block.header().number, + block.header().parent_beacon_block_root, &mut evm, )?; @@ -138,7 +181,7 @@ where // blocks will always have at least a single transaction in them (the L1 info transaction), // so we can safely assume that this will always be triggered upon the transition and that // the above check for empty blocks will never be hit on OP chains. - ensure_create2_deployer(self.chain_spec.clone(), block.timestamp, evm.db_mut()) + ensure_create2_deployer(self.chain_spec.clone(), block.header().timestamp, evm.db_mut()) .map_err(|_| OpBlockExecutionError::ForceCreate2DeployerFail)?; Ok(()) @@ -146,19 +189,21 @@ where fn execute_transactions( &mut self, - block: &BlockWithSenders, - ) -> Result, Self::Error> { - let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); + block: &BlockWithSenders, + ) -> Result, Self::Error> { + let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); - let is_regolith = - self.chain_spec.fork(OpHardfork::Regolith).active_at_timestamp(block.timestamp); + let is_regolith = self + .chain_spec + .fork(OpHardfork::Regolith) + .active_at_timestamp(block.header().timestamp); let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.transactions.len()); + let mut receipts = Vec::with_capacity(block.body().transactions().len()); for (sender, transaction) in block.transactions_with_sender() { // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, // must be no greater than the block’s gasLimit. - let block_available_gas = block.header.gas_limit - cumulative_gas_used; + let block_available_gas = block.header().gas_limit - cumulative_gas_used; if transaction.gas_limit() > block_available_gas && (is_regolith || !transaction.is_deposit()) { @@ -211,33 +256,41 @@ where // append gas used cumulative_gas_used += result.gas_used(); - let receipt = Receipt { - // Success flag was added in `EIP-658: Embedding transaction status code in - // receipts`. - status: Eip658Value::Eip658(result.is_success()), - cumulative_gas_used, - logs: result.into_logs(), - }; - - // Push transaction changeset and calculate header bloom filter for receipt. 
- receipts.push(match transaction.tx_type() { - OpTxType::Legacy => OpReceipt::Legacy(receipt), - OpTxType::Eip2930 => OpReceipt::Eip2930(receipt), - OpTxType::Eip1559 => OpReceipt::Eip1559(receipt), - OpTxType::Eip7702 => OpReceipt::Eip7702(receipt), - OpTxType::Deposit => OpReceipt::Deposit(OpDepositReceipt { - inner: receipt, - deposit_nonce: depositor.map(|account| account.nonce), - // The deposit receipt version was introduced in Canyon to indicate an update to - // how receipt hashes should be computed when set. The state - // transition process ensures this is only set for - // post-Canyon deposit transactions. - deposit_receipt_version: (transaction.is_deposit() && - self.chain_spec - .is_fork_active_at_timestamp(OpHardfork::Canyon, block.timestamp)) - .then_some(1), - }), - }); + receipts.push( + match self.receipt_builder.build_receipt(ReceiptBuilderCtx { + header: block.header(), + tx: transaction, + result, + cumulative_gas_used, + }) { + Ok(receipt) => receipt, + Err(ctx) => { + let receipt = Receipt { + // Success flag was added in `EIP-658: Embedding transaction status code + // in receipts`. + status: Eip658Value::Eip658(ctx.result.is_success()), + cumulative_gas_used, + logs: ctx.result.into_logs(), + }; + + self.receipt_builder.build_deposit_receipt(OpDepositReceipt { + inner: receipt, + deposit_nonce: depositor.map(|account| account.nonce), + // The deposit receipt version was introduced in Canyon to indicate an + // update to how receipt hashes should be computed + // when set. The state transition process ensures + // this is only set for post-Canyon deposit + // transactions. + deposit_receipt_version: (transaction.is_deposit() && + self.chain_spec.is_fork_active_at_timestamp( + OpHardfork::Canyon, + block.header().timestamp, + )) + .then_some(1), + }) + } + }, + ); } Ok(ExecuteOutput { receipts, gas_used: cumulative_gas_used }) @@ -245,8 +298,8 @@ where fn apply_post_execution_changes( &mut self, - block: &BlockWithSenders, - _receipts: &[OpReceipt], + block: &BlockWithSenders, + _receipts: &[N::Receipt], ) -> Result { let balance_increments = post_block_balance_increments(&self.chain_spec.clone(), &block.block); @@ -275,11 +328,11 @@ where fn validate_block_post_execution( &self, - block: &BlockWithSenders, - receipts: &[OpReceipt], + block: &BlockWithSenders, + receipts: &[N::Receipt], _requests: &Requests, ) -> Result<(), ConsensusError> { - validate_block_post_execution(block, &self.chain_spec.clone(), receipts) + validate_block_post_execution(block.header(), &self.chain_spec.clone(), receipts) } } @@ -291,7 +344,7 @@ impl OpExecutorProvider { /// Creates a new default optimism executor strategy factory. 
pub fn optimism( chain_spec: Arc, - ) -> BasicBlockExecutorProvider { + ) -> BasicBlockExecutorProvider> { BasicBlockExecutorProvider::new(OpExecutionStrategyFactory::optimism(chain_spec)) } } @@ -308,6 +361,7 @@ mod tests { use reth_chainspec::MIN_TRANSACTION_GAS; use reth_evm::execute::{BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider}; use reth_optimism_chainspec::OpChainSpecBuilder; + use reth_optimism_primitives::OpTransactionSigned; use reth_primitives::{Account, Block, BlockBody}; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, L1_BLOCK_CONTRACT, @@ -344,8 +398,7 @@ mod tests { fn executor_provider( chain_spec: Arc, ) -> BasicBlockExecutorProvider { - let strategy_factory = - OpExecutionStrategyFactory::new(chain_spec.clone(), OpEvmConfig::new(chain_spec)); + let strategy_factory = OpExecutionStrategyFactory::optimism(chain_spec); BasicBlockExecutorProvider::new(strategy_factory) } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 37c3fd548be63..f457c69b29096 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -33,6 +33,8 @@ mod execute; pub use execute::*; pub mod l1; pub use l1::*; +mod receipts; +pub use receipts::*; mod error; pub use error::OpBlockExecutionError; diff --git a/crates/optimism/evm/src/receipts.rs b/crates/optimism/evm/src/receipts.rs new file mode 100644 index 0000000000000..a2f6228d29f26 --- /dev/null +++ b/crates/optimism/evm/src/receipts.rs @@ -0,0 +1,75 @@ +use alloy_consensus::{Eip658Value, Header, Receipt}; +use core::fmt; +use op_alloy_consensus::{OpDepositReceipt, OpTxType}; +use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; +use revm_primitives::ExecutionResult; + +/// Context for building a receipt. +#[derive(Debug)] +pub struct ReceiptBuilderCtx<'a, T> { + /// Block header. + pub header: &'a Header, + /// Transaction + pub tx: &'a T, + /// Result of transaction execution. + pub result: ExecutionResult, + /// Cumulative gas used. + pub cumulative_gas_used: u64, +} + +/// Type that knows how to build a receipt based on execution result. +pub trait OpReceiptBuilder: fmt::Debug + Send + Sync + Unpin + 'static { + /// Receipt type. + type Receipt: Send + Sync + Clone + Unpin + 'static; + + /// Builds a receipt given a transaction and the result of the execution. + /// + /// Note: this method should return `Err` if the transaction is a deposit transaction. In that + /// case, the `build_deposit_receipt` method will be called. + fn build_receipt<'a>( + &self, + ctx: ReceiptBuilderCtx<'a, T>, + ) -> Result>; + + /// Builds receipt for a deposit transaction. + fn build_deposit_receipt(&self, inner: OpDepositReceipt) -> Self::Receipt; +} + +/// Basic builder for receipts of [`OpTransactionSigned`]. +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct BasicOpReceiptBuilder; + +impl OpReceiptBuilder for BasicOpReceiptBuilder { + type Receipt = OpReceipt; + + fn build_receipt<'a>( + &self, + ctx: ReceiptBuilderCtx<'a, OpTransactionSigned>, + ) -> Result> { + match ctx.tx.tx_type() { + OpTxType::Deposit => Err(ctx), + ty => { + let receipt = Receipt { + // Success flag was added in `EIP-658: Embedding transaction status code in + // receipts`. 
+ status: Eip658Value::Eip658(ctx.result.is_success()), + cumulative_gas_used: ctx.cumulative_gas_used, + logs: ctx.result.into_logs(), + }; + + Ok(match ty { + OpTxType::Legacy => OpReceipt::Legacy(receipt), + OpTxType::Eip1559 => OpReceipt::Eip1559(receipt), + OpTxType::Eip2930 => OpReceipt::Eip2930(receipt), + OpTxType::Eip7702 => OpReceipt::Eip7702(receipt), + OpTxType::Deposit => unreachable!(), + }) + } + } + } + + fn build_deposit_receipt(&self, inner: OpDepositReceipt) -> Self::Receipt { + OpReceipt::Deposit(inner) + } +} diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 568d1bfdb9fc3..8e61f0a4b0bae 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -318,15 +318,14 @@ where Node: FullNodeTypes>, { type EVM = OpEvmConfig; - type Executor = BasicBlockExecutorProvider; + type Executor = BasicBlockExecutorProvider>; async fn build_evm( self, ctx: &BuilderContext, ) -> eyre::Result<(Self::EVM, Self::Executor)> { let evm_config = OpEvmConfig::new(ctx.chain_spec()); - let strategy_factory = - OpExecutionStrategyFactory::new(ctx.chain_spec(), evm_config.clone()); + let strategy_factory = OpExecutionStrategyFactory::optimism(ctx.chain_spec()); let executor = BasicBlockExecutorProvider::new(strategy_factory); Ok((evm_config, executor)) diff --git a/crates/optimism/primitives/src/lib.rs b/crates/optimism/primitives/src/lib.rs index a0c082e8b76dc..7b62586f4d1cc 100644 --- a/crates/optimism/primitives/src/lib.rs +++ b/crates/optimism/primitives/src/lib.rs @@ -18,7 +18,7 @@ use reth_primitives_traits::Block; pub use transaction::{signed::OpTransactionSigned, tx_type::OpTxType}; mod receipt; -pub use receipt::OpReceipt; +pub use receipt::{DepositReceipt, OpReceipt}; /// Optimism-specific block type. pub type OpBlock = alloy_consensus::Block; diff --git a/crates/optimism/primitives/src/receipt.rs b/crates/optimism/primitives/src/receipt.rs index 2aee2aed2334b..b235dfe5fc356 100644 --- a/crates/optimism/primitives/src/receipt.rs +++ b/crates/optimism/primitives/src/receipt.rs @@ -211,6 +211,21 @@ impl InMemorySize for OpReceipt { impl reth_primitives_traits::Receipt for OpReceipt {} +/// Trait for deposit receipt. +pub trait DepositReceipt: reth_primitives_traits::Receipt { + /// Returns deposit receipt if it is a deposit transaction. 
+ fn as_deposit_receipt_mut(&mut self) -> Option<&mut OpDepositReceipt>; +} + +impl DepositReceipt for OpReceipt { + fn as_deposit_receipt_mut(&mut self) -> Option<&mut OpDepositReceipt> { + match self { + Self::Deposit(receipt) => Some(receipt), + _ => None, + } + } +} + #[cfg(feature = "reth-codec")] mod compact { use super::*; diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs index 6600922617f58..c0d07105d2e0a 100644 --- a/crates/optimism/primitives/src/transaction/signed.rs +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -22,7 +22,7 @@ use core::{ use derive_more::{AsRef, Deref}; #[cfg(not(feature = "std"))] use once_cell::sync::OnceCell as OnceLock; -use op_alloy_consensus::{OpPooledTransaction, OpTypedTransaction, TxDeposit}; +use op_alloy_consensus::{DepositTransaction, OpPooledTransaction, OpTypedTransaction, TxDeposit}; #[cfg(any(test, feature = "reth-codec"))] use proptest as _; use reth_primitives_traits::{ @@ -601,6 +601,30 @@ impl TryFrom for OpPooledTransaction { } } +impl DepositTransaction for OpTransactionSigned { + fn source_hash(&self) -> Option { + match &self.transaction { + OpTypedTransaction::Deposit(tx) => Some(tx.source_hash), + _ => None, + } + } + + fn mint(&self) -> Option { + match &self.transaction { + OpTypedTransaction::Deposit(tx) => tx.mint, + _ => None, + } + } + + fn is_system_transaction(&self) -> bool { + self.is_deposit() + } + + fn is_deposit(&self) -> bool { + self.is_deposit() + } +} + /// Bincode-compatible transaction type serde implementations. #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { From c4a591d16c8edd3a578a8774c5d3edb4599fe6d4 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Tue, 14 Jan 2025 20:22:46 +0000 Subject: [PATCH 068/113] fix(tree): use in memory trie cursor for trie updates comparison in tree (#13789) --- crates/engine/tree/src/tree/mod.rs | 89 +++++++++++---------- crates/engine/tree/src/tree/trie_updates.rs | 73 ++++++++++------- 2 files changed, 93 insertions(+), 69 deletions(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index ea92c0fb38980..3fff0562a80bd 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -2276,52 +2276,60 @@ where let persistence_not_in_progress = !self.persistence_state.in_progress(); let state_root_result = std::thread::scope(|scope| { - let (state_root_handle, state_hook) = if persistence_not_in_progress && - self.config.use_state_root_task() - { - let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; - - let state_root_config = StateRootConfig::new_from_input( - consistent_view.clone(), - self.compute_trie_input(consistent_view.clone(), block.header().parent_hash()) + let (state_root_handle, in_memory_trie_cursor, state_hook) = + if persistence_not_in_progress && self.config.use_state_root_task() { + let consistent_view = + ConsistentDbView::new_with_latest_tip(self.provider.clone())?; + + let state_root_config = StateRootConfig::new_from_input( + consistent_view.clone(), + self.compute_trie_input( + consistent_view.clone(), + block.header().parent_hash(), + ) .map_err(|e| InsertBlockErrorKind::Other(Box::new(e)))?, - ); + ); - let provider_ro = consistent_view.provider_ro()?; - let nodes_sorted = state_root_config.nodes_sorted.clone(); - let state_sorted = state_root_config.state_sorted.clone(); - let prefix_sets = 
state_root_config.prefix_sets.clone(); + let provider_ro = consistent_view.provider_ro()?; + let nodes_sorted = state_root_config.nodes_sorted.clone(); + let state_sorted = state_root_config.state_sorted.clone(); + let prefix_sets = state_root_config.prefix_sets.clone(); - // context will hold the values that need to be kept alive - let context = - StateHookContext { provider_ro, nodes_sorted, state_sorted, prefix_sets }; + // context will hold the values that need to be kept alive + let context = + StateHookContext { provider_ro, nodes_sorted, state_sorted, prefix_sets }; - // it is ok to leak here because we are in a scoped thread, the - // memory will be freed when the thread completes - let context = Box::leak(Box::new(context)); + // it is ok to leak here because we are in a scoped thread, the + // memory will be freed when the thread completes + let context = Box::leak(Box::new(context)); - let blinded_provider_factory = ProofBlindedProviderFactory::new( - InMemoryTrieCursorFactory::new( + let in_memory_trie_cursor = InMemoryTrieCursorFactory::new( DatabaseTrieCursorFactory::new(context.provider_ro.tx_ref()), &context.nodes_sorted, - ), - HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(context.provider_ro.tx_ref()), - &context.state_sorted, - ), - context.prefix_sets.clone(), - ); - - let state_root_task = StateRootTask::new( - state_root_config, - blinded_provider_factory, - self.state_root_task_pool.clone(), - ); - let state_hook = state_root_task.state_hook(); - (Some(state_root_task.spawn(scope)), Box::new(state_hook) as Box) - } else { - (None, Box::new(|_state: &EvmState| {}) as Box) - }; + ); + let blinded_provider_factory = ProofBlindedProviderFactory::new( + in_memory_trie_cursor.clone(), + HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(context.provider_ro.tx_ref()), + &context.state_sorted, + ), + context.prefix_sets.clone(), + ); + + let state_root_task = StateRootTask::new( + state_root_config, + blinded_provider_factory, + self.state_root_task_pool.clone(), + ); + let state_hook = state_root_task.state_hook(); + ( + Some(state_root_task.spawn(scope)), + Some(in_memory_trie_cursor), + Box::new(state_hook) as Box, + ) + } else { + (None, None, Box::new(|_state: &EvmState| {}) as Box) + }; let execution_start = Instant::now(); let output = self.metrics.executor.execute_metered(executor, &block, state_hook)?; @@ -2383,9 +2391,8 @@ where state_provider.state_root_with_updates(hashed_state.clone())?; if regular_root == block.header().state_root() { - let provider = self.provider.database_provider_ro()?; compare_trie_updates( - provider.tx_ref(), + in_memory_trie_cursor.expect("in memory trie cursor must exist if use_state_root_task is true"), task_trie_updates.clone(), regular_updates, ) diff --git a/crates/engine/tree/src/tree/trie_updates.rs b/crates/engine/tree/src/tree/trie_updates.rs index 9ec29f9c8b5ed..a2e7dee3a7d43 100644 --- a/crates/engine/tree/src/tree/trie_updates.rs +++ b/crates/engine/tree/src/tree/trie_updates.rs @@ -1,14 +1,12 @@ -use alloy_primitives::{ - map::{HashMap, HashSet}, - B256, -}; -use reth_db::{transaction::DbTx, DatabaseError}; +use std::collections::BTreeSet; + +use alloy_primitives::{map::HashMap, B256}; +use reth_db::DatabaseError; use reth_trie::{ trie_cursor::{TrieCursor, TrieCursorFactory}, updates::{StorageTrieUpdates, TrieUpdates}, BranchNodeCompact, Nibbles, }; -use reth_trie_db::DatabaseTrieCursorFactory; use tracing::debug; #[derive(Debug)] @@ -38,8 +36,16 @@ impl TrieUpdatesDiff { 
debug!(target: "engine::tree", ?path, ?task, ?regular, ?database, "Difference in account trie updates"); } - for (path, EntryDiff { task, regular, database }) in &self.removed_nodes { - debug!(target: "engine::tree", ?path, ?task, ?regular, ?database, "Difference in removed account trie nodes"); + for ( + path, + EntryDiff { + task: task_removed, + regular: regular_removed, + database: database_not_exists, + }, + ) in &self.removed_nodes + { + debug!(target: "engine::tree", ?path, ?task_removed, ?regular_removed, ?database_not_exists, "Difference in removed account trie nodes"); } for (address, storage_diff) in self.storage_tries { @@ -74,8 +80,16 @@ impl StorageTrieDiffEntry { debug!(target: "engine::tree", ?address, ?path, ?task, ?regular, ?database, "Difference in storage trie updates"); } - for (path, EntryDiff { task, regular, database }) in &storage_diff.removed_nodes { - debug!(target: "engine::tree", ?address, ?path, ?task, ?regular, ?database, "Difference in removed account trie nodes"); + for ( + path, + EntryDiff { + task: task_removed, + regular: regular_removed, + database: database_not_exists, + }, + ) in &storage_diff.removed_nodes + { + debug!(target: "engine::tree", ?address, ?path, ?task_removed, ?regular_removed, ?database_not_exists, "Difference in removed storage trie nodes"); } } } @@ -97,15 +111,13 @@ impl StorageTrieUpdatesDiff { } } -/// Compares the trie updates from state root task and regular state root calculation, and logs -/// the differences if there's any. +/// Compares the trie updates from state root task, regular state root calculation and database, +/// and logs the differences if there's any. pub(super) fn compare_trie_updates( - tx: &impl DbTx, + trie_cursor_factory: impl TrieCursorFactory, task: TrieUpdates, regular: TrieUpdates, ) -> Result<(), DatabaseError> { - let trie_cursor_factory = DatabaseTrieCursorFactory::new(tx); - let mut task = adjust_trie_updates(task); let mut regular = adjust_trie_updates(regular); @@ -118,7 +130,7 @@ pub(super) fn compare_trie_updates( .keys() .chain(regular.account_nodes.keys()) .cloned() - .collect::>() + .collect::>() { let (task, regular) = (task.account_nodes.remove(&key), regular.account_nodes.remove(&key)); let database = account_trie_cursor.seek_exact(key.clone())?.map(|x| x.1); @@ -129,12 +141,13 @@ pub(super) fn compare_trie_updates( } // compare removed nodes + let mut account_trie_cursor = trie_cursor_factory.account_trie_cursor()?; for key in task .removed_nodes .iter() .chain(regular.removed_nodes.iter()) .cloned() - .collect::>() + .collect::>() { let (task, regular) = (task.removed_nodes.contains(&key), regular.removed_nodes.contains(&key)); @@ -150,15 +163,17 @@ pub(super) fn compare_trie_updates( .keys() .chain(regular.storage_tries.keys()) .copied() - .collect::>() + .collect::>() { let (mut task, mut regular) = (task.storage_tries.remove(&key), regular.storage_tries.remove(&key)); if task != regular { - let mut storage_trie_cursor = trie_cursor_factory.storage_trie_cursor(key)?; if let Some((task, regular)) = task.as_mut().zip(regular.as_mut()) { - let storage_diff = - compare_storage_trie_updates(&mut storage_trie_cursor, task, regular)?; + let storage_diff = compare_storage_trie_updates( + || trie_cursor_factory.storage_trie_cursor(key), + task, + regular, + )?; if storage_diff.has_differences() { diff.storage_tries.insert(key, StorageTrieDiffEntry::Value(storage_diff)); } @@ -177,12 +192,12 @@ pub(super) fn compare_trie_updates( Ok(()) } -fn compare_storage_trie_updates( - trie_cursor: &mut 
impl TrieCursor, +fn compare_storage_trie_updates( + trie_cursor: impl Fn() -> Result, task: &mut StorageTrieUpdates, regular: &mut StorageTrieUpdates, ) -> Result { - let database_deleted = trie_cursor.next()?.is_none(); + let database_deleted = trie_cursor()?.next()?.is_none(); let mut diff = StorageTrieUpdatesDiff { is_deleted: (task.is_deleted != regular.is_deleted).then_some(EntryDiff { task: task.is_deleted, @@ -193,31 +208,33 @@ fn compare_storage_trie_updates( }; // compare storage nodes + let mut storage_trie_cursor = trie_cursor()?; for key in task .storage_nodes .keys() .chain(regular.storage_nodes.keys()) .cloned() - .collect::>() + .collect::>() { let (task, regular) = (task.storage_nodes.remove(&key), regular.storage_nodes.remove(&key)); - let database = trie_cursor.seek_exact(key.clone())?.map(|x| x.1); + let database = storage_trie_cursor.seek_exact(key.clone())?.map(|x| x.1); if !branch_nodes_equal(task.as_ref(), regular.as_ref(), database.as_ref())? { diff.storage_nodes.insert(key, EntryDiff { task, regular, database }); } } // compare removed nodes + let mut storage_trie_cursor = trie_cursor()?; for key in task .removed_nodes .iter() .chain(regular.removed_nodes.iter()) .cloned() - .collect::>() + .collect::>() { let (task, regular) = (task.removed_nodes.contains(&key), regular.removed_nodes.contains(&key)); - let database = trie_cursor.seek_exact(key.clone())?.map(|x| x.1).is_none(); + let database = storage_trie_cursor.seek_exact(key.clone())?.map(|x| x.1).is_none(); if task != regular { diff.removed_nodes.insert(key, EntryDiff { task, regular, database }); } From d318aa3c85069f09b39ae1f15497d09609f0c0b5 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 14 Jan 2025 17:04:44 -0500 Subject: [PATCH 069/113] fix: track local senders better during truncation (#13768) Co-authored-by: DaniPopes <57450786+DaniPopes@users.noreply.github.com> --- crates/transaction-pool/src/pool/pending.rs | 53 +++++++++++++++++++-- 1 file changed, 50 insertions(+), 3 deletions(-) diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 27706bd175430..d169bfa37af07 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -6,7 +6,7 @@ use crate::{ }, Priority, SubPoolLimit, TransactionOrdering, ValidPoolTransaction, }; -use rustc_hash::FxHashMap; +use rustc_hash::{FxHashMap, FxHashSet}; use std::{ cmp::Ordering, collections::{hash_map::Entry, BTreeMap}, @@ -379,10 +379,13 @@ impl PendingPool { // can be removed. 
let mut non_local_senders = self.highest_nonces.len(); - // keep track of unique senders from previous iterations, to understand how many unique + // keeps track of unique senders from previous iterations, to understand how many unique // senders were removed in the last iteration let mut unique_senders = self.highest_nonces.len(); + // keeps track of which senders we've marked as local + let mut local_senders = FxHashSet::default(); + // keep track of transactions to remove and how many have been removed so far let original_length = self.len(); let mut removed = Vec::new(); @@ -424,7 +427,10 @@ impl PendingPool { } if !remove_locals && tx.transaction.is_local() { - non_local_senders -= 1; + let sender_id = tx.transaction.sender_id(); + if local_senders.insert(sender_id) { + non_local_senders -= 1; + } continue } @@ -972,4 +978,45 @@ mod tests { assert!(pool.contains(tx2.id())); assert!(!pool.contains(tx1.id())); } + + #[test] + fn local_senders_tracking() { + let mut f = MockTransactionFactory::default(); + let mut pool = PendingPool::new(MockOrdering::default()); + + // Addresses for simulated senders A, B, C + let a = address!("000000000000000000000000000000000000000a"); + let b = address!("000000000000000000000000000000000000000b"); + let c = address!("000000000000000000000000000000000000000c"); + + // sender A (local) - 11+ transactions (large enough to keep limit exceeded) + // sender B (external) - 2 transactions + // sender C (external) - 2 transactions + + // Create transaction chains for senders A, B, C + let a_txs = MockTransactionSet::sequential_transactions_by_sender(a, 11, TxType::Eip1559); + let b_txs = MockTransactionSet::sequential_transactions_by_sender(b, 2, TxType::Eip1559); + let c_txs = MockTransactionSet::sequential_transactions_by_sender(c, 2, TxType::Eip1559); + + // create local txs for sender A + for tx in a_txs.into_vec() { + let final_tx = Arc::new(f.validated_with_origin(crate::TransactionOrigin::Local, tx)); + + pool.add_transaction(final_tx, 0); + } + + // create external txs for senders B and C + let remaining_txs = [b_txs.into_vec(), c_txs.into_vec()].concat(); + for tx in remaining_txs { + let final_tx = f.validated_arc(tx); + + pool.add_transaction(final_tx, 0); + } + + // Sanity check, ensuring everything is consistent. 
+ pool.assert_invariants(); + + let pool_limit = SubPoolLimit { max_txs: 10, max_size: usize::MAX }; + pool.truncate_pool(pool_limit); + } } From 46f4d73c4d862d15cb57bce9074bdd308b6105b9 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 15 Jan 2025 02:40:22 +0400 Subject: [PATCH 070/113] chore: remove DefaultExternalContext AT (#13797) --- crates/ethereum/evm/src/lib.rs | 6 +--- crates/evm/src/builder.rs | 42 ++++++++-------------- crates/evm/src/lib.rs | 44 +++++------------------- crates/optimism/evm/src/lib.rs | 6 +--- examples/custom-evm/src/main.rs | 6 +--- examples/stateful-precompile/src/main.rs | 6 +--- 6 files changed, 27 insertions(+), 83 deletions(-) diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 86017035dd862..9cec2f9a92b71 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -182,11 +182,7 @@ impl ConfigureEvmEnv for EthEvmConfig { } } -impl ConfigureEvm for EthEvmConfig { - type DefaultExternalContext<'a> = (); - - fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a> {} -} +impl ConfigureEvm for EthEvmConfig {} #[cfg(test)] mod tests { diff --git a/crates/evm/src/builder.rs b/crates/evm/src/builder.rs index b955e41a255a5..94531dd0ff011 100644 --- a/crates/evm/src/builder.rs +++ b/crates/evm/src/builder.rs @@ -11,7 +11,7 @@ use revm_primitives::EnvWithHandlerCfg; /// This is useful for creating an EVM with a custom database and environment without having to /// necessarily rely on Revm inspector. #[derive(Debug)] -pub struct RethEvmBuilder { +pub struct RethEvmBuilder { /// The database to use for the EVM. db: DB, /// The environment to use for the EVM. @@ -20,15 +20,17 @@ pub struct RethEvmBuilder { external_context: EXT, } +impl RethEvmBuilder { + /// Create a new EVM builder with the given database. + pub const fn new(db: DB) -> Self { + Self { db, env: None, external_context: () } + } +} + impl RethEvmBuilder where DB: Database, { - /// Create a new EVM builder with the given database. - pub const fn new(db: DB, external_context: EXT) -> Self { - Self { db, env: None, external_context } - } - /// Set the environment for the EVM. pub fn with_env(mut self, env: Box) -> Self { self.env = Some(env); @@ -79,22 +81,16 @@ pub trait ConfigureEvmBuilder { /// Trait for configuring the EVM for executing full blocks. pub trait EvmFactory { - /// Associated type for the default external context that should be configured for the EVM. - type DefaultExternalContext<'a>; - - /// Provides the default external context. - fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a>; - /// Returns new EVM with the given database /// /// This does not automatically configure the EVM with [`crate::ConfigureEvmEnv`] methods. It is /// up to the caller to call an appropriate method to fill the transaction and block /// environment before executing any transactions using the provided EVM. 
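// A hedged sketch of the caller-side flow the doc above describes;
// `evm_config`, `db`, `header` and `tx_env` are illustrative stand-ins,
// not part of this patch:
//
//     let mut evm = evm_config.evm_for_block(db, &header); // cfg + block env set
//     *evm.tx_mut() = tx_env;   // fill the tx env for the next transaction
//     let outcome = evm.transact()?;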
- fn evm(self, db: DB) -> Evm<'static, Self::DefaultExternalContext<'static>, DB> + fn evm(self, db: DB) -> Evm<'static, (), DB> where Self: Sized, { - RethEvmBuilder::new(db, self.default_external_context()).build() + RethEvmBuilder::new(db).build() } /// Returns a new EVM with the given database configured with the given environment settings, @@ -105,8 +101,8 @@ pub trait EvmFactory { &self, db: DB, env: EnvWithHandlerCfg, - ) -> Evm<'a, Self::DefaultExternalContext<'a>, DB> { - RethEvmBuilder::new(db, self.default_external_context()).with_env(env.into()).build() + ) -> Evm<'a, (), DB> { + RethEvmBuilder::new(db).with_env(env.into()).build() } /// Returns a new EVM with the given database configured with the given environment settings, @@ -125,9 +121,7 @@ pub trait EvmFactory { DB: Database, I: GetInspector, { - RethEvmBuilder::new(db, self.default_external_context()) - .with_env(env.into()) - .build_with_inspector(inspector) + RethEvmBuilder::new(db).with_env(env.into()).build_with_inspector(inspector) } /// Returns a new EVM with the given inspector. @@ -140,14 +134,8 @@ pub trait EvmFactory { DB: Database, I: GetInspector, { - RethEvmBuilder::new(db, self.default_external_context()).build_with_inspector(inspector) + RethEvmBuilder::new(db).build_with_inspector(inspector) } } -impl EvmFactory for RethEvmBuilder { - type DefaultExternalContext<'a> = EXT; - - fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a> { - self.external_context.clone() - } -} +impl EvmFactory for RethEvmBuilder {} diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 2eb5fc04c4132..cd49785efd45b 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -43,27 +43,20 @@ pub mod test_utils; /// Trait for configuring the EVM for executing full blocks. pub trait ConfigureEvm: ConfigureEvmEnv { - /// Associated type for the default external context that should be configured for the EVM. - type DefaultExternalContext<'a>; - /// Returns new EVM with the given database /// /// This does not automatically configure the EVM with [`ConfigureEvmEnv`] methods. It is up to /// the caller to call an appropriate method to fill the transaction and block environment /// before executing any transactions using the provided EVM. - fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { - RethEvmBuilder::new(db, self.default_external_context()).build() + fn evm(&self, db: DB) -> Evm<'_, (), DB> { + RethEvmBuilder::new(db).build() } /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id. /// /// This will preserve any handler modifications - fn evm_with_env( - &self, - db: DB, - env: EnvWithHandlerCfg, - ) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + fn evm_with_env(&self, db: DB, env: EnvWithHandlerCfg) -> Evm<'_, (), DB> { let mut evm = self.evm(db); evm.modify_spec_id(env.spec_id()); evm.context.evm.env = env.env; @@ -77,11 +70,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { /// # Caution /// /// This does not initialize the tx environment. 
- fn evm_for_block( - &self, - db: DB, - header: &Self::Header, - ) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + fn evm_for_block(&self, db: DB, header: &Self::Header) -> Evm<'_, (), DB> { let EvmEnv { cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { cfg_env, handler_cfg }, block_env, @@ -127,11 +116,8 @@ pub trait ConfigureEvm: ConfigureEvmEnv { DB: Database, I: GetInspector, { - RethEvmBuilder::new(db, self.default_external_context()).build_with_inspector(inspector) + RethEvmBuilder::new(db).build_with_inspector(inspector) } - - /// Provides the default external context. - fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a>; } impl<'b, T> ConfigureEvm for &'b T @@ -139,29 +125,15 @@ where T: ConfigureEvm, &'b T: ConfigureEvmEnv
, { - type DefaultExternalContext<'a> = T::DefaultExternalContext<'a>; - - fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a> { - (*self).default_external_context() - } - - fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + fn evm(&self, db: DB) -> Evm<'_, (), DB> { (*self).evm(db) } - fn evm_for_block( - &self, - db: DB, - header: &Self::Header, - ) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + fn evm_for_block(&self, db: DB, header: &Self::Header) -> Evm<'_, (), DB> { (*self).evm_for_block(db, header) } - fn evm_with_env( - &self, - db: DB, - env: EnvWithHandlerCfg, - ) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + fn evm_with_env(&self, db: DB, env: EnvWithHandlerCfg) -> Evm<'_, (), DB> { (*self).evm_with_env(db, env) } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index f457c69b29096..9f7ead251562b 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -168,9 +168,7 @@ impl ConfigureEvmEnv for OpEvmConfig { } impl ConfigureEvm for OpEvmConfig { - type DefaultExternalContext<'a> = (); - - fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + fn evm(&self, db: DB) -> Evm<'_, (), DB> { EvmBuilder::default().with_db(db).optimism().build() } @@ -186,8 +184,6 @@ impl ConfigureEvm for OpEvmConfig { .append_handler_register(inspector_handle_register) .build() } - - fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a> {} } #[cfg(test)] diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 4d2fea296e62d..3490914b67bce 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -117,9 +117,7 @@ impl ConfigureEvmEnv for MyEvmConfig { } impl ConfigureEvm for MyEvmConfig { - type DefaultExternalContext<'a> = (); - - fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + fn evm(&self, db: DB) -> Evm<'_, (), DB> { EvmBuilder::default() .with_db(db) // add additional precompiles @@ -140,8 +138,6 @@ impl ConfigureEvm for MyEvmConfig { .append_handler_register(inspector_handle_register) .build() } - - fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a> {} } /// Builds a regular ethereum block executor that uses the custom EVM. diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index e02a28a4a3190..1fb4dbefb3a99 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -179,9 +179,7 @@ impl ConfigureEvmEnv for MyEvmConfig { } impl ConfigureEvm for MyEvmConfig { - type DefaultExternalContext<'a> = (); - - fn evm(&self, db: DB) -> Evm<'_, Self::DefaultExternalContext<'_>, DB> { + fn evm(&self, db: DB) -> Evm<'_, (), DB> { let new_cache = self.precompile_cache.clone(); EvmBuilder::default() .with_db(db) @@ -208,8 +206,6 @@ impl ConfigureEvm for MyEvmConfig { .append_handler_register(inspector_handle_register) .build() } - - fn default_external_context<'a>(&self) -> Self::DefaultExternalContext<'a> {} } /// Builds a regular ethereum block executor that uses the custom EVM. 
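With `DefaultExternalContext` removed, the external context is always `()`, so callers no longer thread one through the builder. A minimal caller-side sketch of the simplified API, assuming some `evm_config: impl ConfigureEvm` and a database `db` (illustrative stand-ins, not part of the patch):

    // Plain EVM over a database; per the trait docs above, the caller still
    // fills the cfg/block/tx environment before executing transactions.
    let mut evm = evm_config.evm(db);

    // Alternatively, start from a preconfigured environment (spec id
    // included); handler modifications are preserved.
    let evm = evm_config.evm_with_env(db, env_with_handler_cfg);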
From 1267718c7e0f304ebbdde151140cffee4ddc11fa Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Tue, 14 Jan 2025 23:47:28 +0000 Subject: [PATCH 071/113] feat: introduce `StaticFileSegment::BlockMeta` (#13226) --- book/cli/reth/db/clear/static-file.md | 1 + book/cli/reth/db/get/static-file.md | 1 + crates/cli/commands/src/db/get.rs | 4 + .../static-file/src/static_file_producer.rs | 38 ++++- crates/static-file/types/src/lib.rs | 50 +++++-- crates/static-file/types/src/segment.rs | 83 ++++++----- crates/storage/db/src/static_file/masks.rs | 18 ++- .../provider/src/providers/static_file/jar.rs | 46 +++++- .../src/providers/static_file/manager.rs | 139 ++++++++++++------ .../src/providers/static_file/writer.rs | 77 +++++++++- 10 files changed, 346 insertions(+), 111 deletions(-) diff --git a/book/cli/reth/db/clear/static-file.md b/book/cli/reth/db/clear/static-file.md index c830af259c951..78f2b9cbceaa6 100644 --- a/book/cli/reth/db/clear/static-file.md +++ b/book/cli/reth/db/clear/static-file.md @@ -14,6 +14,7 @@ Arguments: - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables - transactions: Static File segment responsible for the `Transactions` table - receipts: Static File segment responsible for the `Receipts` table + - block-meta: Static File segment responsible for the `BlockBodyIndices`, `BlockOmmers`, `BlockWithdrawals` tables Options: --instance diff --git a/book/cli/reth/db/get/static-file.md b/book/cli/reth/db/get/static-file.md index a50da0c0e45d5..8c15191384f5d 100644 --- a/book/cli/reth/db/get/static-file.md +++ b/book/cli/reth/db/get/static-file.md @@ -14,6 +14,7 @@ Arguments: - headers: Static File segment responsible for the `CanonicalHeaders`, `Headers`, `HeaderTerminalDifficulties` tables - transactions: Static File segment responsible for the `Transactions` table - receipts: Static File segment responsible for the `Receipts` table + - block-meta: Static File segment responsible for the `BlockBodyIndices`, `BlockOmmers`, `BlockWithdrawals` tables The key to get content for diff --git a/crates/cli/commands/src/db/get.rs b/crates/cli/commands/src/db/get.rs index 13b7b70347e2c..5fb234f0b89c4 100644 --- a/crates/cli/commands/src/db/get.rs +++ b/crates/cli/commands/src/db/get.rs @@ -72,6 +72,7 @@ impl Command { StaticFileSegment::Receipts => { (table_key::(&key)?, >>::MASK) } + StaticFileSegment::BlockMeta => todo!(), }; let content = tool.provider_factory.static_file_provider().find_static_file( @@ -113,6 +114,9 @@ impl Command { )?; println!("{}", serde_json::to_string_pretty(&receipt)?); } + StaticFileSegment::BlockMeta => { + todo!() + } } } } diff --git a/crates/static-file/static-file/src/static_file_producer.rs b/crates/static-file/static-file/src/static_file_producer.rs index fcbbb9e3b0a39..3f9cbd3cbfe39 100644 --- a/crates/static-file/static-file/src/static_file_producer.rs +++ b/crates/static-file/static-file/src/static_file_producer.rs @@ -187,6 +187,7 @@ where headers: stages_checkpoints[0], receipts: stages_checkpoints[1], transactions: stages_checkpoints[2], + block_meta: stages_checkpoints[2], }; let targets = self.get_static_file_targets(highest_static_files)?; self.run(targets)?; @@ -226,6 +227,9 @@ where finalized_block_number, ) }), + block_meta: finalized_block_numbers.block_meta.and_then(|finalized_block_number| { + self.get_static_file_target(highest_static_files.block_meta, finalized_block_number) + }), }; trace!( @@ -322,6 +326,7 @@ mod tests { headers: 
Some(1), receipts: Some(1), transactions: Some(1), + block_meta: None, }) .expect("get static file targets"); assert_eq!( @@ -329,13 +334,19 @@ StaticFileTargets { headers: Some(0..=1), receipts: Some(0..=1), - transactions: Some(0..=1) + transactions: Some(0..=1), + block_meta: None } ); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(1), receipts: Some(1), transactions: Some(1) } + HighestStaticFiles { + headers: Some(1), + receipts: Some(1), + transactions: Some(1), + block_meta: None + } ); let targets = static_file_producer @@ -343,6 +354,7 @@ headers: Some(3), receipts: Some(3), transactions: Some(3), + block_meta: None, }) .expect("get static file targets"); assert_eq!( @@ -350,13 +362,19 @@ StaticFileTargets { headers: Some(2..=3), receipts: Some(2..=3), - transactions: Some(2..=3) + transactions: Some(2..=3), + block_meta: None } ); assert_matches!(static_file_producer.run(targets), Ok(_)); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } + HighestStaticFiles { + headers: Some(3), + receipts: Some(3), + transactions: Some(3), + block_meta: None + } ); let targets = static_file_producer @@ -364,6 +382,7 @@ headers: Some(4), receipts: Some(4), transactions: Some(4), + block_meta: None, }) .expect("get static file targets"); assert_eq!( @@ -371,7 +390,8 @@ StaticFileTargets { headers: Some(4..=4), receipts: Some(4..=4), - transactions: Some(4..=4) + transactions: Some(4..=4), + block_meta: None } ); assert_matches!( @@ -380,7 +400,12 @@ ); assert_eq!( provider_factory.static_file_provider().get_highest_static_files(), - HighestStaticFiles { headers: Some(3), receipts: Some(3), transactions: Some(3) } + HighestStaticFiles { + headers: Some(3), + receipts: Some(3), + transactions: Some(3), + block_meta: None + } ); } @@ -408,6 +433,7 @@ headers: Some(1), receipts: Some(1), transactions: Some(1), + block_meta: None, }) .expect("get static file targets"); assert_matches!(locked_producer.run(targets.clone()), Ok(_)); diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 7a9980b355952..1618d6443e329 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -33,6 +33,9 @@ pub struct HighestStaticFiles { /// Highest static file block of transactions, inclusive. /// If [`None`], no static file is available. pub transactions: Option, + /// Highest static file block of block meta, inclusive. + /// If [`None`], no static file is available.
+ pub block_meta: Option, } impl HighestStaticFiles { @@ -42,6 +45,7 @@ impl HighestStaticFiles { StaticFileSegment::Headers => self.headers, StaticFileSegment::Transactions => self.transactions, StaticFileSegment::Receipts => self.receipts, + StaticFileSegment::BlockMeta => self.block_meta, } } @@ -51,17 +55,23 @@ impl HighestStaticFiles { StaticFileSegment::Headers => &mut self.headers, StaticFileSegment::Transactions => &mut self.transactions, StaticFileSegment::Receipts => &mut self.receipts, + StaticFileSegment::BlockMeta => &mut self.block_meta, } } + /// Returns an iterator over all static file segments + fn iter(&self) -> impl Iterator> { + [self.headers, self.transactions, self.receipts, self.block_meta].into_iter() + } + /// Returns the minimum block of all segments. pub fn min_block_num(&self) -> Option { - [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).min() + self.iter().flatten().min() } /// Returns the maximum block of all segments. pub fn max_block_num(&self) -> Option { - [self.headers, self.transactions, self.receipts].iter().filter_map(|&option| option).max() + self.iter().flatten().max() } } @@ -74,12 +84,17 @@ pub struct StaticFileTargets { pub receipts: Option>, /// Targeted range of transactions. pub transactions: Option>, + /// Targeted range of block meta. + pub block_meta: Option>, } impl StaticFileTargets { /// Returns `true` if any of the targets are [Some]. pub const fn any(&self) -> bool { - self.headers.is_some() || self.receipts.is_some() || self.transactions.is_some() + self.headers.is_some() || + self.receipts.is_some() || + self.transactions.is_some() || + self.block_meta.is_some() } /// Returns `true` if all targets are either [`None`] or has beginning of the range equal to the @@ -89,6 +104,7 @@ impl StaticFileTargets { (self.headers.as_ref(), static_files.headers), (self.receipts.as_ref(), static_files.receipts), (self.transactions.as_ref(), static_files.transactions), + (self.block_meta.as_ref(), static_files.block_meta), ] .iter() .all(|(target_block_range, highest_static_fileted_block)| { @@ -118,8 +134,12 @@ mod tests { #[test] fn test_highest_static_files_highest() { - let files = - HighestStaticFiles { headers: Some(100), receipts: Some(200), transactions: None }; + let files = HighestStaticFiles { + headers: Some(100), + receipts: Some(200), + transactions: None, + block_meta: None, + }; // Test for headers segment assert_eq!(files.highest(StaticFileSegment::Headers), Some(100)); @@ -146,12 +166,20 @@ mod tests { // Modify transactions value *files.as_mut(StaticFileSegment::Transactions) = Some(350); assert_eq!(files.transactions, Some(350)); + + // Modify block meta value + *files.as_mut(StaticFileSegment::BlockMeta) = Some(350); + assert_eq!(files.block_meta, Some(350)); } #[test] fn test_highest_static_files_min() { - let files = - HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: None }; + let files = HighestStaticFiles { + headers: Some(300), + receipts: Some(100), + transactions: None, + block_meta: None, + }; // Minimum value among the available segments assert_eq!(files.min_block_num(), Some(100)); @@ -163,8 +191,12 @@ mod tests { #[test] fn test_highest_static_files_max() { - let files = - HighestStaticFiles { headers: Some(300), receipts: Some(100), transactions: Some(500) }; + let files = HighestStaticFiles { + headers: Some(300), + receipts: Some(100), + transactions: Some(500), + block_meta: Some(500), + }; // Maximum value among the available segments 
assert_eq!(files.max_block_num(), Some(500)); diff --git a/crates/static-file/types/src/segment.rs b/crates/static-file/types/src/segment.rs index ed001a20707c5..fe07f28a31856 100644 --- a/crates/static-file/types/src/segment.rs +++ b/crates/static-file/types/src/segment.rs @@ -3,7 +3,7 @@ use alloy_primitives::TxNumber; use derive_more::Display; use serde::{Deserialize, Serialize}; use std::{ops::RangeInclusive, str::FromStr}; -use strum::{AsRefStr, EnumIter, EnumString}; +use strum::{AsRefStr, EnumString}; #[derive( Debug, @@ -17,7 +17,6 @@ use strum::{AsRefStr, EnumIter, EnumString}; Deserialize, Serialize, EnumString, - EnumIter, AsRefStr, Display, )] @@ -34,6 +33,10 @@ pub enum StaticFileSegment { #[strum(serialize = "receipts")] /// Static File segment responsible for the `Receipts` table. Receipts, + #[strum(serialize = "blockmeta")] + /// Static File segment responsible for the `BlockBodyIndices`, `BlockOmmers`, + /// `BlockWithdrawals` tables. + BlockMeta, } impl StaticFileSegment { @@ -43,9 +46,17 @@ impl StaticFileSegment { Self::Headers => "headers", Self::Transactions => "transactions", Self::Receipts => "receipts", + Self::BlockMeta => "blockmeta", } } + /// Returns an iterator over all segments. + pub fn iter() -> impl Iterator { + // The order of segments is significant and must be maintained to ensure correctness. For + // example, Transactions require BlockBodyIndices from Blockmeta to be sound. + [Self::Headers, Self::BlockMeta, Self::Transactions, Self::Receipts].into_iter() + } + /// Returns the default configuration of the segment. pub const fn config(&self) -> SegmentConfig { SegmentConfig { compression: Compression::Lz4 } @@ -54,7 +65,7 @@ impl StaticFileSegment { /// Returns the number of columns for the segment pub const fn columns(&self) -> usize { match self { - Self::Headers => 3, + Self::Headers | Self::BlockMeta => 3, Self::Transactions | Self::Receipts => 1, } } @@ -118,16 +129,25 @@ impl StaticFileSegment { matches!(self, Self::Headers) } + /// Returns `true` if the segment is `StaticFileSegment::BlockMeta`. + pub const fn is_block_meta(&self) -> bool { + matches!(self, Self::BlockMeta) + } + /// Returns `true` if the segment is `StaticFileSegment::Receipts`. pub const fn is_receipts(&self) -> bool { matches!(self, Self::Receipts) } - /// Returns `true` if the segment is `StaticFileSegment::Receipts` or - /// `StaticFileSegment::Transactions`. + /// Returns `true` if a segment row is linked to a transaction. pub const fn is_tx_based(&self) -> bool { matches!(self, Self::Receipts | Self::Transactions) } + + /// Returns `true` if a segment row is linked to a block. + pub const fn is_block_based(&self) -> bool { + matches!(self, Self::Headers | Self::BlockMeta) + } } /// A segment header that contains information common to all segments. Used for storage. @@ -228,40 +248,32 @@ impl SegmentHeader { /// Increments tx end range depending on segment pub fn increment_tx(&mut self) { - match self.segment { - StaticFileSegment::Headers => (), - StaticFileSegment::Transactions | StaticFileSegment::Receipts => { - if let Some(tx_range) = &mut self.tx_range { - tx_range.end += 1; - } else { - self.tx_range = Some(SegmentRangeInclusive::new(0, 0)); - } + if self.segment.is_tx_based() { + if let Some(tx_range) = &mut self.tx_range { + tx_range.end += 1; + } else { + self.tx_range = Some(SegmentRangeInclusive::new(0, 0)); } } } /// Removes `num` elements from end of tx or block range. 
pub fn prune(&mut self, num: u64) { - match self.segment { - StaticFileSegment::Headers => { - if let Some(range) = &mut self.block_range { - if num > range.end - range.start { - self.block_range = None; - } else { - range.end = range.end.saturating_sub(num); - } - }; - } - StaticFileSegment::Transactions | StaticFileSegment::Receipts => { - if let Some(range) = &mut self.tx_range { - if num > range.end - range.start { - self.tx_range = None; - } else { - range.end = range.end.saturating_sub(num); - } - }; + if self.segment.is_block_based() { + if let Some(range) = &mut self.block_range { + if num > range.end - range.start { + self.block_range = None; + } else { + range.end = range.end.saturating_sub(num); + } + }; + } else if let Some(range) = &mut self.tx_range { + if num > range.end - range.start { + self.tx_range = None; + } else { + range.end = range.end.saturating_sub(num); } - }; + } } /// Sets a new `block_range`. @@ -286,10 +298,10 @@ impl SegmentHeader { /// Returns the row offset which depends on whether the segment is block or transaction based. pub fn start(&self) -> Option { - match self.segment { - StaticFileSegment::Headers => self.block_start(), - StaticFileSegment::Transactions | StaticFileSegment::Receipts => self.tx_start(), + if self.segment.is_block_based() { + return self.block_start() } + self.tx_start() } } @@ -355,7 +367,6 @@ mod tests { use super::*; use alloy_primitives::hex; use reth_nippy_jar::NippyJar; - use strum::IntoEnumIterator; #[test] fn test_filename() { diff --git a/crates/storage/db/src/static_file/masks.rs b/crates/storage/db/src/static_file/masks.rs index 17833e7ee2935..f89a0eac1d4e2 100644 --- a/crates/storage/db/src/static_file/masks.rs +++ b/crates/storage/db/src/static_file/masks.rs @@ -1,10 +1,10 @@ use crate::{ add_static_file_mask, static_file::mask::{ColumnSelectorOne, ColumnSelectorTwo}, - HeaderTerminalDifficulties, + BlockBodyIndices, BlockWithdrawals, HeaderTerminalDifficulties, }; use alloy_primitives::BlockHash; -use reth_db_api::table::Table; +use reth_db_api::{models::StoredBlockOmmers, table::Table}; // HEADER MASKS add_static_file_mask! { @@ -42,3 +42,17 @@ add_static_file_mask! { #[doc = "Mask for selecting a single transaction from Transactions static file segment"] TransactionMask, T, 0b1 } + +// BLOCK_META MASKS +add_static_file_mask! { + #[doc = "Mask for a `StoredBlockBodyIndices` from BlockMeta static file segment"] + BodyIndicesMask, ::Value, 0b001 +} +add_static_file_mask! { + #[doc = "Mask for a `StoredBlockOmmers` from BlockMeta static file segment"] + OmmersMask, StoredBlockOmmers, 0b010 +} +add_static_file_mask! 
{ + #[doc = "Mask for a `StoredBlockWithdrawals` from BlockMeta static file segment"] + WithdrawalsMask, ::Value, 0b100 +} diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index d4a7bbf345400..0ff9ed20ac173 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -7,18 +7,21 @@ use crate::{ TransactionsProvider, }; use alloy_consensus::transaction::TransactionMeta; -use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber}; +use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals, BlockHashOrNumber}; use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash, TxNumber, B256, U256}; use reth_chainspec::ChainInfo; use reth_db::{ + models::StoredBlockBodyIndices, static_file::{ - BlockHashMask, HeaderMask, HeaderWithHashMask, ReceiptMask, StaticFileCursor, - TDWithHashMask, TotalDifficultyMask, TransactionMask, + BlockHashMask, BodyIndicesMask, HeaderMask, HeaderWithHashMask, OmmersMask, ReceiptMask, + StaticFileCursor, TDWithHashMask, TotalDifficultyMask, TransactionMask, WithdrawalsMask, }, table::{Decompress, Value}, }; -use reth_node_types::NodePrimitives; -use reth_primitives_traits::{SealedHeader, SignedTransaction}; +use reth_node_types::{FullNodePrimitives, NodePrimitives}; +use reth_primitives::SealedHeader; +use reth_primitives_traits::SignedTransaction; +use reth_storage_api::{BlockBodyIndicesProvider, OmmersProvider, WithdrawalsProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ fmt::Debug, @@ -351,3 +354,36 @@ impl WithdrawalsProvider for StaticFileJarProvider<'_, N> { + fn withdrawals_by_block( + &self, + id: BlockHashOrNumber, + _: u64, + ) -> ProviderResult> { + if let Some(num) = id.as_number() { + return Ok(self.cursor()?.get_one::(num.into())?.map(|s| s.withdrawals)) + } + // Only accepts block number queries + Err(ProviderError::UnsupportedProvider) + } +} + +impl> OmmersProvider for StaticFileJarProvider<'_, N> { + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + if let Some(num) = id.as_number() { + return Ok(self + .cursor()? + .get_one::>(num.into())? + .map(|s| s.ommers)) + } + // Only accepts block number queries + Err(ProviderError::UnsupportedProvider) + } +} + +impl BlockBodyIndicesProvider for StaticFileJarProvider<'_, N> { + fn block_body_indices(&self, num: u64) -> ProviderResult> { + self.cursor()?.get_one::(num.into()) + } +} diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index e81c42284d412..f501d64d435c3 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -48,7 +48,6 @@ use std::{ path::{Path, PathBuf}, sync::{mpsc, Arc}, }; -use strum::IntoEnumIterator; use tracing::{info, trace, warn}; /// Alias type for a map that can be queried for block ranges from a transaction @@ -682,6 +681,11 @@ impl StaticFileProvider { }; for segment in StaticFileSegment::iter() { + // Not integrated yet + if segment.is_block_meta() { + continue + } + if has_receipt_pruning && segment.is_receipts() { // Pruned nodes (including full node) do not store receipts as static files. 
continue @@ -776,6 +780,13 @@ impl StaticFileProvider { highest_tx, highest_block, )?, + StaticFileSegment::BlockMeta => self + .ensure_invariants::<_, tables::BlockBodyIndices>( + provider, + segment, + highest_block, + highest_block, + )?, } { update_unwind_target(unwind); } @@ -825,41 +836,46 @@ impl StaticFileProvider { where Provider: DBProvider + BlockReader + StageCheckpointReader, { - let highest_static_file_entry = highest_static_file_entry.unwrap_or_default(); - let highest_static_file_block = highest_static_file_block.unwrap_or_default(); let mut db_cursor = provider.tx_ref().cursor_read::()?; if let Some((db_first_entry, _)) = db_cursor.first()? { - // If there is a gap between the entry found in static file and - // database, then we have most likely lost static file data and need to unwind so we can - // load it again - if !(db_first_entry <= highest_static_file_entry || - highest_static_file_entry + 1 == db_first_entry) + if let (Some(highest_entry), Some(highest_block)) = + (highest_static_file_entry, highest_static_file_block) { - info!( - target: "reth::providers::static_file", - ?db_first_entry, - ?highest_static_file_entry, - unwind_target = highest_static_file_block, - ?segment, - "Setting unwind target." - ); - return Ok(Some(highest_static_file_block)) + // If there is a gap between the entry found in static file and + // database, then we have most likely lost static file data and need to unwind so we + // can load it again + if !(db_first_entry <= highest_entry || highest_entry + 1 == db_first_entry) { + info!( + target: "reth::providers::static_file", + ?db_first_entry, + ?highest_entry, + unwind_target = highest_block, + ?segment, + "Setting unwind target." + ); + return Ok(Some(highest_block)) + } } if let Some((db_last_entry, _)) = db_cursor.last()? { - if db_last_entry > highest_static_file_entry { + if highest_static_file_entry + .is_none_or(|highest_entry| db_last_entry > highest_entry) + { return Ok(None) } } } + let highest_static_file_entry = highest_static_file_entry.unwrap_or_default(); + let highest_static_file_block = highest_static_file_block.unwrap_or_default(); + // If static file entry is ahead of the database entries, then ensure the checkpoint block // number matches. let checkpoint_block_number = provider .get_stage_checkpoint(match segment { StaticFileSegment::Headers => StageId::Headers, - StaticFileSegment::Transactions => StageId::Bodies, + StaticFileSegment::Transactions | StaticFileSegment::BlockMeta => StageId::Bodies, StaticFileSegment::Receipts => StageId::Execution, })? .unwrap_or_default() @@ -890,8 +906,11 @@ impl StaticFileProvider { ); let mut writer = self.latest_writer(segment)?; if segment.is_headers() { + // TODO(joshie): is_block_meta writer.prune_headers(highest_static_file_block - checkpoint_block_number)?; } else if let Some(block) = provider.block_body_indices(checkpoint_block_number)? 
{ + // todo joshie: is querying block_body_indices a potential issue once bbi is moved + // to sf as well let number = highest_static_file_entry - block.last_tx_num(); if segment.is_receipts() { writer.prune_receipts(number, checkpoint_block_number)?; @@ -928,6 +947,7 @@ impl StaticFileProvider { headers: self.get_highest_static_file_block(StaticFileSegment::Headers), receipts: self.get_highest_static_file_block(StaticFileSegment::Receipts), transactions: self.get_highest_static_file_block(StaticFileSegment::Transactions), + block_meta: self.get_highest_static_file_block(StaticFileSegment::BlockMeta), } } @@ -970,11 +990,10 @@ impl StaticFileProvider { F: FnMut(&mut StaticFileCursor<'_>, u64) -> ProviderResult>, P: FnMut(&T) -> bool, { - let get_provider = |start: u64| match segment { - StaticFileSegment::Headers => { + let get_provider = |start: u64| { + if segment.is_block_based() { self.get_segment_provider_from_block(segment, start, None) - } - StaticFileSegment::Transactions | StaticFileSegment::Receipts => { + } else { self.get_segment_provider_from_transaction(segment, start, None) } }; @@ -1046,11 +1065,10 @@ impl StaticFileProvider { F: Fn(&mut StaticFileCursor<'_>, u64) -> ProviderResult> + 'a, T: std::fmt::Debug, { - let get_provider = move |start: u64| match segment { - StaticFileSegment::Headers => { + let get_provider = move |start: u64| { + if segment.is_block_based() { self.get_segment_provider_from_block(segment, start, None) - } - StaticFileSegment::Transactions | StaticFileSegment::Receipts => { + } else { self.get_segment_provider_from_transaction(segment, start, None) } }; @@ -1098,11 +1116,10 @@ impl StaticFileProvider { FD: Fn() -> ProviderResult>, { // If there is, check the maximum block or transaction number of the segment. - let static_file_upper_bound = match segment { - StaticFileSegment::Headers => self.get_highest_static_file_block(segment), - StaticFileSegment::Transactions | StaticFileSegment::Receipts => { - self.get_highest_static_file_tx(segment) - } + let static_file_upper_bound = if segment.is_block_based() { + self.get_highest_static_file_block(segment) + } else { + self.get_highest_static_file_tx(segment) }; if static_file_upper_bound @@ -1140,11 +1157,10 @@ impl StaticFileProvider { let mut data = Vec::new(); // If there is, check the maximum block or transaction number of the segment. 
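
The closure refactors above collapse the old per-variant matches into a single split on `StaticFileSegment::is_block_based()`, which routes the new `BlockMeta` segment alongside `Headers` (both are keyed by block number) while `Transactions` and `Receipts` stay transaction-indexed. A reduced sketch of that dispatch, using a stand-in enum rather than reth's `StaticFileSegment`:

```rust
// Stand-in for reth's `StaticFileSegment`; only the dispatch shape matters.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Segment {
    Headers,
    Transactions,
    Receipts,
    BlockMeta,
}

impl Segment {
    /// Headers and BlockMeta are keyed by block number; Transactions and
    /// Receipts are keyed by transaction number.
    const fn is_block_based(self) -> bool {
        matches!(self, Self::Headers | Self::BlockMeta)
    }
}

/// Mirrors the refactored upper-bound lookup: block-based segments consult
/// the highest static-file block, tx-based segments the highest tx number.
fn upper_bound(segment: Segment, highest_block: u64, highest_tx: u64) -> u64 {
    if segment.is_block_based() {
        highest_block
    } else {
        highest_tx
    }
}

fn main() {
    assert_eq!(upper_bound(Segment::BlockMeta, 100, 5_000), 100);
    assert_eq!(upper_bound(Segment::Receipts, 100, 5_000), 5_000);
}
```
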
- if let Some(static_file_upper_bound) = match segment { - StaticFileSegment::Headers => self.get_highest_static_file_block(segment), - StaticFileSegment::Transactions | StaticFileSegment::Receipts => { - self.get_highest_static_file_tx(segment) - } + if let Some(static_file_upper_bound) = if segment.is_block_based() { + self.get_highest_static_file_block(segment) + } else { + self.get_highest_static_file_tx(segment) } { if block_or_tx_range.start <= static_file_upper_bound { let end = block_or_tx_range.end.min(static_file_upper_bound + 1); @@ -1665,25 +1681,56 @@ impl> impl WithdrawalsProvider for StaticFileProvider { fn withdrawals_by_block( &self, - _id: BlockHashOrNumber, - _timestamp: u64, + id: BlockHashOrNumber, + timestamp: u64, ) -> ProviderResult> { - // Required data not present in static_files + if let Some(num) = id.as_number() { + return self + .get_segment_provider_from_block(StaticFileSegment::BlockMeta, num, None) + .and_then(|provider| provider.withdrawals_by_block(id, timestamp)) + .or_else(|err| { + if let ProviderError::MissingStaticFileBlock(_, _) = err { + Ok(None) + } else { + Err(err) + } + }) + } + // Only accepts block number queries Err(ProviderError::UnsupportedProvider) } } impl> OmmersProvider for StaticFileProvider { - fn ommers(&self, _id: BlockHashOrNumber) -> ProviderResult>> { - // Required data not present in static_files + fn ommers(&self, id: BlockHashOrNumber) -> ProviderResult>> { + if let Some(num) = id.as_number() { + return self + .get_segment_provider_from_block(StaticFileSegment::BlockMeta, num, None) + .and_then(|provider| provider.ommers(id)) + .or_else(|err| { + if let ProviderError::MissingStaticFileBlock(_, _) = err { + Ok(None) + } else { + Err(err) + } + }) + } + // Only accepts block number queries Err(ProviderError::UnsupportedProvider) } } -impl BlockBodyIndicesProvider for StaticFileProvider { - fn block_body_indices(&self, _num: u64) -> ProviderResult> { - // Required data not present in static_files - Err(ProviderError::UnsupportedProvider) +impl BlockBodyIndicesProvider for StaticFileProvider { + fn block_body_indices(&self, num: u64) -> ProviderResult> { + self.get_segment_provider_from_block(StaticFileSegment::BlockMeta, num, None) + .and_then(|provider| provider.block_body_indices(num)) + .or_else(|err| { + if let ProviderError::MissingStaticFileBlock(_, _) = err { + Ok(None) + } else { + Err(err) + } + }) } } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 3939e8b89456d..c558f43c7e585 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -6,6 +6,7 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber, TxNumber, U256}; use parking_lot::{lock_api::RwLockWriteGuard, RawRwLock, RwLock}; use reth_codecs::Compact; +use reth_db::models::{StoredBlockBodyIndices, StoredBlockOmmers, StoredBlockWithdrawals}; use reth_db_api::models::CompactU256; use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; use reth_node_types::NodePrimitives; @@ -32,6 +33,7 @@ pub(crate) struct StaticFileWriters { headers: RwLock>>, transactions: RwLock>>, receipts: RwLock>>, + block_meta: RwLock>>, } impl Default for StaticFileWriters { @@ -40,6 +42,7 @@ impl Default for StaticFileWriters { headers: Default::default(), transactions: Default::default(), receipts: Default::default(), + block_meta: Default::default(), } } } @@ -54,6 +57,7 @@ impl 
StaticFileWriters {
             StaticFileSegment::Headers => self.headers.write(),
             StaticFileSegment::Transactions => self.transactions.write(),
             StaticFileSegment::Receipts => self.receipts.write(),
+            StaticFileSegment::BlockMeta => self.block_meta.write(),
         };
 
         if write_guard.is_none() {
@@ -230,6 +234,7 @@ impl StaticFileProviderRW {
             StaticFileSegment::Receipts => {
                 self.prune_receipt_data(to_delete, last_block_number.expect("should exist"))?
             }
+            StaticFileSegment::BlockMeta => todo!(),
         }
     }
 
@@ -393,13 +398,10 @@ impl StaticFileProviderRW {
         let mut remaining_rows = num_rows;
         let segment = self.writer.user_header().segment();
         while remaining_rows > 0 {
-            let len = match segment {
-                StaticFileSegment::Headers => {
-                    self.writer.user_header().block_len().unwrap_or_default()
-                }
-                StaticFileSegment::Transactions | StaticFileSegment::Receipts => {
-                    self.writer.user_header().tx_len().unwrap_or_default()
-                }
+            let len = if segment.is_block_based() {
+                self.writer.user_header().block_len().unwrap_or_default()
+            } else {
+                self.writer.user_header().tx_len().unwrap_or_default()
             };
 
             if remaining_rows >= len {
@@ -555,6 +557,61 @@ impl StaticFileProviderRW {
         Ok(())
     }
 
+    /// Appends [`StoredBlockBodyIndices`], [`StoredBlockOmmers`] and [`StoredBlockWithdrawals`] to
+    /// static file.
+    ///
+    /// It **CALLS** `increment_block()` since it's a block-based segment.
+    pub fn append_eth_block_meta(
+        &mut self,
+        body_indices: &StoredBlockBodyIndices,
+        ommers: &StoredBlockOmmers,
+        withdrawals: &StoredBlockWithdrawals,
+        expected_block_number: BlockNumber,
+    ) -> ProviderResult<()>
+    where
+        N::BlockHeader: Compact,
+    {
+        self.append_block_meta(body_indices, ommers, withdrawals, expected_block_number)
+    }
+
+    /// Appends [`StoredBlockBodyIndices`] and two other arbitrary types belonging to the block
+    /// body to static file.
+    ///
+    /// It **CALLS** `increment_block()` since it's a block-based segment.
+    pub fn append_block_meta<F1, F2>(
+        &mut self,
+        body_indices: &StoredBlockBodyIndices,
+        field1: &F1,
+        field2: &F2,
+        expected_block_number: BlockNumber,
+    ) -> ProviderResult<()>
+    where
+        N::BlockHeader: Compact,
+        F1: Compact,
+        F2: Compact,
+    {
+        let start = Instant::now();
+        self.ensure_no_queued_prune()?;
+
+        debug_assert!(self.writer.user_header().segment() == StaticFileSegment::BlockMeta);
+
+        self.increment_block(expected_block_number)?;
+
+        self.append_column(body_indices)?;
+        self.append_column(field1)?;
+        self.append_column(field2)?;
+
+        if let Some(metrics) = &self.metrics {
+            metrics.record_segment_operation(
+                StaticFileSegment::BlockMeta,
+                StaticFileProviderOperation::Append,
+                Some(start.elapsed()),
+            );
+        }
+
+        Ok(())
+    }
+
     /// Appends transaction to static file.
     ///
     /// It **DOES NOT CALL** `increment_block()`; it should be handled elsewhere. There might be
@@ -682,6 +739,12 @@ impl StaticFileProviderRW {
         self.queue_prune(to_delete, None)
     }
 
+    /// Adds an instruction to prune `to_delete` block meta rows during commit.
+    pub fn prune_block_meta(&mut self, to_delete: u64) -> ProviderResult<()> {
+        debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::BlockMeta);
+        self.queue_prune(to_delete, None)
+    }
+
     /// Adds an instruction to prune `to_delete` elements during commit.
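
A toy model of the `append_block_meta` path added above, assuming simplified stand-in types for the real `StaticFileProviderRW`: one row per block, the expected block number checked first (mirroring `increment_block`), then the three columns appended in mask order (body indices, ommers, withdrawals).

```rust
// Simplified stand-ins; the real writer tracks NippyJar state, metrics, etc.
struct Row {
    block_number: u64,
    columns: Vec<Vec<u8>>,
}

struct Writer {
    next_block: u64,
    rows: Vec<Row>,
}

impl Writer {
    /// Mirrors `append_block_meta`: verify the expected block number first
    /// (as `increment_block` does), then append one column per mask bit.
    fn append_block_meta(
        &mut self,
        body_indices: &[u8],
        ommers: &[u8],
        withdrawals: &[u8],
        expected_block_number: u64,
    ) -> Result<(), String> {
        if expected_block_number != self.next_block {
            return Err(format!(
                "expected block {}, got {}",
                self.next_block, expected_block_number
            ));
        }
        self.rows.push(Row {
            block_number: expected_block_number,
            // Column order must match the masks: 0b001, 0b010, 0b100.
            columns: vec![body_indices.to_vec(), ommers.to_vec(), withdrawals.to_vec()],
        });
        self.next_block += 1;
        Ok(())
    }
}

fn main() {
    let mut writer = Writer { next_block: 0, rows: Vec::new() };
    writer.append_block_meta(b"indices", b"ommers", b"withdrawals", 0).unwrap();
    // Appending out of order is rejected, like `increment_block` would.
    assert!(writer.append_block_meta(b"indices", b"ommers", b"withdrawals", 5).is_err());
    assert_eq!(writer.rows.len(), 1);
}
```
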
/// /// Note: `last_block` refers to the block the unwinds ends at if dealing with transaction-based From 83b2fb9b413b7386d77414e573a8874fb9d83151 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 15 Jan 2025 02:12:43 +0100 Subject: [PATCH 072/113] feat: add SealedBlock in reth-primitives-traits (#13735) --- Cargo.lock | 461 +++++----- bin/reth-bench/src/bench/new_payload_fcu.rs | 21 +- bin/reth-bench/src/bench/new_payload_only.rs | 4 +- .../src/commands/debug_cmd/build_block.rs | 19 +- bin/reth/src/commands/debug_cmd/execution.rs | 6 +- .../commands/debug_cmd/in_memory_merkle.rs | 45 +- bin/reth/src/commands/debug_cmd/merkle.rs | 12 +- book/sources/exex/tracking-state/src/bin/2.rs | 10 +- crates/chain-state/Cargo.toml | 1 + crates/chain-state/src/in_memory.rs | 59 +- crates/chain-state/src/notifications.rs | 40 +- crates/chain-state/src/test_utils.rs | 26 +- crates/cli/commands/src/import.rs | 4 +- .../commands/src/init_state/without_evm.rs | 14 +- crates/consensus/common/Cargo.toml | 6 +- crates/consensus/common/src/validation.rs | 28 +- crates/consensus/consensus/src/lib.rs | 28 +- crates/consensus/consensus/src/noop.rs | 13 +- crates/consensus/consensus/src/test_utils.rs | 13 +- crates/e2e-test-utils/src/node.rs | 4 +- .../engine/invalid-block-hooks/src/witness.rs | 16 +- crates/engine/primitives/src/event.rs | 6 +- .../primitives/src/invalid_block_hook.rs | 8 +- crates/engine/primitives/src/lib.rs | 6 +- crates/engine/service/src/service.rs | 11 +- crates/engine/tree/src/backfill.rs | 2 +- crates/engine/tree/src/download.rs | 41 +- crates/engine/tree/src/engine.rs | 4 +- crates/engine/tree/src/test_utils.rs | 2 +- crates/engine/tree/src/tree/block_buffer.rs | 31 +- crates/engine/tree/src/tree/error.rs | 20 +- .../tree/src/tree/invalid_block_hook.rs | 6 +- .../engine/tree/src/tree/invalid_headers.rs | 2 +- crates/engine/tree/src/tree/mod.rs | 128 ++- crates/engine/util/Cargo.toml | 1 + crates/engine/util/src/reorg.rs | 4 +- crates/ethereum/consensus/src/lib.rs | 21 +- crates/ethereum/consensus/src/validation.rs | 4 +- crates/ethereum/engine-primitives/src/lib.rs | 4 +- crates/ethereum/evm/src/execute.rs | 68 +- crates/ethereum/node/tests/e2e/dev.rs | 4 +- crates/ethereum/payload/src/lib.rs | 6 +- crates/evm/execution-types/src/chain.rs | 157 ++-- crates/evm/src/execute.rs | 31 +- crates/evm/src/metrics.rs | 18 +- crates/evm/src/noop.rs | 6 +- crates/evm/src/state_change.rs | 3 +- crates/evm/src/test_utils.rs | 6 +- crates/exex/exex/src/backfill/job.rs | 28 +- crates/exex/exex/src/backfill/stream.rs | 7 +- crates/exex/exex/src/backfill/test_utils.rs | 29 +- crates/exex/exex/src/manager.rs | 36 +- crates/exex/exex/src/notifications.rs | 37 +- crates/exex/exex/src/wal/mod.rs | 21 +- crates/exex/exex/src/wal/storage.rs | 15 +- crates/exex/test-utils/Cargo.toml | 1 + crates/exex/test-utils/src/lib.rs | 15 +- crates/exex/types/src/notification.rs | 14 +- crates/net/downloaders/src/bodies/bodies.rs | 159 ++-- crates/net/downloaders/src/bodies/noop.rs | 17 +- crates/net/downloaders/src/bodies/queue.rs | 26 +- crates/net/downloaders/src/bodies/request.rs | 46 +- crates/net/downloaders/src/bodies/task.rs | 44 +- .../net/downloaders/src/bodies/test_utils.rs | 17 +- crates/net/downloaders/src/file_client.rs | 39 +- crates/net/downloaders/src/headers/noop.rs | 7 +- .../src/headers/reverse_headers.rs | 11 +- crates/net/downloaders/src/headers/task.rs | 9 +- .../net/downloaders/src/headers/test_utils.rs | 2 +- crates/net/network/src/fetch/client.rs | 5 + crates/net/p2p/src/bodies/downloader.rs 
| 14 +- crates/net/p2p/src/bodies/response.rs | 31 +- crates/net/p2p/src/full_block.rs | 39 +- crates/net/p2p/src/headers/downloader.rs | 7 +- crates/net/p2p/src/headers/error.rs | 3 +- crates/net/p2p/src/lib.rs | 25 +- crates/net/p2p/src/test_utils/full_block.rs | 7 +- crates/net/p2p/src/test_utils/headers.rs | 12 +- crates/node/builder/src/components/builder.rs | 9 +- crates/node/builder/src/components/mod.rs | 14 +- crates/node/builder/src/setup.rs | 11 +- crates/node/core/src/utils.rs | 17 +- crates/optimism/chainspec/src/lib.rs | 14 +- crates/optimism/consensus/Cargo.toml | 26 +- crates/optimism/consensus/src/lib.rs | 58 +- crates/optimism/evm/src/execute.rs | 33 +- crates/optimism/evm/src/lib.rs | 14 +- crates/optimism/node/src/engine.rs | 6 +- crates/optimism/node/src/txpool.rs | 5 +- crates/optimism/payload/Cargo.toml | 1 + crates/optimism/payload/src/builder.rs | 4 +- crates/optimism/payload/src/payload.rs | 12 +- crates/optimism/rpc/src/eth/pending_block.rs | 7 +- crates/payload/basic/src/stack.rs | 4 +- crates/payload/builder/src/lib.rs | 4 +- crates/payload/builder/src/test_utils.rs | 3 +- crates/payload/primitives/src/traits.rs | 4 +- crates/payload/validator/src/lib.rs | 10 +- crates/primitives-traits/src/block/body.rs | 35 +- crates/primitives-traits/src/block/error.rs | 33 + crates/primitives-traits/src/block/mod.rs | 125 ++- .../primitives-traits/src/block/recovered.rs | 598 ++++++++++++ crates/primitives-traits/src/block/sealed.rs | 462 ++++++++++ crates/primitives-traits/src/header/sealed.rs | 124 ++- crates/primitives-traits/src/lib.rs | 30 +- crates/primitives-traits/src/node.rs | 7 +- .../src/serde_bincode_compat.rs | 81 +- .../src/transaction/signed.rs | 20 + crates/primitives/src/alloy_compat.rs | 40 +- crates/primitives/src/block.rs | 867 +----------------- crates/primitives/src/lib.rs | 18 +- crates/primitives/src/traits.rs | 85 -- crates/prune/prune/src/segments/mod.rs | 8 +- crates/rpc/rpc-builder/Cargo.toml | 1 + crates/rpc/rpc-builder/tests/it/auth.rs | 6 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 16 +- crates/rpc/rpc-engine-api/tests/it/payload.rs | 10 +- crates/rpc/rpc-eth-api/src/helpers/block.rs | 35 +- crates/rpc/rpc-eth-api/src/helpers/fee.rs | 6 +- .../rpc-eth-api/src/helpers/pending_block.rs | 11 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 20 +- .../rpc-eth-api/src/helpers/transaction.rs | 6 +- crates/rpc/rpc-eth-types/src/cache/mod.rs | 73 +- crates/rpc/rpc-eth-types/src/fee_history.rs | 36 +- crates/rpc/rpc-eth-types/src/logs_utils.rs | 6 +- crates/rpc/rpc-eth-types/src/pending_block.rs | 8 +- crates/rpc/rpc-eth-types/src/simulate.rs | 6 +- crates/rpc/rpc-types-compat/src/block.rs | 82 +- .../rpc-types-compat/src/engine/payload.rs | 28 +- crates/rpc/rpc/src/debug.rs | 8 +- crates/rpc/rpc/src/eth/filter.rs | 4 +- crates/rpc/rpc/src/validation.rs | 17 +- crates/stages/stages/benches/setup/mod.rs | 11 +- crates/stages/stages/src/lib.rs | 2 +- crates/stages/stages/src/sets.rs | 13 +- crates/stages/stages/src/stages/bodies.rs | 17 +- crates/stages/stages/src/stages/execution.rs | 55 +- .../stages/src/stages/hashing_account.rs | 4 +- crates/stages/stages/src/stages/headers.rs | 15 +- crates/stages/stages/src/stages/merkle.rs | 19 +- crates/stages/stages/src/stages/mod.rs | 12 +- .../stages/stages/src/test_utils/test_db.rs | 2 +- crates/storage/db/Cargo.toml | 1 + .../src/providers/blockchain_provider.rs | 99 +- .../provider/src/providers/consistent.rs | 68 +- .../provider/src/providers/database/mod.rs | 38 +- 
.../src/providers/database/provider.rs | 51 +- .../src/providers/static_file/manager.rs | 19 +- .../provider/src/providers/static_file/mod.rs | 2 +- .../storage/provider/src/test_utils/blocks.rs | 68 +- .../storage/provider/src/test_utils/mock.rs | 22 +- crates/storage/provider/src/traits/block.rs | 8 +- .../provider/src/traits/header_sync_gap.rs | 4 +- crates/storage/storage-api/src/block.rs | 58 +- crates/storage/storage-api/src/noop.rs | 18 +- .../transaction-pool/src/blobstore/tracker.rs | 11 +- crates/transaction-pool/src/lib.rs | 7 +- crates/transaction-pool/src/maintain.rs | 15 +- crates/transaction-pool/src/pool/mod.rs | 7 +- crates/transaction-pool/src/traits.rs | 21 +- crates/transaction-pool/src/validate/eth.rs | 7 +- crates/transaction-pool/src/validate/mod.rs | 12 +- crates/transaction-pool/src/validate/task.rs | 7 +- .../src/mined_sidecar.rs | 22 +- .../custom-beacon-withdrawals/src/main.rs | 18 +- examples/custom-dev-node/src/main.rs | 2 +- examples/custom-engine-types/src/main.rs | 6 +- .../custom-payload-builder/src/generator.rs | 3 +- examples/db-access/src/main.rs | 6 +- testing/ef-tests/src/cases/blockchain_test.rs | 11 +- testing/testing-utils/src/generators.rs | 15 +- 171 files changed, 3231 insertions(+), 2866 deletions(-) create mode 100644 crates/primitives-traits/src/block/error.rs create mode 100644 crates/primitives-traits/src/block/recovered.rs create mode 100644 crates/primitives-traits/src/block/sealed.rs delete mode 100644 crates/primitives/src/traits.rs diff --git a/Cargo.lock b/Cargo.lock index 8cae6fe6a8c68..7f095efdb8779 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.1.55" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e39f295f876b61a1222d937e1dd31f965e4a1acc3bba98e448dd7e84b1a4566" +checksum = "da226340862e036ab26336dc99ca85311c6b662267c1440e1733890fd688802c" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -170,7 +170,7 @@ dependencies = [ "alloy-transport", "futures", "futures-util", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -203,7 +203,7 @@ dependencies = [ "crc", "rand 0.8.5", "serde", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -291,7 +291,7 @@ dependencies = [ "alloy-sol-types", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", "tracing", ] @@ -317,7 +317,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -345,7 +345,7 @@ dependencies = [ "rand 0.8.5", "serde_json", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.9", "tracing", "url", ] @@ -415,7 +415,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tracing", "url", @@ -460,7 +460,7 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -549,7 +549,7 @@ dependencies = [ "ethereum_ssz_derive", "serde", "serde_with", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -602,7 +602,7 @@ dependencies = [ "jsonrpsee-types", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -630,7 +630,7 @@ dependencies = [ "alloy-serde", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -668,7 +668,7 @@ dependencies = [ "auto_impl", "elliptic-curve", 
"k256", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -686,7 +686,7 @@ dependencies = [ "coins-bip39", "k256", "rand 0.8.5", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -700,7 +700,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -716,7 +716,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", "syn-solidity", "tiny-keccak", ] @@ -732,7 +732,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", "syn-solidity", ] @@ -771,7 +771,7 @@ dependencies = [ "futures-utils-wasm", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tower 0.5.2", "tracing", @@ -938,7 +938,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -1136,7 +1136,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -1172,7 +1172,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -1183,7 +1183,7 @@ checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -1221,7 +1221,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -1318,7 +1318,7 @@ version = "0.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "cexpr", "clang-sys", "itertools 0.13.0", @@ -1327,7 +1327,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -1353,9 +1353,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.7.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be3f42a67d6d345ecd59f675f3f012d6974981560836e938c22b424b85ce1be" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" dependencies = [ "arbitrary", "serde", @@ -1419,7 +1419,7 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c340fe0f0b267787095cbe35240c6786ff19da63ec7b69367ba338eace8169b" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "boa_interner", "boa_macros", "boa_string", @@ -1435,7 +1435,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f620c3f06f51e65c0504ddf04978be1b814ac6586f0b45f6019801ab5efd37f9" dependencies = [ "arrayvec", - "bitflags 2.7.0", + "bitflags 2.6.0", "boa_ast", "boa_gc", "boa_interner", @@ -1469,7 +1469,7 @@ dependencies = [ "static_assertions", "tap", "thin-vec", - "thiserror 2.0.11", + "thiserror 2.0.9", "time", ] @@ -1510,7 +1510,7 @@ checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", "synstructure", ] @@ -1520,7 +1520,7 @@ version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cc142dac798cdc6e2dbccfddeb50f36d2523bb977a976e19bdb3ae19b740804" dependencies = [ - "bitflags 
2.7.0", + "bitflags 2.6.0", "boa_ast", "boa_interner", "boa_macros", @@ -1632,7 +1632,7 @@ checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -1720,9 +1720,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.8" +version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0cf6e91fde44c773c6ee7ec6bba798504641a8bc2eb7e37a04ffbf4dfaa55a" +checksum = "a012a0df96dd6d06ba9a1b29d6402d1a5d77c6befd2566afdc26e10603dc93d7" dependencies = [ "jobserver", "libc", @@ -1821,9 +1821,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.26" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" +checksum = "9560b07a799281c7e0958b9296854d6fafd4c5f31444a7e5bb1ad6dde5ccf1bd" dependencies = [ "clap_builder", "clap_derive", @@ -1831,9 +1831,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.26" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" +checksum = "874e0dd3eb68bf99058751ac9712f622e61e6f393a94f7128fa26e3f02f5c7cd" dependencies = [ "anstream", "anstyle", @@ -1850,7 +1850,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -2214,7 +2214,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "crossterm_winapi", "mio 1.0.3", "parking_lot", @@ -2326,7 +2326,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -2350,7 +2350,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -2361,7 +2361,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -2470,7 +2470,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -2491,7 +2491,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", "unicode-xid", ] @@ -2605,7 +2605,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -2686,7 +2686,7 @@ dependencies = [ "revm", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", "walkdir", ] @@ -2751,7 +2751,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -2762,7 +2762,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -2782,7 +2782,7 @@ checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -2838,7 +2838,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -2862,7 +2862,7 @@ dependencies = [ 
"reth-node-ethereum", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -2951,7 +2951,7 @@ dependencies = [ "reth-tracing", "reth-trie-db", "serde", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", ] @@ -3408,7 +3408,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -3719,7 +3719,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "serde", - "thiserror 2.0.11", + "thiserror 2.0.9", "tinyvec", "tokio", "tracing", @@ -3743,7 +3743,7 @@ dependencies = [ "resolv-conf", "serde", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -4090,7 +4090,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -4147,7 +4147,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -4260,15 +4260,15 @@ dependencies = [ [[package]] name = "instability" -version = "0.3.7" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf9fed6d91cfb734e7476a06bde8300a1b94e217e1b523b6f0cd1a01998c71d" +checksum = "894813a444908c0c8c0e221b041771d107c4a21de1d317dc49bcc66e3c9e5b3f" dependencies = [ "darling", "indoc", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -4517,7 +4517,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -4719,7 +4719,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "libc", "redox_syscall", ] @@ -4790,9 +4790,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.15" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litemap" @@ -4854,32 +4854,6 @@ dependencies = [ "libc", ] -[[package]] -name = "maili-protocol" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428caa534dd054a449e64d8007d0fd0a15519d1033b272d37d02b74a29cf69f7" -dependencies = [ - "alloc-no-stdlib", - "alloy-consensus", - "alloy-eips", - "alloy-primitives", - "alloy-rlp", - "alloy-serde", - "alloy-sol-types", - "async-trait", - "brotli", - "derive_more", - "miniz_oxide", - "op-alloy-consensus", - "op-alloy-genesis", - "rand 0.8.5", - "serde", - "thiserror 2.0.11", - "tracing", - "unsigned-varint", -] - [[package]] name = "match_cfg" version = "0.1.0" @@ -4938,7 +4912,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -5177,7 +5151,7 @@ version = "6.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "filetime", "fsevent-sys", "inotify", @@ -5328,7 +5302,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -5381,9 +5355,9 @@ checksum = 
"b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.9.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "442518bf0ef88f4d79409527565b8cdee235c891f2e2a829497caec5ed9d8d1c" +checksum = "250244eadaf1a25e0e2ad263110ad2d1b43c2e57ddf4c025e71552d98196a8d3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5394,14 +5368,14 @@ dependencies = [ "derive_more", "serde", "serde_with", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] name = "op-alloy-genesis" -version = "0.9.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a2af7fee1fa297569199b524493e50355eab3f1bff75cef492036eb4a3ffb5e" +checksum = "98334a9cdccc5878e9d5c48afc9cc1b84da58dbc68d41f9488d8f71688b495d3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5409,14 +5383,14 @@ dependencies = [ "alloy-sol-types", "serde", "serde_repr", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] name = "op-alloy-network" -version = "0.9.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9e7e9fc656dfa8cc3b6e799da23e100b3d47e31ec6b5a4ed9d44e11f0967ad8" +checksum = "1dd588157ac14db601d6497b81ae738b2581c60886fc592976fab6c282619604" dependencies = [ "alloy-consensus", "alloy-network", @@ -5429,32 +5403,47 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.9.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a144b1ed079913b11c0640f4eaa3d2ac1bdb6cc35e3658a1640e88b241e0c32" +checksum = "753762429c31f838b59c886b31456c9bf02fd38fb890621665523a9087ae06ae" dependencies = [ - "maili-protocol", + "alloc-no-stdlib", + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "async-trait", + "brotli", + "derive_more", + "miniz_oxide", + "op-alloy-consensus", + "op-alloy-genesis", + "serde", + "thiserror 2.0.9", + "tracing", + "unsigned-varint", ] [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.9.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ff030fa1051bb38a0b526727aec511c0172d6f074a0d63cfedf522b11cc8b09" +checksum = "1f483fb052ef807682ae5b5729c3a61a092ee4f7334e6e6055de67e9f28ef880" dependencies = [ "alloy-eips", "alloy-primitives", "jsonrpsee", - "op-alloy-protocol", "op-alloy-rpc-types", "op-alloy-rpc-types-engine", ] [[package]] name = "op-alloy-rpc-types" -version = "0.9.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50223d61cad040db6721bcc2d489c924c1691ce3f5e674d4d8776131dab786a0" +checksum = "37b1d3872021aa28b10fc6cf8252e792e802d89e8b2cdaa57dcb9243c461b286" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5471,9 +5460,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.9.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5e2419373bae23ea3f6cf5a49c624d9b644061e2e929d4f9629cbcbffa4964d" +checksum = "c43f00d4060a6a38f5bf0a8182b4cc4c7071e2bc96942f414619251b522169eb" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5483,7 +5472,7 @@ dependencies = [ "op-alloy-consensus", "op-alloy-protocol", "serde", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -5657,7 +5646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.11", + "thiserror 2.0.9", "ucd-trie", ] @@ -5701,7 +5690,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -5730,7 +5719,7 @@ checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -5877,7 +5866,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "483f8c21f64f3ea09fe0f30f5d48c3e8eefe5dac9129f0075f76593b4c1da705" dependencies = [ "proc-macro2", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -5928,14 +5917,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -5946,7 +5935,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "chrono", "flate2", "hex", @@ -5961,7 +5950,7 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc5b72d8145275d844d4b5f6d4e1eef00c8cd889edb6035c21675d1bb1f45c9f" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "hex", "procfs-core 0.17.0", "rustix", @@ -5973,7 +5962,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "chrono", "hex", ] @@ -5984,7 +5973,7 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "239df02d8349b06fc07398a3a1697b06418223b1c7725085e801e7c0fc6a12ec" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "hex", ] @@ -5996,7 +5985,7 @@ checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.7.0", + "bitflags 2.6.0", "lazy_static", "num-traits", "rand 0.8.5", @@ -6026,7 +6015,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -6081,7 +6070,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls", "socket2", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -6100,7 +6089,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.11", + "thiserror 2.0.9", "tinyvec", "tracing", "web-time", @@ -6231,7 +6220,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdef7f9be5c0122f890d58bdf4d964349ba6a6161f705907526d891efabba57d" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "cassowary", "compact_str", "crossterm", @@ -6252,7 +6241,7 @@ version = "11.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", ] [[package]] @@ -6287,7 +6276,7 @@ version = "0.5.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", ] [[package]] @@ -6543,7 +6532,7 @@ dependencies = [ "reth-rpc-types-compat", "reth-tracing", "serde", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tower 0.4.13", "tracing", @@ -6701,7 +6690,7 @@ dependencies = [ "secp256k1", "serde", "snmalloc-rs", - "thiserror 2.0.11", + "thiserror 2.0.9", "tikv-jemallocator", "tracy-client", ] @@ -6737,7 +6726,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -6843,7 +6832,7 @@ dependencies = [ "sysinfo", "tempfile", "test-fuzz", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -6901,7 +6890,7 @@ dependencies = [ "reth-trie-db", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", "tracing", ] @@ -6943,7 +6932,7 @@ dependencies = [ "schnellru", "secp256k1", "serde", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", "tracing", @@ -6968,7 +6957,7 @@ dependencies = [ "reth-network-peers", "reth-tracing", "secp256k1", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -6995,7 +6984,7 @@ dependencies = [ "secp256k1", "serde", "serde_with", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", "tracing", @@ -7033,7 +7022,7 @@ dependencies = [ "reth-testing-utils", "reth-tracing", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", "tokio-util", @@ -7109,7 +7098,7 @@ dependencies = [ "secp256k1", "sha2 0.10.8", "sha3", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", "tokio-util", @@ -7163,7 +7152,7 @@ dependencies = [ "reth-primitives-traits", "reth-trie", "serde", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", ] @@ -7190,7 +7179,7 @@ dependencies = [ "reth-prune", "reth-stages-api", "reth-tasks", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", ] @@ -7249,7 +7238,7 @@ dependencies = [ "reth-trie-sparse", "revm-primitives", "schnellru", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -7274,6 +7263,7 @@ dependencies = [ "reth-fs-util", "reth-payload-validator", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-types-compat", @@ -7294,7 +7284,7 @@ dependencies = [ "reth-execution-errors", "reth-fs-util", "reth-storage-errors", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -7327,7 +7317,7 @@ dependencies = [ "serde", "snap", "test-fuzz", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", "tokio-util", @@ -7356,7 +7346,7 @@ dependencies = [ "reth-primitives", "reth-primitives-traits", "serde", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -7543,7 +7533,7 @@ dependencies = [ "reth-prune-types", "reth-storage-errors", "revm-primitives", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -7632,12 +7622,13 @@ dependencies = [ "reth-node-ethereum", "reth-payload-builder", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-tasks", "reth-transaction-pool", "reth-trie-db", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", ] @@ -7664,7 +7655,7 @@ version = "1.1.5" dependencies = [ "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -7707,7 +7698,7 @@ dependencies = [ "rand 0.8.5", "reth-tracing", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", "tokio-util", @@ -7719,7 
+7710,7 @@ dependencies = [ name = "reth-libmdbx" version = "1.1.5" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "byteorder", "codspeed-criterion-compat", "dashmap", @@ -7732,7 +7723,7 @@ dependencies = [ "reth-mdbx-sys", "smallvec", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.9", "tracing", ] @@ -7771,7 +7762,7 @@ dependencies = [ "reqwest", "reth-tracing", "serde_with", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -7831,7 +7822,7 @@ dependencies = [ "serial_test", "smallvec", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", "tokio-util", @@ -7856,7 +7847,7 @@ dependencies = [ "reth-network-types", "reth-tokio-util", "serde", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", ] @@ -7894,7 +7885,7 @@ dependencies = [ "secp256k1", "serde_json", "serde_with", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "url", ] @@ -7925,7 +7916,7 @@ dependencies = [ "reth-fs-util", "serde", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.9", "tracing", "zstd", ] @@ -8055,7 +8046,7 @@ dependencies = [ "serde", "shellexpand", "strum", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "toml", "tracing", @@ -8187,7 +8178,7 @@ dependencies = [ "reth-optimism-forks", "reth-primitives-traits", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -8284,7 +8275,7 @@ dependencies = [ "reth-revm", "revm", "revm-primitives", - "thiserror 2.0.11", + "thiserror 2.0.9", "tracing", ] @@ -8384,13 +8375,14 @@ dependencies = [ "reth-payload-primitives", "reth-payload-util", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-revm", "reth-rpc-types-compat", "reth-transaction-pool", "revm", "sha2 0.10.8", - "thiserror 2.0.11", + "thiserror 2.0.9", "tracing", ] @@ -8461,7 +8453,7 @@ dependencies = [ "reth-transaction-pool", "revm", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -8528,7 +8520,7 @@ dependencies = [ "reth-primitives", "revm-primitives", "serde", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", ] @@ -8627,7 +8619,7 @@ dependencies = [ "serde_json", "serde_with", "test-fuzz", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -8706,7 +8698,7 @@ dependencies = [ "reth-tokio-util", "reth-tracing", "rustc-hash 2.1.0", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -8726,7 +8718,7 @@ dependencies = [ "serde", "serde_json", "test-fuzz", - "thiserror 2.0.11", + "thiserror 2.0.9", "toml", ] @@ -8811,7 +8803,7 @@ dependencies = [ "revm-primitives", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", "tower 0.4.13", @@ -8889,6 +8881,7 @@ dependencies = [ "reth-node-core", "reth-payload-builder", "reth-primitives", + "reth-primitives-traits", "reth-provider", "reth-rpc", "reth-rpc-api", @@ -8903,7 +8896,7 @@ dependencies = [ "reth-transaction-pool", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-util", "tower 0.4.13", @@ -8942,7 +8935,7 @@ dependencies = [ "reth-tokio-util", "reth-transaction-pool", "serde", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -9026,7 +9019,7 @@ dependencies = [ "schnellru", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", "tracing", @@ -9126,7 +9119,7 @@ dependencies = [ "reth-trie", "reth-trie-db", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -9154,7 +9147,7 @@ dependencies = [ "reth-static-file-types", 
"reth-testing-utils", "reth-tokio-util", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", "tracing", @@ -9248,7 +9241,7 @@ dependencies = [ "reth-fs-util", "reth-primitives-traits", "reth-static-file-types", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -9262,7 +9255,7 @@ dependencies = [ "pin-project", "rayon", "reth-metrics", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tracing", "tracing-futures", @@ -9316,7 +9309,7 @@ dependencies = [ "aquamarine", "assert_matches", "auto_impl", - "bitflags 2.7.0", + "bitflags 2.6.0", "codspeed-criterion-compat", "futures-util", "metrics", @@ -9347,7 +9340,7 @@ dependencies = [ "serde_json", "smallvec", "tempfile", - "thiserror 2.0.11", + "thiserror 2.0.9", "tokio", "tokio-stream", "tracing", @@ -9462,7 +9455,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "reth-trie-db", - "thiserror 2.0.11", + "thiserror 2.0.9", "tracing", ] @@ -9487,7 +9480,7 @@ dependencies = [ "reth-trie", "reth-trie-common", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -9529,7 +9522,7 @@ dependencies = [ "colorchoice", "revm", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.9", ] [[package]] @@ -9572,7 +9565,7 @@ dependencies = [ "alloy-eip7702", "alloy-primitives", "auto_impl", - "bitflags 2.7.0", + "bitflags 2.6.0", "bitvec", "c-kzg", "cfg-if", @@ -9674,9 +9667,9 @@ dependencies = [ [[package]] name = "roaring" -version = "0.10.10" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a652edd001c53df0b3f96a36a8dc93fce6866988efc16808235653c6bcac8bf2" +checksum = "41589aba99537475bf697f2118357cad1c31590c5a1b9f6d9fc4ad6d07503661" dependencies = [ "bytemuck", "byteorder", @@ -9723,7 +9716,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.96", + "syn 2.0.95", "unicode-ident", ] @@ -9811,7 +9804,7 @@ version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -9820,9 +9813,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.21" +version = "0.23.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" +checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" dependencies = [ "log", "once_cell", @@ -10041,7 +10034,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -10055,7 +10048,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.7.0", + "bitflags 2.6.0", "core-foundation 0.10.0", "core-foundation-sys", "libc", @@ -10128,7 +10121,7 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -10163,7 +10156,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -10214,7 +10207,7 @@ dependencies = [ "darling", 
"proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -10247,7 +10240,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -10549,7 +10542,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -10573,9 +10566,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "symbolic-common" -version = "12.13.1" +version = "12.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf08b42a6f9469bd8584daee39a1352c8133ccabc5151ccccb15896ef047d99a" +checksum = "cd33e73f154e36ec223c18013f7064a2c120f1162fc086ac9933542def186b00" dependencies = [ "debugid", "memmap2", @@ -10585,9 +10578,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.13.1" +version = "12.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f73b5a5bd4da72720c45756a2d11edf110116b87f998bda59b97be8c2c7cf1" +checksum = "89e51191290147f071777e37fe111800bb82a9059f9c95b19d2dd41bfeddf477" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -10607,9 +10600,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.96" +version = "2.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" dependencies = [ "proc-macro2", "quote", @@ -10625,7 +10618,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -10645,7 +10638,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -10723,7 +10716,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -10756,11 +10749,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.11" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.9", ] [[package]] @@ -10771,18 +10764,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -10915,9 +10908,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.43.0" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", @@ -10933,13 +10926,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.5.0" +version = 
"2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -11073,7 +11066,7 @@ checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", "base64 0.22.1", - "bitflags 2.7.0", + "bitflags 2.6.0", "bytes", "futures-core", "futures-util", @@ -11140,7 +11133,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -11435,9 +11428,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.11.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b913a3b5fe84142e269d63cc62b64319ccaf89b748fc31fe025177f767a756c4" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "getrandom 0.2.15", ] @@ -11476,7 +11469,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -11546,7 +11539,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", "wasm-bindgen-shared", ] @@ -11581,7 +11574,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11747,7 +11740,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -11758,7 +11751,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -11769,7 +11762,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -11780,7 +11773,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -11972,9 +11965,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.24" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" +checksum = "39281189af81c07ec09db316b302a3e67bf9bd7cbf6c820b50e35fee9c2fa980" dependencies = [ "memchr", ] @@ -12055,7 +12048,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", "synstructure", ] @@ -12077,7 +12070,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -12097,7 +12090,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", "synstructure", ] @@ -12118,7 +12111,7 @@ checksum = 
"ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] @@ -12140,7 +12133,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.95", ] [[package]] diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index 2866cf8fb45c0..12caecbd033ec 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -12,13 +12,14 @@ use crate::{ valid_payload::{call_forkchoice_updated, call_new_payload}, }; use alloy_primitives::B256; -use alloy_provider::Provider; +use alloy_provider::{network::AnyRpcBlock, Provider}; use alloy_rpc_types_engine::ForkchoiceState; use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; use reth_primitives::SealedBlock; +use reth_primitives_traits::SealedHeader; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -46,7 +47,7 @@ impl Command { let block_res = block_provider.get_block_by_number(next_block.into(), true.into()).await; let block = block_res.unwrap().unwrap(); - let block: SealedBlock = block.try_into().unwrap(); + let block = from_any_rpc_block(block); let head_block_hash = block.hash(); let safe_block_hash = block_provider .get_block_by_number(block.number.saturating_sub(32).into(), false.into()); @@ -161,3 +162,19 @@ impl Command { Ok(()) } } + +// TODO(mattsse): integrate in alloy +pub(crate) fn from_any_rpc_block(block: AnyRpcBlock) -> SealedBlock { + let block = block.inner; + let block_hash = block.header.hash; + let block = block.try_map_transactions(|tx| tx.try_into()).unwrap(); + + SealedBlock::from_sealed_parts( + SealedHeader::new(block.header.inner.into_header_with_defaults(), block_hash), + reth_primitives::BlockBody { + transactions: block.transactions.into_transactions().collect(), + ommers: Default::default(), + withdrawals: block.withdrawals.map(|w| w.into_inner().into()), + }, + ) +} diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 4485e3fa79ec1..f72b9f2d5d6a7 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -3,6 +3,7 @@ use crate::{ bench::{ context::BenchContext, + new_payload_fcu::from_any_rpc_block, output::{ NewPayloadResult, TotalGasOutput, TotalGasRow, GAS_OUTPUT_SUFFIX, NEW_PAYLOAD_OUTPUT_SUFFIX, @@ -16,7 +17,6 @@ use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; use reth_node_core::args::BenchmarkArgs; -use reth_primitives::SealedBlock; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::time::Instant; use tracing::{debug, info}; @@ -46,7 +46,7 @@ impl Command { let block_res = block_provider.get_block_by_number(next_block.into(), true.into()).await; let block = block_res.unwrap().unwrap(); - let block: SealedBlock = block.try_into().unwrap(); + let block = from_any_rpc_block(block); next_block += 1; sender.send(block).await.unwrap(); diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 40110fe849888..396fe621451ff 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -1,5 +1,5 @@ //! Command for debugging block building. 
-use alloy_consensus::TxEip4844; +use alloy_consensus::{BlockHeader, TxEip4844}; use alloy_eips::{ eip2718::Encodable2718, eip4844::{env_settings::EnvKzgSettings, BlobTransactionSidecar}, @@ -24,10 +24,8 @@ use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes}; use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthExecutorProvider}; -use reth_primitives::{ - BlockExt, EthPrimitives, SealedBlockFor, SealedBlockWithSenders, SealedHeader, Transaction, - TransactionSigned, -}; +use reth_primitives::{EthPrimitives, SealedBlock, SealedHeader, Transaction, TransactionSigned}; +use reth_primitives_traits::Block as _; use reth_provider::{ providers::{BlockchainProvider, ProviderNodeTypes}, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, @@ -86,7 +84,7 @@ impl> Command { fn lookup_best_block>( &self, factory: ProviderFactory, - ) -> RethResult>>> { + ) -> RethResult>>> { let provider = factory.provider()?; let best_number = @@ -241,17 +239,14 @@ impl> Command { consensus.validate_header(block.sealed_header())?; consensus.validate_block_pre_execution(block)?; - let senders = block.senders().expect("sender recovery failed"); - let block_with_senders = - SealedBlockWithSenders::>::new(block.clone(), senders).unwrap(); + let block_with_senders = block.clone().try_recover().unwrap(); let state_provider = blockchain_db.latest()?; let db = StateProviderDatabase::new(&state_provider); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); - let block_execution_output = - executor.execute(&block_with_senders.clone().unseal())?; + let block_execution_output = executor.execute(&block_with_senders)?; let execution_outcome = ExecutionOutcome::from((block_execution_output, block.number)); debug!(target: "reth::cli", ?execution_outcome, "Executed block"); @@ -262,7 +257,7 @@ impl> Command { hashed_post_state.clone(), )?; - if state_root != block_with_senders.state_root { + if state_root != block_with_senders.state_root() { eyre::bail!( "state root mismatch. expected: {}. got: {}", block_with_senders.state_root, diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index cc0c701067507..ef21f144a38a0 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -1,6 +1,6 @@ //! Command for debugging execution. 
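As the build_block changes above show, the manual pairing of `block.senders()` with a `SealedBlockWithSenders` constructor collapses into one fallible call. A sketch of the new flow, assuming the concrete Ethereum block type; the error conversion via `?` is an assumption:

```rust
use reth_primitives::{Block, RecoveredBlock, SealedBlock};

// `try_recover()` recovers senders and pairs them with the sealed block in one
// step; executors consume the RecoveredBlock directly, with no `.unseal()`.
fn recover_for_execution(
    block: &SealedBlock<Block>,
) -> eyre::Result<RecoveredBlock<Block>> {
    let recovered = block.clone().try_recover()?;
    // Header data is reached through accessors now, e.g. recovered.state_root().
    Ok(recovered)
}
```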
-use crate::{args::NetworkArgs, utils::get_single_header}; +use crate::{api::BlockTy, args::NetworkArgs, utils::get_single_header}; use alloy_eips::BlockHashOrNumber; use alloy_primitives::{BlockNumber, B256}; use clap::Parser; @@ -64,7 +64,7 @@ impl> Command { &self, config: &Config, client: Client, - consensus: Arc>, + consensus: Arc, Error = ConsensusError>>, provider_factory: ProviderFactory, task_executor: &TaskExecutor, static_file_producer: StaticFileProducer>, @@ -172,7 +172,7 @@ impl> Command { let Environment { provider_factory, config, data_dir } = self.env.init::(AccessRights::RW)?; - let consensus: Arc> = + let consensus: Arc, Error = ConsensusError>> = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); // Configure and build network diff --git a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs index d4c0f3c6c4088..30ba4ec9070fb 100644 --- a/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs +++ b/bin/reth/src/commands/debug_cmd/in_memory_merkle.rs @@ -1,9 +1,11 @@ //! Command for debugging in-memory merkle trie calculation. use crate::{ + api::BlockTy, args::NetworkArgs, utils::{get_single_body, get_single_header}, }; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::Parser; @@ -18,9 +20,9 @@ use reth_evm::execute::{BlockExecutorProvider, Executor}; use reth_execution_types::ExecutionOutcome; use reth_network::{BlockDownloaderProvider, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_api::{BlockTy, NodePrimitives}; +use reth_node_api::NodePrimitives; use reth_node_ethereum::{consensus::EthBeaconConsensus, EthExecutorProvider}; -use reth_primitives::{BlockExt, EthPrimitives}; +use reth_primitives::{EthPrimitives, SealedBlock}; use reth_provider::{ providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory, HashedPostStateProvider, HashingWriter, LatestStateProviderRef, OriginalValuesKnown, @@ -135,25 +137,19 @@ impl> Command { let client = fetch_client.clone(); let chain = provider_factory.chain_spec(); let consensus = Arc::new(EthBeaconConsensus::new(chain.clone())); - let block = (move || get_single_body(client.clone(), header.clone(), consensus.clone())) - .retry(backoff) - .notify( - |err, _| warn!(target: "reth::cli", "Error requesting body: {err}. Retrying..."), - ) - .await?; + let block: SealedBlock> = (move || { + get_single_body(client.clone(), header.clone(), consensus.clone()) + }) + .retry(backoff) + .notify(|err, _| warn!(target: "reth::cli", "Error requesting body: {err}. 
Retrying...")) + .await?; let state_provider = LatestStateProviderRef::new(&provider); let db = StateProviderDatabase::new(&state_provider); let executor = EthExecutorProvider::ethereum(provider_factory.chain_spec()).executor(db); - let block_execution_output = executor.execute( - &block - .clone() - .unseal::>() - .with_recovered_senders() - .ok_or(BlockValidationError::SenderRecoveryError)?, - )?; - let execution_outcome = ExecutionOutcome::from((block_execution_output, block.number)); + let block_execution_output = executor.execute(&block.clone().try_recover()?)?; + let execution_outcome = ExecutionOutcome::from((block_execution_output, block.number())); // Unpacked `BundleState::state_root_slow` function let (in_memory_state_root, in_memory_updates) = StateRoot::overlay_root_with_updates( @@ -161,7 +157,7 @@ impl> Command { state_provider.hashed_post_state(execution_outcome.state()), )?; - if in_memory_state_root == block.state_root { + if in_memory_state_root == block.state_root() { info!(target: "reth::cli", state_root = ?in_memory_state_root, "Computed in-memory state root matches"); return Ok(()) } @@ -170,28 +166,27 @@ impl> Command { // Insert block, state and hashes provider_rw.insert_historical_block( - block - .clone() - .try_seal_with_senders() - .map_err(|_| BlockValidationError::SenderRecoveryError)?, + block.clone().try_recover().map_err(|_| BlockValidationError::SenderRecoveryError)?, )?; provider_rw.write_state( &execution_outcome, OriginalValuesKnown::No, StorageLocation::Database, )?; - let storage_lists = provider_rw.changed_storages_with_range(block.number..=block.number)?; + let storage_lists = + provider_rw.changed_storages_with_range(block.number..=block.number())?; let storages = provider_rw.plain_state_storages(storage_lists)?; provider_rw.insert_storage_for_hashing(storages)?; - let account_lists = provider_rw.changed_accounts_with_range(block.number..=block.number)?; + let account_lists = + provider_rw.changed_accounts_with_range(block.number..=block.number())?; let accounts = provider_rw.basic_accounts(account_lists)?; provider_rw.insert_account_for_hashing(accounts)?; let (state_root, incremental_trie_updates) = StateRoot::incremental_root_with_updates( provider_rw.tx_ref(), - block.number..=block.number, + block.number..=block.number(), )?; - if state_root != block.state_root { + if state_root != block.state_root() { eyre::bail!( "Computed incremental state root mismatch. Expected: {:?}. Got: {:?}", block.state_root, diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index acc346d9e3ad9..d7f3516500a6a 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -1,5 +1,6 @@ //! Command for debugging merkle tree calculation. 
use crate::{args::NetworkArgs, utils::get_single_header}; +use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use backon::{ConstantBuilder, Retryable}; use clap::Parser; @@ -128,7 +129,7 @@ impl> Command { info!(target: "reth::cli", target_block_number=self.to, "Finished downloading tip of block range"); // build the full block client - let consensus: Arc> = + let consensus: Arc, Error = ConsensusError>> = Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); let block_range_client = FullBlockClient::new(fetch_client, consensus); @@ -153,18 +154,17 @@ impl> Command { for block in blocks.into_iter().rev() { let block_number = block.number; - let sealed_block = block - .try_seal_with_senders::>() - .map_err(|block| eyre::eyre!("Error sealing block with senders: {block:?}"))?; + let sealed_block = + block.try_recover().map_err(|_| eyre::eyre!("Error sealing block with senders"))?; trace!(target: "reth::cli", block_number, "Executing block"); provider_rw.insert_block(sealed_block.clone(), StorageLocation::Database)?; - td += sealed_block.difficulty; + td += sealed_block.difficulty(); let mut executor = executor_provider.batch_executor(StateProviderDatabase::new( LatestStateProviderRef::new(&provider_rw), )); - executor.execute_and_verify_one(&sealed_block.clone().unseal())?; + executor.execute_and_verify_one(&sealed_block)?; let execution_outcome = executor.finalize(); provider_rw.write_state( diff --git a/book/sources/exex/tracking-state/src/bin/2.rs b/book/sources/exex/tracking-state/src/bin/2.rs index 44b023967a8cf..dba9714c52785 100644 --- a/book/sources/exex/tracking-state/src/bin/2.rs +++ b/book/sources/exex/tracking-state/src/bin/2.rs @@ -6,7 +6,11 @@ use std::{ use alloy_primitives::BlockNumber; use futures_util::{FutureExt, TryStreamExt}; -use reth::{api::FullNodeComponents, builder::NodeTypes, primitives::EthPrimitives}; +use reth::{ + api::{BlockBody, FullNodeComponents}, + builder::NodeTypes, + primitives::EthPrimitives, +}; use reth_exex::{ExExContext, ExExEvent}; use reth_node_ethereum::EthereumNode; use reth_tracing::tracing::info; @@ -36,7 +40,7 @@ impl>> Fut while let Some(notification) = ready!(this.ctx.notifications.try_next().poll_unpin(cx))? 
{ if let Some(reverted_chain) = notification.reverted_chain() { this.transactions = this.transactions.saturating_sub( - reverted_chain.blocks_iter().map(|b| b.body().transactions.len() as u64).sum(), + reverted_chain.blocks_iter().map(|b| b.body().transaction_count() as u64).sum(), ); } @@ -45,7 +49,7 @@ impl>> Fut this.transactions += committed_chain .blocks_iter() - .map(|b| b.body().transactions.len() as u64) + .map(|b| b.body().transaction_count() as u64) .sum::(); this.ctx diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 2b06bd93707b4..639b211d54ec6 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -47,6 +47,7 @@ alloy-signer-local = { workspace = true, optional = true } rand = { workspace = true, optional = true } [dev-dependencies] +reth-primitives-traits = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index fac148d4a716d..59e01055f7a43 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -12,10 +12,9 @@ use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_metrics::{metrics::Gauge, Metrics}; use reth_primitives::{ - BlockWithSenders, EthPrimitives, NodePrimitives, Receipts, SealedBlock, SealedBlockFor, - SealedBlockWithSenders, SealedHeader, + EthPrimitives, NodePrimitives, Receipts, RecoveredBlock, SealedBlock, SealedHeader, }; -use reth_primitives_traits::{Block, BlockBody as _, SignedTransaction}; +use reth_primitives_traits::{BlockBody as _, SignedTransaction}; use reth_storage_api::StateProviderBox; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::{collections::BTreeMap, sync::Arc, time::Instant}; @@ -160,7 +159,7 @@ impl CanonicalInMemoryStateInner { } type PendingBlockAndReceipts = - (SealedBlockFor<::Block>, Vec>); + (SealedBlock<::Block>, Vec>); /// This type is responsible for providing the blocks, receipts, and state for /// all canonical blocks not on disk yet and keeps track of the block range that @@ -471,17 +470,17 @@ impl CanonicalInMemoryState { } /// Returns the `SealedBlock` corresponding to the pending state. - pub fn pending_block(&self) -> Option> { + pub fn pending_block(&self) -> Option> { self.pending_state().map(|block_state| block_state.block_ref().block().clone()) } - /// Returns the `SealedBlockWithSenders` corresponding to the pending state. - pub fn pending_block_with_senders(&self) -> Option> + /// Returns the `RecoveredBlock` corresponding to the pending state. + pub fn pending_recovered_block(&self) -> Option> where N::SignedTx: SignedTransaction, { self.pending_state() - .and_then(|block_state| block_state.block_ref().block().clone().seal_with_senders()) + .and_then(|block_state| block_state.block_ref().block().clone().try_recover().ok()) } /// Returns a tuple with the `SealedBlock` corresponding to the pending @@ -636,19 +635,11 @@ impl BlockState { &self.block } - /// Returns the block with senders for the state. - pub fn block_with_senders(&self) -> BlockWithSenders { + /// Returns a clone of the block with recovered senders for the state. 
+ pub fn clone_recovered_block(&self) -> RecoveredBlock { let block = self.block.block().clone(); let senders = self.block.senders().clone(); - let (header, body) = block.split(); - BlockWithSenders::new_unchecked(N::Block::new(header.unseal(), body), senders) - } - - /// Returns the sealed block with senders for the state. - pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { - let block = self.block.block().clone(); - let senders = self.block.senders().clone(); - SealedBlockWithSenders::new_unchecked(block, senders) + RecoveredBlock::new_sealed(block, senders) } /// Returns the hash of executed block that determines the state. @@ -803,7 +794,7 @@ impl BlockState { #[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct ExecutedBlock { /// Sealed block the rest of fields refer to. - pub block: Arc>, + pub block: Arc>, /// Block's senders. pub senders: Arc>, /// Block's execution outcome. @@ -817,7 +808,7 @@ pub struct ExecutedBlock { impl ExecutedBlock { /// [`ExecutedBlock`] constructor. pub const fn new( - block: Arc>, + block: Arc>, senders: Arc>, execution_output: Arc>, hashed_state: Arc, @@ -827,7 +818,7 @@ impl ExecutedBlock { } /// Returns a reference to the executed block. - pub fn block(&self) -> &SealedBlockFor { + pub fn block(&self) -> &SealedBlock { &self.block } @@ -836,11 +827,11 @@ impl ExecutedBlock { &self.senders } - /// Returns a [`SealedBlockWithSenders`] + /// Returns a [`RecoveredBlock`] /// /// Note: this clones the block and senders. - pub fn sealed_block_with_senders(&self) -> SealedBlockWithSenders { - SealedBlockWithSenders::new_unchecked((*self.block).clone(), (*self.senders).clone()) + pub fn clone_recovered_block(&self) -> RecoveredBlock { + RecoveredBlock::new_sealed((*self.block).clone(), (*self.senders).clone()) } /// Returns a reference to the block's execution outcome @@ -899,7 +890,7 @@ impl> NewCanonicalChain { Self::Commit { new } => { let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| { chain.append_block( - exec.sealed_block_with_senders(), + exec.clone_recovered_block(), exec.execution_outcome().clone(), ); chain @@ -909,14 +900,14 @@ impl> NewCanonicalChain { Self::Reorg { new, old } => { let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| { chain.append_block( - exec.sealed_block_with_senders(), + exec.clone_recovered_block(), exec.execution_outcome().clone(), ); chain })); let old = Arc::new(old.iter().fold(Chain::default(), |mut chain, exec| { chain.append_block( - exec.sealed_block_with_senders(), + exec.clone_recovered_block(), exec.execution_outcome().clone(), ); chain @@ -930,7 +921,7 @@ impl> NewCanonicalChain { /// /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least /// 1 new block. - pub fn tip(&self) -> &SealedBlockFor { + pub fn tip(&self) -> &SealedBlock { match self { Self::Commit { new } | Self::Reorg { new, .. 
} => { new.last().expect("non empty blocks").block() @@ -1325,8 +1316,8 @@ mod tests { // Check the pending block with senders assert_eq!( - state.pending_block_with_senders().unwrap(), - block2.block().clone().seal_with_senders().unwrap() + state.pending_recovered_block().unwrap(), + block2.block().clone().try_recover().unwrap() ); // Check the pending block and receipts @@ -1529,7 +1520,7 @@ mod tests { chain_commit.to_chain_notification(), CanonStateNotification::Commit { new: Arc::new(Chain::new( - vec![block0.sealed_block_with_senders(), block1.sealed_block_with_senders()], + vec![block0.clone_recovered_block(), block1.clone_recovered_block()], sample_execution_outcome.clone(), None )) @@ -1546,12 +1537,12 @@ mod tests { chain_reorg.to_chain_notification(), CanonStateNotification::Reorg { old: Arc::new(Chain::new( - vec![block1.sealed_block_with_senders(), block2.sealed_block_with_senders()], + vec![block1.clone_recovered_block(), block2.clone_recovered_block()], sample_execution_outcome.clone(), None )), new: Arc::new(Chain::new( - vec![block1a.sealed_block_with_senders(), block2a.sealed_block_with_senders()], + vec![block1a.clone_recovered_block(), block2a.clone_recovered_block()], sample_execution_outcome, None )) diff --git a/crates/chain-state/src/notifications.rs b/crates/chain-state/src/notifications.rs index 808b6b42f8923..97a4faaefa60a 100644 --- a/crates/chain-state/src/notifications.rs +++ b/crates/chain-state/src/notifications.rs @@ -3,7 +3,7 @@ use alloy_eips::eip2718::Encodable2718; use derive_more::{Deref, DerefMut}; use reth_execution_types::{BlockReceipts, Chain}; -use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, RecoveredBlock, SealedHeader}; use reth_storage_api::NodePrimitivesProvider; use std::{ pin::Pin, @@ -123,7 +123,7 @@ impl CanonStateNotification { /// /// Returns the new tip for [`Self::Reorg`] and [`Self::Commit`] variants which commit at least /// 1 new block. - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &RecoveredBlock { match self { Self::Commit { new } | Self::Reorg { new, .. } => new.tip(), } @@ -221,7 +221,7 @@ mod tests { #[test] fn test_commit_notification() { - let block: SealedBlockWithSenders = Default::default(); + let block: RecoveredBlock = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); @@ -254,7 +254,7 @@ mod tests { #[test] fn test_reorg_notification() { - let block: SealedBlockWithSenders = Default::default(); + let block: RecoveredBlock = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); let block3_hash = B256::new([0x03; 32]); @@ -306,10 +306,12 @@ mod tests { let tx = TransactionSigned::default(); body.transactions.push(tx); - let block: SealedBlockWithSenders = - SealedBlock::new(SealedHeader::seal(alloy_consensus::Header::default()), body) - .seal_with_senders() - .unwrap(); + let block = SealedBlock::>::from_sealed_parts( + SealedHeader::seal_slow(alloy_consensus::Header::default()), + body, + ) + .try_recover() + .unwrap(); // Create a clone of the default block and customize it to act as block1. let mut block1 = block.clone(); @@ -372,10 +374,13 @@ mod tests { // Define block1 for the old chain segment, which will be reverted. 
let mut body = BlockBody::::default(); body.transactions.push(TransactionSigned::default()); - let mut old_block1: SealedBlockWithSenders = - SealedBlock::new(SealedHeader::seal(alloy_consensus::Header::default()), body) - .seal_with_senders() - .unwrap(); + let mut old_block1 = + SealedBlock::>::from_sealed_parts( + SealedHeader::seal_slow(alloy_consensus::Header::default()), + body, + ) + .try_recover() + .unwrap(); old_block1.set_block_number(1); old_block1.set_hash(B256::new([0x01; 32])); @@ -400,10 +405,13 @@ mod tests { // Define block2 for the new chain segment, which will be committed. let mut body = BlockBody::::default(); body.transactions.push(TransactionSigned::default()); - let mut new_block1: SealedBlockWithSenders = - SealedBlock::new(SealedHeader::seal(alloy_consensus::Header::default()), body) - .seal_with_senders() - .unwrap(); + let mut new_block1 = + SealedBlock::>::from_sealed_parts( + SealedHeader::seal_slow(alloy_consensus::Header::default()), + body, + ) + .try_recover() + .unwrap(); new_block1.set_block_number(2); new_block1.set_hash(B256::new([0x02; 32])); diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index a0ddeb8d0fe2c..cce88d713612e 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -17,8 +17,8 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, - BlockBody, EthPrimitives, NodePrimitives, Receipt, Receipts, RecoveredTx, SealedBlock, - SealedBlockWithSenders, SealedHeader, Transaction, TransactionSigned, + BlockBody, EthPrimitives, NodePrimitives, Receipt, Receipts, RecoveredBlock, RecoveredTx, + SealedBlock, SealedHeader, Transaction, TransactionSigned, }; use reth_primitives_traits::Account; use reth_storage_api::NodePrimitivesProvider; @@ -86,12 +86,12 @@ impl TestBlockBuilder { U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS) } - /// Generates a random [`SealedBlockWithSenders`]. + /// Generates a random [`RecoveredBlock`]. pub fn generate_random_block( &mut self, number: BlockNumber, parent_hash: B256, - ) -> SealedBlockWithSenders { + ) -> RecoveredBlock { let mut rng = thread_rng(); let mock_tx = |nonce: u64| -> RecoveredTx<_> { @@ -168,8 +168,8 @@ impl TestBlockBuilder { ..Default::default() }; - let block = SealedBlock::new( - SealedHeader::seal(header), + let block = SealedBlock::from_sealed_parts( + SealedHeader::seal_slow(header), BlockBody { transactions: transactions.into_iter().map(|tx| tx.into_tx()).collect(), ommers: Vec::new(), @@ -177,7 +177,8 @@ impl TestBlockBuilder { }, ); - SealedBlockWithSenders::new(block, vec![self.signer; num_txs as usize]).unwrap() + RecoveredBlock::try_recover_sealed_with_senders(block, vec![self.signer; num_txs as usize]) + .unwrap() } /// Creates a fork chain with the given base block. 
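A sketch of driving the reworked test builder, before the fork-related hunk continues below. `TestBlockBuilder::eth()` as the Ethereum-primitives constructor and the test-utils import path are assumptions:

```rust
use alloy_primitives::B256;
use reth_chain_state::test_utils::TestBlockBuilder;

#[test]
fn fork_chain_sketch() {
    let mut builder = TestBlockBuilder::eth();
    // Every generated block is now a RecoveredBlock; senders travel with it.
    let genesis = builder.generate_random_block(0, B256::ZERO);
    // Fork blocks thread the parent hash through `clone_sealed_block()` internally.
    let fork = builder.create_fork(genesis.sealed_block(), 3);
    assert_eq!(fork.len(), 3);
    assert_eq!(fork[0].parent_hash(), genesis.hash());
}
```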
@@ -185,13 +186,13 @@ impl TestBlockBuilder { &mut self, base_block: &SealedBlock, length: u64, - ) -> Vec { + ) -> Vec> { let mut fork = Vec::with_capacity(length as usize); let mut parent = base_block.clone(); for _ in 0..length { let block = self.generate_random_block(parent.number + 1, parent.hash()); - parent = block.block.clone(); + parent = block.clone_sealed_block(); fork.push(block); } @@ -207,7 +208,7 @@ impl TestBlockBuilder { ) -> ExecutedBlock { let block_with_senders = self.generate_random_block(block_number, parent_hash); - let (block, senders) = block_with_senders.split(); + let (block, senders) = block_with_senders.split_sealed(); ExecutedBlock::new( Arc::new(block), Arc::new(senders), @@ -258,7 +259,10 @@ impl TestBlockBuilder { /// Returns the execution outcome for a block created with this builder. /// In order to properly include the bundle state, the signer balance is /// updated. - pub fn get_execution_outcome(&mut self, block: SealedBlockWithSenders) -> ExecutionOutcome { + pub fn get_execution_outcome( + &mut self, + block: RecoveredBlock, + ) -> ExecutionOutcome { let receipts = block .body() .transactions diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index 7f5fd2d2f1b38..1f297ad33bdde 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -20,7 +20,7 @@ use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; -use reth_node_api::{BlockTy, BodyTy, HeaderTy}; +use reth_node_api::BlockTy; use reth_node_core::version::SHORT_VERSION; use reth_node_events::node::NodeEvent; use reth_provider::{ @@ -169,7 +169,7 @@ pub fn build_import_pipeline( ) -> eyre::Result<(Pipeline, impl Stream>)> where N: ProviderNodeTypes + CliNodeTypes, - C: Consensus, BodyTy, Error = ConsensusError> + 'static, + C: Consensus, Error = ConsensusError> + 'static, E: BlockExecutorProvider, { if !file_client.has_canonical_blocks() { diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index f8f72709a7e17..7f94a8168e19e 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,16 +1,14 @@ +use alloy_consensus::{BlockHeader, Header}; use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rlp::Decodable; - -use alloy_consensus::{BlockHeader, Header}; use reth_codecs::Compact; use reth_node_builder::NodePrimitives; -use reth_primitives::{SealedBlock, SealedBlockWithSenders, SealedHeader, StaticFileSegment}; +use reth_primitives::{SealedBlock, SealedHeader, StaticFileSegment}; use reth_provider::{ providers::StaticFileProvider, BlockWriter, StageCheckpointWriter, StaticFileProviderFactory, StaticFileWriter, StorageLocation, }; use reth_stages::{StageCheckpoint, StageId}; - use std::{fs::File, io::Read, path::PathBuf}; use tracing::info; @@ -69,8 +67,12 @@ where + StaticFileProviderFactory>, { provider_rw.insert_block( - SealedBlockWithSenders::new(SealedBlock::new(header.clone(), Default::default()), vec![]) - .expect("no senders or txes"), + SealedBlock::<::Block>::from_sealed_parts( + header.clone(), + Default::default(), + ) + .try_recover() + .expect("no senders or txes"), StorageLocation::Database, )?; diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 448b3a16830f9..75dc4c00cf689 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -14,7 +14,6 @@ workspace = 
true # reth reth-chainspec.workspace = true reth-consensus.workspace = true -reth-primitives.workspace = true # ethereum alloy-primitives.workspace = true @@ -23,6 +22,7 @@ alloy-consensus.workspace = true alloy-eips.workspace = true [dev-dependencies] +reth-primitives.workspace = true alloy-consensus.workspace = true rand.workspace = true @@ -34,6 +34,6 @@ std = [ "alloy-primitives/std", "reth-chainspec/std", "reth-consensus/std", - "reth-primitives/std", - "reth-primitives-traits/std" + "reth-primitives-traits/std", + "reth-primitives/std" ] diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 9e6a2ad90173e..530c508766b2d 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -1,11 +1,14 @@ //! Collection of methods for block validation. -use alloy_consensus::{constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{ + constants::MAXIMUM_EXTRA_DATA_SIZE, BlockHeader as _, EMPTY_OMMER_ROOT_HASH, +}; use alloy_eips::{calc_next_block_base_fee, eip4844::DATA_GAS_PER_BLOB, eip7840::BlobParams}; use reth_chainspec::{EthChainSpec, EthereumHardfork, EthereumHardforks}; use reth_consensus::ConsensusError; -use reth_primitives::SealedBlock; -use reth_primitives_traits::{BlockBody, GotExpected, SealedHeader}; +use reth_primitives_traits::{ + Block, BlockBody, BlockHeader, GotExpected, SealedBlock, SealedHeader, +}; /// Gas used needs to be less than gas limit. Gas used is going to be checked after execution. #[inline] @@ -38,8 +41,8 @@ pub fn validate_header_base_fee( /// /// [EIP-4895]: https://eips.ethereum.org/EIPS/eip-4895 #[inline] -pub fn validate_shanghai_withdrawals( - block: &SealedBlock, +pub fn validate_shanghai_withdrawals( + block: &SealedBlock, ) -> Result<(), ConsensusError> { let withdrawals = block.body().withdrawals().ok_or(ConsensusError::BodyWithdrawalsMissing)?; let withdrawals_root = alloy_consensus::proofs::calculate_withdrawals_root(withdrawals); @@ -59,9 +62,7 @@ pub fn validate_shanghai_withdrawals( /// /// [EIP-4844]: https://eips.ethereum.org/EIPS/eip-4844 #[inline] -pub fn validate_cancun_gas( - block: &SealedBlock, -) -> Result<(), ConsensusError> { +pub fn validate_cancun_gas(block: &SealedBlock) -> Result<(), ConsensusError> { // Check that the blob gas used in the header matches the sum of the blob gas used by each // blob tx let header_blob_gas_used = block.blob_gas_used().ok_or(ConsensusError::BlobGasUsedMissing)?; @@ -127,13 +128,12 @@ where /// - Compares the transactions root in the block header to the block body /// - Pre-execution transaction validation /// - (Optionally) Compares the receipts root in the block header to the block body -pub fn validate_block_pre_execution( - block: &SealedBlock, +pub fn validate_block_pre_execution( + block: &SealedBlock, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> where - H: BlockHeader, - B: BlockBody, + B: Block, ChainSpec: EthereumHardforks, { // Check ommers hash @@ -377,15 +377,13 @@ mod tests { transactions_root: proofs::calculate_transaction_root(&[transaction.clone()]), ..Default::default() }; - let header = SealedHeader::seal(header); - let body = BlockBody { transactions: vec![transaction], ommers: vec![], withdrawals: Some(Withdrawals::default()), }; - let block = SealedBlock::new(header, body); + let block = SealedBlock::seal_slow(alloy_consensus::Block { header, body }); // 10 blobs times the blob gas per blob. 
let expected_blob_gas_used = 10 * DATA_GAS_PER_BLOB; diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 1de99d8278f59..aa14806e4383f 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -16,10 +16,10 @@ use alloy_consensus::Header; use alloy_eips::eip7685::Requests; use alloy_primitives::{BlockHash, BlockNumber, Bloom, B256, U256}; use reth_primitives::{ - BlockBody, BlockWithSenders, EthPrimitives, GotExpected, GotExpectedBoxed, - InvalidTransactionError, NodePrimitives, Receipt, SealedBlock, SealedHeader, + EthPrimitives, GotExpected, GotExpectedBoxed, InvalidTransactionError, NodePrimitives, Receipt, + RecoveredBlock, SealedBlock, SealedHeader, }; -use reth_primitives_traits::constants::MINIMUM_GAS_LIMIT; +use reth_primitives_traits::{constants::MINIMUM_GAS_LIMIT, Block}; /// A consensus implementation that does nothing. pub mod noop; @@ -47,9 +47,7 @@ impl<'a, R> PostExecutionInput<'a, R> { /// [`Consensus`] implementation which knows full node primitives and is able to validation block's /// execution outcome. #[auto_impl::auto_impl(&, Arc)] -pub trait FullConsensus: - AsConsensus -{ +pub trait FullConsensus: AsConsensus { /// Validate a block considering world state, i.e. things that can not be checked before /// execution. /// @@ -58,22 +56,22 @@ pub trait FullConsensus: /// Note: validating blocks does not include other validations of the Consensus fn validate_block_post_execution( &self, - block: &BlockWithSenders, + block: &RecoveredBlock, input: PostExecutionInput<'_, N::Receipt>, ) -> Result<(), ConsensusError>; } /// Consensus is a protocol that chooses canonical chain. #[auto_impl::auto_impl(&, Arc)] -pub trait Consensus: AsHeaderValidator { +pub trait Consensus: AsHeaderValidator { /// The error type related to consensus. type Error; /// Ensures that body field values match the header. fn validate_body_against_header( &self, - body: &B, - header: &SealedHeader, + body: &B::Body, + header: &SealedHeader, ) -> Result<(), Self::Error>; /// Validate a block disregarding world state, i.e. things that can be checked before sender @@ -85,7 +83,7 @@ pub trait Consensus: AsHeaderValidator { /// **This should not be called for the genesis block**. /// /// Note: validating blocks does not include other validations of the Consensus - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), Self::Error>; + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), Self::Error>; } /// HeaderValidator is a protocol that validates headers and their relationships. 
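With the trait keyed on a single `B: Block`, callers name one type parameter where the old form threaded a header type and a body type separately. A sketch of a caller-side helper under the reworked trait above (the helper itself is illustrative, not part of the patch):

```rust
use reth_consensus::{Consensus, ConsensusError};
use reth_primitives_traits::{Block, SealedBlock};

// One `B: Block` carries both header and body via its associated types.
fn pre_validate<B: Block>(
    consensus: &dyn Consensus<B, Error = ConsensusError>,
    block: &SealedBlock<B>,
) -> Result<(), ConsensusError> {
    // Body/header consistency first, then the pre-execution checks.
    consensus.validate_body_against_header(block.body(), block.sealed_header())?;
    consensus.validate_block_pre_execution(block)
}
```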
@@ -170,15 +168,15 @@ impl, H> AsHeaderValidator for T { } /// Helper trait to cast `Arc` to `Arc` -pub trait AsConsensus: Consensus { +pub trait AsConsensus: Consensus { /// Converts the [`Arc`] of self to [`Arc`] of [`HeaderValidator`] - fn as_consensus<'a>(self: Arc) -> Arc + 'a> + fn as_consensus<'a>(self: Arc) -> Arc + 'a> where Self: 'a; } -impl, H, B> AsConsensus for T { - fn as_consensus<'a>(self: Arc) -> Arc + 'a> +impl, B: Block> AsConsensus for T { + fn as_consensus<'a>(self: Arc) -> Arc + 'a> where Self: 'a, { diff --git a/crates/consensus/consensus/src/noop.rs b/crates/consensus/consensus/src/noop.rs index ea269c07dada9..3df809ebe1081 100644 --- a/crates/consensus/consensus/src/noop.rs +++ b/crates/consensus/consensus/src/noop.rs @@ -1,6 +1,7 @@ use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; -use reth_primitives::{BlockWithSenders, NodePrimitives, SealedBlock, SealedHeader}; +use reth_primitives::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; +use reth_primitives_traits::Block; /// A Consensus implementation that does nothing. #[derive(Debug, Copy, Clone, Default)] @@ -29,18 +30,18 @@ impl HeaderValidator for NoopConsensus { } } -impl Consensus for NoopConsensus { +impl Consensus for NoopConsensus { type Error = ConsensusError; fn validate_body_against_header( &self, - _body: &B, - _header: &SealedHeader, + _body: &B::Body, + _header: &SealedHeader, ) -> Result<(), Self::Error> { Ok(()) } - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), Self::Error> { + fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), Self::Error> { Ok(()) } } @@ -48,7 +49,7 @@ impl Consensus for NoopConsensus { impl FullConsensus for NoopConsensus { fn validate_block_post_execution( &self, - _block: &BlockWithSenders, + _block: &RecoveredBlock, _input: PostExecutionInput<'_, N::Receipt>, ) -> Result<(), Self::Error> { Ok(()) diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index 3f26222c4b905..d8a94d270a9c3 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,7 +1,8 @@ use crate::{Consensus, ConsensusError, FullConsensus, HeaderValidator, PostExecutionInput}; use alloy_primitives::U256; use core::sync::atomic::{AtomicBool, Ordering}; -use reth_primitives::{BlockWithSenders, NodePrimitives, SealedBlock, SealedHeader}; +use reth_primitives::{NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; +use reth_primitives_traits::Block; /// Consensus engine implementation for testing #[derive(Debug)] @@ -49,7 +50,7 @@ impl TestConsensus { impl FullConsensus for TestConsensus { fn validate_block_post_execution( &self, - _block: &BlockWithSenders, + _block: &RecoveredBlock, _input: PostExecutionInput<'_, N::Receipt>, ) -> Result<(), ConsensusError> { if self.fail_validation() { @@ -60,13 +61,13 @@ impl FullConsensus for TestConsensus { } } -impl Consensus for TestConsensus { +impl Consensus for TestConsensus { type Error = ConsensusError; fn validate_body_against_header( &self, - _body: &B, - _header: &SealedHeader, + _body: &B::Body, + _header: &SealedHeader, ) -> Result<(), Self::Error> { if self.fail_body_against_header() { Err(ConsensusError::BaseFeeMissing) @@ -75,7 +76,7 @@ impl Consensus for TestConsensus { } } - fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), Self::Error> { + fn validate_block_pre_execution(&self, _block: 
&SealedBlock) -> Result<(), Self::Error> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 29dbc9274591e..e21b85e78a116 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -11,7 +11,7 @@ use eyre::Ok; use futures_util::Future; use reth_chainspec::EthereumHardforks; use reth_network_api::test_utils::PeersHandleProvider; -use reth_node_api::{Block, BlockTy, EngineTypes, FullNodeComponents}; +use reth_node_api::{Block, BlockBody, BlockTy, EngineTypes, FullNodeComponents}; use reth_node_builder::{rpc::RethRpcAddOns, FullNode, NodeTypes, NodeTypesWithEngine}; use reth_node_core::primitives::SignedTransaction; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; @@ -232,7 +232,7 @@ where // get head block from notifications stream and verify the tx has been pushed to the // pool is actually present in the canonical block let head = self.engine_api.canonical_stream.next().await.unwrap(); - let tx = head.tip().transactions().first(); + let tx = head.tip().body().transactions().first(); assert_eq!(tx.unwrap().tx_hash().as_slice(), tip_tx_hash.as_slice()); loop { diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index b7a06951fb47c..b7e3b4f191267 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -8,8 +8,8 @@ use reth_engine_primitives::InvalidBlockHook; use reth_evm::{ state_change::post_block_balance_increments, system_calls::SystemCaller, ConfigureEvm, }; -use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; -use reth_primitives_traits::SignedTransaction; +use reth_primitives::{NodePrimitives, RecoveredBlock, SealedHeader}; +use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_provider::{BlockExecutionOutput, ChainSpecProvider, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, db::states::bundle_state::BundleRetention, DatabaseCommit, @@ -58,7 +58,7 @@ where fn on_invalid_block( &self, parent_header: &SealedHeader, - block: &SealedBlockWithSenders, + block: &RecoveredBlock, output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) -> eyre::Result<()> @@ -87,7 +87,7 @@ where // Re-execute all of the transactions in the block to load all touched accounts into // the cache DB. 
- for tx in block.transactions() { + for tx in block.body().transactions() { self.evm_config.fill_tx_env( evm.tx_mut(), tx, @@ -101,10 +101,8 @@ where // use U256::MAX here for difficulty, because fetching it is annoying // NOTE: This is not mut because we are not doing the DAO irregular state change here - let balance_increments = post_block_balance_increments( - self.provider.chain_spec().as_ref(), - &block.clone().unseal().block, - ); + let balance_increments = + post_block_balance_increments(self.provider.chain_spec().as_ref(), block); // increment balances db.increment_balances(balance_increments)?; @@ -302,7 +300,7 @@ where fn on_invalid_block( &self, parent_header: &SealedHeader, - block: &SealedBlockWithSenders, + block: &RecoveredBlock, output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { diff --git a/crates/engine/primitives/src/event.rs b/crates/engine/primitives/src/event.rs index fdf5b73f1ecaf..20b41d4140993 100644 --- a/crates/engine/primitives/src/event.rs +++ b/crates/engine/primitives/src/event.rs @@ -4,7 +4,7 @@ use crate::ForkchoiceStatus; use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_engine::ForkchoiceState; -use reth_primitives::{EthPrimitives, SealedBlockFor}; +use reth_primitives::{EthPrimitives, SealedBlock}; use reth_primitives_traits::{NodePrimitives, SealedHeader}; use std::{ fmt::{Display, Formatter, Result}, @@ -18,9 +18,9 @@ pub enum BeaconConsensusEngineEvent { /// The fork choice state was updated, and the current fork choice status ForkchoiceUpdated(ForkchoiceState, ForkchoiceStatus), /// A block was added to the fork chain. - ForkBlockAdded(Arc>, Duration), + ForkBlockAdded(Arc>, Duration), /// A block was added to the canonical chain, and the elapsed time validating the block - CanonicalBlockAdded(Arc>, Duration), + CanonicalBlockAdded(Arc>, Duration), /// A canonical chain was committed, and the elapsed time committing the data CanonicalChainCommitted(Box>, Duration), /// The consensus engine is involved in live sync, and has specific progress diff --git a/crates/engine/primitives/src/invalid_block_hook.rs b/crates/engine/primitives/src/invalid_block_hook.rs index cfd127ae6f4c4..c407df2ef4bb4 100644 --- a/crates/engine/primitives/src/invalid_block_hook.rs +++ b/crates/engine/primitives/src/invalid_block_hook.rs @@ -1,6 +1,6 @@ use alloy_primitives::B256; use reth_execution_types::BlockExecutionOutput; -use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, RecoveredBlock, SealedHeader}; use reth_trie::updates::TrieUpdates; /// An invalid block hook. 
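The hook trait below keeps its blanket `Fn` impl, so a closure with the right shape can serve as a hook. A sketch with concrete types assuming `EthPrimitives`; whether a given call site accepts it unboxed depends on its bounds:

```rust
use alloy_primitives::B256;
use reth_primitives::{Receipt, RecoveredBlock, SealedHeader};
use reth_provider::BlockExecutionOutput;
use reth_trie::updates::TrieUpdates;

// Matches the argument list of the blanket impl in the next hunk.
let hook = |parent: &SealedHeader,
            block: &RecoveredBlock<reth_primitives::Block>,
            _output: &BlockExecutionOutput<Receipt>,
            _trie: Option<(&TrieUpdates, B256)>| {
    tracing::warn!(parent = %parent.hash(), invalid = %block.hash(), "invalid block");
};
```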
@@ -9,7 +9,7 @@ pub trait InvalidBlockHook: Send + Sync { fn on_invalid_block( &self, parent_header: &SealedHeader, - block: &SealedBlockWithSenders, + block: &RecoveredBlock, output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ); @@ -20,7 +20,7 @@ where N: NodePrimitives, F: Fn( &SealedHeader, - &SealedBlockWithSenders, + &RecoveredBlock, &BlockExecutionOutput, Option<(&TrieUpdates, B256)>, ) + Send @@ -29,7 +29,7 @@ where fn on_invalid_block( &self, parent_header: &SealedHeader, - block: &SealedBlockWithSenders, + block: &RecoveredBlock, output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index e0b465e985968..1def1a67e2a50 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -33,7 +33,7 @@ pub use reth_payload_primitives::{ PayloadTypes, }; use reth_payload_primitives::{InvalidPayloadAttributesError, PayloadAttributes}; -use reth_primitives::{NodePrimitives, SealedBlockFor}; +use reth_primitives::{NodePrimitives, SealedBlock}; use reth_primitives_traits::Block; use serde::{de::DeserializeOwned, ser::Serialize}; @@ -86,7 +86,7 @@ pub trait EngineTypes: /// Converts a [`BuiltPayload`] into an [`ExecutionPayload`] and [`ExecutionPayloadSidecar`]. fn block_to_payload( - block: SealedBlockFor< + block: SealedBlock< <::Primitives as NodePrimitives>::Block, >, ) -> (ExecutionPayload, ExecutionPayloadSidecar); @@ -109,7 +109,7 @@ pub trait PayloadValidator: fmt::Debug + Send + Sync + Unpin + 'static { &self, payload: ExecutionPayload, sidecar: ExecutionPayloadSidecar, - ) -> Result, PayloadError>; + ) -> Result, PayloadError>; } /// Type that validates the payloads processed by the engine. diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index b099e56ae07bf..81c5ca5ad79ae 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -16,7 +16,7 @@ pub use reth_engine_tree::{ }; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::BlockClient; -use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine}; +use reth_node_types::{BlockTy, NodeTypes, NodeTypesWithEngine}; use reth_payload_builder::PayloadBuilderHandle; use reth_primitives::EthPrimitives; use reth_provider::{ @@ -52,10 +52,13 @@ type EngineServiceType = ChainOrchestrator< /// The type that drives the chain forward and communicates progress. #[pin_project] #[allow(missing_debug_implementations)] +// TODO(mattsse): remove hidde once fixed : +// otherwise rustdoc fails to resolve the alias +#[doc(hidden)] pub struct EngineService where N: EngineNodeTypes, - Client: BlockClient
<Header = HeaderTy<N>, Body = BodyTy<N>> + 'static,
+    Client: BlockClient<Block = BlockTy<N>> + 'static,
     E: BlockExecutorProvider<Primitives = N::Primitives> + 'static,
 {
     orchestrator: EngineServiceType<N, Client, E>,
 }

 impl<N, Client, E> EngineService<N, Client, E>
 where
     N: EngineNodeTypes,
-    Client: BlockClient<Header = HeaderTy<N>, Body = BodyTy<N>> + 'static,
+    Client: BlockClient<Block = BlockTy<N>> + 'static,
     E: BlockExecutorProvider<Primitives = N::Primitives> + 'static,
 {
     /// Constructor for `EngineService`.
@@ -133,7 +136,7 @@ where
 impl<N, Client, E> Stream for EngineService<N, Client, E>
 where
     N: EngineNodeTypes,
-    Client: BlockClient<Header = HeaderTy<N>
, Body = BodyTy<N>> + 'static,
+    Client: BlockClient<Block = BlockTy<N>> + 'static,
     E: BlockExecutorProvider<Primitives = N::Primitives> + 'static,
 {
     type Item = ChainEvent<BeaconConsensusEngineEvent<N::Primitives>>;
diff --git a/crates/engine/tree/src/backfill.rs b/crates/engine/tree/src/backfill.rs
index a0eb8dd957e1d..370ca7e52ec5a 100644
--- a/crates/engine/tree/src/backfill.rs
+++ b/crates/engine/tree/src/backfill.rs
@@ -274,7 +274,7 @@ mod tests {
             gas_limit: ETHEREUM_BLOCK_GAS_LIMIT,
             ..Default::default()
         };
-        let header = SealedHeader::seal(header);
+        let header = SealedHeader::seal_slow(header);
         insert_headers_into_client(&client, header, 0..total_blocks);
         let tip = client.highest_block().expect("there should be blocks here").hash();
diff --git a/crates/engine/tree/src/download.rs b/crates/engine/tree/src/download.rs
index 1359843c0a354..f365b87018ec1 100644
--- a/crates/engine/tree/src/download.rs
+++ b/crates/engine/tree/src/download.rs
@@ -9,7 +9,7 @@ use reth_network_p2p::{
     full_block::{FetchFullBlockFuture, FetchFullBlockRangeFuture, FullBlockClient},
     BlockClient,
 };
-use reth_primitives::{SealedBlockFor, SealedBlockWithSenders};
+use reth_primitives::{RecoveredBlock, SealedBlock};
 use reth_primitives_traits::Block;
 use std::{
     cmp::{Ordering, Reverse},
@@ -45,7 +45,7 @@ pub enum DownloadAction {
 #[derive(Debug)]
 pub enum DownloadOutcome {
     /// Downloaded blocks.
-    Blocks(Vec<SealedBlockWithSenders<B>>),
+    Blocks(Vec<RecoveredBlock<B>>),
     /// New download started.
     NewDownloadStarted {
         /// How many blocks are pending in this download.
@@ -69,7 +69,7 @@ where
     inflight_block_range_requests: Vec<FetchFullBlockRangeFuture<Client>>,
     /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for
     /// ordering. This means the blocks will be popped from the heap with ascending block numbers.
-    set_buffered_blocks: BinaryHeap<Reverse<OrderedSealedBlockWithSenders<B>>>,
+    set_buffered_blocks: BinaryHeap<Reverse<OrderedRecoveredBlock<B>>>,
     /// Engine download metrics.
     metrics: BlockDownloaderMetrics,
     /// Pending events to be emitted.
@@ -78,14 +78,11 @@ where
 impl<Client, B> BasicBlockDownloader<Client, B>
 where
-    Client: BlockClient<Header = B::Header, Body = B::Body> + 'static,
+    Client: BlockClient<Block = B> + 'static,
     B: Block,
 {
     /// Create a new instance
-    pub fn new(
-        client: Client,
-        consensus: Arc<dyn Consensus<B::Header, B::Body, Error = ConsensusError>>,
-    ) -> Self {
+    pub fn new(client: Client, consensus: Arc<dyn Consensus<B, Error = ConsensusError>>) -> Self {
         Self {
             full_block_client: FullBlockClient::new(client, consensus),
             inflight_full_block_requests: Vec::new(),
@@ -192,7 +189,7 @@ where
 impl<Client, B> BlockDownloader for BasicBlockDownloader<Client, B>
 where
-    Client: BlockClient<Header = B::Header, Body = B::Body>
, + Client: BlockClient, B: Block, { type Block = B; @@ -233,9 +230,7 @@ where .into_iter() .map(|b| { let senders = b.senders().unwrap_or_default(); - OrderedSealedBlockWithSenders(SealedBlockWithSenders::new_unchecked( - b, senders, - )) + OrderedRecoveredBlock(RecoveredBlock::new_sealed(b, senders)) }) .map(Reverse), ); @@ -252,7 +247,7 @@ where } // drain all unique element of the block buffer if there are any - let mut downloaded_blocks: Vec> = + let mut downloaded_blocks: Vec> = Vec::with_capacity(self.set_buffered_blocks.len()); while let Some(block) = self.set_buffered_blocks.pop() { // peek ahead and pop duplicates @@ -269,32 +264,32 @@ where } } -/// A wrapper type around [`SealedBlockWithSenders`] that implements the [Ord] +/// A wrapper type around [`RecoveredBlock`] that implements the [Ord] /// trait by block number. #[derive(Debug, Clone, PartialEq, Eq)] -struct OrderedSealedBlockWithSenders(SealedBlockWithSenders); +struct OrderedRecoveredBlock(RecoveredBlock); -impl PartialOrd for OrderedSealedBlockWithSenders { +impl PartialOrd for OrderedRecoveredBlock { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } -impl Ord for OrderedSealedBlockWithSenders { +impl Ord for OrderedRecoveredBlock { fn cmp(&self, other: &Self) -> Ordering { self.0.number().cmp(&other.0.number()) } } -impl From> for OrderedSealedBlockWithSenders { - fn from(block: SealedBlockFor) -> Self { +impl From> for OrderedRecoveredBlock { + fn from(block: SealedBlock) -> Self { let senders = block.senders().unwrap_or_default(); - Self(SealedBlockWithSenders::new_unchecked(block, senders)) + Self(RecoveredBlock::new_sealed(block, senders)) } } -impl From> for SealedBlockWithSenders { - fn from(value: OrderedSealedBlockWithSenders) -> Self { +impl From> for RecoveredBlock { + fn from(value: OrderedRecoveredBlock) -> Self { value.0 } } @@ -348,7 +343,7 @@ mod tests { gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, ..Default::default() }; - let header = SealedHeader::seal(header); + let header = SealedHeader::seal_slow(header); insert_headers_into_client(&client, header, 0..total_blocks); let consensus = Arc::new(EthBeaconConsensus::new(chain_spec)); diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index fa92cba28f8d1..238846d4f0d62 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -9,7 +9,7 @@ use alloy_primitives::B256; use futures::{Stream, StreamExt}; use reth_chain_state::ExecutedBlock; use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineTypes}; -use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; +use reth_primitives::{NodePrimitives, RecoveredBlock}; use reth_primitives_traits::Block; use std::{ collections::HashSet, @@ -306,7 +306,7 @@ pub enum FromEngine { /// Request from the engine. Request(Req), /// Downloaded blocks from the network. 
- DownloadedBlocks(Vec>), + DownloadedBlocks(Vec>), } impl Display for FromEngine { diff --git a/crates/engine/tree/src/test_utils.rs b/crates/engine/tree/src/test_utils.rs index c1b534ebf5eb6..de6485ce9c088 100644 --- a/crates/engine/tree/src/test_utils.rs +++ b/crates/engine/tree/src/test_utils.rs @@ -76,7 +76,7 @@ pub fn insert_headers_into_client( header.parent_hash = hash; header.number += 1; header.timestamp += 1; - sealed_header = SealedHeader::seal(header); + sealed_header = SealedHeader::seal_slow(header); client.insert(sealed_header.clone(), body.clone()); } } diff --git a/crates/engine/tree/src/tree/block_buffer.rs b/crates/engine/tree/src/tree/block_buffer.rs index 675e1b5c32555..0d022f32de0cd 100644 --- a/crates/engine/tree/src/tree/block_buffer.rs +++ b/crates/engine/tree/src/tree/block_buffer.rs @@ -2,7 +2,7 @@ use crate::tree::metrics::BlockBufferMetrics; use alloy_consensus::BlockHeader; use alloy_primitives::{BlockHash, BlockNumber}; use reth_network::cache::LruCache; -use reth_primitives::SealedBlockWithSenders; +use reth_primitives::RecoveredBlock; use reth_primitives_traits::Block; use std::collections::{BTreeMap, HashMap, HashSet}; @@ -20,7 +20,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; #[derive(Debug)] pub(super) struct BlockBuffer { /// All blocks in the buffer stored by their block hash. - pub(crate) blocks: HashMap>, + pub(crate) blocks: HashMap>, /// Map of any parent block hash (even the ones not currently in the buffer) /// to the buffered children. /// Allows connecting buffered blocks by parent. @@ -50,12 +50,12 @@ impl BlockBuffer { } /// Return reference to the requested block. - pub(super) fn block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { + pub(super) fn block(&self, hash: &BlockHash) -> Option<&RecoveredBlock> { self.blocks.get(hash) } /// Return a reference to the lowest ancestor of the given block in the buffer. - pub(super) fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { + pub(super) fn lowest_ancestor(&self, hash: &BlockHash) -> Option<&RecoveredBlock> { let mut current_block = self.blocks.get(hash)?; while let Some(parent) = self.blocks.get(¤t_block.parent_hash()) { current_block = parent; @@ -64,7 +64,7 @@ impl BlockBuffer { } /// Insert a correct block inside the buffer. - pub(super) fn insert_block(&mut self, block: SealedBlockWithSenders) { + pub(super) fn insert_block(&mut self, block: RecoveredBlock) { let hash = block.hash(); self.parent_to_child.entry(block.parent_hash()).or_default().insert(hash); @@ -90,7 +90,7 @@ impl BlockBuffer { pub(super) fn remove_block_with_children( &mut self, parent_hash: &BlockHash, - ) -> Vec> { + ) -> Vec> { let removed = self .remove_block(parent_hash) .into_iter() @@ -149,7 +149,7 @@ impl BlockBuffer { /// This method will only remove the block if it's present inside `self.blocks`. /// The block might be missing from other collections, the method will only ensure that it has /// been removed. - fn remove_block(&mut self, hash: &BlockHash) -> Option> { + fn remove_block(&mut self, hash: &BlockHash) -> Option> { let block = self.blocks.remove(hash)?; self.remove_from_earliest_blocks(block.number(), hash); self.remove_from_parent(block.parent_hash(), hash); @@ -158,7 +158,7 @@ impl BlockBuffer { } /// Remove all children and their descendants for the given blocks and return them. 
- fn remove_children(&mut self, parent_hashes: Vec) -> Vec> { + fn remove_children(&mut self, parent_hashes: Vec) -> Vec> { // remove all parent child connection and all the child children blocks that are connected // to the discarded parent blocks. let mut remove_parent_children = parent_hashes; @@ -184,15 +184,19 @@ mod tests { use super::*; use alloy_eips::BlockNumHash; use alloy_primitives::BlockHash; - use reth_primitives::SealedBlockWithSenders; + use reth_primitives::RecoveredBlock; use reth_testing_utils::generators::{self, random_block, BlockParams, Rng}; use std::collections::HashMap; /// Create random block with specified number and parent hash. - fn create_block(rng: &mut R, number: u64, parent: BlockHash) -> SealedBlockWithSenders { + fn create_block( + rng: &mut R, + number: u64, + parent: BlockHash, + ) -> RecoveredBlock { let block = random_block(rng, number, BlockParams { parent: Some(parent), ..Default::default() }); - block.seal_with_senders().unwrap() + block.try_recover().unwrap() } /// Assert that all buffer collections have the same data length. @@ -210,7 +214,10 @@ mod tests { } /// Assert that the block was removed from all buffer collections. - fn assert_block_removal(buffer: &BlockBuffer, block: &SealedBlockWithSenders) { + fn assert_block_removal( + buffer: &BlockBuffer, + block: &RecoveredBlock, + ) { assert!(!buffer.blocks.contains_key(&block.hash())); assert!(buffer .parent_to_child diff --git a/crates/engine/tree/src/tree/error.rs b/crates/engine/tree/src/tree/error.rs index 54c274abbf68b..f681cd2f0fe5d 100644 --- a/crates/engine/tree/src/tree/error.rs +++ b/crates/engine/tree/src/tree/error.rs @@ -4,7 +4,7 @@ use alloy_consensus::BlockHeader; use reth_consensus::ConsensusError; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError}; use reth_evm::execute::InternalBlockExecutionError; -use reth_primitives::SealedBlockFor; +use reth_primitives::SealedBlock; use reth_primitives_traits::{Block, BlockBody}; use tokio::sync::oneshot::error::TryRecvError; @@ -27,7 +27,7 @@ pub enum AdvancePersistenceError { .block.parent_hash(), .kind)] struct InsertBlockErrorData { - block: SealedBlockFor, + block: SealedBlock, #[source] kind: InsertBlockErrorKind, } @@ -45,11 +45,11 @@ impl std::fmt::Debug for InsertBlockErrorData { } impl InsertBlockErrorData { - const fn new(block: SealedBlockFor, kind: InsertBlockErrorKind) -> Self { + const fn new(block: SealedBlock, kind: InsertBlockErrorKind) -> Self { Self { block, kind } } - fn boxed(block: SealedBlockFor, kind: InsertBlockErrorKind) -> Box { + fn boxed(block: SealedBlock, kind: InsertBlockErrorKind) -> Box { Box::new(Self::new(block, kind)) } } @@ -65,23 +65,23 @@ pub struct InsertBlockError { impl InsertBlockError { /// Create a new `InsertInvalidBlockErrorTwo` - pub fn new(block: SealedBlockFor, kind: InsertBlockErrorKind) -> Self { + pub fn new(block: SealedBlock, kind: InsertBlockErrorKind) -> Self { Self { inner: InsertBlockErrorData::boxed(block, kind) } } /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn consensus_error(error: ConsensusError, block: SealedBlockFor) -> Self { + pub fn consensus_error(error: ConsensusError, block: SealedBlock) -> Self { Self::new(block, InsertBlockErrorKind::Consensus(error)) } /// Create a new `InsertInvalidBlockError` from a consensus error - pub fn sender_recovery_error(block: SealedBlockFor) -> Self { + pub fn sender_recovery_error(block: SealedBlock) -> Self { Self::new(block, InsertBlockErrorKind::SenderRecovery) } /// Consumes 
the error and returns the block that resulted in the error #[inline] - pub fn into_block(self) -> SealedBlockFor { + pub fn into_block(self) -> SealedBlock { self.inner.block } @@ -93,13 +93,13 @@ impl InsertBlockError { /// Returns the block that resulted in the error #[inline] - pub const fn block(&self) -> &SealedBlockFor { + pub const fn block(&self) -> &SealedBlock { &self.inner.block } /// Consumes the type and returns the block and error kind. #[inline] - pub fn split(self) -> (SealedBlockFor, InsertBlockErrorKind) { + pub fn split(self) -> (SealedBlock, InsertBlockErrorKind) { let inner = *self.inner; (inner.block, inner.kind) } diff --git a/crates/engine/tree/src/tree/invalid_block_hook.rs b/crates/engine/tree/src/tree/invalid_block_hook.rs index 7c7b0631dd202..339ef9c7dde1a 100644 --- a/crates/engine/tree/src/tree/invalid_block_hook.rs +++ b/crates/engine/tree/src/tree/invalid_block_hook.rs @@ -1,6 +1,6 @@ use alloy_primitives::B256; use reth_engine_primitives::InvalidBlockHook; -use reth_primitives::{NodePrimitives, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{NodePrimitives, RecoveredBlock, SealedHeader}; use reth_provider::BlockExecutionOutput; use reth_trie::updates::TrieUpdates; @@ -13,7 +13,7 @@ impl InvalidBlockHook for NoopInvalidBlockHook { fn on_invalid_block( &self, _parent_header: &SealedHeader, - _block: &SealedBlockWithSenders, + _block: &RecoveredBlock, _output: &BlockExecutionOutput, _trie_updates: Option<(&TrieUpdates, B256)>, ) { @@ -33,7 +33,7 @@ impl InvalidBlockHook for InvalidBlockHooks { fn on_invalid_block( &self, parent_header: &SealedHeader, - block: &SealedBlockWithSenders, + block: &RecoveredBlock, output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, ) { diff --git a/crates/engine/tree/src/tree/invalid_headers.rs b/crates/engine/tree/src/tree/invalid_headers.rs index 8472d44a32387..de47c1731245b 100644 --- a/crates/engine/tree/src/tree/invalid_headers.rs +++ b/crates/engine/tree/src/tree/invalid_headers.rs @@ -111,7 +111,7 @@ mod tests { fn test_hit_eviction() { let mut cache = InvalidHeaderCache::new(10); let header = Header::default(); - let header = SealedHeader::seal(header); + let header = SealedHeader::seal_slow(header); cache.insert(header.block_with_parent()); assert_eq!(cache.headers.get(&header.hash()).unwrap().hit_count, 0); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 3fff0562a80bd..cff2dfdebbd31 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -36,8 +36,7 @@ use reth_payload_builder::PayloadBuilderHandle; use reth_payload_builder_primitives::PayloadBuilder; use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ - EthPrimitives, GotExpected, NodePrimitives, SealedBlockFor, SealedBlockWithSenders, - SealedHeader, + EthPrimitives, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, }; use reth_primitives_traits::Block; use reth_provider::{ @@ -147,7 +146,7 @@ impl TreeState { } /// Returns the block by hash. - fn block_by_hash(&self, hash: B256) -> Option>> { + fn block_by_hash(&self, hash: B256) -> Option>> { self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) } @@ -737,7 +736,7 @@ where /// block request processing isn't blocked for a long time. 
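A further aside (toy types, not reth's API): the `InsertBlockErrorData` hunks above keep the existing boxed block-plus-kind layout and only rename the block type, so the error stays one pointer wide on the happy path. A sketch of that pattern:

```rust
/// Toy version of the boxed error-with-block pattern from `error.rs` above.
#[derive(Debug)]
struct InsertError<B> {
    // The Box keeps the error small; the offending block rides along with the kind.
    inner: Box<(B, &'static str)>,
}

impl<B> InsertError<B> {
    fn new(block: B, kind: &'static str) -> Self {
        Self { inner: Box::new((block, kind)) }
    }

    /// Mirrors `InsertBlockError::split`: hand back block and kind by value.
    fn split(self) -> (B, &'static str) {
        let inner = *self.inner;
        (inner.0, inner.1)
    }
}

fn main() {
    let err = InsertError::new("block-123", "consensus");
    assert_eq!(err.split(), ("block-123", "consensus"));
}
```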
fn on_downloaded( &mut self, - mut blocks: Vec>, + mut blocks: Vec>, ) -> Result, InsertBlockFatalError> { if blocks.is_empty() { // nothing to execute @@ -1603,11 +1602,11 @@ where .provider .sealed_block_with_senders(hash.into(), TransactionVariant::WithHash)? .ok_or_else(|| ProviderError::HeaderNotFound(hash.into()))? - .split(); + .split_sealed(); let execution_output = self .provider - .get_state(block.number())? - .ok_or_else(|| ProviderError::StateForNumberNotFound(block.number()))?; + .get_state(block.header().number())? + .ok_or_else(|| ProviderError::StateForNumberNotFound(block.header().number()))?; let hashed_state = self.provider.hashed_post_state(execution_output.state()); Ok(Some(ExecutedBlock { @@ -1650,7 +1649,7 @@ where .tree_state .block_by_hash(hash) // TODO: clone for compatibility. should we return an Arc here? - .map(|block| block.as_ref().clone().unseal()); + .map(|block| block.as_ref().clone().into_block()); } Ok(block) } @@ -1798,11 +1797,10 @@ where /// Validate if block is correct and satisfies all the consensus rules that concern the header /// and block body itself. - fn validate_block( - &self, - block: &SealedBlockWithSenders, - ) -> Result<(), ConsensusError> { - if let Err(e) = self.consensus.validate_header_with_total_difficulty(block, U256::MAX) { + fn validate_block(&self, block: &RecoveredBlock) -> Result<(), ConsensusError> { + if let Err(e) = + self.consensus.validate_header_with_total_difficulty(block.header(), U256::MAX) + { error!( target: "engine::tree", ?block, @@ -1817,7 +1815,7 @@ where return Err(e) } - if let Err(e) = self.consensus.validate_block_pre_execution(block) { + if let Err(e) = self.consensus.validate_block_pre_execution(block.sealed_block()) { error!(target: "engine::tree", ?block, "Failed to validate block {}: {e}", block.hash()); return Err(e) } @@ -1870,21 +1868,21 @@ where /// Returns an error if sender recovery failed or inserting into the buffer failed. fn buffer_block_without_senders( &mut self, - block: SealedBlockFor, + block: SealedBlock, ) -> Result<(), InsertBlockError> { - match block.try_seal_with_senders() { + match block.try_recover() { Ok(block) => self.buffer_block(block), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), + Err(err) => Err(InsertBlockError::sender_recovery_error(err.into_inner())), } } /// Pre-validates the block and inserts it into the buffer. 
fn buffer_block( &mut self, - block: SealedBlockWithSenders, + block: RecoveredBlock, ) -> Result<(), InsertBlockError> { if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.block)) + return Err(InsertBlockError::consensus_error(err, block.into_sealed_block())) } self.state.buffer.insert_block(block); Ok(()) @@ -2139,7 +2137,7 @@ where #[instrument(level = "trace", skip_all, fields(block_hash = %block.hash(), block_num = %block.number(),), target = "engine::tree")] fn on_downloaded_block( &mut self, - block: SealedBlockWithSenders, + block: RecoveredBlock, ) -> Result, InsertBlockFatalError> { let block_num_hash = block.num_hash(); let lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_num_hash.hash); @@ -2194,25 +2192,25 @@ where fn insert_block_without_senders( &mut self, - block: SealedBlockFor, + block: SealedBlock, ) -> Result> { - match block.try_seal_with_senders() { + match block.try_recover() { Ok(block) => self.insert_block(block), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), + Err(err) => Err(InsertBlockError::sender_recovery_error(err.into_inner())), } } fn insert_block( &mut self, - block: SealedBlockWithSenders, + block: RecoveredBlock, ) -> Result> { self.insert_block_inner(block.clone()) - .map_err(|kind| InsertBlockError::new(block.block, kind)) + .map_err(|kind| InsertBlockError::new(block.into_sealed_block(), kind)) } fn insert_block_inner( &mut self, - block: SealedBlockWithSenders, + block: RecoveredBlock, ) -> Result { debug!(target: "engine::tree", block=?block.num_hash(), parent = ?block.parent_hash(), state_root = ?block.state_root(), "Inserting new block into tree"); @@ -2270,8 +2268,7 @@ where let block_number = block.number(); let block_hash = block.hash(); - let sealed_block = Arc::new(block.block.clone()); - let block = block.unseal(); + let sealed_block = Arc::new(block.clone_sealed_block()); let persistence_not_in_progress = !self.persistence_state.in_progress(); @@ -2341,12 +2338,7 @@ where PostExecutionInput::new(&output.receipts, &output.requests), ) { // call post-block hook - self.invalid_block_hook.on_invalid_block( - &parent_block, - &block.clone().seal_slow(), - &output, - None, - ); + self.invalid_block_hook.on_invalid_block(&parent_block, &block, &output, None); return Err(err.into()) } @@ -2458,7 +2450,7 @@ where // call post-block hook self.invalid_block_hook.on_invalid_block( &parent_block, - &block.clone().seal_slow(), + &block, &output, Some((&trie_output, state_root)), ); @@ -2841,7 +2833,8 @@ mod tests { use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::{EthEngineTypes, EthereumEngineValidator}; use reth_evm::test_utils::MockExecutorProvider; - use reth_primitives::{Block, BlockExt, EthPrimitives}; + use reth_primitives::{Block, EthPrimitives}; + use reth_primitives_traits::Block as _; use reth_provider::test_utils::MockEthProvider; use reth_rpc_types_compat::engine::{block_to_payload_v1, payload::block_to_payload_v3}; use reth_trie::{updates::TrieUpdates, HashedPostState}; @@ -2951,7 +2944,7 @@ mod tests { let (from_tree_tx, from_tree_rx) = unbounded_channel(); let header = chain_spec.genesis_header().clone(); - let header = SealedHeader::seal(header); + let header = SealedHeader::seal_slow(header); let engine_api_tree_state = EngineApiTreeState::new(10, 10, header.num_hash()); let canonical_in_memory_state = CanonicalInMemoryState::with_head(header, None, None); @@ -3023,12 +3016,7 @@ mod tests { 
self.persist_blocks( blocks .into_iter() - .map(|b| { - SealedBlockWithSenders::new_unchecked( - (*b.block).clone(), - b.senders().clone(), - ) - }) + .map(|b| RecoveredBlock::new_sealed(b.block().clone(), b.senders().clone())) .collect(), ); @@ -3049,7 +3037,7 @@ mod tests { fn insert_block( &mut self, - block: SealedBlockWithSenders, + block: RecoveredBlock, ) -> Result> { let execution_outcome = self.block_builder.get_execution_outcome(block.clone()); self.extend_execution_outcome([execution_outcome]); @@ -3114,8 +3102,8 @@ mod tests { } } - async fn send_new_payload(&mut self, block: SealedBlockWithSenders) { - let payload = block_to_payload_v3(block.block.clone()); + async fn send_new_payload(&mut self, block: RecoveredBlock) { + let payload = block_to_payload_v3(block.clone_sealed_block()); self.tree .on_new_payload( payload.into(), @@ -3129,7 +3117,7 @@ mod tests { async fn insert_chain( &mut self, - chain: impl IntoIterator + Clone, + chain: impl IntoIterator> + Clone, ) { for block in chain.clone() { self.insert_block(block.clone()).unwrap(); @@ -3151,16 +3139,16 @@ mod tests { async fn check_fork_chain_insertion( &mut self, - chain: impl IntoIterator + Clone, + chain: impl IntoIterator> + Clone, ) { for block in chain { - self.check_fork_block_added(block.block.hash()).await; + self.check_fork_block_added(block.hash()).await; } } async fn check_canon_chain_insertion( &mut self, - chain: impl IntoIterator + Clone, + chain: impl IntoIterator> + Clone, ) { for block in chain.clone() { self.check_canon_block_added(block.hash()).await; @@ -3186,33 +3174,35 @@ mod tests { block, _, )) => { - assert!(block.hash() == expected_hash); + assert_eq!(block.hash(), expected_hash); } _ => panic!("Unexpected event: {:#?}", event), } } - fn persist_blocks(&self, blocks: Vec) { + fn persist_blocks(&self, blocks: Vec>) { let mut block_data: Vec<(B256, Block)> = Vec::with_capacity(blocks.len()); let mut headers_data: Vec<(B256, Header)> = Vec::with_capacity(blocks.len()); for block in &blocks { - let unsealed_block = block.clone().unseal(); - block_data.push((block.hash(), unsealed_block.clone().block)); - headers_data.push((block.hash(), unsealed_block.header.clone())); + block_data.push((block.hash(), block.clone_block())); + headers_data.push((block.hash(), block.header().clone())); } self.provider.extend_blocks(block_data); self.provider.extend_headers(headers_data); } - fn setup_range_insertion_for_valid_chain(&mut self, chain: Vec) { + fn setup_range_insertion_for_valid_chain( + &mut self, + chain: Vec>, + ) { self.setup_range_insertion_for_chain(chain, None) } fn setup_range_insertion_for_invalid_chain( &mut self, - chain: Vec, + chain: Vec>, index: usize, ) { self.setup_range_insertion_for_chain(chain, Some(index)) @@ -3220,7 +3210,7 @@ mod tests { fn setup_range_insertion_for_chain( &mut self, - chain: Vec, + chain: Vec>, invalid_index: Option, ) { // setting up execution outcomes for the chain, the blocks will be @@ -3392,7 +3382,7 @@ mod tests { // ensure block is buffered let buffered = test_harness.tree.state.buffer.block(&hash).unwrap(); - assert_eq!(buffered.block, sealed); + assert_eq!(buffered.clone_sealed_block(), sealed); } #[test] @@ -3732,7 +3722,7 @@ mod tests { for block in &chain_a { test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { - block: Arc::new(block.block.clone()), + block: Arc::new(block.clone_sealed_block()), senders: Arc::new(block.senders().to_vec()), execution_output: Arc::new(ExecutionOutcome::default()), hashed_state: 
Arc::new(HashedPostState::default()), @@ -3743,7 +3733,7 @@ mod tests { for block in &chain_b { test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { - block: Arc::new(block.block.clone()), + block: Arc::new(block.clone_sealed_block()), senders: Arc::new(block.senders().to_vec()), execution_output: Arc::new(ExecutionOutcome::default()), hashed_state: Arc::new(HashedPostState::default()), @@ -3755,19 +3745,19 @@ mod tests { let mut expected_new = Vec::new(); for block in &chain_b { // reorg to chain from block b - let result = test_harness.tree.on_new_head(block.block.hash()).unwrap(); + let result = test_harness.tree.on_new_head(block.hash()).unwrap(); assert_matches!(result, Some(NewCanonicalChain::Reorg { .. })); expected_new.push(block); if let Some(NewCanonicalChain::Reorg { new, old }) = result { assert_eq!(new.len(), expected_new.len()); for (index, block) in expected_new.iter().enumerate() { - assert_eq!(new[index].block.hash(), block.block.hash()); + assert_eq!(new[index].block.hash(), block.hash()); } assert_eq!(old.len(), chain_a.len()); for (index, block) in chain_a.iter().enumerate() { - assert_eq!(old[index].block.hash(), block.block.hash()); + assert_eq!(old[index].block.hash(), block.hash()); } } @@ -3936,9 +3926,7 @@ mod tests { let backfill_tip_block = main_chain[(backfill_finished_block_number - 1) as usize].clone(); // add block to mock provider to enable persistence clean up. - test_harness - .provider - .add_block(backfill_tip_block.hash(), backfill_tip_block.block.unseal()); + test_harness.provider.add_block(backfill_tip_block.hash(), backfill_tip_block.into_block()); test_harness.tree.on_engine_message(FromEngine::Event(backfill_finished)).unwrap(); let event = test_harness.from_tree_rx.recv().await.unwrap(); @@ -4154,7 +4142,7 @@ mod tests { // extend base chain let extension_chain = test_harness.block_builder.create_fork(old_head, 5); - let fork_block = extension_chain.last().unwrap().block.clone(); + let fork_block = extension_chain.last().unwrap().clone_sealed_block(); test_harness.setup_range_insertion_for_valid_chain(extension_chain.clone()); test_harness.insert_chain(extension_chain).await; @@ -4252,7 +4240,7 @@ mod tests { // extend base chain let extension_chain = test_harness.block_builder.create_fork(old_head, 5); - let fork_block = extension_chain.last().unwrap().block.clone(); + let fork_block = extension_chain.last().unwrap().clone_sealed_block(); test_harness.insert_chain(extension_chain).await; // fcu to old_head @@ -4322,7 +4310,7 @@ mod tests { test_harness.send_new_payload(block.clone()).await; if index < side_chain.len() - invalid_index - 1 { - test_harness.send_fcu(block.block.hash(), ForkchoiceStatus::Valid).await; + test_harness.send_fcu(block.hash(), ForkchoiceStatus::Valid).await; } } diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index 54f9321f239ab..1d1a524e2b1ab 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -13,6 +13,7 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-errors.workspace = true reth-consensus-common.workspace = true reth-fs-util.workspace = true diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index fc38f15be2b68..2136c92a014ee 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -19,9 +19,9 @@ use reth_evm::{ }; use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{ - proofs, 
transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, BlockExt, Receipt, - Receipts, + proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, Receipt, Receipts, }; +use reth_primitives_traits::block::Block as _; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index b81ee1d5c4484..4d2daaaaeada6 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -8,7 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use alloy_consensus::{BlockHeader, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::EMPTY_OMMER_ROOT_HASH; use alloy_eips::{eip7840::BlobParams, merge::ALLOWED_FUTURE_BLOCK_TIME_SECONDS}; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardforks}; @@ -21,10 +21,10 @@ use reth_consensus_common::validation::{ validate_against_parent_timestamp, validate_block_pre_execution, validate_body_against_header, validate_header_base_fee, validate_header_extra_data, validate_header_gas, }; -use reth_primitives::{BlockWithSenders, NodePrimitives, Receipt, SealedBlock, SealedHeader}; +use reth_primitives::{NodePrimitives, Receipt, RecoveredBlock, SealedBlock, SealedHeader}; use reth_primitives_traits::{ constants::{GAS_LIMIT_BOUND_DIVISOR, MINIMUM_GAS_LIMIT}, - BlockBody, + Block, BlockHeader, }; use std::{fmt::Debug, sync::Arc, time::SystemTime}; @@ -103,30 +103,29 @@ where { fn validate_block_post_execution( &self, - block: &BlockWithSenders, + block: &RecoveredBlock, input: PostExecutionInput<'_>, ) -> Result<(), ConsensusError> { validate_block_post_execution(block, &self.chain_spec, input.receipts, input.requests) } } -impl Consensus +impl Consensus for EthBeaconConsensus where - H: BlockHeader, - B: BlockBody, + B: Block, { type Error = ConsensusError; fn validate_body_against_header( &self, - body: &B, - header: &SealedHeader, + body: &B::Body, + header: &SealedHeader, ) -> Result<(), Self::Error> { validate_body_against_header(body, header.header()) } - fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), Self::Error> { + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), Self::Error> { validate_block_pre_execution(block, &self.chain_spec) } } @@ -361,7 +360,7 @@ mod tests { }; assert_eq!( - EthBeaconConsensus::new(chain_spec).validate_header(&SealedHeader::seal(header,)), + EthBeaconConsensus::new(chain_spec).validate_header(&SealedHeader::seal_slow(header,)), Ok(()) ); } diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index b9b38b6d51c26..8d6d3854924a9 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -3,7 +3,7 @@ use alloy_eips::eip7685::Requests; use alloy_primitives::{Bloom, B256}; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; -use reth_primitives::{gas_spent_by_transactions, BlockWithSenders, GotExpected, Receipt}; +use reth_primitives::{gas_spent_by_transactions, GotExpected, Receipt, RecoveredBlock}; use reth_primitives_traits::Block; /// Validate a block with regard to execution results: @@ -11,7 +11,7 @@ use reth_primitives_traits::Block; /// - Compares the receipts root in the block header to the block body /// - Compares the gas used in the block header to the actual 
gas usage after execution pub fn validate_block_post_execution( - block: &BlockWithSenders, + block: &RecoveredBlock, chain_spec: &ChainSpec, receipts: &[Receipt], requests: &Requests, diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 350780d0bdad3..f4723bf39f282 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -22,7 +22,7 @@ use reth_payload_primitives::{ PayloadOrAttributes, PayloadTypes, }; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::{Block, NodePrimitives, SealedBlock, SealedBlockFor}; +use reth_primitives::{Block, NodePrimitives, SealedBlock}; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::sync::Arc; @@ -54,7 +54,7 @@ where type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; fn block_to_payload( - block: SealedBlockFor< + block: SealedBlock< <::Primitives as NodePrimitives>::Block, >, ) -> (ExecutionPayload, ExecutionPayloadSidecar) { diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 0d793fc8a8b43..99975734ea0e4 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -5,7 +5,7 @@ use crate::{ EthEvmConfig, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use alloy_consensus::Transaction; +use alloy_consensus::{BlockHeader, Transaction}; use alloy_eips::{eip6110, eip7685::Requests}; use core::fmt::Display; use reth_chainspec::{ChainSpec, EthereumHardfork, EthereumHardforks, MAINNET}; @@ -21,7 +21,8 @@ use reth_evm::{ system_calls::{OnStateHook, SystemCaller}, ConfigureEvm, TxEnvOverrides, }; -use reth_primitives::{BlockWithSenders, EthPrimitives, Receipt}; +use reth_primitives::{EthPrimitives, Receipt, RecoveredBlock}; +use reth_primitives_traits::BlockBody; use reth_revm::db::State; use revm_primitives::{ db::{Database, DatabaseCommit}, @@ -129,31 +130,34 @@ where self.tx_env_overrides = Some(tx_env_overrides); } - fn apply_pre_execution_changes(&mut self, block: &BlockWithSenders) -> Result<(), Self::Error> { + fn apply_pre_execution_changes( + &mut self, + block: &RecoveredBlock, + ) -> Result<(), Self::Error> { // Set state clear flag if the block is after the Spurious Dragon hardfork. let state_clear_flag = - (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + (*self.chain_spec).is_spurious_dragon_active_at_block(block.number()); self.state.set_state_clear_flag(state_clear_flag); - let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); + let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); - self.system_caller.apply_pre_execution_changes(&block.header, &mut evm)?; + self.system_caller.apply_pre_execution_changes(block.header(), &mut evm)?; Ok(()) } fn execute_transactions( &mut self, - block: &BlockWithSenders, + block: &RecoveredBlock, ) -> Result, Self::Error> { - let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); + let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body.transactions.len()); + let mut receipts = Vec::with_capacity(block.body().transaction_count()); for (sender, transaction) in block.transactions_with_sender() { // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, // must be no greater than the block’s gasLimit. 
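Aside: the hunk below keeps the same intra-block gas rule and only switches it to the new accessors. The rule itself, as a standalone sketch:

```rust
/// A transaction may not use more gas than the block still has available:
/// its gas limit plus the gas used so far must not exceed the block limit.
fn check_tx_gas(
    block_gas_limit: u64,
    cumulative_gas_used: u64,
    tx_gas_limit: u64,
) -> Result<(), String> {
    let block_available_gas = block_gas_limit - cumulative_gas_used;
    if tx_gas_limit > block_available_gas {
        return Err(format!(
            "tx gas limit {tx_gas_limit} exceeds available block gas {block_available_gas}"
        ));
    }
    Ok(())
}

fn main() {
    assert!(check_tx_gas(30_000_000, 29_950_000, 21_000).is_ok());
    assert!(check_tx_gas(30_000_000, 29_990_000, 21_000).is_err());
}
```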
- let block_available_gas = block.header.gas_limit - cumulative_gas_used; + let block_available_gas = block.gas_limit() - cumulative_gas_used; if transaction.gas_limit() > block_available_gas { return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { transaction_gas_limit: transaction.gas_limit(), @@ -204,10 +208,10 @@ where fn apply_post_execution_changes( &mut self, - block: &BlockWithSenders, + block: &RecoveredBlock, receipts: &[Receipt], ) -> Result { - let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); + let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); let requests = if self.chain_spec.is_prague_active_at_timestamp(block.timestamp) { // Collect all EIP-6110 deposits @@ -227,10 +231,10 @@ where }; drop(evm); - let mut balance_increments = post_block_balance_increments(&self.chain_spec, &block.block); + let mut balance_increments = post_block_balance_increments(&self.chain_spec, block); // Irregular state change at Ethereum DAO hardfork - if self.chain_spec.fork(EthereumHardfork::Dao).transitions_at_block(block.number) { + if self.chain_spec.fork(EthereumHardfork::Dao).transitions_at_block(block.number()) { // drain balances from hardcoded addresses. let drained_balance: u128 = self .state @@ -267,7 +271,7 @@ where fn validate_block_post_execution( &self, - block: &BlockWithSenders, + block: &RecoveredBlock, receipts: &[Receipt], requests: &Requests, ) -> Result<(), ConsensusError> { @@ -311,8 +315,8 @@ mod tests { BasicBlockExecutorProvider, BatchExecutor, BlockExecutorProvider, Executor, }; use reth_execution_types::BlockExecutionOutput; - use reth_primitives::{Account, Block, BlockBody, BlockExt, Transaction}; - use reth_primitives_traits::crypto::secp256k1::public_key_to_address; + use reth_primitives::{Account, Block, BlockBody, Transaction}; + use reth_primitives_traits::{crypto::secp256k1::public_key_to_address, Block as _}; use reth_revm::{ database::StateProviderDatabase, test_utils::StateProviderTest, TransitionState, }; @@ -388,7 +392,7 @@ mod tests { // attempt to execute a block without parent beacon block root, expect err let err = executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header: header.clone(), body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, @@ -409,7 +413,7 @@ mod tests { // Now execute a block with the fixed header, ensure that it does not fail executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header: header.clone(), body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, @@ -469,7 +473,7 @@ mod tests { // attempt to execute an empty block with parent beacon block root, this should not fail provider .batch_executor(StateProviderDatabase::new(&db)) - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, @@ -513,7 +517,7 @@ mod tests { // attempt to execute an empty block with parent beacon block root, this should not fail executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: BlockBody { transactions: vec![], ommers: vec![], withdrawals: None }, @@ -549,7 +553,7 @@ mod tests { // attempt to execute the genesis block with 
non-zero parent beacon block root, expect err header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); let _err = executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header: header.clone(), body: Default::default() }, vec![], )) @@ -564,7 +568,7 @@ mod tests { // now try to process the genesis block again, this time ensuring that a system contract // call does not occur executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: Default::default() }, vec![], )) @@ -612,7 +616,7 @@ mod tests { // Now execute a block with the fixed header, ensure that it does not fail executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header: header.clone(), body: Default::default() }, vec![], )) @@ -682,7 +686,7 @@ mod tests { // attempt to execute an empty block, this should not fail executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: Default::default() }, vec![], )) @@ -719,7 +723,7 @@ mod tests { // attempt to execute genesis block, this should not fail executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: Default::default() }, vec![], )) @@ -763,7 +767,7 @@ mod tests { // attempt to execute the fork activation block, this should not fail executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: Default::default() }, vec![], )) @@ -814,7 +818,7 @@ mod tests { // attempt to execute the fork activation block, this should not fail executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: Default::default() }, vec![], )) @@ -847,7 +851,7 @@ mod tests { // attempt to execute the genesis block, this should not fail executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: Default::default() }, vec![], )) @@ -876,7 +880,7 @@ mod tests { let header_hash = header.hash_slow(); executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: Default::default() }, vec![], )) @@ -908,7 +912,7 @@ mod tests { }; executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: Default::default() }, vec![], )) @@ -1111,7 +1115,7 @@ mod tests { let header = Header { timestamp: 1, number: 1, ..Header::default() }; - let block = &BlockWithSenders::new_unchecked( + let block = &RecoveredBlock::new_unhashed( Block { header, body: BlockBody { diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index cb8eb1556a4c3..d81037f171864 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -3,7 +3,7 @@ use alloy_genesis::Genesis; use alloy_primitives::{b256, hex}; use futures::StreamExt; use reth_chainspec::ChainSpec; -use reth_node_api::{FullNodeComponents, FullNodePrimitives, NodeTypes}; +use reth_node_api::{BlockBody, FullNodeComponents, FullNodePrimitives, NodeTypes}; use 
reth_node_builder::{ rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, }; @@ -65,7 +65,7 @@ where let head = notifications.next().await.unwrap(); - let tx = &head.tip().transactions()[0]; + let tx = &head.tip().body().transactions()[0]; assert_eq!(tx.trie_hash(), hash); println!("mined transaction: {hash}"); } diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index a4a02c3ef768f..4f5fe530161a3 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -30,10 +30,11 @@ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ proofs::{self}, - Block, BlockBody, BlockExt, EthereumHardforks, InvalidTransactionError, Receipt, - TransactionSigned, + Block, BlockBody, EthereumHardforks, InvalidTransactionError, Receipt, TransactionSigned, }; +use reth_primitives_traits::Block as _; use reth_revm::database::StateProviderDatabase; +use reth_storage_api::StateProviderFactory; use reth_transaction_pool::{ error::InvalidPoolTransactionError, noop::NoopTransactionPool, BestTransactions, BestTransactionsAttributes, PoolTransaction, TransactionPool, ValidPoolTransaction, @@ -51,7 +52,6 @@ use tracing::{debug, trace, warn}; mod config; pub use config::*; -use reth_storage_api::StateProviderFactory; type BestTransactionsIter = Box< dyn BestTransactions::Transaction>>>, diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index e5f7270bfef71..67ed25133efd6 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -8,8 +8,7 @@ use alloy_primitives::{Address, BlockHash, BlockNumber, TxHash}; use core::{fmt, ops::RangeInclusive}; use reth_execution_errors::{BlockExecutionError, InternalBlockExecutionError}; use reth_primitives::{ - transaction::SignedTransactionIntoRecoveredExt, RecoveredTx, SealedBlockFor, - SealedBlockWithSenders, SealedHeader, + transaction::SignedTransactionIntoRecoveredExt, RecoveredBlock, RecoveredTx, SealedHeader, }; use reth_primitives_traits::{Block, BlockBody, NodePrimitives, SignedTransaction}; use reth_trie::updates::TrieUpdates; @@ -29,7 +28,7 @@ use revm::db::BundleState; #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Chain { /// All blocks in this chain. - blocks: BTreeMap>, + blocks: BTreeMap>, /// The outcome of block execution for this chain. /// /// This field contains the state of all accounts after the execution of all blocks in this @@ -60,11 +59,12 @@ impl Chain { /// /// A chain of blocks should not be empty. pub fn new( - blocks: impl IntoIterator>, + blocks: impl IntoIterator>, execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { - let blocks = blocks.into_iter().map(|b| (b.number(), b)).collect::>(); + let blocks = + blocks.into_iter().map(|b| (b.header().number(), b)).collect::>(); debug_assert!(!blocks.is_empty(), "Chain should have at least one block"); Self { blocks, execution_outcome, trie_updates } @@ -72,7 +72,7 @@ impl Chain { /// Create new Chain from a single block and its state. pub fn from_block( - block: SealedBlockWithSenders, + block: RecoveredBlock, execution_outcome: ExecutionOutcome, trie_updates: Option, ) -> Self { @@ -80,12 +80,12 @@ impl Chain { } /// Get the blocks in this chain. 
- pub const fn blocks(&self) -> &BTreeMap> { + pub const fn blocks(&self) -> &BTreeMap> { &self.blocks } /// Consumes the type and only returns the blocks in this chain. - pub fn into_blocks(self) -> BTreeMap> { + pub fn into_blocks(self) -> BTreeMap> { self.blocks } @@ -131,15 +131,7 @@ impl Chain { } /// Returns the block with matching hash. - pub fn block(&self, block_hash: BlockHash) -> Option<&SealedBlockFor> { - self.block_with_senders(block_hash).map(|block| &block.block) - } - - /// Returns the block with matching hash. - pub fn block_with_senders( - &self, - block_hash: BlockHash, - ) -> Option<&SealedBlockWithSenders> { + pub fn block_with_senders(&self, block_hash: BlockHash) -> Option<&RecoveredBlock> { self.blocks.iter().find_map(|(_num, block)| (block.hash() == block_hash).then_some(block)) } @@ -183,15 +175,14 @@ impl Chain { } /// Returns an iterator over all blocks in the chain with increasing block number. - pub fn blocks_iter(&self) -> impl Iterator> + '_ { + pub fn blocks_iter(&self) -> impl Iterator> + '_ { self.blocks().iter().map(|block| block.1) } /// Returns an iterator over all blocks and their receipts in the chain. pub fn blocks_and_receipts( &self, - ) -> impl Iterator, &Vec>)> + '_ - { + ) -> impl Iterator, &Vec>)> + '_ { self.blocks_iter().zip(self.block_receipts_iter()) } @@ -199,7 +190,10 @@ impl Chain { #[track_caller] pub fn fork_block(&self) -> ForkBlock { let first = self.first(); - ForkBlock { number: first.number().saturating_sub(1), hash: first.parent_hash() } + ForkBlock { + number: first.header().number().saturating_sub(1), + hash: first.header().parent_hash(), + } } /// Get the first block in this chain. @@ -208,7 +202,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn first(&self) -> &SealedBlockWithSenders { + pub fn first(&self) -> &RecoveredBlock { self.blocks.first_key_value().expect("Chain should have at least one block").1 } @@ -218,7 +212,7 @@ impl Chain { /// /// If chain doesn't have any blocks. #[track_caller] - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &RecoveredBlock { self.blocks.last_key_value().expect("Chain should have at least one block").1 } @@ -233,7 +227,7 @@ impl Chain { /// /// If chain doesn't have any blocks. pub fn range(&self) -> RangeInclusive { - self.first().number()..=self.tip().number() + self.first().header().number()..=self.tip().header().number() } /// Get all receipts for the given block. @@ -270,10 +264,10 @@ impl Chain { /// This method assumes that blocks attachment to the chain has already been validated. pub fn append_block( &mut self, - block: SealedBlockWithSenders, + block: RecoveredBlock, execution_outcome: ExecutionOutcome, ) { - self.blocks.insert(block.number(), block); + self.blocks.insert(block.header().number(), block); self.execution_outcome.extend(execution_outcome); self.trie_updates.take(); // reset } @@ -375,7 +369,7 @@ impl Chain { /// Wrapper type for `blocks` display in `Chain` #[derive(Debug)] pub struct DisplayBlocksChain<'a, B: reth_primitives_traits::Block>( - pub &'a BTreeMap>, + pub &'a BTreeMap>, ); impl fmt::Display for DisplayBlocksChain<'_, B> { @@ -396,7 +390,7 @@ impl fmt::Display for DisplayBlocksChain<'_, B /// All blocks in the chain #[derive(Clone, Debug, Default, PartialEq, Eq)] pub struct ChainBlocks<'a, B: Block> { - blocks: Cow<'a, BTreeMap>>, + blocks: Cow<'a, BTreeMap>>, } impl>> ChainBlocks<'_, B> { @@ -404,13 +398,13 @@ impl>> ChainBlocks<'_, /// /// Note: this always yields at least one block. 
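Aside: the `Chain` accessors in the hunks above all read off a `BTreeMap` keyed by block number, so `first`, `tip`, `range`, and `fork_block` are ordered-map lookups. A toy model (all types are stand-ins for reth's):

```rust
use std::collections::BTreeMap;

/// Stand-in for reth's `ForkBlock`.
struct ForkBlock {
    number: u64,
}

/// Toy chain: blocks keyed by number, mirroring `Chain::blocks`.
struct ToyChain {
    blocks: BTreeMap<u64, &'static str>,
}

impl ToyChain {
    fn first_number(&self) -> u64 {
        *self.blocks.first_key_value().expect("chain has at least one block").0
    }

    fn tip_number(&self) -> u64 {
        *self.blocks.last_key_value().expect("chain has at least one block").0
    }

    /// Mirrors `Chain::fork_block`: the block this chain forked from.
    fn fork_block(&self) -> ForkBlock {
        ForkBlock { number: self.first_number().saturating_sub(1) }
    }
}

fn main() {
    let chain = ToyChain { blocks: BTreeMap::from([(10, "a"), (11, "b")]) };
    assert_eq!(chain.fork_block().number, 9);
    assert_eq!(chain.first_number()..=chain.tip_number(), 10..=11);
}
```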
#[inline] - pub fn into_blocks(self) -> impl Iterator> { + pub fn into_blocks(self) -> impl Iterator> { self.blocks.into_owned().into_values() } /// Creates an iterator over all blocks in the chain with increasing block number. #[inline] - pub fn iter(&self) -> impl Iterator)> { + pub fn iter(&self) -> impl Iterator)> { self.blocks.iter() } @@ -420,7 +414,7 @@ impl>> ChainBlocks<'_, /// /// Chains always have at least one block. #[inline] - pub fn tip(&self) -> &SealedBlockWithSenders { + pub fn tip(&self) -> &RecoveredBlock { self.blocks.last_key_value().expect("Chain should have at least one block").1 } @@ -430,7 +424,7 @@ impl>> ChainBlocks<'_, /// /// Chains always have at least one block. #[inline] - pub fn first(&self) -> &SealedBlockWithSenders { + pub fn first(&self) -> &RecoveredBlock { self.blocks.first_key_value().expect("Chain should have at least one block").1 } @@ -461,13 +455,15 @@ impl>> ChainBlocks<'_, /// Returns an iterator over all transaction hashes in the block #[inline] pub fn transaction_hashes(&self) -> impl Iterator + '_ { - self.blocks.values().flat_map(|block| block.transactions().iter().map(|tx| tx.trie_hash())) + self.blocks + .values() + .flat_map(|block| block.body().transactions().iter().map(|tx| tx.trie_hash())) } } impl IntoIterator for ChainBlocks<'_, B> { - type Item = (BlockNumber, SealedBlockWithSenders); - type IntoIter = std::collections::btree_map::IntoIter>; + type Item = (BlockNumber, RecoveredBlock); + type IntoIter = std::collections::btree_map::IntoIter>; fn into_iter(self) -> Self::IntoIter { #[allow(clippy::unnecessary_to_owned)] @@ -536,9 +532,7 @@ pub(super) mod serde_bincode_compat { use crate::ExecutionOutcome; use alloc::borrow::Cow; use alloy_primitives::BlockNumber; - use reth_primitives::{ - serde_bincode_compat::SealedBlockWithSenders, EthPrimitives, NodePrimitives, - }; + use reth_primitives::{serde_bincode_compat::RecoveredBlock, EthPrimitives, NodePrimitives}; use reth_primitives_traits::{serde_bincode_compat::SerdeBincodeCompat, Block}; use reth_trie_common::serde_bincode_compat::updates::TrieUpdates; use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; @@ -563,21 +557,25 @@ pub(super) mod serde_bincode_compat { #[derive(Debug, Serialize, Deserialize)] pub struct Chain<'a, N = EthPrimitives> where - N: NodePrimitives, + N: NodePrimitives< + Block: Block + 'static, + >, { - blocks: SealedBlocksWithSenders<'a, N::Block>, + blocks: RecoveredBlocks<'a, N::Block>, execution_outcome: Cow<'a, ExecutionOutcome>, trie_updates: Option>, } #[derive(Debug)] - struct SealedBlocksWithSenders<'a, B: reth_primitives_traits::Block>( - Cow<'a, BTreeMap>>, - ); + struct RecoveredBlocks< + 'a, + B: reth_primitives_traits::Block + + 'static, + >(Cow<'a, BTreeMap>>); - impl Serialize for SealedBlocksWithSenders<'_, B> + impl Serialize for RecoveredBlocks<'_, B> where - B: Block, + B: Block + 'static, { fn serialize(&self, serializer: S) -> Result where @@ -586,23 +584,23 @@ pub(super) mod serde_bincode_compat { let mut state = serializer.serialize_map(Some(self.0.len()))?; for (block_number, block) in self.0.iter() { - state.serialize_entry(block_number, &SealedBlockWithSenders::<'_>::from(block))?; + state.serialize_entry(block_number, &RecoveredBlock::<'_, B>::from(block))?; } state.end() } } - impl<'de, B> Deserialize<'de> for SealedBlocksWithSenders<'_, B> + impl<'de, B> Deserialize<'de> for RecoveredBlocks<'_, B> where - B: Block, + B: Block + 'static, { fn deserialize(deserializer: D) -> Result where D: 
Deserializer<'de>, { Ok(Self(Cow::Owned( - BTreeMap::>::deserialize(deserializer) + BTreeMap::>::deserialize(deserializer) .map(|blocks| blocks.into_iter().map(|(n, b)| (n, b.into())).collect())?, ))) } @@ -610,11 +608,13 @@ pub(super) mod serde_bincode_compat { impl<'a, N> From<&'a super::Chain> for Chain<'a, N> where - N: NodePrimitives, + N: NodePrimitives< + Block: Block + 'static, + >, { fn from(value: &'a super::Chain) -> Self { Self { - blocks: SealedBlocksWithSenders(Cow::Borrowed(&value.blocks)), + blocks: RecoveredBlocks(Cow::Borrowed(&value.blocks)), execution_outcome: Cow::Borrowed(&value.execution_outcome), trie_updates: value.trie_updates.as_ref().map(Into::into), } @@ -623,7 +623,9 @@ pub(super) mod serde_bincode_compat { impl<'a, N> From> for super::Chain where - N: NodePrimitives, + N: NodePrimitives< + Block: Block + 'static, + >, { fn from(value: Chain<'a, N>) -> Self { Self { @@ -634,8 +636,13 @@ pub(super) mod serde_bincode_compat { } } - impl SerializeAs for Chain<'_> { - fn serialize_as(source: &super::Chain, serializer: S) -> Result + impl SerializeAs> for Chain<'_, N> + where + N: NodePrimitives< + Block: Block + 'static, + >, + { + fn serialize_as(source: &super::Chain, serializer: S) -> Result where S: Serializer, { @@ -643,8 +650,13 @@ pub(super) mod serde_bincode_compat { } } - impl<'de> DeserializeAs<'de, super::Chain> for Chain<'de> { - fn deserialize_as(deserializer: D) -> Result + impl<'de, N> DeserializeAs<'de, super::Chain> for Chain<'de, N> + where + N: NodePrimitives< + Block: Block + 'static, + >, + { + fn deserialize_as(deserializer: D) -> Result, D::Error> where D: Deserializer<'de>, { @@ -654,14 +666,13 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { + use super::super::{serde_bincode_compat, Chain}; use arbitrary::Arbitrary; use rand::Rng; - use reth_primitives::SealedBlockWithSenders; + use reth_primitives::RecoveredBlock; use serde::{Deserialize, Serialize}; use serde_with::serde_as; - use super::super::{serde_bincode_compat, Chain}; - #[test] fn test_chain_bincode_roundtrip() { #[serde_as] @@ -675,10 +686,8 @@ pub(super) mod serde_bincode_compat { rand::thread_rng().fill(bytes.as_mut_slice()); let data = Data { chain: Chain::new( - vec![SealedBlockWithSenders::arbitrary(&mut arbitrary::Unstructured::new( - &bytes, - )) - .unwrap()], + vec![RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) + .unwrap()], Default::default(), None, ), @@ -716,7 +725,7 @@ mod tests { #[test] fn chain_append() { - let block: SealedBlockWithSenders = Default::default(); + let block: RecoveredBlock = Default::default(); let block1_hash = B256::new([0x01; 32]); let block2_hash = B256::new([0x02; 32]); let block3_hash = B256::new([0x03; 32]); @@ -727,10 +736,10 @@ mod tests { let mut block3 = block.clone(); let mut block4 = block; - block1.block.set_hash(block1_hash); - block2.block.set_hash(block2_hash); - block3.block.set_hash(block3_hash); - block4.block.set_hash(block4_hash); + block1.set_hash(block1_hash); + block2.set_hash(block2_hash); + block3.set_hash(block3_hash); + block4.set_hash(block4_hash); block3.set_parent_hash(block2_hash); @@ -780,13 +789,13 @@ mod tests { vec![], ); - let mut block1: SealedBlockWithSenders = Default::default(); + let mut block1: RecoveredBlock = Default::default(); let block1_hash = B256::new([15; 32]); block1.set_block_number(1); block1.set_hash(block1_hash); block1.push_sender(Address::new([4; 20])); - let mut block2: SealedBlockWithSenders = Default::default(); + let mut block2: RecoveredBlock = 
Default::default(); let block2_hash = B256::new([16; 32]); block2.set_block_number(2); block2.set_hash(block2_hash); @@ -846,8 +855,8 @@ mod tests { #[test] fn receipts_by_block_hash() { - // Create a default SealedBlockWithSenders object - let block: SealedBlockWithSenders = Default::default(); + // Create a default RecoveredBlock object + let block: RecoveredBlock = Default::default(); // Define block hashes for block1 and block2 let block1_hash = B256::new([0x01; 32]); @@ -858,8 +867,8 @@ mod tests { let mut block2 = block; // Set the hashes of block1 and block2 - block1.block.set_hash(block1_hash); - block2.block.set_hash(block2_hash); + block1.set_hash(block1_hash); + block2.set_hash(block2_hash); // Create a random receipt object, receipt1 let receipt1 = Receipt { diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 8bf40d38caa0e..2b221f14564a7 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -17,8 +17,7 @@ use alloy_primitives::{ }; use core::fmt::Display; use reth_consensus::ConsensusError; -use reth_primitives::{BlockWithSenders, NodePrimitives, Receipt}; -use reth_primitives_traits::Block; +use reth_primitives::{NodePrimitives, Receipt, RecoveredBlock}; use reth_prune_types::PruneModes; use reth_revm::batch::BlockBatchRecord; use revm::{ @@ -151,7 +150,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// the returned state. type Executor + Display>>: for<'a> Executor< DB, - Input<'a> = &'a BlockWithSenders<::Block>, + Input<'a> = &'a RecoveredBlock<::Block>, Output = BlockExecutionOutput<::Receipt>, Error = BlockExecutionError, >; @@ -159,7 +158,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// An executor that can execute a batch of blocks given a database. type BatchExecutor + Display>>: for<'a> BatchExecutor< DB, - Input<'a> = &'a BlockWithSenders<::Block>, + Input<'a> = &'a RecoveredBlock<::Block>, Output = ExecutionOutcome<::Receipt>, Error = BlockExecutionError, >; @@ -206,19 +205,19 @@ pub trait BlockExecutionStrategy { /// Applies any necessary changes before executing the block's transactions. fn apply_pre_execution_changes( &mut self, - block: &BlockWithSenders<::Block>, + block: &RecoveredBlock<::Block>, ) -> Result<(), Self::Error>; /// Executes all transactions in the block. fn execute_transactions( &mut self, - block: &BlockWithSenders<::Block>, + block: &RecoveredBlock<::Block>, ) -> Result::Receipt>, Self::Error>; /// Applies any necessary changes after executing the block's transactions. fn apply_post_execution_changes( &mut self, - block: &BlockWithSenders<::Block>, + block: &RecoveredBlock<::Block>, receipts: &[::Receipt], ) -> Result; @@ -240,7 +239,7 @@ pub trait BlockExecutionStrategy { /// Validate a block with regard to execution results. 
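Aside: the executor traits being edited here take their input through a lifetime-generic associated type, which is what lets this patch swap `&'a BlockWithSenders` for `&'a RecoveredBlock` with a one-line change per impl. A minimal sketch of that pattern with stub types (not the real trait, which also carries database and error parameters):

```rust
/// Cut-down executor trait: the input is a generic associated type.
trait Executor {
    type Input<'a>;
    fn execute<'a>(&mut self, input: Self::Input<'a>) -> u64;
}

/// Stand-in for a recovered block.
struct RecoveredBlockStub {
    gas_used: u64,
}

struct NoopExecutor;

impl Executor for NoopExecutor {
    // Mirrors `type Input<'a> = &'a RecoveredBlock<P::Block>` in the patch;
    // changing the block type touches only this line.
    type Input<'a> = &'a RecoveredBlockStub;

    fn execute<'a>(&mut self, input: Self::Input<'a>) -> u64 {
        input.gas_used
    }
}

fn main() {
    let mut exec = NoopExecutor;
    assert_eq!(exec.execute(&RecoveredBlockStub { gas_used: 21_000 }), 21_000);
}
```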
fn validate_block_post_execution( &self, - _block: &BlockWithSenders<::Block>, + _block: &RecoveredBlock<::Block>, _receipts: &[::Receipt], _requests: &Requests, ) -> Result<(), ConsensusError> { @@ -338,7 +337,7 @@ where S: BlockExecutionStrategy, DB: Database + Display>, { - type Input<'a> = &'a BlockWithSenders<::Block>; + type Input<'a> = &'a RecoveredBlock<::Block>; type Output = BlockExecutionOutput<::Receipt>; type Error = S::Error; @@ -425,7 +424,7 @@ where S: BlockExecutionStrategy, DB: Database + Display>, { - type Input<'a> = &'a BlockWithSenders<::Block>; + type Input<'a> = &'a RecoveredBlock<::Block>; type Output = ExecutionOutcome<::Receipt>; type Error = BlockExecutionError; @@ -548,7 +547,7 @@ mod tests { struct TestExecutor(PhantomData); impl Executor for TestExecutor { - type Input<'a> = &'a BlockWithSenders; + type Input<'a> = &'a RecoveredBlock; type Output = BlockExecutionOutput; type Error = BlockExecutionError; @@ -580,7 +579,7 @@ mod tests { } impl BatchExecutor for TestExecutor { - type Input<'a> = &'a BlockWithSenders; + type Input<'a> = &'a RecoveredBlock; type Output = ExecutionOutcome; type Error = BlockExecutionError; @@ -661,21 +660,21 @@ mod tests { fn apply_pre_execution_changes( &mut self, - _block: &BlockWithSenders, + _block: &RecoveredBlock, ) -> Result<(), Self::Error> { Ok(()) } fn execute_transactions( &mut self, - _block: &BlockWithSenders, + _block: &RecoveredBlock, ) -> Result, Self::Error> { Ok(self.execute_transactions_result.clone()) } fn apply_post_execution_changes( &mut self, - _block: &BlockWithSenders, + _block: &RecoveredBlock, _receipts: &[Receipt], ) -> Result { Ok(self.apply_post_execution_changes_result.clone()) @@ -697,7 +696,7 @@ mod tests { fn validate_block_post_execution( &self, - _block: &BlockWithSenders, + _block: &RecoveredBlock, _receipts: &[Receipt], _requests: &Requests, ) -> Result<(), ConsensusError> { diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index 242ddfe5b79af..4787bf9ce5f79 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -7,7 +7,7 @@ use alloy_consensus::BlockHeader; use metrics::{Counter, Gauge, Histogram}; use reth_execution_types::BlockExecutionOutput; use reth_metrics::Metrics; -use reth_primitives::BlockWithSenders; +use reth_primitives::RecoveredBlock; use revm_primitives::EvmState; use std::time::Instant; @@ -68,7 +68,7 @@ pub struct ExecutorMetrics { } impl ExecutorMetrics { - fn metered(&self, block: &BlockWithSenders, f: F) -> R + fn metered(&self, block: &RecoveredBlock, f: F) -> R where F: FnOnce() -> R, B: reth_primitives_traits::Block, @@ -97,13 +97,13 @@ impl ExecutorMetrics { pub fn execute_metered<'a, E, DB, O, Error, B>( &self, executor: E, - input: &'a BlockWithSenders, + input: &'a RecoveredBlock, state_hook: Box, ) -> Result, Error> where E: Executor< DB, - Input<'a> = &'a BlockWithSenders, + Input<'a> = &'a RecoveredBlock, Output = BlockExecutionOutput, Error = Error, >, @@ -131,9 +131,9 @@ impl ExecutorMetrics { } /// Execute the given block and update metrics for the execution. 
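Aside: `metered` and `metered_one` below wrap block execution in a closure so wall time and gas throughput can be recorded around it. A toy version of that wrapper (a stand-in metrics struct, not reth's `ExecutorMetrics`):

```rust
use std::time::Instant;

/// Stand-in metrics sink.
struct Metrics {
    gas_processed_total: u64,
    last_execution_secs: f64,
}

impl Metrics {
    /// Run the execution closure, then record elapsed time and gas.
    fn metered<R>(&mut self, block_gas_used: u64, f: impl FnOnce() -> R) -> R {
        let start = Instant::now();
        let output = f();
        self.last_execution_secs = start.elapsed().as_secs_f64();
        self.gas_processed_total += block_gas_used;
        output
    }
}

fn main() {
    let mut metrics = Metrics { gas_processed_total: 0, last_execution_secs: 0.0 };
    let receipts = metrics.metered(21_000, || vec!["receipt"]);
    assert_eq!(receipts.len(), 1);
    assert_eq!(metrics.gas_processed_total, 21_000);
    assert!(metrics.last_execution_secs >= 0.0);
}
```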
- pub fn metered_one(&self, input: &BlockWithSenders, f: F) -> R + pub fn metered_one(&self, input: &RecoveredBlock, f: F) -> R where - F: FnOnce(&BlockWithSenders) -> R, + F: FnOnce(&RecoveredBlock) -> R, B: reth_primitives_traits::Block, { self.metered(input, || f(input)) @@ -158,7 +158,7 @@ mod tests { impl Executor<()> for MockExecutor { type Input<'a> - = &'a BlockWithSenders + = &'a RecoveredBlock where Self: 'a; type Output = BlockExecutionOutput<()>; @@ -229,7 +229,7 @@ mod tests { fn test_executor_metrics_hook_metrics_recorded() { let snapshotter = setup_test_recorder(); let metrics = ExecutorMetrics::default(); - let input = BlockWithSenders::default(); + let input = RecoveredBlock::default(); let (tx, _rx) = mpsc::channel(); let expected_output = 42; @@ -278,7 +278,7 @@ mod tests { #[test] fn test_executor_metrics_hook_called() { let metrics = ExecutorMetrics::default(); - let input = BlockWithSenders::default(); + let input = RecoveredBlock::default(); let (tx, rx) = mpsc::channel(); let expected_output = 42; diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index 66041840ae773..27e2e96239297 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -4,7 +4,7 @@ use alloy_primitives::BlockNumber; use core::fmt::Display; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionOutput, ExecutionOutcome}; -use reth_primitives::{BlockWithSenders, NodePrimitives}; +use reth_primitives::{NodePrimitives, RecoveredBlock}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -45,7 +45,7 @@ impl BlockExecutorProvider for NoopBlockExecutorProvider

 {
 }
 
 impl<P: NodePrimitives, DB: Database<Error: Into<ProviderError> + Display>> Executor<DB>
     for NoopBlockExecutorProvider<P>
 {
-    type Input<'a> = &'a BlockWithSenders<P::Block>;
+    type Input<'a> = &'a RecoveredBlock<P::Block>;
     type Output = BlockExecutionOutput<P::Receipt>;
     type Error = BlockExecutionError;
@@ -77,7 +77,7 @@ impl<P: NodePrimitives, DB: Database<Error: Into<ProviderError> + Display>> Executor<DB>
 {
 }
 
 impl<P: NodePrimitives, DB: Database<Error: Into<ProviderError> + Display>> BatchExecutor<DB>
     for NoopBlockExecutorProvider<P>
{ - type Input<'a> = &'a BlockWithSenders; + type Input<'a> = &'a RecoveredBlock; type Output = ExecutionOutcome; type Error = BlockExecutionError; diff --git a/crates/evm/src/state_change.rs b/crates/evm/src/state_change.rs index af14705d16867..a6f38f6268a3c 100644 --- a/crates/evm/src/state_change.rs +++ b/crates/evm/src/state_change.rs @@ -5,6 +5,7 @@ use alloy_eips::eip4895::Withdrawal; use alloy_primitives::{map::HashMap, Address}; use reth_chainspec::EthereumHardforks; use reth_consensus_common::calc; +use reth_primitives::SealedBlock; use reth_primitives_traits::BlockBody; /// Collect all balance changes at the end of the block. @@ -14,7 +15,7 @@ use reth_primitives_traits::BlockBody; #[inline] pub fn post_block_balance_increments( chain_spec: &ChainSpec, - block: &Block, + block: &SealedBlock, ) -> HashMap where ChainSpec: EthereumHardforks, diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index 762098a4871c1..2eaf7fdc5aa11 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -12,7 +12,7 @@ use alloy_primitives::BlockNumber; use parking_lot::Mutex; use reth_execution_errors::BlockExecutionError; use reth_execution_types::ExecutionOutcome; -use reth_primitives::{BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, Receipts}; +use reth_primitives::{EthPrimitives, NodePrimitives, Receipt, Receipts, RecoveredBlock}; use reth_prune_types::PruneModes; use reth_storage_errors::provider::ProviderError; use revm::State; @@ -55,7 +55,7 @@ impl BlockExecutorProvider for MockExecutorProvider { } impl Executor for MockExecutorProvider { - type Input<'a> = &'a BlockWithSenders; + type Input<'a> = &'a RecoveredBlock; type Output = BlockExecutionOutput; type Error = BlockExecutionError; @@ -97,7 +97,7 @@ impl Executor for MockExecutorProvider { } impl BatchExecutor for MockExecutorProvider { - type Input<'a> = &'a BlockWithSenders; + type Input<'a> = &'a RecoveredBlock; type Output = ExecutionOutcome; type Error = BlockExecutionError; diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 126a2562f7085..4f1ac8e97aa48 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -10,7 +10,7 @@ use reth_evm::execute::{ BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, }; use reth_node_api::{Block as _, BlockBody as _, NodePrimitives}; -use reth_primitives::{BlockExt, BlockWithSenders, Receipt}; +use reth_primitives::{Receipt, RecoveredBlock}; use reth_primitives_traits::{format_gas_throughput, SignedTransaction}; use reth_provider::{ BlockReader, Chain, HeaderProvider, ProviderError, StateProviderFactory, TransactionVariant, @@ -107,10 +107,9 @@ where let execute_start = Instant::now(); // Unseal the block for execution - let (block, senders) = block.split(); - let (header, body) = block.split(); - let (unsealed_header, hash) = header.split(); - let block = P::Block::new(unsealed_header, body).with_senders_unchecked(senders); + let (block, senders) = block.split_sealed(); + let (header, body) = block.split_sealed_header_body(); + let block = P::Block::new_sealed(header, body).with_senders(senders); executor.execute_and_verify_one(&block)?; execution_duration += execute_start.elapsed(); @@ -118,7 +117,7 @@ where // TODO(alexey): report gas metrics using `block.header.gas_used` // Seal the block back and save it - blocks.push(block.seal_unchecked(hash)); + blocks.push(block); // Check if we should commit now let bundle_size_hint = 
executor.size_hint().unwrap_or_default() as u64; @@ -151,7 +150,7 @@ where /// Single block Backfill job started for a specific range. /// /// It implements [`Iterator`] which executes a block each time the -/// iterator is advanced and yields ([`BlockWithSenders`], [`BlockExecutionOutput`]) +/// iterator is advanced and yields ([`RecoveredBlock`], [`BlockExecutionOutput`]) #[derive(Debug, Clone)] pub struct SingleBlockBackfillJob { pub(crate) executor: E, @@ -166,7 +165,7 @@ where P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult<( - BlockWithSenders, + RecoveredBlock, BlockExecutionOutput<::Receipt>, )>; @@ -183,7 +182,11 @@ where /// Converts the single block backfill job into a stream. pub fn into_stream( self, - ) -> StreamBackfillJob)> { + ) -> StreamBackfillJob< + E, + P, + (RecoveredBlock, BlockExecutionOutput), + > { self.into() } @@ -192,7 +195,7 @@ where &self, block_number: u64, ) -> BackfillJobResult<( - BlockWithSenders, + RecoveredBlock, BlockExecutionOutput<::Receipt>, )> { // Fetch the block with senders for execution. @@ -206,7 +209,7 @@ where self.provider.history_by_block_number(block_number.saturating_sub(1))?, )); - trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.block.body().transactions().len(), "Executing block"); + trace!(target: "exex::backfill", number = block_number, txs = block_with_senders.body().transaction_count(), "Executing block"); let block_execution_output = executor.execute(&block_with_senders)?; @@ -310,8 +313,7 @@ mod tests { let (block, mut execution_output) = res?; execution_output.state.reverts.sort(); - let sealed_block_with_senders = blocks_and_execution_outcomes[i].0.clone(); - let expected_block = sealed_block_with_senders.unseal(); + let expected_block = blocks_and_execution_outcomes[i].0.clone(); let expected_output = &blocks_and_execution_outcomes[i].1; assert_eq!(block, expected_block); diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index 2fef2dd57d14a..30b28b5c66dce 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -7,7 +7,7 @@ use futures::{ }; use reth_evm::execute::{BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider}; use reth_node_api::NodePrimitives; -use reth_primitives::{BlockWithSenders, EthPrimitives}; +use reth_primitives::{EthPrimitives, RecoveredBlock}; use reth_provider::{BlockReader, Chain, StateProviderFactory}; use reth_prune_types::PruneModes; use reth_stages_api::ExecutionStageThresholds; @@ -38,7 +38,7 @@ struct BackfillTaskOutput { type BackfillTasks = FuturesOrdered>>; type SingleBlockStreamItem = ( - BlockWithSenders<::Block>, + RecoveredBlock<::Block>, BlockExecutionOutput<::Receipt>, ); type BatchBlockStreamItem = Chain; @@ -278,8 +278,7 @@ mod tests { // execute first block let (block, mut execution_output) = backfill_stream.next().await.unwrap().unwrap(); execution_output.state.reverts.sort(); - let sealed_block_with_senders = blocks_and_execution_outcomes[0].0.clone(); - let expected_block = sealed_block_with_senders.unseal(); + let expected_block = blocks_and_execution_outcomes[0].0.clone(); let expected_output = &blocks_and_execution_outcomes[0].1; assert_eq!(block, expected_block); assert_eq!(&execution_output, expected_output); diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs index 721071f081e1a..f64a09ab752e8 100644 --- a/crates/exex/exex/src/backfill/test_utils.rs +++ 
diff --git a/crates/exex/exex/src/backfill/test_utils.rs b/crates/exex/exex/src/backfill/test_utils.rs
index 721071f081e1a..f64a09ab752e8 100644
--- a/crates/exex/exex/src/backfill/test_utils.rs
+++ b/crates/exex/exex/src/backfill/test_utils.rs
@@ -1,6 +1,6 @@
 use std::sync::Arc;
 
-use alloy_consensus::{constants::ETH_TO_WEI, Header, TxEip2930};
+use alloy_consensus::{constants::ETH_TO_WEI, BlockHeader, Header, TxEip2930};
 use alloy_genesis::{Genesis, GenesisAccount};
 use alloy_primitives::{b256, Address, TxKind, U256};
 use eyre::OptionExt;
@@ -8,9 +8,8 @@ use reth_chainspec::{ChainSpec, ChainSpecBuilder, EthereumHardfork, MAINNET, MIN
 use reth_evm::execute::{BatchExecutor, BlockExecutionOutput, BlockExecutorProvider, Executor};
 use reth_evm_ethereum::execute::EthExecutorProvider;
 use reth_node_api::FullNodePrimitives;
-use reth_primitives::{
-    Block, BlockBody, BlockExt, BlockWithSenders, Receipt, SealedBlockWithSenders, Transaction,
-};
+use reth_primitives::{Block, BlockBody, Receipt, RecoveredBlock, Transaction};
+use reth_primitives_traits::Block as _;
 use reth_provider::{
     providers::ProviderNodeTypes, BlockWriter as _, ExecutionOutcome, LatestStateProviderRef,
     ProviderFactory,
@@ -53,7 +52,7 @@ pub(crate) fn chain_spec(address: Address) -> Arc {
 pub(crate) fn execute_block_and_commit_to_database(
     provider_factory: &ProviderFactory,
     chain_spec: Arc,
-    block: &BlockWithSenders,
+    block: &RecoveredBlock,
 ) -> eyre::Result>
 where
     N: ProviderNodeTypes<
@@ -73,13 +72,12 @@ where
     block_execution_output.state.reverts.sort();
 
     // Convert the block execution output to an execution outcome for committing to the database
-    let execution_outcome = to_execution_outcome(block.number, &block_execution_output);
+    let execution_outcome = to_execution_outcome(block.number(), &block_execution_output);
 
     // Commit the block's execution outcome to the database
     let provider_rw = provider_factory.provider_rw()?;
-    let block = block.clone().seal_slow();
     provider_rw.append_blocks_with_state(
-        vec![block],
+        vec![block.clone()],
         &execution_outcome,
         Default::default(),
         Default::default(),
@@ -92,7 +90,8 @@ where
 fn blocks(
     chain_spec: Arc,
     key_pair: Keypair,
-) -> eyre::Result<(BlockWithSenders, BlockWithSenders)> {
+) -> eyre::Result<(RecoveredBlock, RecoveredBlock)>
+{
     // First block has a transaction that transfers some ETH to zero address
     let block1 = Block {
         header: Header {
@@ -128,7 +127,7 @@ fn blocks(
     // Second block resends the same transaction with increased nonce
     let block2 = Block {
         header: Header {
-            parent_hash: block1.header.hash_slow(),
+            parent_hash: block1.hash(),
             receipts_root: b256!(
                 "d3a6acf9a244d78b33831df95d472c4128ea85bf079a1d41e32ed0b7d2244c9e"
             ),
@@ -164,7 +163,7 @@ pub(crate) fn blocks_and_execution_outputs(
     provider_factory: ProviderFactory,
     chain_spec: Arc,
     key_pair: Keypair,
-) -> eyre::Result)>>
+) -> eyre::Result, BlockExecutionOutput)>>
 where
     N: ProviderNodeTypes<
         Primitives: FullNodePrimitives<
@@ -181,9 +180,6 @@ where
     let block_output2 =
         execute_block_and_commit_to_database(&provider_factory, chain_spec, &block2)?;
 
-    let block1 = block1.seal_slow();
-    let block2 = block2.seal_slow();
-
     Ok(vec![(block1, block_output1), (block2, block_output2)])
 }
 
@@ -191,7 +187,7 @@ pub(crate) fn blocks_and_execution_outcome(
     provider_factory: ProviderFactory,
     chain_spec: Arc,
     key_pair: Keypair,
-) -> eyre::Result<(Vec, ExecutionOutcome)>
+) -> eyre::Result<(Vec>, ExecutionOutcome)>
 where
     N: ProviderNodeTypes,
     N::Primitives:
@@ -207,9 +203,6 @@ where
     let mut execution_outcome = executor.execute_and_verify_batch(vec![&block1, &block2])?;
     execution_outcome.state_mut().reverts.sort();
 
-    let block1 = block1.seal_slow();
-    let block2 = block2.seal_slow();
-
     // Commit the block's execution outcome to the database
     let provider_rw = provider_factory.provider_rw()?;
     provider_rw.append_blocks_with_state(
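Worth spelling out why the `seal_slow()` round-trips vanish from these helpers: a `RecoveredBlock` already carries its sealed header, so the hash never needs recomputing before a write. A sketch of the post-patch commit path, reusing only names from the diff above (`execute`, `to_execution_outcome`, `append_blocks_with_state`); it is illustrative, not self-contained:

    // Sketch: execute a recovered block, convert the output, and append the
    // block clone directly -- no `seal_slow()` re-hash is needed any more.
    //
    //     let output = executor.execute(&block)?;
    //     let outcome = to_execution_outcome(block.number(), &output);
    //     provider_rw.append_blocks_with_state(
    //         vec![block.clone()],
    //         &outcome,
    //         Default::default(),
    //         Default::default(),
    //     )?;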
diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs
index b2817582760d9..0d232e3ef2240 100644
--- a/crates/exex/exex/src/manager.rs
+++ b/crates/exex/exex/src/manager.rs
@@ -664,7 +664,7 @@ mod tests {
     use reth_db_common::init::init_genesis;
     use reth_evm::test_utils::MockExecutorProvider;
     use reth_evm_ethereum::execute::EthExecutorProvider;
-    use reth_primitives::SealedBlockWithSenders;
+    use reth_primitives::RecoveredBlock;
     use reth_provider::{
         providers::BlockchainProvider, test_utils::create_test_provider_factory, BlockReader,
         BlockWriter, Chain, DatabaseProviderFactory, StorageLocation, TransactionVariant,
@@ -766,9 +766,9 @@ mod tests {
             ExExManager::new((), vec![exex_handle], 10, wal, empty_finalized_header_stream());
 
         // Define the notification for testing
-        let mut block1: SealedBlockWithSenders = Default::default();
-        block1.block.set_hash(B256::new([0x01; 32]));
-        block1.block.set_block_number(10);
+        let mut block1: RecoveredBlock = Default::default();
+        block1.set_hash(B256::new([0x01; 32]));
+        block1.set_block_number(10);
 
         let notification1 = ExExNotification::ChainCommitted {
             new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())),
@@ -784,9 +784,9 @@ mod tests {
         assert_eq!(exex_manager.next_id, 1);
 
         // Push another notification
-        let mut block2: SealedBlockWithSenders = Default::default();
-        block2.block.set_hash(B256::new([0x02; 32]));
-        block2.block.set_block_number(20);
+        let mut block2: RecoveredBlock = Default::default();
+        block2.set_hash(B256::new([0x02; 32]));
+        block2.set_block_number(20);
 
         let notification2 = ExExNotification::ChainCommitted {
             new: Arc::new(Chain::new(vec![block2.clone()], Default::default(), Default::default())),
@@ -827,7 +827,7 @@ mod tests {
         );
 
         // Push some notifications to fill part of the buffer
-        let mut block1: SealedBlockWithSenders = Default::default();
+        let mut block1: RecoveredBlock = Default::default();
         block1.set_hash(B256::new([0x01; 32]));
         block1.set_block_number(10);
 
@@ -1116,13 +1116,13 @@ mod tests {
         assert_eq!(exex_handle.next_notification_id, 0);
 
         // Setup two blocks for the chain commit notification
-        let mut block1: SealedBlockWithSenders = Default::default();
-        block1.block.set_hash(B256::new([0x01; 32]));
-        block1.block.set_block_number(10);
+        let mut block1: RecoveredBlock = Default::default();
+        block1.set_hash(B256::new([0x01; 32]));
+        block1.set_block_number(10);
 
-        let mut block2: SealedBlockWithSenders = Default::default();
-        block2.block.set_hash(B256::new([0x02; 32]));
-        block2.block.set_block_number(11);
+        let mut block2: RecoveredBlock = Default::default();
+        block2.set_hash(B256::new([0x02; 32]));
+        block2.set_block_number(11);
 
         // Setup a notification
         let notification = ExExNotification::ChainCommitted {
@@ -1169,9 +1169,9 @@ mod tests {
         // Set finished_height to a value higher than the block tip
         exex_handle.finished_height = Some(BlockNumHash::new(15, B256::random()));
 
-        let mut block1: SealedBlockWithSenders = Default::default();
-        block1.block.set_hash(B256::new([0x01; 32]));
-        block1.block.set_block_number(10);
+        let mut block1: RecoveredBlock = Default::default();
+        block1.set_hash(B256::new([0x01; 32]));
+        block1.set_block_number(10);
 
         let notification = ExExNotification::ChainCommitted {
             new: Arc::new(Chain::new(vec![block1.clone()], Default::default(), Default::default())),
@@ -1300,7 +1300,7 @@ mod tests {
                 genesis_block.number + 1,
                 BlockParams { parent: Some(genesis_hash), ..Default::default() },
             )
-            .seal_with_senders::()
+            .try_recover()
             .unwrap();
         let provider_rw = provider_factory.database_provider_rw().unwrap();
         provider_rw.insert_block(block.clone(), StorageLocation::Database).unwrap();
diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs
index 0d361de300935..f9f5dfc914e83 100644
--- a/crates/exex/exex/src/notifications.rs
+++ b/crates/exex/exex/src/notifications.rs
@@ -435,16 +435,16 @@ where
 
 #[cfg(test)]
 mod tests {
-    use crate::Wal;
-
     use super::*;
+    use crate::Wal;
     use alloy_consensus::Header;
     use alloy_eips::BlockNumHash;
     use eyre::OptionExt;
     use futures::StreamExt;
     use reth_db_common::init::init_genesis;
     use reth_evm_ethereum::execute::EthExecutorProvider;
-    use reth_primitives::{Block, BlockExt};
+    use reth_primitives::Block;
+    use reth_primitives_traits::Block as _;
     use reth_provider::{
         providers::BlockchainProvider, test_utils::create_test_provider_factory, BlockWriter,
         Chain, DatabaseProviderFactory, StorageLocation,
@@ -473,10 +473,8 @@ mod tests {
             BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() },
         );
         let provider_rw = provider_factory.provider_rw()?;
-        provider_rw.insert_block(
-            node_head_block.clone().seal_with_senders().ok_or_eyre("failed to recover senders")?,
-            StorageLocation::Database,
-        )?;
+        provider_rw
+            .insert_block(node_head_block.clone().try_recover()?, StorageLocation::Database)?;
         provider_rw.commit()?;
 
         let node_head = Head {
@@ -494,8 +492,7 @@ mod tests {
                     node_head.number + 1,
                     BlockParams { parent: Some(node_head.hash), ..Default::default() },
                 )
-                .seal_with_senders()
-                .ok_or_eyre("failed to recover senders")?],
+                .try_recover()?],
                 Default::default(),
                 None,
             )),
@@ -565,8 +562,7 @@ mod tests {
                     ..Default::default()
                 }
                 .seal_slow()
-                .seal_with_senders()
-                .ok_or_eyre("failed to recover senders")?],
+                .try_recover()?],
                 Default::default(),
                 None,
             )),
@@ -611,8 +607,7 @@ mod tests {
             genesis_block.number + 1,
             BlockParams { parent: Some(genesis_hash), tx_count: Some(0), ..Default::default() },
         )
-        .seal_with_senders::()
-        .ok_or_eyre("failed to recover senders")?;
+        .try_recover()?;
         let node_head = Head {
             number: node_head_block.number,
             hash: node_head_block.hash(),
@@ -638,10 +633,7 @@ mod tests {
         let exex_head = ExExHead { block: exex_head_block.num_hash() };
         let exex_head_notification = ExExNotification::ChainCommitted {
             new: Arc::new(Chain::new(
-                vec![exex_head_block
-                    .clone()
-                    .seal_with_senders()
-                    .ok_or_eyre("failed to recover senders")?],
+                vec![exex_head_block.clone().try_recover()?],
                 Default::default(),
                 None,
             )),
@@ -655,8 +647,7 @@ mod tests {
                     node_head.number + 1,
                     BlockParams { parent: Some(node_head.hash), ..Default::default() },
                 )
-                .seal_with_senders()
-                .ok_or_eyre("failed to recover senders")?],
+                .try_recover()?],
                 Default::default(),
                 None,
             )),
@@ -713,10 +704,7 @@ mod tests {
         );
         let exex_head_notification = ExExNotification::ChainCommitted {
             new: Arc::new(Chain::new(
-                vec![exex_head_block
-                    .clone()
-                    .seal_with_senders()
-                    .ok_or_eyre("failed to recover senders")?],
+                vec![exex_head_block.clone().try_recover()?],
                 Default::default(),
                 None,
             )),
@@ -736,8 +724,7 @@ mod tests {
             genesis_block.number + 1,
             BlockParams { parent: Some(genesis_hash), ..Default::default() },
         )
-        .seal_with_senders()
-        .ok_or_eyre("failed to recover senders")?],
+        .try_recover()?],
             Default::default(),
             None,
         )),
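The manager tests above build notifications from a `Default` `RecoveredBlock` and mutate it in place. A condensed sketch; `set_hash`/`set_block_number` and the `Chain::new` call shape come from the diff, while the concrete `reth_primitives::Block` parameter and the import paths are assumptions:

    use std::sync::Arc;
    use alloy_primitives::B256;
    use reth_primitives::{Block, RecoveredBlock};

    // Sketch: a throwaway block for notification tests. Note the setters now
    // live on RecoveredBlock itself rather than on an inner `.block` field.
    fn test_block(number: u64, byte: u8) -> RecoveredBlock<Block> {
        let mut block: RecoveredBlock<Block> = Default::default();
        block.set_hash(B256::new([byte; 32]));
        block.set_block_number(number);
        block
    }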
diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs
index fb6be6e8c8529..58fb12441ff02 100644
--- a/crates/exex/exex/src/wal/mod.rs
+++ b/crates/exex/exex/src/wal/mod.rs
@@ -233,18 +233,15 @@ where
 
 #[cfg(test)]
 mod tests {
-    use std::sync::Arc;
-
+    use crate::wal::{cache::CachedBlock, Wal};
     use alloy_primitives::B256;
-    use eyre::OptionExt;
     use itertools::Itertools;
     use reth_exex_types::ExExNotification;
     use reth_provider::Chain;
     use reth_testing_utils::generators::{
         self, random_block, random_block_range, BlockParams, BlockRangeParams,
     };
-
-    use crate::wal::{cache::CachedBlock, Wal};
+    use std::sync::Arc;
 
     fn read_notifications(wal: &Wal) -> eyre::Result> {
         wal.inner.storage.files_range()?.map_or(Ok(Vec::new()), |range| {
@@ -279,26 +276,20 @@ mod tests {
         // Create 4 canonical blocks and one reorged block with number 2
         let blocks = random_block_range(&mut rng, 0..=3, BlockRangeParams::default())
             .into_iter()
-            .map(|block| {
-                block
-                    .seal_with_senders::()
-                    .ok_or_eyre("failed to recover senders")
-            })
-            .collect::>>()?;
+            .map(|block| block.try_recover())
+            .collect::, _>>()?;
         let block_1_reorged = random_block(
             &mut rng,
             1,
             BlockParams { parent: Some(blocks[0].hash()), ..Default::default() },
         )
-        .seal_with_senders::()
-        .ok_or_eyre("failed to recover senders")?;
+        .try_recover()?;
         let block_2_reorged = random_block(
             &mut rng,
             2,
             BlockParams { parent: Some(blocks[1].hash()), ..Default::default() },
         )
-        .seal_with_senders::()
-        .ok_or_eyre("failed to recover senders")?;
+        .try_recover()?;
 
         // Create notifications for the above blocks.
         // 1. Committed notification for blocks with number 0 and 1
diff --git a/crates/exex/exex/src/wal/storage.rs b/crates/exex/exex/src/wal/storage.rs
index 5e268ac5ec67a..e437fcd7fa5f0 100644
--- a/crates/exex/exex/src/wal/storage.rs
+++ b/crates/exex/exex/src/wal/storage.rs
@@ -178,14 +178,11 @@ where
 
 #[cfg(test)]
 mod tests {
-    use std::{fs::File, sync::Arc};
-
-    use eyre::OptionExt;
+    use super::Storage;
     use reth_exex_types::ExExNotification;
     use reth_provider::Chain;
     use reth_testing_utils::generators::{self, random_block};
-
-    use super::Storage;
+    use std::{fs::File, sync::Arc};
 
     #[test]
     fn test_roundtrip() -> eyre::Result<()> {
@@ -194,12 +191,8 @@ mod tests {
         let temp_dir = tempfile::tempdir()?;
         let storage: Storage = Storage::new(&temp_dir)?;
 
-        let old_block = random_block(&mut rng, 0, Default::default())
-            .seal_with_senders()
-            .ok_or_eyre("failed to recover senders")?;
-        let new_block = random_block(&mut rng, 0, Default::default())
-            .seal_with_senders()
-            .ok_or_eyre("failed to recover senders")?;
+        let old_block = random_block(&mut rng, 0, Default::default()).try_recover()?;
+        let new_block = random_block(&mut rng, 0, Default::default()).try_recover()?;
 
         let notification = ExExNotification::ChainReorged {
             new: Arc::new(Chain::new(vec![new_block], Default::default(), None)),
diff --git a/crates/exex/test-utils/Cargo.toml b/crates/exex/test-utils/Cargo.toml
index be3d40c7f2cf1..1bb82e97f7db9 100644
--- a/crates/exex/test-utils/Cargo.toml
+++ b/crates/exex/test-utils/Cargo.toml
@@ -27,6 +27,7 @@ reth-node-builder = { workspace = true, features = ["test-utils"] }
 reth-node-ethereum.workspace = true
 reth-payload-builder.workspace = true
 reth-primitives.workspace = true
+reth-primitives-traits.workspace = true
 reth-provider = { workspace = true, features = ["test-utils"] }
 reth-tasks.workspace = true
 reth-transaction-pool = { workspace = true, features = ["test-utils"] }
diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs
index bbb8c6710edc2..60970999bfa0b 100644
--- a/crates/exex/test-utils/src/lib.rs
+++ b/crates/exex/test-utils/src/lib.rs
@@ -45,12 +45,14 @@ use reth_node_ethereum::{
     EthEngineTypes, EthEvmConfig,
 };
 use reth_payload_builder::noop::NoopPayloadBuilderService;
-use reth_primitives::{BlockExt, EthPrimitives, Head, SealedBlockWithSenders, TransactionSigned};
-use reth_provider::{providers::StaticFileProvider, BlockReader, EthStorage, ProviderFactory};
+use reth_primitives::{EthPrimitives, Head, RecoveredBlock, TransactionSigned};
+use reth_primitives_traits::Block as _;
+use reth_provider::{
+    providers::{BlockchainProvider, StaticFileProvider},
+    BlockReader, EthStorage, ProviderFactory,
+};
 use reth_tasks::TaskManager;
 use reth_transaction_pool::test_utils::{testing_pool, TestPool};
-
-use reth_provider::providers::BlockchainProvider;
 use tempfile::TempDir;
 use thiserror::Error;
 use tokio::sync::mpsc::{Sender, UnboundedReceiver};
@@ -185,7 +187,7 @@ pub type TestExExContext = ExExContext;
 #[derive(Debug)]
 pub struct TestExExHandle {
     /// Genesis block that was inserted into the storage
-    pub genesis: SealedBlockWithSenders,
+    pub genesis: RecoveredBlock,
     /// Provider Factory for accessing the emphemeral storage of the host node
     pub provider_factory: ProviderFactory>,
     /// Channel for receiving events from the Execution Extension
@@ -304,8 +306,7 @@ pub async fn test_exex_context_with_chain_spec(
         .block_by_hash(genesis_hash)?
         .ok_or_else(|| eyre::eyre!("genesis block not found"))?
         .seal_slow()
-        .seal_with_senders::()
-        .ok_or_else(|| eyre::eyre!("failed to recover senders"))?;
+        .try_recover()?;
 
     let head = Head {
         number: genesis.number,
diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs
index 19e47c0a1da85..fb3d6c5069570 100644
--- a/crates/exex/types/src/notification.rs
+++ b/crates/exex/types/src/notification.rs
@@ -174,7 +174,7 @@ pub(super) mod serde_bincode_compat {
     use arbitrary::Arbitrary;
     use rand::Rng;
     use reth_execution_types::Chain;
-    use reth_primitives::SealedBlockWithSenders;
+    use reth_primitives::RecoveredBlock;
     use serde::{Deserialize, Serialize};
     use serde_with::serde_as;
     use std::sync::Arc;
@@ -193,18 +193,14 @@ pub(super) mod serde_bincode_compat {
         let data = Data {
             notification: ExExNotification::ChainReorged {
                 old: Arc::new(Chain::new(
-                    vec![SealedBlockWithSenders::arbitrary(&mut arbitrary::Unstructured::new(
-                        &bytes,
-                    ))
-                    .unwrap()],
+                    vec![RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes))
+                        .unwrap()],
                     Default::default(),
                     None,
                 )),
                 new: Arc::new(Chain::new(
-                    vec![SealedBlockWithSenders::arbitrary(&mut arbitrary::Unstructured::new(
-                        &bytes,
-                    ))
-                    .unwrap()],
+                    vec![RecoveredBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes))
+                        .unwrap()],
                     Default::default(),
                     None,
                 )),
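Across the exex changes above, `seal_with_senders()` (which returned an `Option` that tests unwrapped with `ok_or_eyre`) is replaced by `try_recover()`, which returns a `Result` and composes with `?`. A minimal sketch of the new call, using only the method shown in the diff:

    use reth_primitives::{Block, RecoveredBlock, SealedBlock};

    // Sketch: `try_recover()` recovers transaction senders from signatures,
    // turning a sealed block into a RecoveredBlock or a recovery error.
    fn recover(block: SealedBlock<Block>) -> eyre::Result<RecoveredBlock<Block>> {
        Ok(block.try_recover()?)
    }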
diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs
index 9aed7d3b698ab..93efd6dd93e22 100644
--- a/crates/net/downloaders/src/bodies/bodies.rs
+++ b/crates/net/downloaders/src/bodies/bodies.rs
@@ -15,7 +15,7 @@ use reth_network_p2p::{
     error::{DownloadError, DownloadResult},
 };
 use reth_primitives::SealedHeader;
-use reth_primitives_traits::size::InMemorySize;
+use reth_primitives_traits::{size::InMemorySize, Block};
 use reth_storage_api::HeaderProvider;
 use reth_tasks::{TaskSpawner, TokioTaskExecutor};
 use std::{
@@ -35,11 +35,15 @@ use tracing::info;
 /// All blocks in a batch are fetched at the same time.
 #[must_use = "Stream does nothing unless polled"]
 #[derive(Debug)]
-pub struct BodiesDownloader {
+pub struct BodiesDownloader<
+    B: Block,
+    C: BodiesClient,
+    Provider: HeaderProvider,
+> {
     /// The bodies client
-    client: Arc,
+    client: Arc,
     /// The consensus client
-    consensus: Arc>,
+    consensus: Arc>,
     /// The database handle
     provider: Provider,
     /// The maximum number of non-empty blocks per one request
@@ -57,19 +61,20 @@ pub struct BodiesDownloader {
     /// The latest block number returned.
     latest_queued_block_number: Option,
     /// Requests in progress
-    in_progress_queue: BodiesRequestQueue,
+    in_progress_queue: BodiesRequestQueue,
     /// Buffered responses
-    buffered_responses: BinaryHeap>,
+    buffered_responses: BinaryHeap>,
     /// Queued body responses that can be returned for insertion into the database.
-    queued_bodies: Vec>,
+    queued_bodies: Vec>,
     /// The bodies downloader metrics.
     metrics: BodyDownloaderMetrics,
 }
 
-impl BodiesDownloader
+impl BodiesDownloader
 where
-    B: BodiesClient + 'static,
-    Provider: HeaderProvider + Unpin + 'static,
+    B: Block,
+    C: BodiesClient + 'static,
+    Provider: HeaderProvider + Unpin + 'static,
 {
     /// Returns the next contiguous request.
     fn next_headers_request(&self) -> DownloadResult>>> {
@@ -97,7 +102,7 @@ where
         &self,
         range: RangeInclusive,
         max_non_empty: u64,
-    ) -> DownloadResult>>> {
+    ) -> DownloadResult>>> {
         if range.is_empty() || max_non_empty == 0 {
             return Ok(None)
         }
@@ -193,16 +198,14 @@ where
     }
 
     /// Queues bodies and sets the latest queued block number
-    fn queue_bodies(&mut self, bodies: Vec>) {
+    fn queue_bodies(&mut self, bodies: Vec>) {
         self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number());
         self.queued_bodies.extend(bodies);
         self.metrics.queued_blocks.set(self.queued_bodies.len() as f64);
     }
 
     /// Removes the next response from the buffer.
-    fn pop_buffered_response(
-        &mut self,
-    ) -> Option> {
+    fn pop_buffered_response(&mut self) -> Option> {
         let resp = self.buffered_responses.pop()?;
         self.metrics.buffered_responses.decrement(1.);
         self.buffered_blocks_size_bytes -= resp.size();
@@ -212,10 +215,10 @@ where
     }
 
     /// Adds a new response to the internal buffer
-    fn buffer_bodies_response(&mut self, response: Vec>) {
+    fn buffer_bodies_response(&mut self, response: Vec>) {
         // take into account capacity
         let size = response.iter().map(BlockResponse::size).sum::() +
-            response.capacity() * mem::size_of::>();
+            response.capacity() * mem::size_of::>();
 
         let response = OrderedBodiesResponse { resp: response, size };
         let response_len = response.len();
@@ -229,7 +232,7 @@ where
     }
 
     /// Returns a response if it's first block number matches the next expected.
-    fn try_next_buffered(&mut self) -> Option>> {
+    fn try_next_buffered(&mut self) -> Option>> {
         if let Some(next) = self.buffered_responses.peek() {
             let expected = self.next_expected_block_number();
             let next_block_range = next.block_range();
@@ -255,7 +258,7 @@ where
 
     /// Returns the next batch of block bodies that can be returned if we have enough buffered
     /// bodies
-    fn try_split_next_batch(&mut self) -> Option>> {
+    fn try_split_next_batch(&mut self) -> Option>> {
         if self.queued_bodies.len() >= self.stream_batch_size {
             let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::>();
             self.queued_bodies.shrink_to_fit();
@@ -280,24 +283,19 @@ where
     }
 }
 
-impl BodiesDownloader
+impl BodiesDownloader
 where
-    B: BodiesClient + 'static,
-    Provider: HeaderProvider + Unpin + 'static,
-    Self: BodyDownloader + 'static,
+    B: Block + 'static,
+    C: BodiesClient + 'static,
+    Provider: HeaderProvider + Unpin + 'static,
 {
     /// Spawns the downloader task via [`tokio::task::spawn`]
-    pub fn into_task(
-        self,
-    ) -> TaskDownloader<::Header, ::Body> {
+    pub fn into_task(self) -> TaskDownloader {
         self.into_task_with(&TokioTaskExecutor::default())
     }
 
     /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given spawner.
-    pub fn into_task_with(
-        self,
-        spawner: &S,
-    ) -> TaskDownloader<::Header, ::Body>
+    pub fn into_task_with(self, spawner: &S) -> TaskDownloader
     where
         S: TaskSpawner,
     {
@@ -305,13 +303,13 @@ where
     }
 }
 
-impl BodyDownloader for BodiesDownloader
+impl BodyDownloader for BodiesDownloader
 where
-    B: BodiesClient + 'static,
-    Provider: HeaderProvider + Unpin + 'static,
+    B: Block + 'static,
+    C: BodiesClient + 'static,
+    Provider: HeaderProvider + Unpin + 'static,
 {
-    type Header = Provider::Header;
-    type Body = B::Body;
+    type Block = B;
 
     /// Set a new download range (exclusive).
     ///
@@ -356,12 +354,13 @@ where
     }
 }
 
-impl Stream for BodiesDownloader
+impl Stream for BodiesDownloader
 where
-    B: BodiesClient + 'static,
-    Provider: HeaderProvider + Unpin + 'static,
+    B: Block + 'static,
+    C: BodiesClient + 'static,
+    Provider: HeaderProvider + Unpin + 'static,
 {
-    type Item = BodyDownloaderResult;
+    type Item = BodyDownloaderResult;
 
     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
         let this = self.get_mut();
@@ -443,13 +442,13 @@ where
 }
 
 #[derive(Debug)]
-struct OrderedBodiesResponse {
-    resp: Vec>,
+struct OrderedBodiesResponse {
+    resp: Vec>,
     /// The total size of the response in bytes
     size: usize,
 }
 
-impl OrderedBodiesResponse {
+impl OrderedBodiesResponse {
     #[inline]
     fn len(&self) -> usize {
         self.resp.len()
@@ -464,10 +463,7 @@ impl OrderedBodiesResponse {
     }
 }
 
-impl OrderedBodiesResponse
-where
-    H: BlockHeader,
-{
+impl OrderedBodiesResponse {
     /// Returns the block number of the first element
     ///
     /// # Panics
@@ -485,21 +481,21 @@ where
     }
 }
 
-impl PartialEq for OrderedBodiesResponse {
+impl PartialEq for OrderedBodiesResponse {
     fn eq(&self, other: &Self) -> bool {
         self.first_block_number() == other.first_block_number()
     }
 }
 
-impl Eq for OrderedBodiesResponse {}
+impl Eq for OrderedBodiesResponse {}
 
-impl PartialOrd for OrderedBodiesResponse {
+impl PartialOrd for OrderedBodiesResponse {
     fn partial_cmp(&self, other: &Self) -> Option {
         Some(self.cmp(other))
     }
 }
 
-impl Ord for OrderedBodiesResponse {
+impl Ord for OrderedBodiesResponse {
     fn cmp(&self, other: &Self) -> Ordering {
         self.first_block_number().cmp(&other.first_block_number()).reverse()
     }
@@ -576,15 +572,16 @@ impl BodiesDownloaderBuilder {
     }
 
     /// Consume self and return the concurrent downloader.
-    pub fn build(
+    pub fn build(
         self,
-        client: B,
-        consensus: Arc>,
+        client: C,
+        consensus: Arc>,
         provider: Provider,
-    ) -> BodiesDownloader
+    ) -> BodiesDownloader
     where
-        B: BodiesClient + 'static,
-        Provider: HeaderProvider,
+        B: Block,
+        C: BodiesClient + 'static,
+        Provider: HeaderProvider,
     {
         let Self {
             request_limit,
@@ -646,15 +643,16 @@ mod tests {
         );
         let (_static_dir, static_dir_path) = create_test_static_files_dir();
 
-        let mut downloader = BodiesDownloaderBuilder::default().build(
-            client.clone(),
-            Arc::new(TestConsensus::default()),
-            ProviderFactory::::new(
-                db,
-                MAINNET.clone(),
-                StaticFileProvider::read_write(static_dir_path).unwrap(),
-            ),
-        );
+        let mut downloader = BodiesDownloaderBuilder::default()
+            .build::(
+                client.clone(),
+                Arc::new(TestConsensus::default()),
+                ProviderFactory::::new(
+                    db,
+                    MAINNET.clone(),
+                    StaticFileProvider::read_write(static_dir_path).unwrap(),
+                ),
+            );
         downloader.set_download_range(0..=19).expect("failed to set download range");
 
         assert_matches!(
@@ -689,16 +687,17 @@ mod tests {
         let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));
         let (_static_dir, static_dir_path) = create_test_static_files_dir();
-        let mut downloader =
-            BodiesDownloaderBuilder::default().with_request_limit(request_limit).build(
-                client.clone(),
-                Arc::new(TestConsensus::default()),
-                ProviderFactory::::new(
-                    db,
-                    MAINNET.clone(),
-                    StaticFileProvider::read_write(static_dir_path).unwrap(),
-                ),
-            );
+        let mut downloader = BodiesDownloaderBuilder::default()
+            .with_request_limit(request_limit)
+            .build::(
+                client.clone(),
+                Arc::new(TestConsensus::default()),
+                ProviderFactory::::new(
+                    db,
+                    MAINNET.clone(),
+                    StaticFileProvider::read_write(static_dir_path).unwrap(),
+                ),
+            );
         downloader.set_download_range(0..=199).expect("failed to set download range");
 
         let _ = downloader.collect::>().await;
@@ -724,7 +723,7 @@ mod tests {
         let mut downloader = BodiesDownloaderBuilder::default()
             .with_stream_batch_size(stream_batch_size)
             .with_request_limit(request_limit)
-            .build(
+            .build::(
                 client.clone(),
                 Arc::new(TestConsensus::default()),
                 ProviderFactory::::new(
@@ -760,7 +759,9 @@ mod tests {
         let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));
         let (_static_dir, static_dir_path) = create_test_static_files_dir();
 
-        let mut downloader = BodiesDownloaderBuilder::default().with_stream_batch_size(100).build(
+        let mut downloader = BodiesDownloaderBuilder::default()
+            .with_stream_batch_size(100)
+            .build::(
                 client.clone(),
                 Arc::new(TestConsensus::default()),
                 ProviderFactory::::new(
@@ -806,7 +807,7 @@ mod tests {
             .with_stream_batch_size(10)
             .with_request_limit(1)
             .with_max_buffered_blocks_size_bytes(1)
-            .build(
+            .build::(
                 client.clone(),
                 Arc::new(TestConsensus::default()),
                 ProviderFactory::::new(
@@ -843,7 +844,7 @@ mod tests {
         let mut downloader = BodiesDownloaderBuilder::default()
             .with_request_limit(3)
             .with_stream_batch_size(100)
-            .build(
+            .build::(
                 client.clone(),
                 Arc::new(TestConsensus::default()),
                 ProviderFactory::::new(
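One consequence of the new type parameters shows in the tests above: `build` can no longer infer the block type from the provider, so call sites pin it with a turbofish. The generic arguments were lost in extraction here (`.build::(...)`); they are presumably of the form sketched below, with the client and provider types still inferred:

    // Sketch (not self-contained): the likely shape of the updated call sites.
    //
    //     let mut downloader = BodiesDownloaderBuilder::default()
    //         .build::<reth_primitives::Block, _, _>(
    //             client.clone(),
    //             Arc::new(TestConsensus::default()),
    //             provider_factory,
    //         );
    //     downloader.set_download_range(0..=19)?;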
diff --git a/crates/net/downloaders/src/bodies/noop.rs b/crates/net/downloaders/src/bodies/noop.rs
index b7a9431a4d7bd..bd4e9d9b9ed5e 100644
--- a/crates/net/downloaders/src/bodies/noop.rs
+++ b/crates/net/downloaders/src/bodies/noop.rs
@@ -4,29 +4,26 @@ use reth_network_p2p::{
     bodies::{downloader::BodyDownloader, response::BlockResponse},
     error::{DownloadError, DownloadResult},
 };
+use reth_primitives_traits::Block;
 use std::{fmt::Debug, ops::RangeInclusive};
 
 /// A [`BodyDownloader`] implementation that does nothing.
 #[derive(Debug, Default)]
 #[non_exhaustive]
-pub struct NoopBodiesDownloader {
-    _header: std::marker::PhantomData,
-    _body: std::marker::PhantomData,
+pub struct NoopBodiesDownloader {
+    _block: std::marker::PhantomData,
 }
 
-impl
-    BodyDownloader for NoopBodiesDownloader
-{
-    type Body = B;
-    type Header = H;
+impl BodyDownloader for NoopBodiesDownloader {
+    type Block = B;
 
     fn set_download_range(&mut self, _: RangeInclusive) -> DownloadResult<()> {
         Ok(())
     }
 }
 
-impl Stream for NoopBodiesDownloader {
-    type Item = Result>, DownloadError>;
+impl Stream for NoopBodiesDownloader {
+    type Item = Result>, DownloadError>;
 
     fn poll_next(
         self: std::pin::Pin<&mut Self>,
diff --git a/crates/net/downloaders/src/bodies/queue.rs b/crates/net/downloaders/src/bodies/queue.rs
index b9f63b143ac2a..73e4c68e88c54 100644
--- a/crates/net/downloaders/src/bodies/queue.rs
+++ b/crates/net/downloaders/src/bodies/queue.rs
@@ -10,7 +10,7 @@ use reth_network_p2p::{
     error::DownloadResult,
 };
 use reth_primitives::SealedHeader;
-use reth_primitives_traits::InMemorySize;
+use reth_primitives_traits::Block;
 use std::{
     pin::Pin,
     sync::Arc,
@@ -20,19 +20,19 @@ use std::{
 /// The wrapper around [`FuturesUnordered`] that keeps information
 /// about the blocks currently being requested.
 #[derive(Debug)]
-pub(crate) struct BodiesRequestQueue {
+pub(crate) struct BodiesRequestQueue> {
     /// Inner body request queue.
-    inner: FuturesUnordered>,
+    inner: FuturesUnordered>,
     /// The downloader metrics.
     metrics: BodyDownloaderMetrics,
     /// Last requested block number.
     pub(crate) last_requested_block_number: Option,
 }
 
-impl BodiesRequestQueue
+impl BodiesRequestQueue
 where
-    B: BodiesClient + 'static,
-    H: BlockHeader,
+    B: Block,
+    C: BodiesClient + 'static,
 {
     /// Create new instance of request queue.
     pub(crate) fn new(metrics: BodyDownloaderMetrics) -> Self {
@@ -58,9 +58,9 @@ where
     /// Expects a sorted list of headers.
     pub(crate) fn push_new_request(
         &mut self,
-        client: Arc,
-        consensus: Arc>,
-        request: Vec>,
+        client: Arc,
+        consensus: Arc>,
+        request: Vec>,
     ) {
         // Set last max requested block number
         self.last_requested_block_number = request
@@ -78,12 +78,12 @@ where
     }
 }
 
-impl Stream for BodiesRequestQueue
+impl Stream for BodiesRequestQueue
 where
-    H: BlockHeader + Send + Sync + Unpin + 'static,
-    B: BodiesClient + 'static,
+    B: Block + 'static,
+    C: BodiesClient + 'static,
 {
-    type Item = DownloadResult>>;
+    type Item = DownloadResult>>;
 
     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
         self.get_mut().inner.poll_next_unpin(cx)
diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs
index f8c93a2a78e33..aa455f57900b8 100644
--- a/crates/net/downloaders/src/bodies/request.rs
+++ b/crates/net/downloaders/src/bodies/request.rs
@@ -10,7 +10,7 @@ use reth_network_p2p::{
 };
 use reth_network_peers::{PeerId, WithPeerId};
 use reth_primitives::{BlockBody, GotExpected, SealedBlock, SealedHeader};
-use reth_primitives_traits::InMemorySize;
+use reth_primitives_traits::{Block, InMemorySize};
 use std::{
     collections::VecDeque,
     mem,
@@ -38,31 +38,31 @@ use std::{
 /// All errors regarding the response cause the peer to get penalized, meaning that adversaries
 /// that try to give us bodies that do not match the requested order are going to be penalized
 /// and eventually disconnected.
-pub(crate) struct BodiesRequestFuture {
-    client: Arc,
-    consensus: Arc>,
+pub(crate) struct BodiesRequestFuture> {
+    client: Arc,
+    consensus: Arc>,
     metrics: BodyDownloaderMetrics,
     /// Metrics for individual responses. This can be used to observe how the size (in bytes) of
     /// responses change while bodies are being downloaded.
     response_metrics: ResponseMetrics,
     // Headers to download. The collection is shrunk as responses are buffered.
-    pending_headers: VecDeque>,
+    pending_headers: VecDeque>,
     /// Internal buffer for all blocks
-    buffer: Vec>,
-    fut: Option,
+    buffer: Vec>,
+    fut: Option,
     /// Tracks how many bodies we requested in the last request.
     last_request_len: Option,
 }
 
-impl BodiesRequestFuture
+impl BodiesRequestFuture
 where
-    H: BlockHeader,
-    B: BodiesClient + 'static,
+    B: Block,
+    C: BodiesClient + 'static,
 {
     /// Returns an empty future. Use [`BodiesRequestFuture::with_headers`] to set the request.
     pub(crate) fn new(
-        client: Arc,
-        consensus: Arc>,
+        client: Arc,
+        consensus: Arc>,
         metrics: BodyDownloaderMetrics,
     ) -> Self {
         Self {
@@ -77,7 +77,7 @@ where
         }
     }
 
-    pub(crate) fn with_headers(mut self, headers: Vec>) -> Self {
+    pub(crate) fn with_headers(mut self, headers: Vec>) -> Self {
         self.buffer.reserve_exact(headers.len());
         self.pending_headers = VecDeque::from(headers);
         // Submit the request only if there are any headers to download.
@@ -163,9 +163,9 @@ where
     ///
     /// This method removes headers from the internal collection.
     /// If the response fails validation, then the header will be put back.
-    fn try_buffer_blocks(&mut self, bodies: Vec) -> DownloadResult<()>
+    fn try_buffer_blocks(&mut self, bodies: Vec) -> DownloadResult<()>
     where
-        B::Body: InMemorySize,
+        C::Body: InMemorySize,
     {
         let bodies_capacity = bodies.capacity();
         let bodies_len = bodies.len();
@@ -180,7 +180,7 @@ where
 
             if next_header.is_empty() {
                 // increment empty block body metric
-                total_size += mem::size_of::();
+                total_size += mem::size_of::();
                 self.buffer.push(BlockResponse::Empty(next_header));
             } else {
                 let next_body = bodies.next().unwrap();
@@ -188,7 +188,7 @@ where
                 // increment full block body metric
                 total_size += next_body.size();
 
-                let block = SealedBlock::new(next_header, next_body);
+                let block = SealedBlock::from_sealed_parts(next_header, next_body);
 
                 if let Err(error) = self.consensus.validate_block_pre_execution(&block) {
                     // Body is invalid, put the header back and return an error
@@ -214,12 +214,12 @@ where
     }
 }
 
-impl Future for BodiesRequestFuture
+impl Future for BodiesRequestFuture
 where
-    H: BlockHeader + Unpin + Send + Sync + 'static,
-    B: BodiesClient + 'static,
+    B: Block + 'static,
+    C: BodiesClient + 'static,
 {
-    type Output = DownloadResult>>;
+    type Output = DownloadResult>>;
 
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll {
         let this = self.get_mut();
@@ -275,7 +275,7 @@ mod tests {
         let headers = random_header_range(&mut rng, 0..20, B256::ZERO);
         let client = Arc::new(TestBodiesClient::default());
-        let fut = BodiesRequestFuture::::new(
+        let fut = BodiesRequestFuture::::new(
             client.clone(),
             Arc::new(TestConsensus::default()),
             BodyDownloaderMetrics::default(),
@@ -299,7 +299,7 @@ mod tests {
         let client = Arc::new(
             TestBodiesClient::default().with_bodies(bodies.clone()).with_max_batch_size(batch_size),
         );
-        let fut = BodiesRequestFuture::::new(
+        let fut = BodiesRequestFuture::::new(
             client.clone(),
             Arc::new(TestConsensus::default()),
             BodyDownloaderMetrics::default(),
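`try_buffer_blocks` above pairs each returned body with the oldest pending header and short-circuits headers that carry no transactions, ommers, or withdrawals. The core pairing logic, sketched with the `BlockResponse` variants and the `SealedBlock::from_sealed_parts` constructor this diff introduces (import paths are assumptions):

    use reth_network_p2p::bodies::response::BlockResponse;
    use reth_primitives::{Block, BlockBody, SealedBlock, SealedHeader};

    // Sketch: an empty header never consumes a downloaded body; every other
    // header is zipped with the next body into a full sealed block.
    fn to_response(header: SealedHeader, body: Option<BlockBody>) -> BlockResponse<Block> {
        match body {
            None => BlockResponse::Empty(header),
            Some(body) => BlockResponse::Full(SealedBlock::from_sealed_parts(header, body)),
        }
    }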
diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs
index 863c889532c3c..d3093aec49c23 100644
--- a/crates/net/downloaders/src/bodies/task.rs
+++ b/crates/net/downloaders/src/bodies/task.rs
@@ -6,6 +6,7 @@ use reth_network_p2p::{
     bodies::downloader::{BodyDownloader, BodyDownloaderResult},
     error::DownloadResult,
 };
+use reth_primitives_traits::Block;
 use reth_tasks::{TaskSpawner, TokioTaskExecutor};
 use std::{
     fmt::Debug,
@@ -24,15 +25,13 @@ pub const BODIES_TASK_BUFFER_SIZE: usize = 4;
 /// A [BodyDownloader] that drives a spawned [BodyDownloader] on a spawned task.
 #[derive(Debug)]
 #[pin_project]
-pub struct TaskDownloader {
+pub struct TaskDownloader {
     #[pin]
-    from_downloader: ReceiverStream>,
+    from_downloader: ReceiverStream>,
     to_downloader: UnboundedSender>,
 }
 
-// === impl TaskDownloader ===
-
-impl TaskDownloader {
+impl TaskDownloader {
     /// Spawns the given `downloader` via [`tokio::task::spawn`] returns a [`TaskDownloader`] that's
     /// connected to that task.
     ///
@@ -46,25 +45,27 @@ impl TaskDow
     /// use reth_consensus::{Consensus, ConsensusError};
     /// use reth_downloaders::bodies::{bodies::BodiesDownloaderBuilder, task::TaskDownloader};
     /// use reth_network_p2p::bodies::client::BodiesClient;
-    /// use reth_primitives_traits::InMemorySize;
+    /// use reth_primitives_traits::{Block, InMemorySize};
     /// use reth_storage_api::HeaderProvider;
     /// use std::{fmt::Debug, sync::Arc};
     ///
     /// fn t<
-    ///     B: BodiesClient + 'static,
-    ///     Provider: HeaderProvider + Unpin + 'static,
+    ///     B: Block + 'static,
+    ///     C: BodiesClient + 'static,
+    ///     Provider: HeaderProvider + Unpin + 'static,
     /// >(
-    ///     client: Arc,
-    ///     consensus: Arc>,
+    ///     client: Arc,
+    ///     consensus: Arc>,
     ///     provider: Provider,
     /// ) {
-    ///     let downloader = BodiesDownloaderBuilder::default().build(client, consensus, provider);
+    ///     let downloader =
+    ///         BodiesDownloaderBuilder::default().build::(client, consensus, provider);
     ///     let downloader = TaskDownloader::spawn(downloader);
     /// }
     /// ```
     pub fn spawn(downloader: T) -> Self
     where
-        T: BodyDownloader + 'static,
+        T: BodyDownloader + 'static,
     {
         Self::spawn_with(downloader, &TokioTaskExecutor::default())
     }
@@ -73,7 +74,7 @@ impl TaskDow
     /// that's connected to that task.
     pub fn spawn_with(downloader: T, spawner: &S) -> Self
     where
-        T: BodyDownloader + 'static,
+        T: BodyDownloader + 'static,
         S: TaskSpawner,
     {
         let (bodies_tx, bodies_rx) = mpsc::channel(BODIES_TASK_BUFFER_SIZE);
@@ -91,11 +92,8 @@ impl TaskDow
     }
 }
 
-impl
-    BodyDownloader for TaskDownloader
-{
-    type Header = H;
-    type Body = B;
+impl BodyDownloader for TaskDownloader {
+    type Block = B;
 
     fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()> {
         let _ = self.to_downloader.send(range);
@@ -103,8 +101,8 @@ impl
-impl Stream for TaskDownloader {
-    type Item = BodyDownloaderResult;
+impl Stream for TaskDownloader {
+    type Item = BodyDownloaderResult;
 
     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
         self.project().from_downloader.poll_next(cx)
@@ -114,7 +112,7 @@ impl Stream for TaskDownloader {
 /// A [`BodyDownloader`] that runs on its own task
 struct SpawnedDownloader {
     updates: UnboundedReceiverStream>,
-    bodies_tx: PollSender>,
+    bodies_tx: PollSender>,
     downloader: T,
 }
 
@@ -197,7 +195,7 @@ mod tests {
         let client = Arc::new(
             TestBodiesClient::default().with_bodies(bodies.clone()).with_should_delay(true),
         );
-        let downloader = BodiesDownloaderBuilder::default().build(
+        let downloader = BodiesDownloaderBuilder::default().build::(
             client.clone(),
             Arc::new(TestConsensus::default()),
             factory,
@@ -219,7 +217,7 @@ mod tests {
         reth_tracing::init_test_tracing();
 
         let factory = create_test_provider_factory();
-        let downloader = BodiesDownloaderBuilder::default().build(
+        let downloader = BodiesDownloaderBuilder::default().build::(
             Arc::new(TestBodiesClient::default()),
             Arc::new(TestConsensus::default()),
             factory,
diff --git a/crates/net/downloaders/src/bodies/test_utils.rs b/crates/net/downloaders/src/bodies/test_utils.rs
index 6ca012f9c684c..eb87f1c2b4cb0 100644
--- a/crates/net/downloaders/src/bodies/test_utils.rs
+++ b/crates/net/downloaders/src/bodies/test_utils.rs
@@ -7,13 +7,14 @@ use alloy_primitives::B256;
 use reth_db::{tables, DatabaseEnv};
 use reth_db_api::{database::Database, transaction::DbTxMut};
 use reth_network_p2p::bodies::response::BlockResponse;
-use reth_primitives::{Block, BlockBody, SealedBlock, SealedHeader};
+use reth_primitives::{BlockBody, SealedBlock, SealedHeader};
+use reth_primitives_traits::Block;
 use std::collections::HashMap;
 
-pub(crate) fn zip_blocks<'a, H: Clone + BlockHeader + 'a, B>(
-    headers: impl Iterator>,
-    bodies: &mut HashMap,
-) -> Vec> {
+pub(crate) fn zip_blocks<'a, B: Block>(
+    headers: impl Iterator>,
+    bodies: &mut HashMap,
+) -> Vec> {
     headers
         .into_iter()
         .map(|header| {
@@ -21,7 +22,7 @@ pub(crate) fn zip_blocks<'a, H: Clone + BlockHeader + 'a, B>(
             if header.is_empty() {
                 BlockResponse::Empty(header.clone())
             } else {
-                BlockResponse::Full(SealedBlock::new(header.clone(), body))
+                BlockResponse::Full(SealedBlock::from_sealed_parts(header.clone(), body))
             }
         })
         .collect()
@@ -30,7 +31,7 @@ pub(crate) fn zip_blocks<'a, H: Clone + BlockHeader + 'a, B>(
 pub(crate) fn create_raw_bodies(
     headers: impl IntoIterator,
     bodies: &mut HashMap,
-) -> Vec {
+) -> Vec {
     headers
         .into_iter()
         .map(|header| {
@@ -45,7 +46,7 @@ pub(crate) fn insert_headers(db: &DatabaseEnv, headers: &[SealedHeader]) {
     db.update(|tx| {
         for header in headers {
             tx.put::(header.number, header.hash()).unwrap();
-            tx.put::(header.number, header.clone().unseal()).unwrap();
+            tx.put::(header.number, header.clone_header()).unwrap();
         }
     })
     .expect("failed to commit")
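The doc example in the task.rs hunk above spawns the downloader onto its own task. A condensed restatement (the turbofish arguments lost in extraction are assumed, and the rationale is an inference from the channel plumbing shown in the diff, not a stated design note):

    // Sketch, condensed from the doc example above:
    //
    //     let downloader = BodiesDownloaderBuilder::default()
    //         .build::<B, _, _>(client, consensus, provider);
    //     let downloader = TaskDownloader::spawn(downloader);
    //
    // `spawn` moves the inner downloader onto a tokio task and leaves a thin
    // handle that forwards `set_download_range` over an unbounded channel and
    // receives results through a small buffered channel, so the network-facing
    // polling presumably cannot be blocked by a slow consumer.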
diff --git a/crates/net/downloaders/src/file_client.rs b/crates/net/downloaders/src/file_client.rs
index 9230af5415198..4cc9f0eaa1447 100644
--- a/crates/net/downloaders/src/file_client.rs
+++ b/crates/net/downloaders/src/file_client.rs
@@ -11,6 +11,7 @@ use reth_network_p2p::{
     error::RequestError,
     headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest},
     priority::Priority,
+    BlockClient,
 };
 use reth_network_peers::PeerId;
 use reth_primitives::SealedHeader;
@@ -40,7 +41,7 @@ pub const DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE: u64 = 1_000_000_000;
 /// transactions in memory for use in the bodies stage.
 ///
 /// This reads the entire file into memory, so it is not suitable for large files.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct FileClient {
     /// The buffered headers retrieved when fetching new bodies.
     headers: HashMap,
@@ -116,7 +117,7 @@ impl FileClient {
     /// Clones and returns the highest header of this client has or `None` if empty. Seals header
     /// before returning.
     pub fn tip_header(&self) -> Option> {
-        self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal(h.clone()))
+        self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal_slow(h.clone()))
     }
 
     /// Returns true if all blocks are canonical (no gaps)
@@ -350,6 +351,10 @@ impl DownloadClient for FileClient {
     }
 }
 
+impl BlockClient for FileClient {
+    type Block = B;
+}
+
 /// Chunks file into several [`FileClient`]s.
 #[derive(Debug)]
 pub struct ChunkedFileReader {
@@ -546,11 +551,12 @@ mod tests {
         let client: Arc =
             Arc::new(FileClient::from_file(file.into()).await.unwrap().with_bodies(bodies.clone()));
-        let mut downloader = BodiesDownloaderBuilder::default().build(
-            client.clone(),
-            Arc::new(TestConsensus::default()),
-            factory,
-        );
+        let mut downloader = BodiesDownloaderBuilder::default()
+            .build::(
+                client.clone(),
+                Arc::new(TestConsensus::default()),
+                factory,
+            );
         downloader.set_download_range(0..=19).expect("failed to set download range");
 
         assert_matches!(
@@ -571,10 +577,10 @@ mod tests {
         let file = tempfile::tempfile().unwrap();
         let client: Arc = Arc::new(
             FileClient::from_file(file.into()).await.unwrap().with_headers(HashMap::from([
-                (0u64, p0.clone().unseal()),
-                (1, p1.clone().unseal()),
-                (2, p2.clone().unseal()),
-                (3, p3.clone().unseal()),
+                (0u64, p0.clone_header()),
+                (1, p1.clone_header()),
+                (2, p2.clone_header()),
+                (3, p3.clone_header()),
             ])),
         );
 
@@ -628,11 +634,12 @@ mod tests {
         // insert headers in db for the bodies downloader
         insert_headers(factory.db_ref().db(), &headers);
 
-        let mut downloader = BodiesDownloaderBuilder::default().build(
-            client.clone(),
-            Arc::new(TestConsensus::default()),
-            factory,
-        );
+        let mut downloader = BodiesDownloaderBuilder::default()
+            .build::(
+                client.clone(),
+                Arc::new(TestConsensus::default()),
+                factory,
+            );
         downloader.set_download_range(0..=19).expect("failed to set download range");
 
         assert_matches!(
diff --git a/crates/net/downloaders/src/headers/noop.rs b/crates/net/downloaders/src/headers/noop.rs
index e9dee56dd2e11..45a0656c4aa8f 100644
--- a/crates/net/downloaders/src/headers/noop.rs
+++ b/crates/net/downloaders/src/headers/noop.rs
@@ -1,3 +1,4 @@
+use alloy_primitives::Sealable;
 use futures::Stream;
 use reth_network_p2p::headers::{
     downloader::{HeaderDownloader, SyncTarget},
@@ -11,7 +12,9 @@ use std::fmt::Debug;
 #[non_exhaustive]
 pub struct NoopHeaderDownloader(std::marker::PhantomData);
 
-impl HeaderDownloader for NoopHeaderDownloader {
+impl HeaderDownloader
+    for NoopHeaderDownloader
+{
     type Header = H;
 
     fn update_local_head(&mut self, _: SealedHeader) {}
@@ -21,7 +24,7 @@ impl HeaderDownloader for NoopHeaderDo
     fn set_batch_size(&mut self, _: usize) {}
 }
 
-impl Stream for NoopHeaderDownloader {
+impl Stream for NoopHeaderDownloader {
     type Item = Result>, HeadersDownloaderError>;
 
     fn poll_next(
diff --git a/crates/net/downloaders/src/headers/reverse_headers.rs b/crates/net/downloaders/src/headers/reverse_headers.rs
index be359134e79d6..95cbe1fad45e8 100644
--- a/crates/net/downloaders/src/headers/reverse_headers.rs
+++ b/crates/net/downloaders/src/headers/reverse_headers.rs
@@ -4,7 +4,7 @@ use super::task::TaskDownloader;
 use crate::metrics::HeaderDownloaderMetrics;
 use alloy_consensus::BlockHeader;
 use alloy_eips::BlockHashOrNumber;
-use alloy_primitives::{BlockNumber, B256};
+use alloy_primitives::{BlockNumber, Sealable, B256};
 use futures::{stream::Stream, FutureExt};
 use futures_util::{stream::FuturesUnordered, StreamExt};
 use rayon::prelude::*;
@@ -40,14 +40,14 @@ const REQUESTS_PER_PEER_MULTIPLIER: usize = 5;
 
 /// Wrapper for internal downloader errors.
 #[derive(Error, Debug)]
-enum ReverseHeadersDownloaderError {
+enum ReverseHeadersDownloaderError {
     #[error(transparent)]
     Downloader(#[from] HeadersDownloaderError),
     #[error(transparent)]
     Response(#[from] Box),
 }
 
-impl From for ReverseHeadersDownloaderError {
+impl From for ReverseHeadersDownloaderError {
     fn from(value: HeadersResponseError) -> Self {
         Self::Response(Box::new(value))
     }
@@ -251,7 +251,8 @@ where
     ) -> Result<(), ReverseHeadersDownloaderError> {
         let mut validated = Vec::with_capacity(headers.len());
 
-        let sealed_headers = headers.into_par_iter().map(SealedHeader::seal).collect::>();
+        let sealed_headers =
+            headers.into_par_iter().map(SealedHeader::seal_slow).collect::>();
         for parent in sealed_headers {
             // Validate that the header is the parent header of the last validated header.
             if let Some(validated_header) =
@@ -378,7 +379,7 @@ where
         }
 
         let header = headers.swap_remove(0);
-        let target = SealedHeader::seal(header);
+        let target = SealedHeader::seal_slow(header);
 
         match sync_target {
             SyncTargetBlock::Hash(hash) | SyncTargetBlock::HashAndNumber { hash, .. } => {
diff --git a/crates/net/downloaders/src/headers/task.rs b/crates/net/downloaders/src/headers/task.rs
index 3dbfd5e3615e9..f2084de872828 100644
--- a/crates/net/downloaders/src/headers/task.rs
+++ b/crates/net/downloaders/src/headers/task.rs
@@ -1,3 +1,4 @@
+use alloy_primitives::Sealable;
 use futures::{FutureExt, Stream};
 use futures_util::StreamExt;
 use pin_project::pin_project;
@@ -23,7 +24,7 @@ pub const HEADERS_TASK_BUFFER_SIZE: usize = 8;
 /// A [HeaderDownloader] that drives a spawned [HeaderDownloader] on a spawned task.
 #[derive(Debug)]
 #[pin_project]
-pub struct TaskDownloader {
+pub struct TaskDownloader {
     #[pin]
     from_downloader: ReceiverStream>, H>>,
     to_downloader: UnboundedSender>,
@@ -31,7 +32,7 @@ pub struct TaskDownloader {
 
 // === impl TaskDownloader ===
 
-impl TaskDownloader {
+impl TaskDownloader {
     /// Spawns the given `downloader` via [`tokio::task::spawn`] and returns a [`TaskDownloader`]
     /// that's connected to that task.
     ///
@@ -83,7 +84,7 @@ impl TaskDownloader {
     }
 }
 
-impl HeaderDownloader for TaskDownloader {
+impl HeaderDownloader for TaskDownloader {
     type Header = H;
 
     fn update_sync_gap(&mut self, head: SealedHeader, target: SyncTarget) {
@@ -103,7 +104,7 @@ impl HeaderDownloader for TaskDownload
     }
 }
 
-impl Stream for TaskDownloader {
+impl Stream for TaskDownloader {
     type Item = HeadersDownloaderResult>, H>;
 
     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
diff --git a/crates/net/downloaders/src/headers/test_utils.rs b/crates/net/downloaders/src/headers/test_utils.rs
index baea409f20e76..34bcfd43ac3ba 100644
--- a/crates/net/downloaders/src/headers/test_utils.rs
+++ b/crates/net/downloaders/src/headers/test_utils.rs
@@ -9,5 +9,5 @@ pub(crate) fn child_header(parent: &SealedHeader) -> SealedHeader {
     let mut child = parent.as_ref().clone();
     child.number += 1;
     child.parent_hash = parent.hash_slow();
-    SealedHeader::seal(child)
+    SealedHeader::seal_slow(child)
 }
diff --git a/crates/net/network/src/fetch/client.rs b/crates/net/network/src/fetch/client.rs
index e24ea167f5fe6..c043692d26d57 100644
--- a/crates/net/network/src/fetch/client.rs
+++ b/crates/net/network/src/fetch/client.rs
@@ -11,6 +11,7 @@ use reth_network_p2p::{
     error::{PeerRequestResult, RequestError},
     headers::client::{HeadersClient, HeadersRequest},
     priority::Priority,
+    BlockClient,
 };
 use reth_network_peers::PeerId;
 use reth_network_types::ReputationChangeKind;
@@ -96,3 +97,7 @@ impl BodiesClient for FetchClient {
         }
     }
 }
+
+impl BlockClient for FetchClient {
+    type Block = N::Block;
+}
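With `FetchClient` and `FileClient` now naming the block type they serve, any client that already implements `HeadersClient` and `BodiesClient` with matching header and body types opts into full-block fetching with a one-line impl. A sketch (the trait shape comes from the `crates/net/p2p/src/lib.rs` hunk later in this patch; `MyClient` and its supertrait impls, elided here, are hypothetical):

    use reth_network_p2p::BlockClient;

    // Stand-in type; assumed to already implement HeadersClient + BodiesClient
    // with header/body types matching reth_primitives::Block.
    #[derive(Debug, Clone)]
    struct MyClient;

    impl BlockClient for MyClient {
        type Block = reth_primitives::Block;
    }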
diff --git a/crates/net/p2p/src/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs
index ce7827c8e8851..f11473daa96c0 100644
--- a/crates/net/p2p/src/bodies/downloader.rs
+++ b/crates/net/p2p/src/bodies/downloader.rs
@@ -2,10 +2,11 @@ use super::response::BlockResponse;
 use crate::error::DownloadResult;
 use alloy_primitives::BlockNumber;
 use futures::Stream;
-use std::{fmt::Debug, ops::RangeInclusive};
+use reth_primitives_traits::Block;
+use std::ops::RangeInclusive;
 
 /// Body downloader return type.
-pub type BodyDownloaderResult = DownloadResult>>;
+pub type BodyDownloaderResult = DownloadResult>>;
 
 /// A downloader capable of fetching and yielding block bodies from block headers.
 ///
@@ -13,13 +14,10 @@ pub type BodyDownloaderResult = DownloadResult>>;
 /// while a [`BodiesClient`][crate::bodies::client::BodiesClient] represents a client capable of
 /// fulfilling these requests.
 pub trait BodyDownloader:
-    Send + Sync + Stream> + Unpin
+    Send + Sync + Stream> + Unpin
 {
-    /// The type of header that is being used
-    type Header: Debug + Send + Sync + Unpin + 'static;
-
-    /// The type of the body that is being downloaded.
-    type Body: Debug + Send + Sync + Unpin + 'static;
+    /// The Block type this downloader supports
+    type Block: Block + 'static;
 
     /// Method for setting the download range.
     fn set_download_range(&mut self, range: RangeInclusive) -> DownloadResult<()>;
diff --git a/crates/net/p2p/src/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs
index 517c5b879835a..d53ca32eb3391 100644
--- a/crates/net/p2p/src/bodies/response.rs
+++ b/crates/net/p2p/src/bodies/response.rs
@@ -1,31 +1,26 @@
 use alloy_consensus::BlockHeader;
 use alloy_primitives::{BlockNumber, U256};
-use reth_primitives::{BlockBody, SealedBlock, SealedHeader};
-use reth_primitives_traits::InMemorySize;
+use reth_primitives::{SealedBlock, SealedHeader};
+use reth_primitives_traits::{Block, InMemorySize};
 
 /// The block response
 #[derive(PartialEq, Eq, Debug, Clone)]
-pub enum BlockResponse {
+pub enum BlockResponse {
     /// Full block response (with transactions or ommers)
-    Full(SealedBlock),
+    Full(SealedBlock),
     /// The empty block response
-    Empty(SealedHeader),
+    Empty(SealedHeader),
 }
 
-impl BlockResponse
+impl BlockResponse
 where
-    H: BlockHeader,
+    B: Block,
 {
-    /// Return the reference to the response header
-    pub const fn header(&self) -> &SealedHeader {
-        match self {
-            Self::Full(block) => block.sealed_header(),
-            Self::Empty(header) => header,
-        }
-    }
-
     /// Return the block number
     pub fn block_number(&self) -> BlockNumber {
-        self.header().number()
+        match self {
+            Self::Full(block) => block.number(),
+            Self::Empty(header) => header.number(),
+        }
     }
 
     /// Return the reference to the response header
@@ -37,7 +32,7 @@ where
     }
 
     /// Return the reference to the response body
-    pub fn into_body(self) -> Option {
+    pub fn into_body(self) -> Option {
         match self {
             Self::Full(block) => Some(block.into_body()),
             Self::Empty(_) => None,
@@ -45,7 +40,7 @@ where
     }
 }
 
-impl InMemorySize for BlockResponse {
+impl InMemorySize for BlockResponse {
     #[inline]
     fn size(&self) -> usize {
         match self {
diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs
index 309252bb8f26a..c8b5154cd40e1 100644
--- a/crates/net/p2p/src/full_block.rs
+++ b/crates/net/p2p/src/full_block.rs
@@ -30,7 +30,7 @@ where
     Client: BlockClient,
 {
     client: Client,
-    consensus: Arc>,
+    consensus: Arc>,
 }
 
 impl FullBlockClient
@@ -40,7 +40,7 @@ where
     /// Creates a new instance of `FullBlockClient`.
     pub fn new(
         client: Client,
-        consensus: Arc>,
+        consensus: Arc>,
     ) -> Self {
         Self { client, consensus }
     }
@@ -118,7 +118,7 @@ where
     Client: BlockClient,
 {
     client: Client,
-    consensus: Arc>,
+    consensus: Arc>,
     hash: B256,
     request: FullBlockRequest,
     header: Option>,
@@ -140,7 +140,7 @@ where
     }
 
     /// Returns the [`SealedBlock`] if the request is complete and valid.
-    fn take_block(&mut self) -> Option> {
+    fn take_block(&mut self) -> Option> {
         if self.header.is_none() || self.body.is_none() {
             return None
         }
 
         let header = self.header.take().unwrap();
         let resp = self.body.take().unwrap();
         match resp {
-            BodyResponse::Validated(body) => Some(SealedBlock::new(header, body)),
+            BodyResponse::Validated(body) => Some(SealedBlock::from_sealed_parts(header, body)),
             BodyResponse::PendingValidation(resp) => {
                 // ensure the block is valid, else retry
                 if let Err(err) = self.consensus.validate_body_against_header(resp.data(), &header)
                 {
@@ -159,7 +159,7 @@ where
                     self.request.body = Some(self.client.get_block_body(self.hash));
                     return None
                 }
-                Some(SealedBlock::new(header, resp.into_data()))
+                Some(SealedBlock::from_sealed_parts(header, resp.into_data()))
             }
         }
     }
@@ -182,7 +182,7 @@ impl Future for FetchFullBlockFuture
 where
     Client: BlockClient + 'static,
 {
-    type Output = SealedBlock;
+    type Output = SealedBlock;
 
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll {
         let this = self.get_mut();
@@ -196,7 +196,7 @@ where
             match res {
                 Ok(maybe_header) => {
                     let (peer, maybe_header) =
-                        maybe_header.map(|h| h.map(SealedHeader::seal)).split();
+                        maybe_header.map(|h| h.map(SealedHeader::seal_slow)).split();
                     if let Some(header) = maybe_header {
                         if header.hash() == this.hash {
                             this.header = Some(header);
@@ -330,7 +330,7 @@ where
     /// The client used to fetch headers and bodies.
     client: Client,
     /// The consensus instance used to validate the blocks.
-    consensus: Arc>,
+    consensus: Arc>,
     /// The block hash to start fetching from (inclusive).
     start_hash: B256,
     /// How many blocks to fetch: `len([start_hash, ..]) == count`
@@ -388,7 +388,7 @@ where
     ///
     /// These are returned in falling order starting with the requested `hash`, i.e. with
     /// descending block numbers.
-    fn take_blocks(&mut self) -> Option>> {
+    fn take_blocks(&mut self) -> Option>> {
         if !self.is_bodies_complete() {
             // not done with bodies yet
             return None
         }
@@ -421,7 +421,8 @@ where
                 }
             };
 
-            valid_responses.push(SealedBlock::new(header.clone(), body));
+            valid_responses
+                .push(SealedBlock::::from_sealed_parts(header.clone(), body));
         }
     }
 
@@ -429,7 +430,7 @@ where
         // put response hashes back into bodies map since we aren't returning them as a
         // response
         for block in valid_responses {
-            let (header, body) = block.split();
+            let (header, body) = block.split_sealed_header_body();
             self.bodies.insert(header, BodyResponse::Validated(body));
         }
 
@@ -447,7 +448,7 @@ where
     fn on_headers_response(&mut self, headers: WithPeerId>) {
         let (peer, mut headers_falling) =
-            headers.map(|h| h.into_iter().map(SealedHeader::seal).collect::>()).split();
+            headers.map(|h| h.into_iter().map(SealedHeader::seal_slow).collect::>()).split();
 
         // fill in the response if it's the correct length
         if headers_falling.len() == self.count as usize {
@@ -505,7 +506,7 @@ impl Future for FetchFullBlockRangeFuture
 where
     Client: BlockClient + 'static,
 {
-    type Output = Vec>;
+    type Output = Vec>;
 
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll {
         let this = self.get_mut();
@@ -658,7 +659,7 @@ mod tests {
         let client = FullBlockClient::test_client(client);
 
         let received = client.get_full_block(header.hash()).await;
-        assert_eq!(received, SealedBlock::new(header, body));
+        assert_eq!(received, SealedBlock::from_sealed_parts(header, body));
     }
 
     #[tokio::test]
@@ -671,7 +672,7 @@ mod tests {
 
         let received = client.get_full_block_range(header.hash(), 1).await;
         let received = received.first().expect("response should include a block");
-        assert_eq!(*received, SealedBlock::new(header, body));
+        assert_eq!(*received, SealedBlock::from_sealed_parts(header, body));
     }
 
     /// Inserts headers and returns the last header and block body.
@@ -687,7 +688,7 @@ mod tests {
             header.parent_hash = hash;
             header.number += 1;
 
-            sealed_header = SealedHeader::seal(header);
+            sealed_header = SealedHeader::seal_slow(header);
             client.insert(sealed_header.clone(), body.clone());
         }
 
@@ -703,7 +704,7 @@ mod tests {
 
         let received = client.get_full_block_range(header.hash(), 1).await;
         let received = received.first().expect("response should include a block");
-        assert_eq!(*received, SealedBlock::new(header.clone(), body));
+        assert_eq!(*received, SealedBlock::from_sealed_parts(header.clone(), body));
 
         let received = client.get_full_block_range(header.hash(), 10).await;
         assert_eq!(received.len(), 10);
@@ -722,7 +723,7 @@ mod tests {
 
         let received = client.get_full_block_range(header.hash(), 1).await;
         let received = received.first().expect("response should include a block");
-        assert_eq!(*received, SealedBlock::new(header.clone(), body));
+        assert_eq!(*received, SealedBlock::from_sealed_parts(header.clone(), body));
 
         let received = client.get_full_block_range(header.hash(), 50).await;
         assert_eq!(received.len(), 50);
diff --git a/crates/net/p2p/src/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs
index 1bc76924a6c8d..1dc2f691af38c 100644
--- a/crates/net/p2p/src/headers/downloader.rs
+++ b/crates/net/p2p/src/headers/downloader.rs
@@ -1,11 +1,10 @@
 use super::error::HeadersDownloaderResult;
 use crate::error::{DownloadError, DownloadResult};
-use alloy_consensus::BlockHeader;
 use alloy_eips::{eip1898::BlockWithParent, BlockHashOrNumber};
-use alloy_primitives::B256;
+use alloy_primitives::{Sealable, B256};
 use futures::Stream;
 use reth_consensus::HeaderValidator;
-use reth_primitives::SealedHeader;
+use reth_primitives_traits::{BlockHeader, SealedHeader};
 use std::fmt::Debug;
 
 /// A downloader capable of fetching and yielding block headers.
@@ -22,7 +21,7 @@ pub trait HeaderDownloader:
     + Unpin
 {
     /// The header type being downloaded.
-    type Header: Debug + Send + Sync + Unpin + 'static;
+    type Header: Sealable + Debug + Send + Sync + Unpin + 'static;
 
     /// Updates the gap to sync which ranges from local head to the sync target
     ///
diff --git a/crates/net/p2p/src/headers/error.rs b/crates/net/p2p/src/headers/error.rs
index 8757bb215f5f2..5adf016c7d118 100644
--- a/crates/net/p2p/src/headers/error.rs
+++ b/crates/net/p2p/src/headers/error.rs
@@ -1,3 +1,4 @@
+use alloy_primitives::Sealable;
 use derive_more::{Display, Error};
 use reth_consensus::ConsensusError;
 use reth_primitives::SealedHeader;
@@ -7,7 +8,7 @@ pub type HeadersDownloaderResult = Result>;
 
 /// Error variants that can happen when sending requests to a session.
 #[derive(Debug, Clone, Eq, PartialEq, Display, Error)]
-pub enum HeadersDownloaderError {
+pub enum HeadersDownloaderError {
     /// The downloaded header cannot be attached to the local head,
     /// but is valid otherwise.
     #[display("valid downloaded header cannot be attached to the local head: {error}")]
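The `Sealable` bound added above pairs with the `SealedHeader::seal` to `SealedHeader::seal_slow` rename that recurs throughout this patch. A sketch of the renamed call, assuming (as the call sites suggest) that `seal_slow` computes the header hash rather than reading a cached one:

    use alloy_consensus::Header;
    use reth_primitives::SealedHeader;

    // Sketch: `seal_slow` hashes the header (the `_slow` suffix presumably
    // flags the keccak over the RLP-encoded header as non-trivial work) and
    // returns a SealedHeader carrying hash and header together.
    fn seal(header: Header) -> SealedHeader {
        SealedHeader::seal_slow(header)
    }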
-pub trait EthBlockClient:
-    BlockClient<Header = Header, Body = BlockBody>
+/// Helper trait that unifies network behaviour needed for fetching entire blocks.
+pub trait BlockClient:
+    HeadersClient<Header = <Self::Block as Block>::Header>
+    + BodiesClient<Body = <Self::Block as Block>::Body>
+    + Unpin
+    + Clone
+{
+    /// The Block type that this client fetches.
+    type Block: Block;
+}
-impl<T> EthBlockClient for T where
-    T: BlockClient<Header = Header, Body = BlockBody>
-{ -} +/// The [`BlockClient`] providing Ethereum block parts. +pub trait EthBlockClient: BlockClient {} + +impl EthBlockClient for T where T: BlockClient {} diff --git a/crates/net/p2p/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs index ee65bcb3f0720..2165ddbf56bf1 100644 --- a/crates/net/p2p/src/test_utils/full_block.rs +++ b/crates/net/p2p/src/test_utils/full_block.rs @@ -4,6 +4,7 @@ use crate::{ error::PeerRequestResult, headers::client::{HeadersClient, HeadersRequest}, priority::Priority, + BlockClient, }; use alloy_consensus::Header; use alloy_eips::{BlockHashOrNumber, BlockNumHash}; @@ -134,7 +135,7 @@ impl TestFullBlockClient { self.headers.lock().iter().max_by_key(|(_, header)| header.number).and_then( |(hash, header)| { self.bodies.lock().get(hash).map(|body| { - SealedBlock::new(SealedHeader::new(header.clone(), *hash), body.clone()) + SealedBlock::from_parts_unchecked(header.clone(), body.clone(), *hash) }) }, ) @@ -243,3 +244,7 @@ impl BodiesClient for TestFullBlockClient { ))) } } + +impl BlockClient for TestFullBlockClient { + type Block = reth_primitives::Block; +} diff --git a/crates/net/p2p/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs index ee0d95d5004ba..15adc3bedef80 100644 --- a/crates/net/p2p/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -12,7 +12,7 @@ use crate::{ }; use alloy_consensus::Header; use futures::{Future, FutureExt, Stream, StreamExt}; -use reth_consensus::{test_utils::TestConsensus, Consensus, ConsensusError}; +use reth_consensus::{test_utils::TestConsensus, HeaderValidator}; use reth_eth_wire_types::HeadersDirection; use reth_network_peers::{PeerId, WithPeerId}; use reth_primitives::SealedHeader; @@ -146,13 +146,7 @@ impl Stream for TestDownload { } let empty: SealedHeader = SealedHeader::default(); - if let Err(error) = - >::validate_header_against_parent( - &this.consensus, - &empty, - &empty, - ) - { + if let Err(error) = this.consensus.validate_header_against_parent(&empty, &empty) { this.done = true; return Poll::Ready(Some(Err(DownloadError::HeaderValidation { hash: empty.hash(), @@ -165,7 +159,7 @@ impl Stream for TestDownload { Ok(resp) => { // Skip head and seal headers let mut headers = - resp.1.into_iter().skip(1).map(SealedHeader::seal).collect::>(); + resp.1.into_iter().skip(1).map(SealedHeader::seal_slow).collect::>(); headers.sort_unstable_by_key(|h| h.number); headers.into_iter().for_each(|h| this.buffer.push(h)); this.done = true; diff --git a/crates/node/builder/src/components/builder.rs b/crates/node/builder/src/components/builder.rs index 977381b6582b2..4e67616cd4336 100644 --- a/crates/node/builder/src/components/builder.rs +++ b/crates/node/builder/src/components/builder.rs @@ -10,7 +10,7 @@ use crate::{ use reth_consensus::{ConsensusError, FullConsensus}; use reth_evm::execute::BlockExecutorProvider; use reth_network::NetworkPrimitives; -use reth_node_api::{BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; +use reth_node_api::{BlockTy, BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine, TxTy}; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{future::Future, marker::PhantomData}; @@ -310,6 +310,7 @@ where Primitives: NetworkPrimitives< BlockHeader = HeaderTy, BlockBody = BodyTy, + Block = BlockTy, >, >, PayloadB: PayloadServiceBuilder, @@ -393,7 +394,11 @@ pub trait NodeComponentsBuilder: Send { impl NodeComponentsBuilder for F where - N: NetworkPrimitives, BlockBody = 
BodyTy>, + N: NetworkPrimitives< + BlockHeader = HeaderTy, + BlockBody = BodyTy, + Block = BlockTy, + >, Node: FullNodeTypes, F: FnOnce(&BuilderContext) -> Fut + Send, Fut: Future>> + Send, diff --git a/crates/node/builder/src/components/mod.rs b/crates/node/builder/src/components/mod.rs index c5ac67e5cbc79..93fe031bf5770 100644 --- a/crates/node/builder/src/components/mod.rs +++ b/crates/node/builder/src/components/mod.rs @@ -27,7 +27,9 @@ use reth_consensus::{ConsensusError, FullConsensus}; use reth_evm::execute::BlockExecutorProvider; use reth_network::{NetworkHandle, NetworkPrimitives}; use reth_network_api::FullNetwork; -use reth_node_api::{BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine, PayloadBuilder, TxTy}; +use reth_node_api::{ + BlockTy, BodyTy, HeaderTy, NodeTypes, NodeTypesWithEngine, PayloadBuilder, TxTy, +}; use reth_payload_builder::PayloadBuilderHandle; use reth_transaction_pool::{PoolTransaction, TransactionPool}; @@ -53,9 +55,7 @@ pub trait NodeComponents: Clone + Unpin + Send + Sync + 'stati + 'static; /// Network API. - type Network: FullNetwork< - Client: BlockClient
, Body = BodyTy>, - >; + type Network: FullNetwork>>; /// Builds new blocks. type PayloadBuilder: PayloadBuilder::Engine> @@ -102,7 +102,11 @@ pub struct Components NodeComponents for Components where - N: NetworkPrimitives, BlockBody = BodyTy>, + N: NetworkPrimitives< + BlockHeader = HeaderTy, + BlockBody = BodyTy, + Block = BlockTy, + >, Node: FullNodeTypes, Pool: TransactionPool>> + Unpin diff --git a/crates/node/builder/src/setup.rs b/crates/node/builder/src/setup.rs index 610ca7bbc7990..00cc9d58f96a0 100644 --- a/crates/node/builder/src/setup.rs +++ b/crates/node/builder/src/setup.rs @@ -2,6 +2,7 @@ use std::sync::Arc; +use crate::BlockTy; use alloy_primitives::{BlockNumber, B256}; use reth_config::{config::StageConfig, PruneConfig}; use reth_consensus::{Consensus, ConsensusError}; @@ -14,7 +15,7 @@ use reth_exex::ExExManagerHandle; use reth_network_p2p::{ bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader, BlockClient, }; -use reth_node_api::{BodyTy, HeaderTy}; +use reth_node_api::HeaderTy; use reth_provider::{providers::ProviderNodeTypes, ProviderFactory}; use reth_stages::{prelude::DefaultStages, stages::ExecutionStage, Pipeline, StageSet}; use reth_static_file::StaticFileProducer; @@ -27,7 +28,7 @@ use tokio::sync::watch; pub fn build_networked_pipeline( config: &StageConfig, client: Client, - consensus: Arc>, + consensus: Arc, Error = ConsensusError>>, provider_factory: ProviderFactory, task_executor: &TaskExecutor, metrics_tx: reth_stages::MetricEventsSender, @@ -39,7 +40,7 @@ pub fn build_networked_pipeline( ) -> eyre::Result> where N: ProviderNodeTypes, - Client: BlockClient
, Body = BodyTy> + 'static, + Client: BlockClient> + 'static, Executor: BlockExecutorProvider, { // building network downloaders using the fetch client @@ -75,7 +76,7 @@ pub fn build_pipeline( stage_config: &StageConfig, header_downloader: H, body_downloader: B, - consensus: Arc>, + consensus: Arc, Error = ConsensusError>>, max_block: Option, metrics_tx: reth_stages::MetricEventsSender, prune_config: Option, @@ -86,7 +87,7 @@ pub fn build_pipeline( where N: ProviderNodeTypes, H: HeaderDownloader
<Header = HeaderTy<N>> + 'static,
-    B: BodyDownloader<Header = HeaderTy<N>
, Body = BodyTy> + 'static, + B: BodyDownloader> + 'static, Executor: BlockExecutorProvider, { let mut builder = Pipeline::::builder(); diff --git a/crates/node/core/src/utils.rs b/crates/node/core/src/utils.rs index 31d847da7fbd5..184cacfb36233 100644 --- a/crates/node/core/src/utils.rs +++ b/crates/node/core/src/utils.rs @@ -10,7 +10,7 @@ use reth_network_p2p::{ bodies::client::BodiesClient, headers::client::HeadersClient, priority::Priority, }; use reth_primitives::SealedBlock; -use reth_primitives_traits::SealedHeader; +use reth_primitives_traits::{Block, SealedHeader}; use std::{ env::VarError, path::{Path, PathBuf}, @@ -49,7 +49,7 @@ where eyre::bail!("Invalid number of headers received. Expected: 1. Received: 0") }; - let header = SealedHeader::seal(header); + let header = SealedHeader::seal_slow(header); let valid = match id { BlockHashOrNumber::Hash(hash) => header.hash() == hash, @@ -69,13 +69,14 @@ where } /// Get a body from network based on header -pub async fn get_single_body( +pub async fn get_single_body( client: Client, - header: SealedHeader, - consensus: impl Consensus, -) -> Result> + header: SealedHeader, + consensus: impl Consensus, +) -> Result> where - Client: BodiesClient, + B: Block, + Client: BodiesClient, { let (peer_id, response) = client.get_block_body(header.hash()).await?.split(); @@ -84,7 +85,7 @@ where eyre::bail!("Invalid number of bodies received. Expected: 1. Received: 0") }; - let block = SealedBlock::new(header, body); + let block = SealedBlock::from_sealed_parts(header, body); consensus.validate_block_pre_execution(&block)?; Ok(block) diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 2735b77dd0b6a..98a019e85844f 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -19,7 +19,7 @@ mod op_sepolia; use alloc::{boxed::Box, vec, vec::Vec}; use alloy_chains::Chain; -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader, Header}; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; pub use base::BASE_MAINNET; @@ -200,12 +200,12 @@ impl OpChainSpec { /// Caution: Caller must ensure that holocene is active in the parent header. /// /// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) - pub fn decode_holocene_base_fee( + pub fn decode_holocene_base_fee( &self, - parent: &Header, + parent: &H, timestamp: u64, ) -> Result { - let (elasticity, denominator) = decode_holocene_extra_data(&parent.extra_data)?; + let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?; let base_fee = if elasticity == 0 && denominator == 0 { parent .next_block_base_fee(self.base_fee_params_at_timestamp(timestamp)) @@ -220,15 +220,15 @@ impl OpChainSpec { /// Read from parent to determine the base fee for the next block /// /// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) - pub fn next_block_base_fee( + pub fn next_block_base_fee( &self, - parent: &Header, + parent: &H, timestamp: u64, ) -> Result { // > if Holocene is active in parent_header.timestamp, then the parameters from // > parent_header.extraData are used. 
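        // Per the Holocene spec linked above, `extraData` packs a version byte
        // and two big-endian u32 EIP-1559 parameters; when both decode to zero,
        // `decode_holocene_base_fee` falls back to the canonical base fee params.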
let is_holocene_activated = - self.inner.is_fork_active_at_timestamp(OpHardfork::Holocene, parent.timestamp); + self.inner.is_fork_active_at_timestamp(OpHardfork::Holocene, parent.timestamp()); // If we are in the Holocene, we need to use the base fee params // from the parent block's extra data. diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index e49ffdce2285b..31925620c398d 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -41,18 +41,18 @@ reth-optimism-chainspec.workspace = true [features] default = ["std"] std = [ - "reth-chainspec/std", - "reth-consensus/std", - "reth-consensus-common/std", - "reth-primitives/std", - "reth-optimism-forks/std", - "reth-optimism-chainspec/std", - "reth-optimism-primitives/std", - "alloy-eips/std", - "alloy-primitives/std", - "alloy-consensus/std", - "alloy-trie/std", - "op-alloy-consensus/std", - "reth-primitives-traits/std" + "reth-chainspec/std", + "reth-consensus/std", + "reth-consensus-common/std", + "reth-primitives/std", + "reth-primitives-traits/std", + "reth-optimism-forks/std", + "reth-optimism-chainspec/std", + "reth-optimism-primitives/std", + "alloy-eips/std", + "alloy-primitives/std", + "alloy-consensus/std", + "alloy-trie/std", + "op-alloy-consensus/std", ] optimism = ["reth-primitives/optimism", "reth-optimism-primitives/optimism"] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 5f1423211b50f..5d2e002d23813 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -13,7 +13,7 @@ extern crate alloc; use alloc::sync::Arc; -use alloy_consensus::{BlockHeader, Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{BlockHeader as _, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::eip7840::BlobParams; use alloy_primitives::{B64, U256}; use reth_chainspec::EthereumHardforks; @@ -28,11 +28,12 @@ use reth_consensus_common::validation::{ }; use reth_optimism_chainspec::OpChainSpec; use reth_optimism_forks::OpHardforks; -use reth_optimism_primitives::{OpBlock, OpBlockBody, OpPrimitives, OpReceipt}; -use reth_primitives::{BlockWithSenders, GotExpected, SealedBlockFor, SealedHeader}; +use reth_optimism_primitives::{OpBlock, OpPrimitives, OpReceipt}; +use reth_primitives::{GotExpected, RecoveredBlock, SealedHeader}; mod proof; pub use proof::calculate_receipt_root_no_memo_optimism; +use reth_primitives_traits::{Block, BlockBody, BlockHeader, SealedBlock}; mod validation; pub use validation::validate_block_post_execution; @@ -56,33 +57,34 @@ impl OpBeaconConsensus { impl FullConsensus for OpBeaconConsensus { fn validate_block_post_execution( &self, - block: &BlockWithSenders, + block: &RecoveredBlock, input: PostExecutionInput<'_, OpReceipt>, ) -> Result<(), ConsensusError> { - validate_block_post_execution(&block.header, &self.chain_spec, input.receipts) + validate_block_post_execution(block.header(), &self.chain_spec, input.receipts) } } -impl Consensus for OpBeaconConsensus { +impl Consensus for OpBeaconConsensus { type Error = ConsensusError; fn validate_body_against_header( &self, - body: &OpBlockBody, - header: &SealedHeader, + body: &B::Body, + header: &SealedHeader, ) -> Result<(), ConsensusError> { validate_body_against_header(body, header.header()) } - fn validate_block_pre_execution( - &self, - block: &SealedBlockFor, - ) -> Result<(), ConsensusError> { + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { // Check ommers hash let 
ommers_hash = block.body().calculate_ommers_root(); - if block.ommers_hash != ommers_hash { + if Some(block.ommers_hash()) != ommers_hash { return Err(ConsensusError::BodyOmmersHashDiff( - GotExpected { got: ommers_hash, expected: block.ommers_hash }.into(), + GotExpected { + got: ommers_hash.unwrap_or(EMPTY_OMMER_ROOT_HASH), + expected: block.ommers_hash(), + } + .into(), )) } @@ -92,11 +94,11 @@ impl Consensus for OpBeaconConsensus { } // EIP-4895: Beacon chain push withdrawals as operations - if self.chain_spec.is_shanghai_active_at_timestamp(block.timestamp) { + if self.chain_spec.is_shanghai_active_at_timestamp(block.timestamp()) { validate_shanghai_withdrawals(block)?; } - if self.chain_spec.is_cancun_active_at_timestamp(block.timestamp) { + if self.chain_spec.is_cancun_active_at_timestamp(block.timestamp()) { validate_cancun_gas(block)?; } @@ -104,20 +106,20 @@ impl Consensus for OpBeaconConsensus { } } -impl HeaderValidator for OpBeaconConsensus { - fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { +impl HeaderValidator for OpBeaconConsensus { + fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { validate_header_gas(header.header())?; validate_header_base_fee(header.header(), &self.chain_spec) } fn validate_header_against_parent( &self, - header: &SealedHeader, - parent: &SealedHeader, + header: &SealedHeader, + parent: &SealedHeader, ) -> Result<(), ConsensusError> { validate_against_parent_hash_number(header.header(), parent)?; - if self.chain_spec.is_bedrock_active_at_block(header.number) { + if self.chain_spec.is_bedrock_active_at_block(header.number()) { validate_against_parent_timestamp(header.header(), parent.header())?; } @@ -125,12 +127,12 @@ impl HeaderValidator for OpBeaconConsensus { // // > if Holocene is active in parent_header.timestamp, then the parameters from // > parent_header.extraData are used. - if self.chain_spec.is_holocene_active_at_timestamp(parent.timestamp) { + if self.chain_spec.is_holocene_active_at_timestamp(parent.timestamp()) { let header_base_fee = header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; let expected_base_fee = self .chain_spec - .decode_holocene_base_fee(parent, header.timestamp) + .decode_holocene_base_fee(parent.header(), header.timestamp()) .map_err(|_| ConsensusError::BaseFeeMissing)?; if expected_base_fee != header_base_fee { return Err(ConsensusError::BaseFeeDiff(GotExpected { @@ -147,7 +149,7 @@ impl HeaderValidator for OpBeaconConsensus { } // ensure that the blob gas fields for this block - if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp) { + if self.chain_spec.is_cancun_active_at_timestamp(header.timestamp()) { validate_against_parent_4844(header.header(), parent.header(), BlobParams::cancun())?; } @@ -156,20 +158,20 @@ impl HeaderValidator for OpBeaconConsensus { fn validate_header_with_total_difficulty( &self, - header: &Header, + header: &H, _total_difficulty: U256, ) -> Result<(), ConsensusError> { // with OP-stack Bedrock activation number determines when TTD (eth Merge) has been reached. 
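        // Pre-Bedrock (OVM) blocks pre-date these Merge-era checks and are
        // expected to be imported manually rather than validated here.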
debug_assert!( - self.chain_spec.is_bedrock_active_at_block(header.number), + self.chain_spec.is_bedrock_active_at_block(header.number()), "manually import OVM blocks" ); - if header.nonce != B64::ZERO { + if header.nonce() != Some(B64::ZERO) { return Err(ConsensusError::TheMergeNonceIsNotZero) } - if header.ommers_hash != EMPTY_OMMER_ROOT_HASH { + if header.ommers_hash() != EMPTY_OMMER_ROOT_HASH { return Err(ConsensusError::TheMergeOmmerRootIsNotEmpty) } diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 59fd6e0ecac12..bfac67fbb5731 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -5,7 +5,7 @@ use crate::{ OpReceiptBuilder, ReceiptBuilderCtx, }; use alloc::{boxed::Box, sync::Arc, vec::Vec}; -use alloy_consensus::{Eip658Value, Receipt, Transaction as _}; +use alloy_consensus::{BlockHeader, Eip658Value, Receipt, Transaction as _}; use alloy_eips::eip7685::Requests; use core::fmt::Display; use op_alloy_consensus::{DepositTransaction, OpDepositReceipt}; @@ -25,8 +25,8 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::validate_block_post_execution; use reth_optimism_forks::OpHardfork; use reth_optimism_primitives::{DepositReceipt, OpPrimitives, OpReceipt}; -use reth_primitives::{BlockWithSenders, NodePrimitives}; -use reth_primitives_traits::{Block, BlockBody, SignedTransaction}; +use reth_primitives::{NodePrimitives, RecoveredBlock}; +use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_revm::{Database, State}; use revm_primitives::{db::DatabaseCommit, ResultAndState}; use tracing::trace; @@ -161,11 +161,11 @@ where fn apply_pre_execution_changes( &mut self, - block: &BlockWithSenders, + block: &RecoveredBlock, ) -> Result<(), Self::Error> { // Set state clear flag if the block is after the Spurious Dragon hardfork. let state_clear_flag = - (*self.chain_spec).is_spurious_dragon_active_at_block(block.header().number); + (*self.chain_spec).is_spurious_dragon_active_at_block(block.number()); self.state.set_state_clear_flag(state_clear_flag); let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); @@ -189,21 +189,19 @@ where fn execute_transactions( &mut self, - block: &BlockWithSenders, + block: &RecoveredBlock, ) -> Result, Self::Error> { let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); - let is_regolith = self - .chain_spec - .fork(OpHardfork::Regolith) - .active_at_timestamp(block.header().timestamp); + let is_regolith = + self.chain_spec.fork(OpHardfork::Regolith).active_at_timestamp(block.timestamp()); let mut cumulative_gas_used = 0; - let mut receipts = Vec::with_capacity(block.body().transactions().len()); + let mut receipts = Vec::with_capacity(block.body().transaction_count()); for (sender, transaction) in block.transactions_with_sender() { // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, // must be no greater than the block’s gasLimit. 
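            // Before Regolith, deposit transactions are exempt from this check,
            // hence the `is_regolith || !transaction.is_deposit()` condition below.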
- let block_available_gas = block.header().gas_limit - cumulative_gas_used; + let block_available_gas = block.gas_limit() - cumulative_gas_used; if transaction.gas_limit() > block_available_gas && (is_regolith || !transaction.is_deposit()) { @@ -298,11 +296,10 @@ where fn apply_post_execution_changes( &mut self, - block: &BlockWithSenders, + block: &RecoveredBlock, _receipts: &[N::Receipt], ) -> Result { - let balance_increments = - post_block_balance_increments(&self.chain_spec.clone(), &block.block); + let balance_increments = post_block_balance_increments(&self.chain_spec.clone(), block); // increment balances self.state .increment_balances(balance_increments.clone()) @@ -328,7 +325,7 @@ where fn validate_block_post_execution( &self, - block: &BlockWithSenders, + block: &RecoveredBlock, receipts: &[N::Receipt], _requests: &Requests, ) -> Result<(), ConsensusError> { @@ -455,7 +452,7 @@ mod tests { // Attempt to execute a block with one deposit and one non-deposit transaction executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: BlockBody { transactions: vec![tx, tx_deposit], ..Default::default() }, @@ -531,7 +528,7 @@ mod tests { // attempt to execute an empty block with parent beacon block root, this should not fail executor - .execute_and_verify_one(&BlockWithSenders::new_unchecked( + .execute_and_verify_one(&RecoveredBlock::new_unhashed( Block { header, body: BlockBody { transactions: vec![tx, tx_deposit], ..Default::default() }, diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 9f7ead251562b..a9a88f67295d5 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -204,7 +204,7 @@ mod tests { }; use reth_optimism_chainspec::BASE_MAINNET; use reth_optimism_primitives::{OpBlock, OpPrimitives, OpReceipt}; - use reth_primitives::{Account, Log, Receipts, SealedBlockWithSenders}; + use reth_primitives::{Account, Log, Receipts, RecoveredBlock}; use reth_revm::{ db::{BundleState, CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, @@ -530,8 +530,8 @@ mod tests { #[test] fn receipts_by_block_hash() { - // Create a default SealedBlockWithSenders object - let block: SealedBlockWithSenders = Default::default(); + // Create a default recovered block + let block: RecoveredBlock = Default::default(); // Define block hashes for block1 and block2 let block1_hash = B256::new([0x01; 32]); @@ -542,11 +542,11 @@ mod tests { let mut block2 = block; // Set the hashes of block1 and block2 - block1.block.set_block_number(10); - block1.block.set_hash(block1_hash); + block1.set_block_number(10); + block1.set_hash(block1_hash); - block2.block.set_block_number(11); - block2.block.set_hash(block2_hash); + block2.set_block_number(11); + block2.set_hash(block2_hash); // Create a random receipt object, receipt1 let receipt1 = OpReceipt::Legacy(Receipt { diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index cb4c88e9d52a9..29a13f9d8b3e0 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -20,7 +20,7 @@ use reth_optimism_forks::{OpHardfork, OpHardforks}; use reth_optimism_payload_builder::{OpBuiltPayload, OpPayloadBuilderAttributes}; use reth_optimism_primitives::OpBlock; use reth_payload_validator::ExecutionPayloadValidator; -use reth_primitives::SealedBlockFor; +use reth_primitives::SealedBlock; use reth_rpc_types_compat::engine::payload::block_to_payload; use std::sync::Arc; @@ -51,7 +51,7 
@@ where type ExecutionPayloadEnvelopeV4 = OpExecutionPayloadEnvelopeV4; fn block_to_payload( - block: SealedBlockFor< + block: SealedBlock< <::Primitives as NodePrimitives>::Block, >, ) -> (ExecutionPayload, ExecutionPayloadSidecar) { @@ -96,7 +96,7 @@ impl PayloadValidator for OpEngineValidator { &self, payload: ExecutionPayload, sidecar: ExecutionPayloadSidecar, - ) -> Result, PayloadError> { + ) -> Result, PayloadError> { self.inner.ensure_well_formed_payload(payload, sidecar) } } diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 752a78405c312..0cce61fb7a013 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -400,10 +400,9 @@ where self.validate_all(transactions) } - fn on_new_head_block(&self, new_tip_block: &SealedBlock) + fn on_new_head_block(&self, new_tip_block: &SealedBlock) where - H: reth_primitives_traits::BlockHeader, - B: BlockBody, + B: Block, { self.inner.on_new_head_block(new_tip_block); self.update_l1_block_info( diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 49ae3d93e8459..69104acd7ce3a 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -15,6 +15,7 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-revm = { workspace = true, features = ["witness"] } reth-transaction-pool.workspace = true reth-provider.workspace = true diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 7bf3f8015b711..505843370a141 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -25,9 +25,9 @@ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_payload_util::{NoopPayloadTransactions, PayloadTransactions}; use reth_primitives::{ - proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, BlockExt, - SealedHeader, TxType, + proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, SealedHeader, TxType, }; +use reth_primitives_traits::block::Block as _; use reth_provider::{ HashedPostStateProvider, ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider, diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 2b7c683df8556..10c4f2780cd57 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -17,7 +17,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_primitives::{OpBlock, OpPrimitives, OpTransactionSigned}; use reth_payload_builder::EthPayloadBuilderAttributes; use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives::{transaction::WithEncoded, SealedBlockFor}; +use reth_primitives::{transaction::WithEncoded, SealedBlock}; use reth_rpc_types_compat::engine::payload::{ block_to_payload_v1, block_to_payload_v3, convert_block_to_payload_field_v2, }; @@ -135,7 +135,7 @@ pub struct OpBuiltPayload { /// Identifier of the payload pub(crate) id: PayloadId, /// The built block - pub(crate) block: Arc>, + pub(crate) block: Arc>, /// Block execution data for the payload, if any. pub(crate) executed_block: Option>, /// The fees of the block @@ -155,7 +155,7 @@ impl OpBuiltPayload { /// Initializes the payload with the given initial block. 
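    /// (The block is stored behind an [`Arc`], so cloning the payload does not
    /// clone the sealed block itself.)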
pub const fn new( id: PayloadId, - block: Arc>, + block: Arc>, fees: U256, chain_spec: Arc, attributes: OpPayloadBuilderAttributes, @@ -170,7 +170,7 @@ impl OpBuiltPayload { } /// Returns the built block(sealed) - pub fn block(&self) -> &SealedBlockFor { + pub fn block(&self) -> &SealedBlock { &self.block } @@ -188,7 +188,7 @@ impl OpBuiltPayload { impl BuiltPayload for OpBuiltPayload { type Primitives = OpPrimitives; - fn block(&self) -> &SealedBlockFor { + fn block(&self) -> &SealedBlock { &self.block } @@ -208,7 +208,7 @@ impl BuiltPayload for OpBuiltPayload { impl BuiltPayload for &OpBuiltPayload { type Primitives = OpPrimitives; - fn block(&self) -> &SealedBlockFor { + fn block(&self) -> &SealedBlock { (**self).block() } diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 5361d00be0d79..b33a3dda8670f 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -14,7 +14,7 @@ use reth_evm::ConfigureEvm; use reth_optimism_consensus::calculate_receipt_root_no_memo_optimism; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{OpBlock, OpReceipt, OpTransactionSigned}; -use reth_primitives::{logs_bloom, BlockBody, SealedBlockWithSenders}; +use reth_primitives::{logs_bloom, BlockBody, RecoveredBlock}; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, ProviderBlock, ProviderHeader, ProviderReceipt, ProviderTx, ReceiptProvider, StateProviderFactory, @@ -64,7 +64,7 @@ where &self, ) -> Result< Option<( - SealedBlockWithSenders>, + RecoveredBlock>, Vec>, )>, Self::Error, @@ -80,8 +80,7 @@ where .provider() .block_with_senders(block_id, Default::default()) .map_err(Self::Error::from_eth_err)? - .ok_or(EthApiError::HeaderNotFound(block_id.into()))? - .seal_unchecked(latest.hash()); + .ok_or(EthApiError::HeaderNotFound(block_id.into()))?; let receipts = self .provider() diff --git a/crates/payload/basic/src/stack.rs b/crates/payload/basic/src/stack.rs index d7cf9d13111b4..1dd57d1556858 100644 --- a/crates/payload/basic/src/stack.rs +++ b/crates/payload/basic/src/stack.rs @@ -7,7 +7,7 @@ use alloy_eips::eip4895::Withdrawals; use alloy_primitives::{Address, B256, U256}; use reth_payload_builder::PayloadId; use reth_payload_primitives::BuiltPayload; -use reth_primitives::{NodePrimitives, SealedBlockFor}; +use reth_primitives::{NodePrimitives, SealedBlock}; use alloy_eips::eip7685::Requests; use std::{error::Error, fmt}; @@ -155,7 +155,7 @@ where { type Primitives = L::Primitives; - fn block(&self) -> &SealedBlockFor<::Block> { + fn block(&self) -> &SealedBlock<::Block> { match self { Self::Left(l) => l.block(), Self::Right(r) => r.block(), diff --git a/crates/payload/builder/src/lib.rs b/crates/payload/builder/src/lib.rs index b6191ea7fd111..2dbf8e86eef4a 100644 --- a/crates/payload/builder/src/lib.rs +++ b/crates/payload/builder/src/lib.rs @@ -31,7 +31,7 @@ //! use alloy_consensus::Header; //! use alloy_primitives::U256; //! use reth_payload_builder::{EthBuiltPayload, PayloadBuilderError, KeepPayloadJobAlive, EthPayloadBuilderAttributes, PayloadJob, PayloadJobGenerator, PayloadKind}; -//! use reth_primitives::{Block, BlockExt}; +//! use reth_primitives::{Block, SealedBlock}; //! //! /// The generator type that creates new jobs that builds empty blocks. //! pub struct EmptyBlockPayloadJobGenerator; @@ -67,7 +67,7 @@ //! }, //! ..Default::default() //! }; -//! 
let payload = EthBuiltPayload::new(self.attributes.id, Arc::new(block.seal_slow()), U256::ZERO, None, None); +//! let payload = EthBuiltPayload::new(self.attributes.id, Arc::new(SealedBlock::seal_slow(block)), U256::ZERO, None, None); //! Ok(payload) //! } //! diff --git a/crates/payload/builder/src/test_utils.rs b/crates/payload/builder/src/test_utils.rs index 4690ca14f0d85..9cd680ce6521d 100644 --- a/crates/payload/builder/src/test_utils.rs +++ b/crates/payload/builder/src/test_utils.rs @@ -9,7 +9,8 @@ use alloy_primitives::U256; use reth_chain_state::{CanonStateNotification, ExecutedBlock}; use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::{PayloadKind, PayloadTypes}; -use reth_primitives::{Block, BlockExt}; +use reth_primitives::Block; +use reth_primitives_traits::Block as _; use std::{ future::Future, pin::Pin, diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index a02e00d99f917..05c58e35b94a9 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -5,7 +5,7 @@ use alloy_eips::{ use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use reth_chain_state::ExecutedBlock; -use reth_primitives::{NodePrimitives, SealedBlockFor}; +use reth_primitives::{NodePrimitives, SealedBlock}; /// Represents a built payload type that contains a built `SealedBlock` and can be converted into /// engine API execution payloads. @@ -14,7 +14,7 @@ pub trait BuiltPayload: Send + Sync + std::fmt::Debug { type Primitives: NodePrimitives; /// Returns the built block (sealed) - fn block(&self) -> &SealedBlockFor<::Block>; + fn block(&self) -> &SealedBlock<::Block>; /// Returns the fees collected for the built block fn fees(&self) -> U256; diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index e696e557afa0c..ba78f1cbc8d5c 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -12,8 +12,8 @@ use alloy_rpc_types::engine::{ ExecutionPayload, ExecutionPayloadSidecar, MaybeCancunPayloadFields, PayloadError, }; use reth_chainspec::EthereumHardforks; -use reth_primitives::{BlockBody, BlockExt, Header, SealedBlock}; -use reth_primitives_traits::SignedTransaction; +use reth_primitives::SealedBlock; +use reth_primitives_traits::{Block, SignedTransaction}; use std::sync::Arc; /// Execution payload validator. @@ -59,9 +59,9 @@ impl ExecutionPayloadValidator { /// /// Ensures that the number of blob versioned hashes matches the number hashes included in the /// _separate_ `block_versioned_hashes` of the cancun payload fields. - fn ensure_matching_blob_versioned_hashes( + fn ensure_matching_blob_versioned_hashes( &self, - sealed_block: &SealedBlock>, + sealed_block: &SealedBlock, cancun_fields: &MaybeCancunPayloadFields, ) -> Result<(), PayloadError> { let num_blob_versioned_hashes = sealed_block.blob_versioned_hashes_iter().count(); @@ -116,7 +116,7 @@ impl ExecutionPayloadValidator { &self, payload: ExecutionPayload, sidecar: ExecutionPayloadSidecar, - ) -> Result>, PayloadError> { + ) -> Result>, PayloadError> { let expected_hash = payload.block_hash(); // First parse the block diff --git a/crates/primitives-traits/src/block/body.rs b/crates/primitives-traits/src/block/body.rs index 279e7d45cc8d1..7120d95b35779 100644 --- a/crates/primitives-traits/src/block/body.rs +++ b/crates/primitives-traits/src/block/body.rs @@ -1,10 +1,11 @@ //! 
Block body abstraction. use crate::{ - BlockHeader, FullSignedTx, InMemorySize, MaybeSerde, MaybeSerdeBincodeCompat, SignedTransaction, + transaction::signed::RecoveryError, BlockHeader, FullSignedTx, InMemorySize, MaybeSerde, + MaybeSerdeBincodeCompat, SignedTransaction, }; use alloc::{fmt, vec::Vec}; -use alloy_consensus::{Header, Transaction}; +use alloy_consensus::{Header, Transaction, Typed2718}; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals}; use alloy_primitives::{Address, Bytes, B256}; @@ -14,6 +15,9 @@ pub trait FullBlockBody: BlockBody + MaybeSerdeBincod impl FullBlockBody for T where T: BlockBody + MaybeSerdeBincodeCompat {} /// Abstraction for block's body. +/// +/// This type is a container for everything that is included in a block except the header. +/// For ethereum this includes transactions, ommers, and withdrawals. pub trait BlockBody: Send + Sync @@ -47,9 +51,15 @@ pub trait BlockBody: fn transaction_count(&self) -> usize { self.transactions().len() } + /// Consume the block body and return a [`Vec`] of transactions. fn into_transactions(self) -> Vec; + /// Returns `true` if the block body contains a transaction of the given type. + fn contains_transaction_type(&self, tx_type: u8) -> bool { + self.transactions().iter().any(|tx| tx.is_type(tx_type)) + } + /// Calculate the transaction root for the block body. fn calculate_tx_root(&self) -> B256 { alloy_consensus::proofs::calculate_transaction_root(self.transactions()) @@ -115,6 +125,16 @@ pub trait BlockBody: crate::transaction::recover::recover_signers(self.transactions()) } + /// Recover signer addresses for all transactions in the block body. + /// + /// Returns an error if some transaction's signature is invalid. + fn try_recover_signers(&self) -> Result, RecoveryError> + where + Self::Transaction: SignedTransaction, + { + self.recover_signers().ok_or(RecoveryError) + } + /// Recover signer addresses for all transactions in the block body _without ensuring that the /// signature has a low `s` value_. /// @@ -125,6 +145,17 @@ pub trait BlockBody: { crate::transaction::recover::recover_signers_unchecked(self.transactions()) } + + /// Recover signer addresses for all transactions in the block body _without ensuring that the + /// signature has a low `s` value_. + /// + /// Returns an error if some transaction's signature is invalid. + fn try_recover_signers_unchecked(&self) -> Result, RecoveryError> + where + Self::Transaction: SignedTransaction, + { + self.recover_signers_unchecked().ok_or(RecoveryError) + } } impl BlockBody for alloy_consensus::BlockBody diff --git a/crates/primitives-traits/src/block/error.rs b/crates/primitives-traits/src/block/error.rs new file mode 100644 index 0000000000000..471eeb800bff1 --- /dev/null +++ b/crates/primitives-traits/src/block/error.rs @@ -0,0 +1,33 @@ +//! Error types for the `block` module. + +use crate::transaction::signed::RecoveryError; + +/// Type alias for [`BlockRecoveryError`] with a [`SealedBlock`](crate::SealedBlock) value. +pub type SealedBlockRecoveryError = BlockRecoveryError>; + +/// Error when recovering a block from [`SealedBlock`](crate::SealedBlock) to +/// [`RecoveredBlock`](crate::RecoveredBlock). +/// +/// This error is returned when the block recovery fails and contains the erroneous block, because +/// recovering a block takes ownership of the block. +#[derive(Debug, Clone, thiserror::Error)] +#[error("Failed to recover the block")] +pub struct BlockRecoveryError(pub T); + +impl BlockRecoveryError { + /// Create a new error. 
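+    ///
+    /// The wrapped value is the block that failed to recover; it can be taken
+    /// back out with [`Self::into_inner`].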
+ pub const fn new(inner: T) -> Self { + Self(inner) + } + + /// Unwrap the error and return the original value. + pub fn into_inner(self) -> T { + self.0 + } +} + +impl From> for RecoveryError { + fn from(_: BlockRecoveryError) -> Self { + Self + } +} diff --git a/crates/primitives-traits/src/block/mod.rs b/crates/primitives-traits/src/block/mod.rs index 85e29995f4b61..f0a9c36702aeb 100644 --- a/crates/primitives-traits/src/block/mod.rs +++ b/crates/primitives-traits/src/block/mod.rs @@ -1,17 +1,33 @@ //! Block abstraction. +pub(crate) mod sealed; +pub use sealed::SealedBlock; + +pub(crate) mod recovered; +pub use recovered::RecoveredBlock; + pub mod body; +pub mod error; pub mod header; -use alloc::fmt; +use alloc::{fmt, vec::Vec}; use alloy_consensus::Header; +use alloy_primitives::{Address, B256}; use alloy_rlp::{Decodable, Encodable}; use crate::{ - BlockBody, BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeSerde, + BlockBody, BlockHeader, FullBlockBody, FullBlockHeader, InMemorySize, MaybeSerde, SealedHeader, SignedTransaction, }; +/// Bincode-compatible header type serde implementations. +#[cfg(feature = "serde-bincode-compat")] +pub mod serde_bincode_compat { + pub use super::{ + recovered::serde_bincode_compat::RecoveredBlock, sealed::serde_bincode_compat::SealedBlock, + }; +} + /// Helper trait that unifies all behaviour required by block to support full node operations. pub trait FullBlock: Block + alloy_rlp::Encodable + alloy_rlp::Decodable @@ -29,9 +45,10 @@ impl FullBlock for T where pub type BlockTx = <::Body as BlockBody>::Transaction; /// Abstraction of block data type. -// todo: make sealable super-trait, depends on -// todo: make with senders extension trait, so block can be impl by block type already containing -// senders +/// +/// This type defines the structure of a block in the blockchain. +/// A [`Block`] is composed of a header and a body. +/// It is expected that a block can always be completely reconstructed from its header and body. pub trait Block: Send + Sync @@ -49,12 +66,30 @@ pub trait Block: /// Header part of the block. type Header: BlockHeader; - /// The block's body contains the transactions in the block. + /// The block's body contains the transactions in the block and additional data, e.g. + /// withdrawals in ethereum. type Body: BlockBody; /// Create new block instance. fn new(header: Self::Header, body: Self::Body) -> Self; + /// Create new a sealed block instance from a sealed header and the block body. + fn new_sealed(header: SealedHeader, body: Self::Body) -> SealedBlock { + SealedBlock::from_sealed_parts(header, body) + } + + /// Seal the block with a known hash. + /// + /// WARNING: This method does not perform validation whether the hash is correct. + fn seal(self, hash: B256) -> SealedBlock { + SealedBlock::new_unchecked(self, hash) + } + + /// Calculate the header hash and seal the block so that it can't be changed. + fn seal_slow(self) -> SealedBlock { + SealedBlock::seal_slow(self) + } + /// Returns reference to block header. fn header(&self) -> &Self::Header; @@ -63,6 +98,84 @@ pub trait Block: /// Splits the block into its header and body. fn split(self) -> (Self::Header, Self::Body); + + /// Returns a tuple of references to the block's header and body. + fn split_ref(&self) -> (&Self::Header, &Self::Body) { + (self.header(), self.body()) + } + + /// Consumes the block and returns the header. + fn into_header(self) -> Self::Header { + self.split().0 + } + + /// Consumes the block and returns the body. 
+ fn into_body(self) -> Self::Body { + self.split().1 + } + + /// Returns the rlp length of the block with the given header and body. + fn rlp_length(header: &Self::Header, body: &Self::Body) -> usize { + // TODO(mattsse): replace default impl with + header.length() + body.length() + } + + /// Expensive operation that recovers transaction signer. + fn senders(&self) -> Option> + where + ::Transaction: SignedTransaction, + { + self.body().recover_signers() + } + + /// Transform into a [`RecoveredBlock`]. + /// + /// # Panics + /// + /// If the number of senders does not match the number of transactions in the block + /// and the signer recovery for one of the transactions fails. + /// + /// Note: this is expected to be called with blocks read from disk. + #[track_caller] + fn with_senders_unchecked(self, senders: Vec
) -> RecoveredBlock + where + ::Transaction: SignedTransaction, + { + self.try_with_senders_unchecked(senders).expect("stored block is valid") + } + + /// Transform into a [`RecoveredBlock`] using the given senders. + /// + /// If the number of senders does not match the number of transactions in the block, this falls + /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. + /// + /// Returns an error if a signature is invalid. + #[track_caller] + fn try_with_senders_unchecked(self, senders: Vec
) -> Result, Self> + where + ::Transaction: SignedTransaction, + { + let senders = if self.body().transactions().len() == senders.len() { + senders + } else { + let Some(senders) = self.body().recover_signers_unchecked() else { return Err(self) }; + senders + }; + + Ok(RecoveredBlock::new_unhashed(self, senders)) + } + + /// **Expensive**. Transform into a [`RecoveredBlock`] by recovering senders in the contained + /// transactions. + /// + /// Returns `None` if a transaction is invalid. + fn with_recovered_senders(self) -> Option> + where + ::Transaction: SignedTransaction, + { + let senders = self.senders()?; + Some(RecoveredBlock::new_unhashed(self, senders)) + } } impl Block for alloy_consensus::Block diff --git a/crates/primitives-traits/src/block/recovered.rs b/crates/primitives-traits/src/block/recovered.rs new file mode 100644 index 0000000000000..0828646cf1f6e --- /dev/null +++ b/crates/primitives-traits/src/block/recovered.rs @@ -0,0 +1,598 @@ +//! Recovered Block variant. + +use crate::{ + block::{error::SealedBlockRecoveryError, SealedBlock}, + transaction::signed::{RecoveryError, SignedTransactionIntoRecoveredExt}, + Block, BlockBody, InMemorySize, SealedHeader, +}; +use alloc::vec::Vec; +use alloy_consensus::{transaction::Recovered, BlockHeader}; +use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; +use alloy_primitives::{Address, BlockHash, BlockNumber, Bloom, Bytes, Sealed, B256, B64, U256}; +use derive_more::Deref; + +/// A block with senders recovered from the block's transactions. +/// +/// This type is a [`SealedBlock`] with a list of senders that match the transactions in the block. +/// +/// ## Sealing +/// +/// This type uses lazy sealing to avoid hashing the header until it is needed: +/// +/// [`RecoveredBlock::new_unhashed`] creates a recovered block without hashing the header. +/// [`RecoveredBlock::new`] creates a recovered block with the corresponding block hash. +/// +/// ## Recovery +/// +/// Sender recovery is fallible and can fail if any of the transactions fail to recover the sender. +/// A [`SealedBlock`] can be upgraded to a [`RecoveredBlock`] using the +/// [`RecoveredBlock::try_recover`] or [`SealedBlock::try_recover`] method. +#[derive(Debug, Clone, Deref)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct RecoveredBlock { + /// Block + #[deref] + #[cfg_attr( + feature = "serde", + serde(bound = "SealedBlock: serde::Serialize + serde::de::DeserializeOwned") + )] + block: SealedBlock, + /// List of senders that match the transactions in the block + senders: Vec
, +} + +impl RecoveredBlock { + /// Creates a new recovered block instance with the given senders as provided and the block + /// hash. + /// + /// Note: This expects that the given senders match the transactions in the block. + pub fn new(block: B, senders: Vec
, hash: BlockHash) -> Self { + Self { block: SealedBlock::new_unchecked(block, hash), senders } + } + + /// Creates a new recovered block instance with the given senders as provided. + /// + /// Note: This expects that the given senders match the transactions in the block. + pub fn new_unhashed(block: B, senders: Vec
) -> Self { + Self { block: SealedBlock::new_unhashed(block), senders } + } + + /// Returns the recovered senders. + pub fn senders(&self) -> &[Address] { + &self.senders + } + + /// Returns an iterator over the recovered senders. + pub fn senders_iter(&self) -> impl Iterator { + self.senders.iter() + } + + /// Consumes the type and returns the inner block. + pub fn into_block(self) -> B { + self.block.into_block() + } + + /// Returns a reference to the sealed block. + pub const fn sealed_block(&self) -> &SealedBlock { + &self.block + } + + /// Creates a new recovered block instance with the given [`SealedBlock`] and senders as + /// provided + pub const fn new_sealed(block: SealedBlock, senders: Vec
) -> Self { + Self { block, senders } + } + + /// A safer variant of [`Self::new_unhashed`] that checks if the number of senders is equal to + /// the number of transactions in the block and recovers the senders from the transactions, if + /// not using [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction) + /// to recover the senders. + pub fn try_new( + block: B, + senders: Vec
, + hash: BlockHash, + ) -> Result> { + let senders = if block.body().transaction_count() == senders.len() { + senders + } else { + let Ok(senders) = block.body().try_recover_signers() else { + return Err(SealedBlockRecoveryError::new(SealedBlock::new_unchecked(block, hash))); + }; + senders + }; + Ok(Self::new(block, senders, hash)) + } + + /// A safer variant of [`Self::new`] that checks if the number of senders is equal to + /// the number of transactions in the block and recovers the senders from the transactions, if + /// not using [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction) + /// to recover the senders. + pub fn try_new_unchecked( + block: B, + senders: Vec
, + hash: BlockHash, + ) -> Result> { + let senders = if block.body().transaction_count() == senders.len() { + senders + } else { + let Ok(senders) = block.body().try_recover_signers_unchecked() else { + return Err(SealedBlockRecoveryError::new(SealedBlock::new_unchecked(block, hash))); + }; + senders + }; + Ok(Self::new(block, senders, hash)) + } + + /// A safer variant of [`Self::new_unhashed`] that checks if the number of senders is equal to + /// the number of transactions in the block and recovers the senders from the transactions, if + /// not using [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction) + /// to recover the senders. + pub fn try_new_unhashed(block: B, senders: Vec
) -> Result { + let senders = if block.body().transaction_count() == senders.len() { + senders + } else { + block.body().try_recover_signers()? + }; + Ok(Self::new_unhashed(block, senders)) + } + + /// A safer variant of [`Self::new_unhashed`] that checks if the number of senders is equal to + /// the number of transactions in the block and recovers the senders from the transactions, if + /// not using [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction) + /// to recover the senders. + pub fn try_new_unhashed_unchecked( + block: B, + senders: Vec
, + ) -> Result { + let senders = if block.body().transaction_count() == senders.len() { + senders + } else { + block.body().try_recover_signers_unchecked()? + }; + Ok(Self::new_unhashed(block, senders)) + } + + /// Recovers the senders from the transactions in the block using + /// [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction). + /// + /// Returns an error if any of the transactions fail to recover the sender. + pub fn try_recover(block: B) -> Result { + let senders = block.body().try_recover_signers()?; + Ok(Self::new_unhashed(block, senders)) + } + + /// Recovers the senders from the transactions in the block using + /// [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction). + /// + /// Returns an error if any of the transactions fail to recover the sender. + pub fn try_recover_unchecked(block: B) -> Result { + let senders = block.body().try_recover_signers_unchecked()?; + Ok(Self::new_unhashed(block, senders)) + } + + /// Recovers the senders from the transactions in the block using + /// [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction). + /// + /// Returns an error if any of the transactions fail to recover the sender. + pub fn try_recover_sealed(block: SealedBlock) -> Result> { + let Ok(senders) = block.body().try_recover_signers() else { + return Err(SealedBlockRecoveryError::new(block)); + }; + let (block, hash) = block.split(); + Ok(Self::new(block, senders, hash)) + } + + /// Recovers the senders from the transactions in the sealed block using + /// [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction). + /// + /// Returns an error if any of the transactions fail to recover the sender. + pub fn try_recover_sealed_unchecked( + block: SealedBlock, + ) -> Result> { + let Ok(senders) = block.body().try_recover_signers_unchecked() else { + return Err(SealedBlockRecoveryError::new(block)); + }; + let (block, hash) = block.split(); + Ok(Self::new(block, senders, hash)) + } + + /// A safer variant of [`Self::new_unhashed`] that checks if the number of senders is equal to + /// the number of transactions in the block and recovers the senders from the transactions, if + /// not using [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction) + /// to recover the senders. + /// + /// Returns an error if any of the transactions fail to recover the sender. + pub fn try_recover_sealed_with_senders( + block: SealedBlock, + senders: Vec
, + ) -> Result> { + let (block, hash) = block.split(); + Self::try_new(block, senders, hash) + } + + /// A safer variant of [`Self::new`] that checks if the number of senders is equal to + /// the number of transactions in the block and recovers the senders from the transactions, if + /// not using [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction) + /// to recover the senders. + pub fn try_recover_sealed_with_senders_unchecked( + block: SealedBlock, + senders: Vec
, + ) -> Result> { + let (block, hash) = block.split(); + Self::try_new_unchecked(block, senders, hash) + } + + /// Returns the block hash. + pub fn hash_ref(&self) -> &BlockHash { + self.block.hash_ref() + } + + /// Returns a copy of the block hash. + pub fn hash(&self) -> BlockHash { + *self.hash_ref() + } + + /// Return the number hash tuple. + pub fn num_hash(&self) -> BlockNumHash { + BlockNumHash::new(self.header().number(), self.hash()) + } + + /// Return a [`BlockWithParent`] for this header. + pub fn block_with_parent(&self) -> BlockWithParent { + BlockWithParent { parent: self.header().parent_hash(), block: self.num_hash() } + } + + /// Clone the header. + pub fn clone_header(&self) -> B::Header { + self.header().clone() + } + + /// Clones the internal header and returns a [`SealedHeader`] sealed with the hash. + pub fn clone_sealed_header(&self) -> SealedHeader { + SealedHeader::new(self.clone_header(), self.hash()) + } + + /// Clones the wrapped block and returns the [`SealedBlock`] sealed with the hash. + pub fn clone_sealed_block(&self) -> SealedBlock { + self.block.clone() + } + + /// Consumes the block and returns the block's header. + pub fn into_header(self) -> B::Header { + self.block.into_header() + } + + /// Consumes the block and returns the block's body. + pub fn into_body(self) -> B::Body { + self.block.into_body() + } + + /// Consumes the block and returns the [`SealedBlock`] and drops the recovered senders. + pub fn into_sealed_block(self) -> SealedBlock { + self.block + } + + /// Consumes the type and returns its components. + pub fn split_sealed(self) -> (SealedBlock, Vec
) { + (self.block, self.senders) + } + + /// Consumes the type and returns its components. + #[doc(alias = "into_components")] + pub fn split(self) -> (B, Vec
) { + (self.block.into_block(), self.senders) + } + + /// Returns an iterator over all transactions and their sender. + #[inline] + pub fn transactions_with_sender( + &self, + ) -> impl Iterator::Transaction)> + '_ { + self.senders.iter().zip(self.block.body().transactions()) + } + + /// Returns an iterator over all transactions in the block. + #[inline] + pub fn into_transactions_ecrecovered( + self, + ) -> impl Iterator::Transaction>> { + self.block + .split() + .0 + .into_body() + .into_transactions() + .into_iter() + .zip(self.senders) + .map(|(tx, sender)| tx.with_signer(sender)) + } + + /// Consumes the block and returns the transactions of the block. + #[inline] + pub fn into_transactions(self) -> Vec<::Transaction> { + self.block.split().0.into_body().into_transactions() + } +} + +impl BlockHeader for RecoveredBlock { + fn parent_hash(&self) -> B256 { + self.header().parent_hash() + } + + fn ommers_hash(&self) -> B256 { + self.header().ommers_hash() + } + + fn beneficiary(&self) -> Address { + self.header().beneficiary() + } + + fn state_root(&self) -> B256 { + self.header().state_root() + } + + fn transactions_root(&self) -> B256 { + self.header().transactions_root() + } + + fn receipts_root(&self) -> B256 { + self.header().receipts_root() + } + + fn withdrawals_root(&self) -> Option { + self.header().withdrawals_root() + } + + fn logs_bloom(&self) -> Bloom { + self.header().logs_bloom() + } + + fn difficulty(&self) -> U256 { + self.header().difficulty() + } + + fn number(&self) -> BlockNumber { + self.header().number() + } + + fn gas_limit(&self) -> u64 { + self.header().gas_limit() + } + + fn gas_used(&self) -> u64 { + self.header().gas_used() + } + + fn timestamp(&self) -> u64 { + self.header().timestamp() + } + + fn mix_hash(&self) -> Option { + self.header().mix_hash() + } + + fn nonce(&self) -> Option { + self.header().nonce() + } + + fn base_fee_per_gas(&self) -> Option { + self.header().base_fee_per_gas() + } + + fn blob_gas_used(&self) -> Option { + self.header().blob_gas_used() + } + + fn excess_blob_gas(&self) -> Option { + self.header().excess_blob_gas() + } + + fn parent_beacon_block_root(&self) -> Option { + self.header().parent_beacon_block_root() + } + + fn requests_hash(&self) -> Option { + self.header().requests_hash() + } + + fn extra_data(&self) -> &Bytes { + self.header().extra_data() + } +} + +impl Eq for RecoveredBlock {} + +impl PartialEq for RecoveredBlock { + fn eq(&self, other: &Self) -> bool { + self.hash_ref().eq(other.hash_ref()) && + self.block.eq(&other.block) && + self.senders.eq(&other.senders) + } +} + +impl Default for RecoveredBlock { + #[inline] + fn default() -> Self { + Self::new_unhashed(B::default(), Default::default()) + } +} + +impl InMemorySize for RecoveredBlock { + #[inline] + fn size(&self) -> usize { + self.block.size() + self.senders.len() * core::mem::size_of::
() + } +} + +impl From> for Sealed { + fn from(value: RecoveredBlock) -> Self { + value.block.into() + } +} + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a, B> arbitrary::Arbitrary<'a> for RecoveredBlock +where + B: Block + arbitrary::Arbitrary<'a>, +{ + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + let block = B::arbitrary(u)?; + Ok(Self::try_recover(block).unwrap()) + } +} + +#[cfg(any(test, feature = "test-utils"))] +impl RecoveredBlock { + /// Returns a mutable reference to the recovered senders. + pub fn senders_mut(&mut self) -> &mut Vec
{ + &mut self.senders + } + + /// Appends the sender to the list of senders. + pub fn push_sender(&mut self, sender: Address) { + self.senders.push(sender); + } +} + +#[cfg(any(test, feature = "test-utils"))] +impl core::ops::DerefMut for RecoveredBlock +where + B: Block, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.block + } +} + +#[cfg(any(test, feature = "test-utils"))] +impl RecoveredBlock { + /// Updates the block header. + pub fn set_header(&mut self, header: B::Header) { + *self.header_mut() = header + } + + /// Updates the block hash. + pub fn set_hash(&mut self, hash: BlockHash) { + self.block.set_hash(hash) + } + + /// Returns a mutable reference to the header. + pub fn header_mut(&mut self) -> &mut B::Header { + self.block.header_mut() + } + + /// Returns a mutable reference to the header. + pub fn block_mut(&mut self) -> &mut B::Body { + self.block.body_mut() + } + + /// Updates the parent block hash. + pub fn set_parent_hash(&mut self, hash: BlockHash) { + self.block.set_parent_hash(hash); + } + + /// Updates the block number. + pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { + self.block.set_block_number(number); + } + + /// Updates the block state root. + pub fn set_state_root(&mut self, state_root: alloy_primitives::B256) { + self.block.set_state_root(state_root); + } + + /// Updates the block difficulty. + pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { + self.block.set_difficulty(difficulty); + } +} + +/// Bincode-compatible [`RecoveredBlock`] serde implementation. +#[cfg(feature = "serde-bincode-compat")] +pub(super) mod serde_bincode_compat { + use crate::{ + serde_bincode_compat::{self, SerdeBincodeCompat}, + Block, + }; + use alloc::{borrow::Cow, vec::Vec}; + use alloy_primitives::Address; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde_with::{DeserializeAs, SerializeAs}; + + /// Bincode-compatible [`super::RecoveredBlock`] serde implementation. + /// + /// Intended to use with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_primitives_traits::{ + /// block::RecoveredBlock, + /// serde_bincode_compat::{self, SerdeBincodeCompat}, + /// Block, + /// }; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data + 'static> { + /// #[serde_as(as = "serde_bincode_compat::RecoveredBlock<'_, T>")] + /// block: RecoveredBlock, + /// } + /// ``` + #[derive(derive_more::Debug, Serialize, Deserialize)] + pub struct RecoveredBlock< + 'a, + T: Block + 'static, + > { + #[serde( + bound = "serde_bincode_compat::SealedBlock<'a, T>: Serialize + serde::de::DeserializeOwned" + )] + block: serde_bincode_compat::SealedBlock<'a, T>, + senders: Cow<'a, Vec
>, + } + + impl<'a, T: Block + 'static> + From<&'a super::RecoveredBlock> for RecoveredBlock<'a, T> + { + fn from(value: &'a super::RecoveredBlock) -> Self { + Self { block: (&value.block).into(), senders: Cow::Borrowed(&value.senders) } + } + } + + impl<'a, T: Block + 'static> + From> for super::RecoveredBlock + { + fn from(value: RecoveredBlock<'a, T>) -> Self { + Self::new_sealed(value.block.into(), value.senders.into_owned()) + } + } + + impl + 'static> + SerializeAs> for RecoveredBlock<'_, T> + { + fn serialize_as( + source: &super::RecoveredBlock, + serializer: S, + ) -> Result + where + S: Serializer, + { + RecoveredBlock::from(source).serialize(serializer) + } + } + + impl<'de, T: Block + 'static> + DeserializeAs<'de, super::RecoveredBlock> for RecoveredBlock<'de, T> + { + fn deserialize_as(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + RecoveredBlock::deserialize(deserializer).map(Into::into) + } + } + + impl + 'static> + SerdeBincodeCompat for super::RecoveredBlock + { + type BincodeRepr<'a> = RecoveredBlock<'a, T>; + } +} diff --git a/crates/primitives-traits/src/block/sealed.rs b/crates/primitives-traits/src/block/sealed.rs new file mode 100644 index 0000000000000..5ff43bff67246 --- /dev/null +++ b/crates/primitives-traits/src/block/sealed.rs @@ -0,0 +1,462 @@ +//! Sealed block types + +use crate::{ + block::{error::BlockRecoveryError, RecoveredBlock}, + Block, BlockBody, GotExpected, InMemorySize, SealedHeader, +}; +use alloc::vec::Vec; +use alloy_consensus::BlockHeader; +use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; +use alloy_primitives::{Address, BlockHash, Sealable, Sealed, B256}; +use alloy_rlp::{Decodable, Encodable}; +use bytes::BufMut; +use core::ops::Deref; + +/// Sealed full block composed of the block's header and body. +/// +/// This type uses lazy sealing to avoid hashing the header until it is needed, see also +/// [`SealedHeader`]. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub struct SealedBlock { + /// Sealed Header. + header: SealedHeader, + /// the block's body. + body: B::Body, +} + +impl SealedBlock { + /// Hashes the header and creates a sealed block. + /// + /// This calculates the header hash. To create a [`SealedBlock`] without calculating the hash + /// upfront see [`SealedBlock::new_unhashed`] + pub fn seal_slow(block: B) -> Self { + let hash = block.header().hash_slow(); + Self::new_unchecked(block, hash) + } + + /// Create a new sealed block instance using the block. + /// + /// Caution: This assumes the given hash is the block's hash. + #[inline] + pub fn new_unchecked(block: B, hash: BlockHash) -> Self { + let (header, body) = block.split(); + Self { header: SealedHeader::new(header, hash), body } + } + + /// Creates a `SealedBlock` from the block without the available hash + pub fn new_unhashed(block: B) -> Self { + let (header, body) = block.split(); + Self { header: SealedHeader::new_unhashed(header), body } + } + + /// Creates the [`SealedBlock`] from the block's parts by hashing the header. + /// + /// + /// This calculates the header hash. To create a [`SealedBlock`] from its parts without + /// calculating the hash upfront see [`SealedBlock::from_parts_unhashed`] + pub fn seal_parts(header: B::Header, body: B::Body) -> Self { + Self::seal_slow(B::new(header, body)) + } + + /// Creates the [`SealedBlock`] from the block's parts without calculating the hash upfront. 
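+    ///
+    /// A minimal usage sketch (illustrative only; `MyBlock` is a placeholder for any concrete
+    /// [`Block`] impl, and `header`/`body` are its parts):
+    ///
+    /// ```ignore
+    /// // No hashing happens here; the hash is computed lazily on first access.
+    /// let sealed = SealedBlock::<MyBlock>::from_parts_unhashed(header, body);
+    /// let hash = sealed.hash(); // hashes the header on demand and caches the result
+    /// ```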
+ pub fn from_parts_unhashed(header: B::Header, body: B::Body) -> Self { + Self::new_unhashed(B::new(header, body)) + } + + /// Creates the [`SealedBlock`] from the block's parts. + pub fn from_parts_unchecked(header: B::Header, body: B::Body, hash: BlockHash) -> Self { + Self::new_unchecked(B::new(header, body), hash) + } + + /// Creates the [`SealedBlock`] from the [`SealedHeader`] and the body. + pub fn from_sealed_parts(header: SealedHeader, body: B::Body) -> Self { + let (header, hash) = header.split(); + Self::from_parts_unchecked(header, body, hash) + } + + /// Returns a reference to the block hash. + #[inline] + pub fn hash_ref(&self) -> &BlockHash { + self.header.hash_ref() + } + + /// Returns the block hash. + #[inline] + pub fn hash(&self) -> B256 { + self.header.hash() + } + + /// Consumes the type and returns its components. + #[doc(alias = "into_components")] + pub fn split(self) -> (B, BlockHash) { + let (header, hash) = self.header.split(); + (B::new(header, self.body), hash) + } + + /// Consumes the type and returns the block. + pub fn into_block(self) -> B { + self.unseal() + } + + /// Consumes the type and returns the block. + pub fn unseal(self) -> B { + let header = self.header.unseal(); + B::new(header, self.body) + } + + /// Clones the wrapped block. + pub fn clone_block(&self) -> B { + B::new(self.header.clone_header(), self.body.clone()) + } + + /// Converts this block into a [`RecoveredBlock`] with the given senders + /// + /// Note: This method assumes the senders are correct and does not validate them. + pub const fn with_senders(self, senders: Vec
<Address>) -> RecoveredBlock<B> {
+        RecoveredBlock::new_sealed(self, senders)
+    }
+
+    /// Converts this block into a [`RecoveredBlock`] with the given senders if the number of
+    /// senders matches the number of transactions in the block; otherwise recovers the senders
+    /// from the transactions using
+    /// [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction).
+    ///
+    /// Returns an error if any of the transactions fail to recover the sender.
+    pub fn try_with_senders(
+        self,
+        senders: Vec<Address>
,
+    ) -> Result<RecoveredBlock<B>, BlockRecoveryError<Self>> {
+        RecoveredBlock::try_recover_sealed_with_senders(self, senders)
+    }
+
+    /// Converts this block into a [`RecoveredBlock`] with the given senders if the number of
+    /// senders matches the number of transactions in the block; otherwise recovers the senders
+    /// from the transactions using
+    /// [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction).
+    ///
+    /// Returns an error if any of the transactions fail to recover the sender.
+    pub fn try_with_senders_unchecked(
+        self,
+        senders: Vec<Address>
, + ) -> Result, BlockRecoveryError> { + RecoveredBlock::try_recover_sealed_with_senders_unchecked(self, senders) + } + + /// Recovers the senders from the transactions in the block using + /// [`SignedTransaction::recover_signer`](crate::transaction::signed::SignedTransaction). + /// + /// Returns an error if any of the transactions fail to recover the sender. + pub fn try_recover(self) -> Result, BlockRecoveryError> { + RecoveredBlock::try_recover_sealed(self) + } + + /// Recovers the senders from the transactions in the block using + /// [`SignedTransaction::recover_signer_unchecked`](crate::transaction::signed::SignedTransaction). + /// + /// Returns an error if any of the transactions fail to recover the sender. + pub fn try_recover_unchecked(self) -> Result, BlockRecoveryError> { + RecoveredBlock::try_recover_sealed_unchecked(self) + } + + /// Returns reference to block header. + pub const fn header(&self) -> &B::Header { + self.header.header() + } + + /// Returns reference to block body. + pub const fn body(&self) -> &B::Body { + &self.body + } + + /// Returns the length of the block. + pub fn rlp_length(&self) -> usize { + B::rlp_length(self.header(), self.body()) + } + + /// Recovers all senders from the transactions in the block. + /// + /// Returns `None` if any of the transactions fail to recover the sender. + pub fn senders(&self) -> Option> { + self.body().recover_signers() + } + + /// Return the number hash tuple. + pub fn num_hash(&self) -> BlockNumHash { + BlockNumHash::new(self.number(), self.hash()) + } + + /// Return a [`BlockWithParent`] for this header. + pub fn block_with_parent(&self) -> BlockWithParent { + BlockWithParent { parent: self.parent_hash(), block: self.num_hash() } + } + + /// Returns the Sealed header. + pub const fn sealed_header(&self) -> &SealedHeader { + &self.header + } + + /// Returns the wrapped `SealedHeader` as `SealedHeader<&B::Header>`. + pub fn sealed_header_ref(&self) -> SealedHeader<&B::Header> { + SealedHeader::new(self.header(), self.hash()) + } + + /// Clones the wrapped header and returns a [`SealedHeader`] sealed with the hash. + pub fn clone_sealed_header(&self) -> SealedHeader { + self.header.clone() + } + + /// Consumes the block and returns the sealed header. + pub fn into_sealed_header(self) -> SealedHeader { + self.header + } + + /// Consumes the block and returns the header. + pub fn into_header(self) -> B::Header { + self.header.unseal() + } + + /// Consumes the block and returns the body. + pub fn into_body(self) -> B::Body { + self.body + } + + /// Splits the block into body and header into separate components + pub fn split_header_body(self) -> (B::Header, B::Body) { + let header = self.header.unseal(); + (header, self.body) + } + + /// Splits the block into body and header into separate components. + pub fn split_sealed_header_body(self) -> (SealedHeader, B::Body) { + (self.header, self.body) + } + + /// Returns an iterator over all blob versioned hashes from the block body. + #[inline] + pub fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { + self.body().blob_versioned_hashes_iter() + } + + /// Returns the number of transactions in the block. + #[inline] + pub fn transaction_count(&self) -> usize { + self.body().transaction_count() + } + + /// Ensures that the transaction root in the block header is valid. + /// + /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure + /// populated with each transaction in the transactions list portion of the block. 
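+    ///
+    /// A sketch of how a caller might use this check (`block` is any [`SealedBlock`] value):
+    ///
+    /// ```ignore
+    /// if let Err(GotExpected { got, expected }) = block.ensure_transaction_root_valid() {
+    ///     // The header's transactions_root does not match the root recomputed from the body.
+    ///     eprintln!("transaction root mismatch: got {got}, expected {expected}");
+    /// }
+    /// ```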
+ /// + /// # Returns + /// + /// Returns `Ok(())` if the calculated transaction root matches the one stored in the header, + /// indicating that the transactions in the block are correctly represented in the trie. + /// + /// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected` + /// error containing the calculated and expected roots. + pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> { + let calculated_root = self.body().calculate_tx_root(); + + if self.header().transactions_root() != calculated_root { + return Err(GotExpected { + got: calculated_root, + expected: self.header().transactions_root(), + }) + } + + Ok(()) + } +} + +impl From for SealedBlock +where + B: Block, +{ + fn from(block: B) -> Self { + Self::seal_slow(block) + } +} + +impl Default for SealedBlock +where + B: Block + Default, +{ + fn default() -> Self { + Self::seal_slow(Default::default()) + } +} + +impl InMemorySize for SealedBlock { + #[inline] + fn size(&self) -> usize { + self.body.size() + self.header.size() + } +} + +impl Deref for SealedBlock { + type Target = B::Header; + + fn deref(&self) -> &Self::Target { + self.header() + } +} + +impl Encodable for SealedBlock { + fn encode(&self, out: &mut dyn BufMut) { + self.body.encode(out); + } +} + +impl Decodable for SealedBlock { + fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { + let block = B::decode(buf)?; + Ok(Self::seal_slow(block)) + } +} + +impl From> for Sealed { + fn from(value: SealedBlock) -> Self { + let (block, hash) = value.split(); + Self::new_unchecked(block, hash) + } +} + +#[cfg(any(test, feature = "arbitrary"))] +impl<'a, B> arbitrary::Arbitrary<'a> for SealedBlock +where + B: Block + arbitrary::Arbitrary<'a>, +{ + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + let block = B::arbitrary(u)?; + Ok(Self::seal_slow(block)) + } +} + +#[cfg(any(test, feature = "test-utils"))] +impl SealedBlock { + /// Returns a mutable reference to the header. + pub fn header_mut(&mut self) -> &mut B::Header { + self.header.header_mut() + } + + /// Updates the block hash. + pub fn set_hash(&mut self, hash: BlockHash) { + self.header.set_hash(hash) + } + + /// Returns a mutable reference to the header. + pub fn body_mut(&mut self) -> &mut B::Body { + &mut self.body + } + + /// Updates the parent block hash. + pub fn set_parent_hash(&mut self, hash: BlockHash) { + self.header.set_parent_hash(hash) + } + + /// Updates the block number. + pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { + self.header.set_block_number(number) + } + + /// Updates the block state root. + pub fn set_state_root(&mut self, state_root: alloy_primitives::B256) { + self.header.set_state_root(state_root) + } + + /// Updates the block difficulty. + pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { + self.header.set_difficulty(difficulty) + } +} + +/// Bincode-compatible [`SealedBlock`] serde implementation. +#[cfg(feature = "serde-bincode-compat")] +pub(super) mod serde_bincode_compat { + use crate::{ + serde_bincode_compat::{self, BincodeReprFor, SerdeBincodeCompat}, + Block, + }; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde_with::{serde_as, DeserializeAs, SerializeAs}; + + /// Bincode-compatible [`super::SealedBlock`] serde implementation. 
+ /// + /// Intended to use with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use reth_primitives_traits::{ + /// block::SealedBlock, + /// serde_bincode_compat::{self, SerdeBincodeCompat}, + /// Block, + /// }; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data + 'static> { + /// #[serde_as(as = "serde_bincode_compat::SealedBlock<'_, T>")] + /// block: SealedBlock, + /// } + /// ``` + #[serde_as] + #[derive(derive_more::Debug, Serialize, Deserialize)] + pub struct SealedBlock< + 'a, + T: Block + 'static, + > { + #[serde( + bound = "serde_bincode_compat::SealedHeader<'a, T::Header>: Serialize + serde::de::DeserializeOwned" + )] + header: serde_bincode_compat::SealedHeader<'a, T::Header>, + body: BincodeReprFor<'a, T::Body>, + } + + impl<'a, T: Block + 'static> + From<&'a super::SealedBlock> for SealedBlock<'a, T> + { + fn from(value: &'a super::SealedBlock) -> Self { + Self { header: (&value.header).into(), body: (&value.body).into() } + } + } + + impl<'a, T: Block + 'static> + From> for super::SealedBlock + { + fn from(value: SealedBlock<'a, T>) -> Self { + Self::from_sealed_parts(value.header.into(), value.body.into()) + } + } + + impl + 'static> + SerializeAs> for SealedBlock<'_, T> + { + fn serialize_as(source: &super::SealedBlock, serializer: S) -> Result + where + S: Serializer, + { + SealedBlock::from(source).serialize(serializer) + } + } + + impl<'de, T: Block + 'static> + DeserializeAs<'de, super::SealedBlock> for SealedBlock<'de, T> + { + fn deserialize_as(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + SealedBlock::deserialize(deserializer).map(Into::into) + } + } + + impl + 'static> + SerdeBincodeCompat for super::SealedBlock + { + type BincodeRepr<'a> = SealedBlock<'a, T>; + } +} diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index 4b1a83fb50d77..c7a61f2532066 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -1,4 +1,4 @@ -use crate::InMemorySize; +use crate::{sync::OnceLock, InMemorySize}; pub use alloy_consensus::Header; use alloy_consensus::Sealed; use alloy_eips::{eip1898::BlockWithParent, BlockNumHash}; @@ -8,14 +8,20 @@ use bytes::BufMut; use core::mem; use derive_more::{AsRef, Deref}; -/// A [`Header`] that is sealed at a precalculated hash, use [`SealedHeader::unseal()`] if you want -/// to modify header. -#[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref)] +/// Seals the header with the block hash. +/// +/// This type uses lazy sealing to avoid hashing the header until it is needed: +/// +/// [`SealedHeader::new_unhashed`] creates a sealed header without hashing the header. +/// [`SealedHeader::new`] creates a sealed header with the corresponding block hash. +/// [`SealedHeader::hash`] computes the hash if it has not been computed yet. +#[derive(Debug, Clone, AsRef, Deref)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] pub struct SealedHeader { - /// Locked Header hash. - hash: BlockHash, + /// Block hash + #[cfg_attr(feature = "serde", serde(skip))] + hash: OnceLock, /// Locked Header fields. #[as_ref] #[deref] @@ -23,10 +29,16 @@ pub struct SealedHeader { } impl SealedHeader { + /// Creates the sealed header without hashing the header. 
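+    ///
+    /// A small sketch of the lazy-hashing behaviour (illustrative; `header` is any header type
+    /// implementing [`Sealable`](alloy_primitives::Sealable)):
+    ///
+    /// ```ignore
+    /// let sealed = SealedHeader::new_unhashed(header);
+    /// // The hash is only computed on first access and then cached:
+    /// let hash = sealed.hash_ref(); // calls `hash_slow()` once via the inner `OnceLock`
+    /// ```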
+ #[inline] + pub fn new_unhashed(header: H) -> Self { + Self { header, hash: Default::default() } + } + /// Creates the sealed header with the corresponding block hash. #[inline] - pub const fn new(header: H, hash: BlockHash) -> Self { - Self { header, hash } + pub fn new(header: H, hash: BlockHash) -> Self { + Self { header, hash: hash.into() } } /// Returns the sealed Header fields. @@ -43,35 +55,62 @@ impl SealedHeader { self.header.clone() } - /// Returns header/block hash. - #[inline] - pub const fn hash(&self) -> BlockHash { - self.hash + /// Consumes the type and returns the wrapped header. + pub fn into_header(self) -> H { + self.header } - /// Extract raw header that can be modified. + /// Consumes the type and returns the wrapped header. pub fn unseal(self) -> H { self.header } - /// This is the inverse of [`Header::seal_slow`] which returns the raw header and hash. - pub fn split(self) -> (H, BlockHash) { - (self.header, self.hash) + /// Converts from &`SealedHeader` to `SealedHeader<&H>`. + pub fn sealed_ref(&self) -> SealedHeader<&H> { + SealedHeader { hash: self.hash.clone(), header: &self.header } } } impl SealedHeader { /// Hashes the header and creates a sealed header. - pub fn seal(header: H) -> Self { + pub fn seal_slow(header: H) -> Self { let hash = header.hash_slow(); Self::new(header, hash) } + + /// Returns the block hash. + /// + /// Note: if the hash has not been computed yet, this will compute the hash: + /// [`Sealable::hash_slow`]. + pub fn hash_ref(&self) -> &BlockHash { + self.hash.get_or_init(|| self.header.hash_slow()) + } + + /// Returns a copy of the block hash. + pub fn hash(&self) -> BlockHash { + *self.hash_ref() + } + + /// This is the inverse of [`Header::seal_slow`] which returns the raw header and hash. + pub fn split(self) -> (H, BlockHash) { + let hash = self.hash(); + (self.header, hash) + } + + /// Clones the header and returns a new sealed header. + pub fn cloned(self) -> Self + where + H: Clone, + { + let (header, hash) = self.split(); + Self::new(header, hash) + } } -impl SealedHeader { +impl SealedHeader { /// Return the number hash tuple. pub fn num_hash(&self) -> BlockNumHash { - BlockNumHash::new(self.number(), self.hash) + BlockNumHash::new(self.number(), self.hash()) } /// Return a [`BlockWithParent`] for this header. @@ -80,6 +119,20 @@ impl SealedHeader { } } +impl Eq for SealedHeader {} + +impl PartialEq for SealedHeader { + fn eq(&self, other: &Self) -> bool { + self.hash() == other.hash() + } +} + +impl core::hash::Hash for SealedHeader { + fn hash(&self, state: &mut Ha) { + self.hash().hash(state) + } +} + impl InMemorySize for SealedHeader { /// Calculates a heuristic for the in-memory size of the [`SealedHeader`]. #[inline] @@ -90,7 +143,7 @@ impl InMemorySize for SealedHeader { impl Default for SealedHeader { fn default() -> Self { - Self::seal(H::default()) + Self::seal_slow(H::default()) } } @@ -115,13 +168,14 @@ impl Decodable for SealedHeader { // update original buffer *buf = *b; - Ok(Self { header, hash }) + Ok(Self::new(header, hash)) } } -impl From> for Sealed { +impl From> for Sealed { fn from(value: SealedHeader) -> Self { - Self::new_unchecked(value.header, value.hash) + let (header, hash) = value.split(); + Self::new_unchecked(header, hash) } } @@ -133,7 +187,7 @@ where fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { let header = H::arbitrary(u)?; - Ok(Self::seal(header)) + Ok(Self::seal_slow(header)) } } @@ -146,7 +200,7 @@ impl SealedHeader { /// Updates the block hash. 
pub fn set_hash(&mut self, hash: BlockHash) { - self.hash = hash + self.hash = hash.into() } /// Returns a mutable reference to the header. @@ -178,12 +232,11 @@ impl SealedHeader { /// Bincode-compatible [`SealedHeader`] serde implementation. #[cfg(feature = "serde-bincode-compat")] pub(super) mod serde_bincode_compat { - use alloy_primitives::BlockHash; + use crate::serde_bincode_compat::SerdeBincodeCompat; + use alloy_primitives::{BlockHash, Sealable}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; - use crate::serde_bincode_compat::SerdeBincodeCompat; - /// Bincode-compatible [`super::SealedHeader`] serde implementation. /// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: @@ -201,20 +254,22 @@ pub(super) mod serde_bincode_compat { /// ``` #[derive(derive_more::Debug, Serialize, Deserialize)] #[debug(bound(H::BincodeRepr<'a>: core::fmt::Debug))] - pub struct SealedHeader<'a, H: SerdeBincodeCompat = super::Header> { + pub struct SealedHeader<'a, H: Sealable + SerdeBincodeCompat = super::Header> { hash: BlockHash, header: H::BincodeRepr<'a>, } - impl<'a, H: SerdeBincodeCompat> From<&'a super::SealedHeader> for SealedHeader<'a, H> { + impl<'a, H: Sealable + SerdeBincodeCompat> From<&'a super::SealedHeader> + for SealedHeader<'a, H> + { fn from(value: &'a super::SealedHeader) -> Self { - Self { hash: value.hash, header: (&value.header).into() } + Self { hash: value.hash(), header: (&value.header).into() } } } - impl<'a, H: SerdeBincodeCompat> From> for super::SealedHeader { + impl<'a, H: Sealable + SerdeBincodeCompat> From> for super::SealedHeader { fn from(value: SealedHeader<'a, H>) -> Self { - Self { hash: value.hash, header: value.header.into() } + Self::new(value.header.into(), value.hash) } } @@ -236,9 +291,10 @@ pub(super) mod serde_bincode_compat { } } - impl SerdeBincodeCompat for super::SealedHeader { + impl SerdeBincodeCompat for super::SealedHeader { type BincodeRepr<'a> = SealedHeader<'a, H>; } + #[cfg(test)] mod tests { use super::super::{serde_bincode_compat, SealedHeader}; diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs index bad587e0f67d9..f09875ed3482e 100644 --- a/crates/primitives-traits/src/lib.rs +++ b/crates/primitives-traits/src/lib.rs @@ -17,6 +17,32 @@ //! - `secp256k1`: Adds secp256k1 support for transaction signing/recovery. (By default the no-std //! friendly `k256` is used) //! - `rayon`: Uses `rayon` for parallel transaction sender recovery in [`BlockBody`] by default. +//! +//! ## Overview +//! +//! This crate defines various traits and types that form the foundation of the reth stack. +//! The top-level trait is [`Block`] which represents a block in the blockchain. A [`Block`] is +//! composed of a [`Header`] and a [`BlockBody`]. A [`BlockBody`] contains the transactions in the +//! block any additional data that is part of the block. A [`Header`] contains the metadata of the +//! block. +//! +//! ### Sealing (Hashing) +//! +//! The block hash is derived from the [`Header`] and is used to uniquely identify the block. This +//! operation is referred to as sealing in the context of this crate. Sealing is an expensive +//! operation. This crate provides various wrapper types that cache the hash of the block to avoid +//! recomputing it: [`SealedHeader`] and [`SealedBlock`]. All sealed types can be downgraded to +//! their unsealed counterparts. +//! +//! ### Recovery +//! +//! 
The raw consensus transactions that make up a block don't include the sender's address. This +//! information is recovered from the transaction signature. This operation is referred to as +//! recovery in the context of this crate and is an expensive operation. The [`RecoveredBlock`] +//! represents a [`SealedBlock`] with the sender addresses recovered. A [`SealedBlock`] can be +//! upgraded to a [`RecoveredBlock`] by recovering the sender addresses: +//! [`SealedBlock::try_recover`]. A [`RecoveredBlock`] can be downgraded to a [`SealedBlock`] by +//! removing the sender addresses: [`RecoveredBlock::into_sealed_block`]. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -52,7 +78,7 @@ pub mod block; pub use block::{ body::{BlockBody, FullBlockBody}, header::{BlockHeader, FullBlockHeader}, - Block, FullBlock, + Block, FullBlock, RecoveredBlock, SealedBlock, }; mod encoded; @@ -138,6 +164,6 @@ impl MaybeSerdeBincodeCompat for T {} #[cfg(any(test, feature = "arbitrary", feature = "test-utils"))] pub mod test_utils { pub use crate::header::test_utils::{generate_valid_header, valid_header_strategy}; - #[cfg(feature = "test-utils")] + #[cfg(any(test, feature = "test-utils"))] pub use crate::{block::TestBlock, header::test_utils::TestHeader}; } diff --git a/crates/primitives-traits/src/node.rs b/crates/primitives-traits/src/node.rs index 3f6786c8f94c7..fecb34d70f8ae 100644 --- a/crates/primitives-traits/src/node.rs +++ b/crates/primitives-traits/src/node.rs @@ -1,4 +1,7 @@ -use crate::{Block, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, FullSignedTx, Receipt}; +use crate::{ + Block, FullBlock, FullBlockBody, FullBlockHeader, FullReceipt, FullSignedTx, + MaybeSerdeBincodeCompat, Receipt, +}; use core::fmt; /// Configures all the primitive types of the node. @@ -6,7 +9,7 @@ pub trait NodePrimitives: Send + Sync + Unpin + Clone + Default + fmt::Debug + PartialEq + Eq + 'static { /// Block primitive. - type Block: Block
<Header = Self::BlockHeader, Body = Self::BlockBody>;
+    type Block: Block<Header = Self::BlockHeader, Body = Self::BlockBody>
+ MaybeSerdeBincodeCompat; /// Block header primitive. type BlockHeader: FullBlockHeader; /// Block body primitive. diff --git a/crates/primitives-traits/src/serde_bincode_compat.rs b/crates/primitives-traits/src/serde_bincode_compat.rs index 705898e6da979..9d3a2fbd509b5 100644 --- a/crates/primitives-traits/src/serde_bincode_compat.rs +++ b/crates/primitives-traits/src/serde_bincode_compat.rs @@ -1,8 +1,11 @@ use core::fmt::Debug; use serde::{de::DeserializeOwned, Serialize}; -pub use super::header::{serde_bincode_compat as header, serde_bincode_compat::*}; -pub use block_bincode::BlockBody; +pub use super::{ + block::{serde_bincode_compat as block, serde_bincode_compat::*}, + header::{serde_bincode_compat as header, serde_bincode_compat::*}, +}; +pub use block_bincode::{Block, BlockBody}; /// Trait for types that can be serialized and deserialized using bincode. pub trait SerdeBincodeCompat: Sized + 'static { @@ -14,6 +17,9 @@ impl SerdeBincodeCompat for alloy_consensus::Header { type BincodeRepr<'a> = alloy_consensus::serde_bincode_compat::Header<'a>; } +/// Type alias for the [`SerdeBincodeCompat::BincodeRepr`] associated type. +pub type BincodeReprFor<'a, T> = ::BincodeRepr<'a>; + mod block_bincode { use crate::serde_bincode_compat::SerdeBincodeCompat; use alloc::{borrow::Cow, vec::Vec}; @@ -22,6 +28,77 @@ mod block_bincode { use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; + /// Bincode-compatible [`alloy_consensus::Block`] serde implementation. + /// + /// Intended to use with the [`serde_with::serde_as`] macro in the following way: + /// ```rust + /// use alloy_consensus::Block; + /// use reth_primitives_traits::serde_bincode_compat::{self, SerdeBincodeCompat}; + /// use serde::{Deserialize, Serialize}; + /// use serde_with::serde_as; + /// + /// #[serde_as] + /// #[derive(Serialize, Deserialize)] + /// struct Data { + /// #[serde_as(as = "serde_bincode_compat::Block<'_, T, H>")] + /// body: Block, + /// } + /// ``` + #[derive(derive_more::Debug, Serialize, Deserialize)] + #[debug(bound())] + pub struct Block<'a, T: SerdeBincodeCompat, H: SerdeBincodeCompat> { + header: H::BincodeRepr<'a>, + #[serde(bound = "BlockBody<'a, T>: Serialize + serde::de::DeserializeOwned")] + body: BlockBody<'a, T>, + } + + impl<'a, T: SerdeBincodeCompat, H: SerdeBincodeCompat> From<&'a alloy_consensus::Block> + for Block<'a, T, H> + { + fn from(value: &'a alloy_consensus::Block) -> Self { + Self { header: (&value.header).into(), body: (&value.body).into() } + } + } + + impl<'a, T: SerdeBincodeCompat, H: SerdeBincodeCompat> From> + for alloy_consensus::Block + { + fn from(value: Block<'a, T, H>) -> Self { + Self { header: value.header.into(), body: value.body.into() } + } + } + + impl SerializeAs> + for Block<'_, T, H> + { + fn serialize_as( + source: &alloy_consensus::Block, + serializer: S, + ) -> Result + where + S: Serializer, + { + Block::from(source).serialize(serializer) + } + } + + impl<'de, T: SerdeBincodeCompat, H: SerdeBincodeCompat> + DeserializeAs<'de, alloy_consensus::Block> for Block<'de, T, H> + { + fn deserialize_as(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + Block::deserialize(deserializer).map(Into::into) + } + } + + impl SerdeBincodeCompat + for alloy_consensus::Block + { + type BincodeRepr<'a> = Block<'a, T, H>; + } + /// Bincode-compatible [`alloy_consensus::BlockBody`] serde implementation. 
/// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index ddc79829ad1ca..bc64b4521277d 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -64,6 +64,13 @@ pub trait SignedTransaction: /// the signature has a low `s` value. fn recover_signer(&self) -> Option
<Address>;
 
+    /// Recover signer from signature and hash.
+    ///
+    /// Returns an error if the transaction's signature is invalid.
+    fn try_recover(&self) -> Result<Address, RecoveryError> {
+        self.recover_signer().ok_or(RecoveryError)
+    }
+
     /// Recover signer from signature and hash _without ensuring that the signature has a low `s`
     /// value_.
     ///
@@ -73,6 +80,14 @@
         self.recover_signer_unchecked_with_buf(&mut Vec::new())
     }
 
+    /// Recover signer from signature and hash _without ensuring that the signature has a low `s`
+    /// value_.
+    ///
+    /// Returns an error if the transaction's signature is invalid.
+    fn try_recover_unchecked(&self) -> Result<Address, RecoveryError> {
+        self.recover_signer_unchecked().ok_or(RecoveryError)
+    }
+
     /// Same as [`Self::recover_signer_unchecked`] but receives a buffer to operate on. This is used
     /// during batch recovery to avoid allocating a new buffer for each transaction.
     fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Option<Address>
; @@ -195,3 +210,8 @@ pub trait SignedTransactionIntoRecoveredExt: SignedTransaction { } impl SignedTransactionIntoRecoveredExt for T where T: SignedTransaction {} + +/// Opaque error type for sender recovery. +#[derive(Debug, Default, thiserror::Error)] +#[error("Failed to recover the signer")] +pub struct RecoveryError; diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index 8888765208304..a269ebfb725ce 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -1,34 +1,12 @@ //! Common conversions from alloy types. -use crate::{BlockBody, SealedBlock, Transaction, TransactionSigned}; +use crate::{Transaction, TransactionSigned}; use alloc::string::ToString; -use alloy_consensus::{Header, TxEnvelope}; -use alloy_network::{AnyRpcBlock, AnyRpcTransaction, AnyTxEnvelope}; +use alloy_consensus::TxEnvelope; +use alloy_network::{AnyRpcTransaction, AnyTxEnvelope}; use alloy_serde::WithOtherFields; -use op_alloy_rpc_types as _; -use reth_primitives_traits::SealedHeader; - -impl TryFrom for SealedBlock> -where - T: TryFrom, -{ - type Error = alloy_rpc_types::ConversionError; - - fn try_from(block: AnyRpcBlock) -> Result { - let block = block.inner; - let block_hash = block.header.hash; - let block = block.try_map_transactions(|tx| tx.try_into())?; - Ok(Self::new( - SealedHeader::new(block.header.inner.into_header_with_defaults(), block_hash), - BlockBody { - transactions: block.transactions.into_transactions().collect(), - ommers: Default::default(), - withdrawals: block.withdrawals.map(|w| w.into_inner().into()), - }, - )) - } -} +use op_alloy_rpc_types as _; impl TryFrom for TransactionSigned { type Error = alloy_rpc_types::ConversionError; @@ -114,8 +92,9 @@ mod tests { "gas": "0xc27a8", "gasPrice": "0x0", "hash": "0x0bf1845c5d7a82ec92365d5027f7310793d53004f3c86aa80965c67bf7e7dc80", - "input": "0xd764ad0b000100000000000000000000000000000000000000000000000000000001cf5400000000000000000000000099c9fc46f92e8a1c0dec1b1747d010903e884be100000000000000000000000042000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007a12000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e40166a07a0000000000000000000000000994206dfe8de6ec6920ff4d779b0d950605fb53000000000000000000000000d533a949740bb3306d119cc777fa900ba034cd52000000000000000000000000ca74f404e0c7bfa35b13b511097df966d5a65597000000000000000000000000ca74f404e0c7bfa35b13b511097df966d5a65597000000000000000000000000000000000000000000000216614199391dbba2ba00000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "mint": "0x0", + "input": 
+"0xd764ad0b000100000000000000000000000000000000000000000000000000000001cf5400000000000000000000000099c9fc46f92e8a1c0dec1b1747d010903e884be100000000000000000000000042000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007a12000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e40166a07a0000000000000000000000000994206dfe8de6ec6920ff4d779b0d950605fb53000000000000000000000000d533a949740bb3306d119cc777fa900ba034cd52000000000000000000000000ca74f404e0c7bfa35b13b511097df966d5a65597000000000000000000000000ca74f404e0c7bfa35b13b511097df966d5a65597000000000000000000000000000000000000000000000216614199391dbba2ba00000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +, "mint": "0x0", "nonce": "0x74060", "r": "0x0", "s": "0x0", @@ -165,8 +144,9 @@ mod tests { "gas": "0x7812e", "gasPrice": "0x0", "hash": "0xf7e83886d3c6864f78e01c453ebcd57020c5795d96089e8f0e0b90a467246ddb", - "input": "0xd764ad0b000100000000000000000000000000000000000000000000000000000001cf5f00000000000000000000000099c9fc46f92e8a1c0dec1b1747d010903e884be100000000000000000000000042000000000000000000000000000000000000100000000000000000000000000000000000000000000000239c2e16a5ca5900000000000000000000000000000000000000000000000000000000000000030d4000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e41635f5fd0000000000000000000000002ce910fbba65b454bbaf6a18c952a70f3bcd82990000000000000000000000002ce910fbba65b454bbaf6a18c952a70f3bcd82990000000000000000000000000000000000000000000000239c2e16a5ca590000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "mint": "0x239c2e16a5ca590000", + "input": +"0xd764ad0b000100000000000000000000000000000000000000000000000000000001cf5f00000000000000000000000099c9fc46f92e8a1c0dec1b1747d010903e884be100000000000000000000000042000000000000000000000000000000000000100000000000000000000000000000000000000000000000239c2e16a5ca5900000000000000000000000000000000000000000000000000000000000000030d4000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e41635f5fd0000000000000000000000002ce910fbba65b454bbaf6a18c952a70f3bcd82990000000000000000000000002ce910fbba65b454bbaf6a18c952a70f3bcd82990000000000000000000000000000000000000000000000239c2e16a5ca590000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +, "mint": "0x239c2e16a5ca590000", "nonce": "0x7406b", "r": "0x0", "s": "0x0", diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 8c6c8a870d612..04d59400025ce 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,19 +1,6 @@ -use crate::{ - traits::BlockExt, 
transaction::SignedTransactionIntoRecoveredExt, GotExpected, RecoveredTx,
-    SealedHeader, TransactionSigned,
-};
-use alloc::vec::Vec;
-use alloy_consensus::Header;
-use alloy_eips::{
-    eip1898::BlockWithParent, eip2718::Encodable2718, eip4895::Withdrawals, BlockNumHash,
-};
-use alloy_primitives::{Address, B256};
-use alloy_rlp::{Decodable, Encodable, RlpDecodable, RlpEncodable};
-use derive_more::{Deref, DerefMut};
+use crate::TransactionSigned;
 #[cfg(any(test, feature = "arbitrary"))]
 pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy};
-use reth_primitives_traits::{BlockBody as _, InMemorySize, SignedTransaction};
-use serde::{Deserialize, Serialize};
 
 /// Ethereum full block.
 ///
@@ -25,818 +12,20 @@ pub type Block = alloy_consensus::Block<TransactionSigned>;
 /// Withdrawals can be optionally included at the end of the RLP encoded message.
 pub type BlockBody = alloy_consensus::BlockBody<TransactionSigned>;
 
-/// We need to implement RLP traits manually because we currently don't have a way to flatten
-/// [`BlockBody`] into [`Block`].
-mod block_rlp {
-    use super::*;
-
-    #[derive(RlpDecodable)]
-    #[rlp(trailing)]
-    struct Helper<H, T = TransactionSigned> {
-        header: H,
-        transactions: Vec<T>,
-        ommers: Vec<Header>,
-        withdrawals: Option<Withdrawals>,
-    }
-
-    #[derive(RlpEncodable)]
-    #[rlp(trailing)]
-    struct HelperRef<'a, H, T = TransactionSigned> {
-        header: &'a H,
-        transactions: &'a Vec<T>,
-        ommers: &'a Vec<Header>
, - withdrawals: Option<&'a Withdrawals>, - } - - impl<'a, T> From<&'a Block> for HelperRef<'a, Header, T> { - fn from(block: &'a Block) -> Self { - let Block { header, body: BlockBody { transactions, ommers, withdrawals } } = block; - Self { header, transactions, ommers, withdrawals: withdrawals.as_ref() } - } - } - - impl<'a> From<&'a SealedBlock> for HelperRef<'a, SealedHeader> { - fn from(block: &'a SealedBlock) -> Self { - let SealedBlock { header, body: BlockBody { transactions, ommers, withdrawals } } = - block; - Self { header, transactions, ommers, withdrawals: withdrawals.as_ref() } - } - } - - impl Decodable for SealedBlock { - fn decode(b: &mut &[u8]) -> alloy_rlp::Result { - let Helper { header, transactions, ommers, withdrawals } = Helper::decode(b)?; - Ok(Self { header, body: BlockBody { transactions, ommers, withdrawals } }) - } - } - - impl Encodable for SealedBlock { - fn encode(&self, out: &mut dyn bytes::BufMut) { - let helper: HelperRef<'_, _, _> = self.into(); - helper.encode(out) - } - - fn length(&self) -> usize { - let helper: HelperRef<'_, _, _> = self.into(); - helper.length() - } - } -} - -/// Sealed block with senders recovered from transactions. -#[derive(Debug, Clone, PartialEq, Eq, Default, Deref, DerefMut)] -pub struct BlockWithSenders { - /// Block - #[deref] - #[deref_mut] - pub block: B, - /// List of senders that match the transactions in the block - senders: Vec
<Address>,
-}
-
-impl<B: reth_primitives_traits::Block> BlockWithSenders<B> {
-    /// New block with senders
-    pub const fn new_unchecked(block: B, senders: Vec<Address>) -> Self {
-        Self { block, senders }
-    }
-
-    /// New block with senders. Return none if len of tx and senders does not match
-    pub fn new(block: B, senders: Vec<Address>
) -> Option<Self> {
-        (block.body().transactions().len() == senders.len()).then_some(Self { block, senders })
-    }
-
-    /// Returns all senders of the transactions in the block.
-    pub fn senders(&self) -> &[Address] {
-        &self.senders
-    }
+/// Ethereum sealed block type
+pub type SealedBlock = reth_primitives_traits::block::SealedBlock<Block>;
 
-    /// Returns an iterator over all senders in the block.
-    pub fn senders_iter(&self) -> impl Iterator<Item = &Address> {
-        self.senders.iter()
-    }
-
-    /// Seal the block with a known hash.
-    ///
-    /// WARNING: This method does not perform validation whether the hash is correct.
-    #[inline]
-    pub fn seal_unchecked(self, hash: B256) -> SealedBlockWithSenders<B> {
-        let Self { block, senders } = self;
-        SealedBlockWithSenders::<B> { block: block.seal(hash), senders }
-    }
-
-    /// Calculate the header hash and seal the block with senders so that it can't be changed.
-    #[inline]
-    pub fn seal_slow(self) -> SealedBlockWithSenders<B> {
-        SealedBlockWithSenders { block: self.block.seal_slow(), senders: self.senders }
-    }
-
-    /// Split Structure to its components
-    #[inline]
-    pub fn split(self) -> (B, Vec<Address>
) { - (self.block, self.senders) - } - - /// Returns an iterator over all transactions and their sender. - #[inline] - pub fn transactions_with_sender( - &self, - ) -> impl Iterator::Transaction)> - + '_ { - self.senders.iter().zip(self.block.body().transactions()) - } - - /// Returns an iterator over all transactions in the chain. - #[inline] - pub fn into_transactions_ecrecovered( - self, - ) -> impl Iterator::Transaction>> - where - ::Transaction: SignedTransaction, - { - self.block - .split() - .1 - .into_transactions() - .into_iter() - .zip(self.senders) - .map(|(tx, sender)| tx.with_signer(sender)) - } - - /// Consumes the block and returns the transactions of the block. - #[inline] - pub fn into_transactions( - self, - ) -> Vec<::Transaction> { - self.block.split().1.into_transactions() - } -} +/// Helper type for constructing the block +#[deprecated(note = "Use `RecoveredBlock` instead")] +pub type SealedBlockFor = reth_primitives_traits::block::SealedBlock; -/// Sealed Ethereum full block. -/// -/// Withdrawals can be optionally included at the end of the RLP encoded message. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct SealedBlock { - /// Locked block header. - header: SealedHeader, - /// Block body. - body: B, -} - -impl SealedBlock { - /// Create a new sealed block instance using the sealed header and block body. - #[inline] - pub const fn new(header: SealedHeader, body: B) -> Self { - Self { header, body } - } - - /// Header hash. - #[inline] - pub const fn hash(&self) -> B256 { - self.header.hash() - } +/// Ethereum recovered block +#[deprecated(note = "Use `RecoveredBlock` instead")] +pub type BlockWithSenders = reth_primitives_traits::block::RecoveredBlock; - /// Returns reference to block header. - pub const fn header(&self) -> &H { - self.header.header() - } - - /// Returns reference to block body. - pub const fn body(&self) -> &B { - &self.body - } - - /// Returns the Sealed header. - pub const fn sealed_header(&self) -> &SealedHeader { - &self.header - } - - /// Clones the wrapped header and returns a [`SealedHeader`] sealed with the hash. - pub fn clone_sealed_header(&self) -> SealedHeader - where - H: Clone, - { - self.header.clone() - } - - /// Consumes the block and returns the sealed header. - pub fn into_sealed_header(self) -> SealedHeader { - self.header - } - - /// Consumes the block and returns the header. - pub fn into_header(self) -> H { - self.header.unseal() - } - - /// Consumes the block and returns the body. - pub fn into_body(self) -> B { - self.body - } - - /// Splits the [`BlockBody`] and [`SealedHeader`] into separate components - #[inline] - pub fn split(self) -> (SealedHeader, B) { - (self.header, self.body) - } -} - -impl SealedBlock -where - B: reth_primitives_traits::BlockBody, -{ - /// Returns an iterator over all blob versioned hashes from the block body. - #[inline] - pub fn blob_versioned_hashes_iter(&self) -> impl Iterator + '_ { - self.body.blob_versioned_hashes_iter() - } -} - -impl SealedBlock -where - B: reth_primitives_traits::BlockBody, -{ - /// Returns the number of transactions in the block. - #[inline] - pub fn transaction_count(&self) -> usize { - self.body.transaction_count() - } -} - -impl SealedBlock -where - H: alloy_consensus::BlockHeader, - B: reth_primitives_traits::BlockBody, -{ - /// Return the number hash tuple. - pub fn num_hash(&self) -> BlockNumHash { - BlockNumHash::new(self.number(), self.hash()) - } - - /// Return a [`BlockWithParent`] for this header. 
- pub fn block_with_parent(&self) -> BlockWithParent { - BlockWithParent { parent: self.parent_hash(), block: self.num_hash() } - } - - /// Ensures that the transaction root in the block header is valid. - /// - /// The transaction root is the Keccak 256-bit hash of the root node of the trie structure - /// populated with each transaction in the transactions list portion of the block. - /// - /// # Returns - /// - /// Returns `Ok(())` if the calculated transaction root matches the one stored in the header, - /// indicating that the transactions in the block are correctly represented in the trie. - /// - /// Returns `Err(error)` if the transaction root validation fails, providing a `GotExpected` - /// error containing the calculated and expected roots. - pub fn ensure_transaction_root_valid(&self) -> Result<(), GotExpected> - where - B::Transaction: Encodable2718, - { - let calculated_root = self.body.calculate_tx_root(); - - if self.header.transactions_root() != calculated_root { - return Err(GotExpected { - got: calculated_root, - expected: self.header.transactions_root(), - }) - } - - Ok(()) - } -} - -impl SealedBlock -where - H: reth_primitives_traits::BlockHeader, - B: reth_primitives_traits::BlockBody, -{ - /// Expensive operation that recovers transaction signer. See [`SealedBlockWithSenders`]. - pub fn senders(&self) -> Option> - where - B::Transaction: SignedTransaction, - { - self.body.recover_signers() - } - - /// Seal sealed block with recovered transaction senders. - pub fn seal_with_senders(self) -> Option> - where - B::Transaction: SignedTransaction, - T: reth_primitives_traits::Block
, - { - self.try_seal_with_senders().ok() - } - - /// Seal sealed block with recovered transaction senders. - pub fn try_seal_with_senders(self) -> Result, Self> - where - B::Transaction: SignedTransaction, - T: reth_primitives_traits::Block
, - { - match self.senders() { - Some(senders) => Ok(SealedBlockWithSenders { block: self, senders }), - None => Err(self), - } - } - - /// Transform into a [`SealedBlockWithSenders`]. - /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - #[track_caller] - pub fn with_senders_unchecked(self, senders: Vec
) -> SealedBlockWithSenders - where - B::Transaction: SignedTransaction, - T: reth_primitives_traits::Block
, - { - self.try_with_senders_unchecked(senders).expect("stored block is valid") - } - - /// Transform into a [`SealedBlockWithSenders`] using the given senders. - /// - /// If the number of senders does not match the number of transactions in the block, this falls - /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// See also [`TransactionSigned::recover_signer_unchecked`] - /// - /// Returns an error if a signature is invalid. - #[track_caller] - pub fn try_with_senders_unchecked( - self, - senders: Vec
, - ) -> Result, Self> - where - B::Transaction: SignedTransaction, - T: reth_primitives_traits::Block
, - { - let senders = if self.body.transactions().len() == senders.len() { - senders - } else { - let Some(senders) = self.body.recover_signers_unchecked() else { return Err(self) }; - senders - }; - - Ok(SealedBlockWithSenders { block: self, senders }) - } - - /// Unseal the block - pub fn unseal(self) -> Block - where - Block: reth_primitives_traits::Block
, - { - Block::new(self.header.unseal(), self.body) - } -} - -impl InMemorySize for SealedBlock { - #[inline] - fn size(&self) -> usize { - self.header.size() + self.body.size() - } -} - -impl From for Block { - fn from(block: SealedBlock) -> Self { - block.unseal() - } -} - -impl Default for SealedBlock -where - SealedHeader: Default, - B: Default, -{ - fn default() -> Self { - Self { header: Default::default(), body: Default::default() } - } -} - -impl Deref for SealedBlock { - type Target = H; - - fn deref(&self) -> &Self::Target { - self.header.header() - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a, H, B> arbitrary::Arbitrary<'a> for SealedBlock -where - SealedHeader: arbitrary::Arbitrary<'a>, - B: arbitrary::Arbitrary<'a>, -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - Ok(Self { header: u.arbitrary()?, body: u.arbitrary()? }) - } -} - -#[cfg(any(test, feature = "test-utils"))] -impl SealedBlock -where - H: reth_primitives_traits::test_utils::TestHeader, -{ - /// Returns a mutable reference to the header. - pub fn header_mut(&mut self) -> &mut H { - self.header.header_mut() - } - - /// Returns a mutable reference to the header. - pub fn body_mut(&mut self) -> &mut B { - &mut self.body - } - - /// Updates the block header. - pub fn set_header(&mut self, header: H) { - self.header.set_header(header) - } - - /// Updates the block hash. - pub fn set_hash(&mut self, hash: alloy_primitives::BlockHash) { - self.header.set_hash(hash); - } - - /// Updates the parent block hash. - pub fn set_parent_hash(&mut self, hash: alloy_primitives::BlockHash) { - self.header.set_parent_hash(hash); - } - - /// Updates the block number. - pub fn set_block_number(&mut self, number: alloy_primitives::BlockNumber) { - self.header.set_block_number(number); - } - - /// Updates the block state root. - pub fn set_state_root(&mut self, state_root: B256) { - self.header.set_state_root(state_root); - } - - /// Updates the block difficulty. - pub fn set_difficulty(&mut self, difficulty: alloy_primitives::U256) { - self.header.set_difficulty(difficulty); - } -} - -/// A helepr trait to construct [`SealedBlock`] from a [`reth_primitives_traits::Block`]. -pub type SealedBlockFor = SealedBlock< - ::Header, - ::Body, ->; - -/// Sealed block with senders recovered from transactions. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Deref, DerefMut)] -pub struct SealedBlockWithSenders { - /// Sealed block - #[deref] - #[deref_mut] - #[serde(bound = "SealedBlock: Serialize + serde::de::DeserializeOwned")] - pub block: SealedBlock, - /// List of senders that match transactions from block. - senders: Vec
<Address>,
-}
-
-impl<B: reth_primitives_traits::Block> Default for SealedBlockWithSenders<B> {
-    fn default() -> Self {
-        Self { block: SealedBlock::default(), senders: Default::default() }
-    }
-}
-
-impl<B: reth_primitives_traits::Block> SealedBlockWithSenders<B> {
-    /// New sealed block with sender
-    pub const fn new_unchecked(
-        block: SealedBlock<B::Header, B::Body>,
-        senders: Vec<Address>,
-    ) -> Self {
-        Self { block, senders }
-    }
-
-    /// New sealed block with sender. Return none if len of tx and senders does not match
-    pub fn new(block: SealedBlock<B::Header, B::Body>, senders: Vec<Address>) -> Option<Self> {
-        (block.body.transactions().len() == senders.len()).then_some(Self { block, senders })
-    }
-}
-
-impl<B: reth_primitives_traits::Block> SealedBlockWithSenders<B> {
-    /// Returns all senders of the transactions in the block.
-    pub fn senders(&self) -> &[Address] {
-        &self.senders
-    }
-
-    /// Returns an iterator over all senders in the block.
-    pub fn senders_iter(&self) -> impl Iterator<Item = &Address> {
-        self.senders.iter()
-    }
-
-    /// Split Structure to its components
-    #[inline]
-    pub fn split(self) -> (SealedBlock<B::Header, B::Body>, Vec<Address>
) { - (self.block, self.senders) - } - - /// Returns the unsealed [`BlockWithSenders`] - #[inline] - pub fn unseal(self) -> BlockWithSenders { - let (block, senders) = self.split(); - let (header, body) = block.split(); - let header = header.unseal(); - BlockWithSenders::new_unchecked(B::new(header, body), senders) - } - - /// Returns an iterator over all transactions in the block. - #[inline] - pub fn transactions(&self) -> &[::Transaction] { - self.block.body.transactions() - } - - /// Returns an iterator over all transactions and their sender. - #[inline] - pub fn transactions_with_sender( - &self, - ) -> impl Iterator::Transaction)> - + '_ { - self.senders.iter().zip(self.block.body.transactions()) - } - - /// Consumes the block and returns the transactions of the block. - #[inline] - pub fn into_transactions( - self, - ) -> Vec<::Transaction> { - self.block.body.into_transactions() - } - - /// Returns an iterator over all transactions in the chain. - #[inline] - pub fn into_transactions_ecrecovered( - self, - ) -> impl Iterator::Transaction>> - where - ::Transaction: SignedTransaction, - { - self.block - .body - .into_transactions() - .into_iter() - .zip(self.senders) - .map(|(tx, sender)| tx.with_signer(sender)) - } -} - -#[cfg(any(test, feature = "test-utils"))] -impl SealedBlockWithSenders -where - B: reth_primitives_traits::Block, -{ - /// Returns a mutable reference to the recovered senders. - pub fn senders_mut(&mut self) -> &mut Vec
{ - &mut self.senders - } - - /// Appends the sender to the list of senders. - pub fn push_sender(&mut self, sender: Address) { - self.senders.push(sender); - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a, B> arbitrary::Arbitrary<'a> for SealedBlockWithSenders -where - B: reth_primitives_traits::Block + arbitrary::Arbitrary<'a>, -{ - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let block = B::arbitrary(u)?; - - let senders = block - .body() - .transactions() - .iter() - .map(|tx| tx.recover_signer().unwrap()) - .collect::>(); - - let (header, body) = block.split(); - let block = SealedBlock::new(SealedHeader::seal(header), body); - Ok(Self { block, senders }) - } -} - -/// Bincode-compatible block type serde implementations. -#[cfg(feature = "serde-bincode-compat")] -pub(super) mod serde_bincode_compat { - use alloc::{borrow::Cow, vec::Vec}; - use alloy_primitives::Address; - use reth_primitives_traits::{ - serde_bincode_compat::{SealedHeader, SerdeBincodeCompat}, - Block, - }; - use serde::{Deserialize, Deserializer, Serialize, Serializer}; - use serde_with::{DeserializeAs, SerializeAs}; - - /// Bincode-compatible [`super::BlockBody`] serde implementation. - pub type BlockBody<'a, T = super::TransactionSigned> = - reth_primitives_traits::serde_bincode_compat::BlockBody<'a, T>; - - /// Bincode-compatible [`super::SealedBlock`] serde implementation. - /// - /// Intended to use with the [`serde_with::serde_as`] macro in the following way: - /// ```rust - /// use reth_primitives::{serde_bincode_compat, SealedBlock}; - /// use serde::{Deserialize, Serialize}; - /// use serde_with::serde_as; - /// - /// #[serde_as] - /// #[derive(Serialize, Deserialize)] - /// struct Data { - /// #[serde_as(as = "serde_bincode_compat::SealedBlock")] - /// block: SealedBlock, - /// } - /// ``` - #[derive(Debug, Serialize, Deserialize)] - pub struct SealedBlock<'a, H = super::Header, B = super::BlockBody> - where - H: SerdeBincodeCompat, - B: SerdeBincodeCompat, - { - header: SealedHeader<'a, H>, - body: B::BincodeRepr<'a>, - } - - impl<'a, H, B> From<&'a super::SealedBlock> for SealedBlock<'a, H, B> - where - H: SerdeBincodeCompat, - B: SerdeBincodeCompat, - { - fn from(value: &'a super::SealedBlock) -> Self { - Self { - header: SealedHeader::from(&value.header), - body: B::BincodeRepr::from(&value.body), - } - } - } - - impl<'a, H, B> From> for super::SealedBlock - where - H: SerdeBincodeCompat, - B: SerdeBincodeCompat, - { - fn from(value: SealedBlock<'a, H, B>) -> Self { - Self { header: value.header.into(), body: value.body.into() } - } - } - - impl SerializeAs for SealedBlock<'_> { - fn serialize_as(source: &super::SealedBlock, serializer: S) -> Result - where - S: Serializer, - { - SealedBlock::from(source).serialize(serializer) - } - } - - impl<'de> DeserializeAs<'de, super::SealedBlock> for SealedBlock<'de> { - fn deserialize_as(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - SealedBlock::deserialize(deserializer).map(Into::into) - } - } - - /// Bincode-compatible [`super::SealedBlockWithSenders`] serde implementation. 
- /// - /// Intended to use with the [`serde_with::serde_as`] macro in the following way: - /// ```rust - /// use reth_primitives::{serde_bincode_compat, SealedBlockWithSenders}; - /// use serde::{Deserialize, Serialize}; - /// use serde_with::serde_as; - /// - /// #[serde_as] - /// #[derive(Serialize, Deserialize)] - /// struct Data { - /// #[serde_as(as = "serde_bincode_compat::SealedBlockWithSenders")] - /// block: SealedBlockWithSenders, - /// } - /// ``` - #[derive(Debug, Serialize, Deserialize)] - pub struct SealedBlockWithSenders<'a, B = super::Block> - where - B: Block, - { - block: SealedBlock<'a, B::Header, B::Body>, - senders: Cow<'a, Vec
>, - } - - impl<'a, B> From<&'a super::SealedBlockWithSenders> for SealedBlockWithSenders<'a, B> - where - B: Block, - { - fn from(value: &'a super::SealedBlockWithSenders) -> Self { - Self { block: SealedBlock::from(&value.block), senders: Cow::Borrowed(&value.senders) } - } - } - - impl<'a, B> From> for super::SealedBlockWithSenders - where - B: Block, - { - fn from(value: SealedBlockWithSenders<'a, B>) -> Self { - Self { block: value.block.into(), senders: value.senders.into_owned() } - } - } - - impl SerializeAs for SealedBlockWithSenders<'_> { - fn serialize_as( - source: &super::SealedBlockWithSenders, - serializer: S, - ) -> Result - where - S: Serializer, - { - SealedBlockWithSenders::from(source).serialize(serializer) - } - } - - impl<'de> DeserializeAs<'de, super::SealedBlockWithSenders> for SealedBlockWithSenders<'de> { - fn deserialize_as(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - SealedBlockWithSenders::deserialize(deserializer).map(Into::into) - } - } - - #[cfg(test)] - mod tests { - use super::super::{serde_bincode_compat, BlockBody, SealedBlock, SealedBlockWithSenders}; - use arbitrary::Arbitrary; - use rand::Rng; - use reth_testing_utils::generators; - use serde::{Deserialize, Serialize}; - use serde_with::serde_as; - - #[test] - fn test_block_body_bincode_roundtrip() { - #[serde_as] - #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] - struct Data { - #[serde_as(as = "serde_bincode_compat::BlockBody")] - block_body: BlockBody, - } - - let mut bytes = [0u8; 1024]; - generators::rng().fill(bytes.as_mut_slice()); - let data = Data { - block_body: BlockBody::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) - .unwrap(), - }; - - let encoded = bincode::serialize(&data).unwrap(); - let decoded: Data = bincode::deserialize(&encoded).unwrap(); - assert_eq!(decoded, data); - } - - #[test] - fn test_sealed_block_bincode_roundtrip() { - #[serde_as] - #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] - struct Data { - #[serde_as(as = "serde_bincode_compat::SealedBlock")] - block: SealedBlock, - } - - let mut bytes = [0u8; 1024]; - generators::rng().fill(bytes.as_mut_slice()); - let data = Data { - block: SealedBlock::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(), - }; - - let encoded = bincode::serialize(&data).unwrap(); - let decoded: Data = bincode::deserialize(&encoded).unwrap(); - assert_eq!(decoded, data); - } - - #[test] - fn test_sealed_block_with_senders_bincode_roundtrip() { - #[serde_as] - #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] - struct Data { - #[serde_as(as = "serde_bincode_compat::SealedBlockWithSenders")] - block: SealedBlockWithSenders, - } - - let mut bytes = [0u8; 1024]; - generators::rng().fill(bytes.as_mut_slice()); - let data = Data { - block: SealedBlockWithSenders::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) - .unwrap(), - }; - - let encoded = bincode::serialize(&data).unwrap(); - let decoded: Data = bincode::deserialize(&encoded).unwrap(); - assert_eq!(decoded, data); - } - } -} +/// Ethereum recovered block +#[deprecated(note = "Use `RecoveredBlock` instead")] +pub type SealedBlockWithSenders = reth_primitives_traits::block::RecoveredBlock; #[cfg(test)] mod tests { @@ -845,8 +34,9 @@ mod tests { eip1898::HexStringMissingPrefixError, BlockId, BlockNumberOrTag, BlockNumberOrTag::*, RpcBlockHash, }; - use alloy_primitives::hex_literal::hex; + use alloy_primitives::{hex_literal::hex, B256}; use alloy_rlp::{Decodable, Encodable}; + use reth_primitives_traits::{BlockBody, RecoveredBlock}; 
use std::str::FromStr; const fn _traits() { @@ -1008,37 +198,18 @@ mod tests { #[test] fn block_with_senders() { - let mut block = Block::default(); - let sender = Address::random(); + let mut block: Block = Block::default(); block.body.transactions.push(TransactionSigned::default()); - assert_eq!(BlockWithSenders::new(block.clone(), vec![]), None); - assert_eq!( - BlockWithSenders::new(block.clone(), vec![sender]), - Some(BlockWithSenders { block: block.clone(), senders: vec![sender] }) - ); - let sealed = block.seal_slow(); - assert_eq!(SealedBlockWithSenders::::new(sealed.clone(), vec![]), None); - assert_eq!( - SealedBlockWithSenders::::new(sealed.clone(), vec![sender]), - Some(SealedBlockWithSenders { block: sealed, senders: vec![sender] }) - ); - } - - #[test] - fn test_default_seal() { - let block: SealedBlock = SealedBlock::default(); - let sealed = block.hash(); - let block: Block = block.unseal(); - let block = block.seal_slow(); - assert_eq!(sealed, block.hash()); + let block = RecoveredBlock::try_new_unhashed(block.clone(), vec![]).unwrap(); + assert_eq!(block.senders().len(), 1); } #[test] fn empty_block_rlp() { - let body: BlockBody = BlockBody::default(); + let body = alloy_consensus::BlockBody::::default(); let mut buf = Vec::new(); body.encode(&mut buf); - let decoded = BlockBody::decode(&mut buf.as_slice()).unwrap(); + let decoded = alloy_consensus::BlockBody::decode(&mut buf.as_slice()).unwrap(); assert_eq!(body, decoded); } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 4667689aebf46..b613141242073 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -21,9 +21,6 @@ extern crate alloc; -mod traits; -pub use traits::*; - #[cfg(feature = "alloy-compat")] mod alloy_compat; mod block; @@ -33,13 +30,14 @@ pub use reth_static_file_types as static_file; pub mod transaction; #[cfg(any(test, feature = "arbitrary"))] pub use block::{generate_valid_header, valid_header_strategy}; -pub use block::{ - Block, BlockBody, BlockWithSenders, SealedBlock, SealedBlockFor, SealedBlockWithSenders, -}; +pub use block::{Block, BlockBody, SealedBlock}; +#[allow(deprecated)] +pub use block::{BlockWithSenders, SealedBlockFor, SealedBlockWithSenders}; + pub use receipt::{gas_spent_by_transactions, Receipt, Receipts}; pub use reth_primitives_traits::{ logs_bloom, Account, Bytecode, GotExpected, GotExpectedBoxed, Header, HeaderError, Log, - LogData, NodePrimitives, SealedHeader, StorageEntry, + LogData, NodePrimitives, RecoveredBlock, SealedHeader, StorageEntry, }; pub use static_file::StaticFileSegment; @@ -71,10 +69,8 @@ pub use c_kzg as kzg; /// Read more: #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { - pub use super::{ - block::serde_bincode_compat::*, - transaction::{serde_bincode_compat as transaction, serde_bincode_compat::*}, - }; + pub use super::transaction::{serde_bincode_compat as transaction, serde_bincode_compat::*}; + pub use reth_primitives_traits::serde_bincode_compat::*; } /// Temp helper struct for integrating [`NodePrimitives`]. 
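Note (editorial, not part of the patch): the `lib.rs` hunk above deprecates `SealedBlockWithSenders` into a thin alias for `RecoveredBlock` and re-exports `RecoveredBlock` from `reth-primitives-traits`, while the hunk that follows deletes the `BlockExt` extension trait outright. A minimal migration sketch of what a call site looks like after this change, assuming only the `SealedBlock`/`RecoveredBlock` methods that appear elsewhere in this patch (`try_recover`, `hash`, `into_sealed_block`, `into_block`); the function itself is hypothetical:

```rust
use reth_primitives::{Block, RecoveredBlock, SealedBlock};

/// Hypothetical call site showing the before/after shape of this patch.
fn recover(sealed: SealedBlock<Block>) -> Option<Block> {
    // Before: sealed.try_seal_with_senders() via the removed `BlockExt` helpers.
    // After: sender recovery lives on the block types themselves.
    let recovered: RecoveredBlock<Block> = sealed.try_recover().ok()?;

    // The sealed parts stay reachable without the old `.block` field hop.
    let _hash = recovered.hash();

    // Before: sealed_block.unseal(); after: peel the seal off explicitly.
    Some(recovered.into_sealed_block().into_block())
}
```

The deprecated alias keeps downstream code compiling while call sites migrate; the prune-segment tests further down swap `seal_with_senders()` for `try_recover()` in exactly this way.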
diff --git a/crates/primitives/src/traits.rs b/crates/primitives/src/traits.rs deleted file mode 100644 index 08a8ab3e665a7..0000000000000 --- a/crates/primitives/src/traits.rs +++ /dev/null @@ -1,85 +0,0 @@ -use crate::{BlockWithSenders, SealedBlock}; -use alloc::vec::Vec; -use reth_primitives_traits::{Block, BlockBody, SealedHeader, SignedTransaction}; -use revm_primitives::{Address, B256}; - -/// Extension trait for [`reth_primitives_traits::Block`] implementations -/// allowing for conversions into common block parts containers such as [`SealedBlock`], -/// [`BlockWithSenders`], etc. -pub trait BlockExt: Block { - /// Calculate the header hash and seal the block so that it can't be changed. - fn seal_slow(self) -> SealedBlock { - let (header, body) = self.split(); - SealedBlock::new(SealedHeader::seal(header), body) - } - - /// Seal the block with a known hash. - /// - /// WARNING: This method does not perform validation whether the hash is correct. - fn seal(self, hash: B256) -> SealedBlock { - let (header, body) = self.split(); - SealedBlock::new(SealedHeader::new(header, hash), body) - } - - /// Expensive operation that recovers transaction signer. - fn senders(&self) -> Option> - where - ::Transaction: SignedTransaction, - { - self.body().recover_signers() - } - - /// Transform into a [`BlockWithSenders`]. - /// - /// # Panics - /// - /// If the number of senders does not match the number of transactions in the block - /// and the signer recovery for one of the transactions fails. - /// - /// Note: this is expected to be called with blocks read from disk. - #[track_caller] - fn with_senders_unchecked(self, senders: Vec
) -> BlockWithSenders - where - ::Transaction: SignedTransaction, - { - self.try_with_senders_unchecked(senders).expect("stored block is valid") - } - - /// Transform into a [`BlockWithSenders`] using the given senders. - /// - /// If the number of senders does not match the number of transactions in the block, this falls - /// back to manually recovery, but _without ensuring that the signature has a low `s` value_. - /// - /// Returns an error if a signature is invalid. - #[track_caller] - fn try_with_senders_unchecked( - self, - senders: Vec
, - ) -> Result, Self> - where - ::Transaction: SignedTransaction, - { - let senders = if self.body().transactions().len() == senders.len() { - senders - } else { - let Some(senders) = self.body().recover_signers_unchecked() else { return Err(self) }; - senders - }; - - Ok(BlockWithSenders::new_unchecked(self, senders)) - } - - /// **Expensive**. Transform into a [`BlockWithSenders`] by recovering senders in the contained - /// transactions. - /// - /// Returns `None` if a transaction is invalid. - fn with_recovered_senders(self) -> Option> - where - ::Transaction: SignedTransaction, - { - let senders = self.senders()?; - Some(BlockWithSenders::new_unchecked(self, senders)) - } -} - -impl BlockExt for T {} diff --git a/crates/prune/prune/src/segments/mod.rs b/crates/prune/prune/src/segments/mod.rs index c1e23063fe6f6..a947ab8eb89de 100644 --- a/crates/prune/prune/src/segments/mod.rs +++ b/crates/prune/prune/src/segments/mod.rs @@ -191,7 +191,7 @@ mod tests { for block in &blocks { provider_rw .insert_historical_block( - block.clone().seal_with_senders().expect("failed to seal block with senders"), + block.clone().try_recover().expect("failed to seal block with senders"), ) .expect("failed to insert block"); } @@ -229,7 +229,7 @@ mod tests { for block in &blocks { provider_rw .insert_historical_block( - block.clone().seal_with_senders().expect("failed to seal block with senders"), + block.clone().try_recover().expect("failed to seal block with senders"), ) .expect("failed to insert block"); } @@ -275,7 +275,7 @@ mod tests { for block in &blocks { provider_rw .insert_historical_block( - block.clone().seal_with_senders().expect("failed to seal block with senders"), + block.clone().try_recover().expect("failed to seal block with senders"), ) .expect("failed to insert block"); } @@ -311,7 +311,7 @@ mod tests { for block in &blocks { provider_rw .insert_historical_block( - block.clone().seal_with_senders().expect("failed to seal block with senders"), + block.clone().try_recover().expect("failed to seal block with senders"), ) .expect("failed to insert block"); } diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 957e2e33cf1ed..79ff504dfcd45 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -50,6 +50,7 @@ tokio-util = { workspace = true } tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } [dev-dependencies] +reth-primitives-traits.workspace = true reth-chainspec.workspace = true reth-network-api.workspace = true reth-network-peers.workspace = true diff --git a/crates/rpc/rpc-builder/tests/it/auth.rs b/crates/rpc/rpc-builder/tests/it/auth.rs index ca5db0494e666..381b48dad207a 100644 --- a/crates/rpc/rpc-builder/tests/it/auth.rs +++ b/crates/rpc/rpc-builder/tests/it/auth.rs @@ -8,10 +8,12 @@ use alloy_rpc_types_engine::{ }; use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; use reth_ethereum_engine_primitives::EthEngineTypes; -use reth_primitives::{Block, BlockExt, TransactionSigned}; +use reth_primitives::{Block, TransactionSigned}; +use reth_primitives_traits::block::Block as _; use reth_rpc_api::clients::EngineApiClient; use reth_rpc_layer::JwtSecret; use reth_rpc_types_compat::engine::payload::block_to_payload_v1; + #[allow(unused_must_use)] async fn test_basic_engine_calls(client: &C) where @@ -23,7 +25,7 @@ where client, ExecutionPayloadInputV2 { execution_payload: ExecutionPayloadV1::from_block_slow::( - &block.unseal(), + &block.into_block(), ), withdrawals: None, }, diff --git 
a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index c3ed8dc5add99..50177199cb41d 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -1150,12 +1150,14 @@ mod tests { start..=start + count - 1, BlockRangeParams { tx_count: 0..2, ..Default::default() }, ); - handle.provider.extend_blocks(blocks.iter().cloned().map(|b| (b.hash(), b.unseal()))); + handle + .provider + .extend_blocks(blocks.iter().cloned().map(|b| (b.hash(), b.into_block()))); let expected = blocks .iter() .cloned() - .map(|b| Some(ExecutionPayloadBodyV1::from_block(b.unseal::()))) + .map(|b| Some(ExecutionPayloadBodyV1::from_block(b.into_block()))) .collect::>(); let res = api.get_payload_bodies_by_range_v1(start, count).await.unwrap(); @@ -1184,7 +1186,7 @@ mod tests { !first_missing_range.contains(&b.number) && !second_missing_range.contains(&b.number) }) - .map(|b| (b.hash(), b.clone().unseal())), + .map(|b| (b.hash(), b.clone().into_block())), ); let expected = blocks @@ -1197,7 +1199,7 @@ mod tests { if first_missing_range.contains(&b.number) { None } else { - Some(ExecutionPayloadBodyV1::from_block(b.unseal::())) + Some(ExecutionPayloadBodyV1::from_block(b.into_block())) } }) .collect::>(); @@ -1216,7 +1218,7 @@ mod tests { { None } else { - Some(ExecutionPayloadBodyV1::from_block(b.unseal::())) + Some(ExecutionPayloadBodyV1::from_block(b.into_block())) } }) .collect::>(); @@ -1290,7 +1292,7 @@ mod tests { // Add block and to provider local store and test for mismatch handle.provider.add_block( execution_terminal_block.hash(), - execution_terminal_block.clone().unseal(), + execution_terminal_block.clone().into_block(), ); let res = api.exchange_transition_configuration(transition_config); @@ -1320,7 +1322,7 @@ mod tests { terminal_block_number, }; - handle.provider.add_block(terminal_block.hash(), terminal_block.unseal()); + handle.provider.add_block(terminal_block.hash(), terminal_block.into_block()); let config = api.exchange_transition_configuration(transition_config).unwrap(); assert_eq!(config, transition_config); diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 385607e47f9f7..e0debe679d7de 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -15,13 +15,17 @@ use reth_testing_utils::generators::{ }; fn transform_block Block>(src: SealedBlock, f: F) -> ExecutionPayload { - let unsealed = src.unseal(); + let unsealed = src.into_block(); let mut transformed: Block = f(unsealed); // Recalculate roots transformed.header.transactions_root = proofs::calculate_transaction_root(&transformed.body.transactions); transformed.header.ommers_hash = proofs::calculate_ommers_root(&transformed.body.ommers); - block_to_payload(SealedBlock::new(SealedHeader::seal(transformed.header), transformed.body)).0 + block_to_payload(SealedBlock::from_sealed_parts( + SealedHeader::seal_slow(transformed.header), + transformed.body, + )) + .0 } #[test] @@ -33,7 +37,7 @@ fn payload_body_roundtrip() { BlockRangeParams { tx_count: 0..2, ..Default::default() }, ) { let payload_body: ExecutionPayloadBodyV1 = - ExecutionPayloadBodyV1::from_block(block.clone().unseal::()); + ExecutionPayloadBodyV1::from_block(block.clone().into_block()); assert_eq!( Ok(block.body().transactions.clone()), diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 4cb01d16dab3f..b1930995d5a4b 100644 
--- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -1,33 +1,30 @@ //! Database access for `eth_` block RPC methods. Loads block and receipt data w.r.t. network. -use std::sync::Arc; - +use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; +use crate::{ + node::RpcNodeCoreExt, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore, + RpcReceipt, +}; use alloy_eips::BlockId; use alloy_primitives::Sealable; use alloy_rlp::Encodable; use alloy_rpc_types_eth::{Block, BlockTransactions, Header, Index}; use futures::Future; use reth_node_api::BlockBody; -use reth_primitives::{SealedBlockFor, SealedBlockWithSenders}; +use reth_primitives::{RecoveredBlock, SealedBlock}; use reth_provider::{ BlockIdReader, BlockReader, BlockReaderIdExt, ProviderHeader, ProviderReceipt, }; use reth_rpc_types_compat::block::from_block; use revm_primitives::U256; - -use crate::{ - node::RpcNodeCoreExt, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore, - RpcReceipt, -}; - -use super::{LoadPendingBlock, LoadReceipt, SpawnBlocking}; +use std::sync::Arc; /// Result type of the fetched block receipts. pub type BlockReceiptsResult = Result>>, E>; /// Result type of the fetched block and its receipts. pub type BlockAndReceiptsResult = Result< Option<( - SealedBlockFor<<::Provider as BlockReader>::Block>, + SealedBlock<<::Provider as BlockReader>::Block>, Arc::Provider>>>, )>, ::Error, @@ -62,14 +59,8 @@ pub trait EthBlocks: LoadBlock { { async move { let Some(block) = self.block_with_senders(block_id).await? else { return Ok(None) }; - let block_hash = block.hash(); - let block = from_block( - (*block).clone().unseal(), - full.into(), - Some(block_hash), - self.tx_resp_builder(), - )?; + let block = from_block((*block).clone(), full.into(), self.tx_resp_builder())?; Ok(Some(block)) } } @@ -105,7 +96,7 @@ pub trait EthBlocks: LoadBlock { .get_sealed_block_with_senders(block_hash) .await .map_err(Self::Error::from_eth_err)? - .map(|b| b.body().transactions().len())) + .map(|b| b.body().transaction_count())) } } @@ -143,7 +134,7 @@ pub trait EthBlocks: LoadBlock { // If no pending block from provider, build the pending block locally. if let Some((block, receipts)) = self.local_pending_block().await? { - return Ok(Some((block.block, Arc::new(receipts)))); + return Ok(Some((block.into_sealed_block(), Arc::new(receipts)))); } } @@ -155,7 +146,7 @@ pub trait EthBlocks: LoadBlock { .get_block_and_receipts(block_hash) .await .map_err(Self::Error::from_eth_err) - .map(|b| b.map(|(b, r)| (b.block.clone(), r))) + .map(|b| b.map(|(b, r)| (b.clone_sealed_block(), r))) } Ok(None) @@ -219,7 +210,7 @@ pub trait LoadBlock: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt { block_id: BlockId, ) -> impl Future< Output = Result< - Option::Block>>>, + Option::Block>>>, Self::Error, >, > + Send { diff --git a/crates/rpc/rpc-eth-api/src/helpers/fee.rs b/crates/rpc/rpc-eth-api/src/helpers/fee.rs index f2ab11acc3c27..30b414859e4c8 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/fee.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/fee.rs @@ -1,5 +1,7 @@ //! Loads fee history from database. Helper trait for `eth_` fee and transaction RPC methods. 
+use super::LoadBlock; +use crate::FromEthApiError; use alloy_consensus::BlockHeader; use alloy_eips::eip7840::BlobParams; use alloy_primitives::U256; @@ -14,10 +16,6 @@ use reth_rpc_eth_types::{ }; use tracing::debug; -use crate::FromEthApiError; - -use super::LoadBlock; - /// Fee related functions for the [`EthApiServer`](crate::EthApiServer) trait in the /// `eth_` namespace. pub trait EthFees: LoadFee { diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 9b52b94a4db86..e947a0d0a6d7c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -15,7 +15,7 @@ use reth_evm::{ env::EvmEnv, state_change::post_block_withdrawals_balance_increments, system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes, }; -use reth_primitives::{BlockExt, InvalidTransactionError, SealedBlockWithSenders}; +use reth_primitives::{InvalidTransactionError, RecoveredBlock}; use reth_primitives_traits::Receipt; use reth_provider::{ BlockReader, BlockReaderIdExt, ChainSpecProvider, ProviderBlock, ProviderError, ProviderHeader, @@ -133,7 +133,7 @@ pub trait LoadPendingBlock: ) -> impl Future< Output = Result< Option<( - SealedBlockWithSenders<::Block>, + RecoveredBlock<::Block>, Vec>, )>, Self::Error, @@ -247,10 +247,7 @@ pub trait LoadPendingBlock: block_env: BlockEnv, parent_hash: B256, ) -> Result< - ( - SealedBlockWithSenders>, - Vec>, - ), + (RecoveredBlock>, Vec>), Self::Error, > where @@ -426,6 +423,6 @@ pub trait LoadPendingBlock: results, ); - Ok((SealedBlockWithSenders::new_unchecked(block.seal_slow(), senders), receipts)) + Ok((RecoveredBlock::new_unhashed(block, senders), receipts)) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index bb4c9c5ebf58d..ccb0cd84fec2c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -1,7 +1,6 @@ //! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods. -use std::{fmt::Display, sync::Arc}; - +use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; use crate::{FromEvmError, RpcNodeCore}; use alloy_consensus::BlockHeader; use alloy_primitives::B256; @@ -9,7 +8,7 @@ use alloy_rpc_types_eth::{BlockId, TransactionInfo}; use futures::Future; use reth_chainspec::ChainSpecProvider; use reth_evm::{env::EvmEnv, system_calls::SystemCaller, ConfigureEvm, ConfigureEvmEnv}; -use reth_primitives::SealedBlockWithSenders; +use reth_primitives::RecoveredBlock; use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_provider::{BlockReader, ProviderBlock, ProviderHeader, ProviderTx}; use reth_revm::database::StateProviderDatabase; @@ -22,8 +21,7 @@ use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use revm_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState, }; - -use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; +use std::{fmt::Display, sync::Arc}; /// Executes CPU heavy tasks. 
pub trait Trace: @@ -246,7 +244,7 @@ pub trait Trace: fn trace_block_until( &self, block_id: BlockId, - block: Option>>>, + block: Option>>>, highest_index: Option, config: TracingInspectorConfig, f: F, @@ -286,7 +284,7 @@ pub trait Trace: fn trace_block_until_with_inspector( &self, block_id: BlockId, - block: Option>>>, + block: Option>>>, highest_index: Option, mut inspector_setup: Setup, f: F, @@ -350,7 +348,7 @@ pub trait Trace: // prepare transactions, we do everything upfront to reduce time spent with open // state let max_transactions = - highest_index.map_or(block.body().transactions().len(), |highest| { + highest_index.map_or(block.body().transaction_count(), |highest| { // we need + 1 because the index is 0-based highest as usize + 1 }); @@ -413,7 +411,7 @@ pub trait Trace: fn trace_block_with( &self, block_id: BlockId, - block: Option>>>, + block: Option>>>, config: TracingInspectorConfig, f: F, ) -> impl Future>, Self::Error>> + Send @@ -452,7 +450,7 @@ pub trait Trace: fn trace_block_inspector( &self, block_id: BlockId, - block: Option>>>, + block: Option>>>, insp_setup: Setup, f: F, ) -> impl Future>, Self::Error>> + Send @@ -483,7 +481,7 @@ pub trait Trace: /// already applied. fn apply_pre_execution_changes + DatabaseCommit>( &self, - block: &SealedBlockWithSenders>, + block: &RecoveredBlock>, db: &mut DB, cfg: &CfgEnvWithHandlerCfg, block_env: &BlockEnv, diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 285b12f856a23..6ab585dede7c9 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -14,7 +14,7 @@ use alloy_primitives::{Address, Bytes, TxHash, B256}; use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; use futures::Future; use reth_node_api::BlockBody; -use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, SealedBlockWithSenders}; +use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, RecoveredBlock}; use reth_primitives_traits::SignedTransaction; use reth_provider::{ BlockNumReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ProviderTx, ReceiptProvider, @@ -320,7 +320,7 @@ pub trait EthTransactions: LoadTransaction { { async move { if let Some(block) = self.block_with_senders(block_id).await? 
{ - if let Some(tx) = block.transactions().get(index) { + if let Some(tx) = block.body().transactions().get(index) { return Ok(Some(tx.encoded_2718().into())) } } @@ -546,7 +546,7 @@ pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt { Output = Result< Option<( TransactionSource>, - Arc>>, + Arc>>, )>, Self::Error, >, diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index 271f9d214162e..ff2646cfc5b74 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -7,7 +7,7 @@ use futures::{future::Either, Stream, StreamExt}; use reth_chain_state::CanonStateNotification; use reth_errors::{ProviderError, ProviderResult}; use reth_execution_types::Chain; -use reth_primitives::{NodePrimitives, SealedBlockWithSenders}; +use reth_primitives::{NodePrimitives, RecoveredBlock}; use reth_primitives_traits::{Block, BlockBody}; use reth_storage_api::{BlockReader, StateProviderFactory, TransactionVariant}; use reth_tasks::{TaskSpawner, TokioTaskExecutor}; @@ -29,12 +29,12 @@ pub mod db; pub mod metrics; pub mod multi_consumer; -/// The type that can send the response to a requested [`SealedBlockWithSenders`] +/// The type that can send the response to a requested [`RecoveredBlock`] type BlockTransactionsResponseSender = oneshot::Sender>>>; -/// The type that can send the response to a requested [`SealedBlockWithSenders`] +/// The type that can send the response to a requested [`RecoveredBlock`] type BlockWithSendersResponseSender = - oneshot::Sender>>>>; + oneshot::Sender>>>>; /// The type that can send the response to the requested receipts of a block. type ReceiptsResponseSender = oneshot::Sender>>>>; @@ -44,7 +44,7 @@ type HeaderResponseSender = oneshot::Sender>; type BlockLruCache = MultiConsumerLruCache< B256, - Arc>, + Arc>, L, Either< BlockWithSendersResponseSender, @@ -76,10 +76,7 @@ impl ActionSender { const fn new(blockhash: B256, tx: Option>>) -> Self { Self { blockhash, tx } } - fn send_block( - &mut self, - block_sender: Result>>, ProviderError>, - ) { + fn send_block(&mut self, block_sender: Result>>, ProviderError>) { if let Some(tx) = self.tx.take() { let _ = tx.send(CacheAction::BlockWithSendersResult { block_hash: self.blockhash, @@ -191,13 +188,13 @@ impl EthStateCache { this } - /// Requests the [`SealedBlockWithSenders`] for the block hash + /// Requests the [`RecoveredBlock`] for the block hash /// /// Returns `None` if the block does not exist. pub async fn get_sealed_block_with_senders( &self, block_hash: B256, - ) -> ProviderResult>>> { + ) -> ProviderResult>>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? 
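Note (editorial, not part of the patch): the cache keeps its method name, `get_sealed_block_with_senders`, but now hands out `Arc<RecoveredBlock<_>>`, so the inner `block.block` indirection disappears from every consumer, as the `on_new_block`/`on_reorg_block` hunks that follow show. A small sketch of consuming the new response shape, assuming only the types this file already imports; the helper itself is hypothetical:

```rust
use std::sync::Arc;

use reth_errors::ProviderResult;
use reth_primitives::RecoveredBlock;
use reth_primitives_traits::{Block, BlockBody};

/// Hypothetical consumer of the new cache response type: counts the
/// transactions in a cached block, if the block exists.
fn cached_tx_count<B: Block>(res: ProviderResult<Option<Arc<RecoveredBlock<B>>>>) -> usize {
    match res {
        // Before this patch: block.block.body().transactions().len().
        // After: RecoveredBlock exposes body() directly, no inner `.block` hop.
        Ok(Some(block)) => block.body().transactions().len(),
        _ => 0,
    }
}
```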
@@ -216,7 +213,7 @@ impl EthStateCache { pub async fn get_block_and_receipts( &self, block_hash: B256, - ) -> ProviderResult>, Arc>)>> { + ) -> ProviderResult>, Arc>)>> { let block = self.get_sealed_block_with_senders(block_hash); let receipts = self.get_receipts(block_hash); @@ -260,7 +257,7 @@ pub(crate) struct EthStateCacheService< LimitHeaders = ByLength, > where Provider: BlockReader, - LimitBlocks: Limiter>>, + LimitBlocks: Limiter>>, LimitReceipts: Limiter>>, LimitHeaders: Limiter, { @@ -293,7 +290,7 @@ where fn on_new_block( &mut self, block_hash: B256, - res: ProviderResult>>>, + res: ProviderResult>>>, ) { if let Some(queued) = self.full_block_cache.remove(&block_hash) { // send the response to queued senders @@ -304,7 +301,7 @@ where } Either::Right(transaction_tx) => { let _ = transaction_tx.send(res.clone().map(|maybe_block| { - maybe_block.map(|block| block.block.body().transactions().to_vec()) + maybe_block.map(|block| block.body().transactions().to_vec()) })); } } @@ -338,7 +335,7 @@ where fn on_reorg_block( &mut self, block_hash: B256, - res: ProviderResult>>, + res: ProviderResult>>, ) { let res = res.map(|b| b.map(Arc::new)); if let Some(queued) = self.full_block_cache.remove(&block_hash) { @@ -350,7 +347,7 @@ where } Either::Right(transaction_tx) => { let _ = transaction_tx.send(res.clone().map(|maybe_block| { - maybe_block.map(|block| block.block.body().transactions().to_vec()) + maybe_block.map(|block| block.body().transactions().to_vec()) })); } } @@ -541,36 +538,14 @@ where /// All message variants sent through the channel enum CacheAction { - GetBlockWithSenders { - block_hash: B256, - response_tx: BlockWithSendersResponseSender, - }, - GetHeader { - block_hash: B256, - response_tx: HeaderResponseSender, - }, - GetReceipts { - block_hash: B256, - response_tx: ReceiptsResponseSender, - }, - BlockWithSendersResult { - block_hash: B256, - res: ProviderResult>>>, - }, - ReceiptsResult { - block_hash: B256, - res: ProviderResult>>>, - }, - HeaderResult { - block_hash: B256, - res: Box>, - }, - CacheNewCanonicalChain { - chain_change: ChainChange, - }, - RemoveReorgedChain { - chain_change: ChainChange, - }, + GetBlockWithSenders { block_hash: B256, response_tx: BlockWithSendersResponseSender }, + GetHeader { block_hash: B256, response_tx: HeaderResponseSender }, + GetReceipts { block_hash: B256, response_tx: ReceiptsResponseSender }, + BlockWithSendersResult { block_hash: B256, res: ProviderResult>>> }, + ReceiptsResult { block_hash: B256, res: ProviderResult>>> }, + HeaderResult { block_hash: B256, res: Box> }, + CacheNewCanonicalChain { chain_change: ChainChange }, + RemoveReorgedChain { chain_change: ChainChange }, } struct BlockReceipts { @@ -580,7 +555,7 @@ struct BlockReceipts { /// A change of the canonical chain struct ChainChange { - blocks: Vec>, + blocks: Vec>, receipts: Vec>, } @@ -593,7 +568,7 @@ impl ChainChange { .blocks_and_receipts() .map(|(block, receipts)| { let block_receipts = - BlockReceipts { block_hash: block.block.hash(), receipts: receipts.clone() }; + BlockReceipts { block_hash: block.hash(), receipts: receipts.clone() }; (block.clone(), block_receipts) }) .unzip(); diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index ae509dd2fdb44..d5c6b72f7a4df 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -18,7 +18,7 @@ use metrics::atomics::AtomicU64; use reth_chain_state::CanonStateNotification; use reth_chainspec::{ChainSpecProvider, 
EthChainSpec}; use reth_primitives::{NodePrimitives, SealedBlock}; -use reth_primitives_traits::BlockBody; +use reth_primitives_traits::{Block, BlockBody}; use reth_rpc_server_types::constants::gas_oracle::MAX_HEADER_HISTORY; use reth_storage_api::BlockReaderIdExt; use serde::{Deserialize, Serialize}; @@ -72,12 +72,11 @@ impl FeeHistoryCache { } /// Insert block data into the cache. - async fn insert_blocks<'a, I, H, B, R>(&self, blocks: I) + async fn insert_blocks<'a, I, B, R>(&self, blocks: I) where - H: BlockHeader + 'a, - B: BlockBody, + B: Block + 'a, R: TxReceipt, - I: IntoIterator, Arc>)>, + I: IntoIterator, Arc>)>, { let mut entries = self.inner.entries.write().await; @@ -236,9 +235,9 @@ pub async fn fee_history_cache_new_blocks_task( tokio::select! { res = &mut fetch_missing_block => { if let Ok(res) = res { - fee_history_cache.insert_blocks(res.as_ref() - .map(|(b, r)| (&b.block, r.clone())) - .into_iter()).await; + let res = res.as_ref() + .map(|(b, r)| (b.sealed_block(), r.clone())); + fee_history_cache.insert_blocks(res).await; } } event = events.next() => { @@ -251,10 +250,10 @@ pub async fn fee_history_cache_new_blocks_task( let (blocks, receipts): (Vec<_>, Vec<_>) = committed .blocks_and_receipts() .map(|(block, receipts)| { - (&block.block, Arc::new(receipts.iter().flatten().cloned().collect::>())) + (block.clone_sealed_block(), Arc::new(receipts.iter().flatten().cloned().collect::>())) }) .unzip(); - fee_history_cache.insert_blocks(blocks.into_iter().zip(receipts)).await; + fee_history_cache.insert_blocks(blocks.iter().zip(receipts)).await; // keep track of missing blocks missing_blocks = fee_history_cache.missing_consecutive_blocks().await; @@ -363,22 +362,23 @@ impl FeeHistoryEntry { /// Creates a new entry from a sealed block. /// /// Note: This does not calculate the rewards for the block. 
- pub fn new(block: &SealedBlock) -> Self { + pub fn new(block: &SealedBlock) -> Self { Self { - base_fee_per_gas: block.base_fee_per_gas().unwrap_or_default(), - gas_used_ratio: block.gas_used() as f64 / block.gas_limit() as f64, + base_fee_per_gas: block.header().base_fee_per_gas().unwrap_or_default(), + gas_used_ratio: block.header().gas_used() as f64 / block.header().gas_limit() as f64, base_fee_per_blob_gas: block + .header() .excess_blob_gas() .map(alloy_eips::eip4844::calc_blob_gasprice), blob_gas_used_ratio: block.body().blob_gas_used() as f64 / alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK as f64, - excess_blob_gas: block.excess_blob_gas(), - blob_gas_used: block.blob_gas_used(), - gas_used: block.gas_used(), + excess_blob_gas: block.header().excess_blob_gas(), + blob_gas_used: block.header().blob_gas_used(), + gas_used: block.header().gas_used(), header_hash: block.hash(), - gas_limit: block.gas_limit(), + gas_limit: block.header().gas_limit(), rewards: Vec::new(), - timestamp: block.timestamp(), + timestamp: block.header().timestamp(), } } diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index dd523a19a0a2f..147159b40d841 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -8,7 +8,7 @@ use alloy_primitives::TxHash; use alloy_rpc_types_eth::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; -use reth_primitives::SealedBlockWithSenders; +use reth_primitives::RecoveredBlock; use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_storage_api::{BlockReader, ProviderBlock}; use std::sync::Arc; @@ -55,8 +55,8 @@ where pub enum ProviderOrBlock<'a, P: BlockReader> { /// Provider Provider(&'a P), - /// [`SealedBlockWithSenders`] - Block(Arc>>), + /// [`RecoveredBlock`] + Block(Arc>>), } /// Appends all matching logs of a block's receipts. diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index ef2a61dd720c3..62d5954cd9a5c 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -8,7 +8,7 @@ use alloy_consensus::BlockHeader; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::B256; use derive_more::Constructor; -use reth_primitives::{Receipt, SealedBlockWithSenders}; +use reth_primitives::{Receipt, RecoveredBlock}; use reth_primitives_traits::Block; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; @@ -27,7 +27,7 @@ pub struct PendingBlockEnv { #[derive(Clone, Debug)] pub enum PendingBlockEnvOrigin { /// The pending block as received from the CL. - ActualPending(SealedBlockWithSenders, Vec), + ActualPending(RecoveredBlock, Vec), /// The _modified_ header of the latest block. /// /// This derives the pending state based on the latest header by modifying: @@ -44,7 +44,7 @@ impl PendingBlockEnvOrigin { } /// Consumes the type and returns the actual pending block. - pub fn into_actual_pending(self) -> Option> { + pub fn into_actual_pending(self) -> Option> { match self { Self::ActualPending(block, _) => Some(block), _ => None, @@ -81,7 +81,7 @@ pub struct PendingBlock { /// Timestamp when the pending block is considered outdated. pub expires_at: Instant, /// The locally built pending block. 
- pub block: SealedBlockWithSenders, + pub block: RecoveredBlock, /// The receipts for the pending block pub receipts: Vec, } diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index a5d47739815cd..0782d62ac2005 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -7,7 +7,7 @@ use alloy_rpc_types_eth::{ Block, BlockTransactionsKind, Header, }; use jsonrpsee_types::ErrorObject; -use reth_primitives::BlockWithSenders; +use reth_primitives::RecoveredBlock; use reth_primitives_traits::{block::BlockTx, BlockBody as _, SignedTransaction}; use reth_rpc_server_types::result::rpc_err; use reth_rpc_types_compat::{block::from_block, TransactionCompat}; @@ -178,11 +178,11 @@ where calls.push(call); } - let block = BlockWithSenders::new_unchecked(block, senders); + let block = RecoveredBlock::new_unhashed(block, senders); let txs_kind = if full_transactions { BlockTransactionsKind::Full } else { BlockTransactionsKind::Hashes }; - let block = from_block(block, txs_kind, None, tx_resp_builder)?; + let block = from_block(block, txs_kind, tx_resp_builder)?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/rpc/rpc-types-compat/src/block.rs b/crates/rpc/rpc-types-compat/src/block.rs index ed97c7f5b40bc..752bf154145a9 100644 --- a/crates/rpc/rpc-types-compat/src/block.rs +++ b/crates/rpc/rpc-types-compat/src/block.rs @@ -1,15 +1,14 @@ //! Compatibility functions for rpc `Block` type. -use alloy_consensus::{BlockHeader, Sealable, Sealed}; +use crate::transaction::TransactionCompat; +use alloy_consensus::{BlockHeader, Sealable}; use alloy_eips::eip4895::Withdrawals; -use alloy_primitives::{B256, U256}; +use alloy_primitives::U256; use alloy_rpc_types_eth::{ Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, }; -use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, BlockWithSenders}; -use reth_primitives_traits::{Block as BlockTrait, BlockBody, SignedTransaction}; - -use crate::transaction::TransactionCompat; +use reth_primitives::{transaction::SignedTransactionIntoRecoveredExt, RecoveredBlock}; +use reth_primitives_traits::{Block as BlockTrait, BlockBody, SealedHeader, SignedTransaction}; /// Converts the given primitive block into a [`Block`] response with the given /// [`BlockTransactionsKind`] @@ -17,9 +16,8 @@ use crate::transaction::TransactionCompat; /// If a `block_hash` is provided, then this is used, otherwise the block hash is computed. 
#[expect(clippy::type_complexity)] pub fn from_block( - block: BlockWithSenders, + block: RecoveredBlock, kind: BlockTransactionsKind, - block_hash: Option, tx_resp_builder: &T, ) -> Result>, T::Error> where @@ -27,10 +25,8 @@ where B: BlockTrait, { match kind { - BlockTransactionsKind::Hashes => { - Ok(from_block_with_tx_hashes::(block, block_hash)) - } - BlockTransactionsKind::Full => from_block_full::(block, block_hash, tx_resp_builder), + BlockTransactionsKind::Hashes => Ok(from_block_with_tx_hashes::(block)), + BlockTransactionsKind::Full => from_block_full::(block, tx_resp_builder), } } @@ -39,20 +35,17 @@ where /// /// This will populate the `transactions` field with only the hashes of the transactions in the /// block: [`BlockTransactions::Hashes`] -pub fn from_block_with_tx_hashes( - block: BlockWithSenders, - block_hash: Option, -) -> Block> +pub fn from_block_with_tx_hashes(block: RecoveredBlock) -> Block> where B: BlockTrait, { - let block_hash = block_hash.unwrap_or_else(|| block.header().hash_slow()); let transactions = block.body().transaction_hashes_iter().copied().collect(); - - from_block_with_transactions( - block.length(), - block_hash, - block.block, + let rlp_length = block.rlp_length(); + let (header, body) = block.into_sealed_block().split_sealed_header_body(); + from_block_with_transactions::( + rlp_length, + header, + body, BlockTransactions::Hashes(transactions), ) } @@ -64,23 +57,22 @@ where /// [`TransactionCompat::Transaction`] objects: [`BlockTransactions::Full`] #[expect(clippy::type_complexity)] pub fn from_block_full( - block: BlockWithSenders, - block_hash: Option, + block: RecoveredBlock, tx_resp_builder: &T, ) -> Result>, T::Error> where T: TransactionCompat<<::Body as BlockBody>::Transaction>, B: BlockTrait, { - let block_hash = block_hash.unwrap_or_else(|| block.block.header().hash_slow()); - let block_number = block.block.header().number(); - let base_fee_per_gas = block.block.header().base_fee_per_gas(); + let block_number = block.header().number(); + let base_fee_per_gas = block.header().base_fee_per_gas(); // NOTE: we can safely remove the body here because not needed to finalize the `Block` in // `from_block_with_transactions`, however we need to compute the length before - let block_length = block.block.length(); - let transactions = block.block.body().transactions().to_vec(); + let block_length = block.rlp_length(); + let transactions = block.body().transactions().to_vec(); let transactions_with_senders = transactions.into_iter().zip(block.senders_iter().copied()); + let block_hash = Some(block.hash()); let transactions = transactions_with_senders .enumerate() .map(|(idx, (tx, sender))| { @@ -88,7 +80,7 @@ where let signed_tx_ec_recovered = tx.with_signer(sender); let tx_info = TransactionInfo { hash: Some(tx_hash), - block_hash: Some(block_hash), + block_hash, block_number: Some(block_number), base_fee: base_fee_per_gas.map(u128::from), index: Some(idx as u64), @@ -98,10 +90,11 @@ where }) .collect::, T::Error>>()?; - Ok(from_block_with_transactions( + let (header, body) = block.into_sealed_block().split_sealed_header_body(); + Ok(from_block_with_transactions::<_, B>( block_length, - block_hash, - block.block, + header, + body, BlockTransactions::Full(transactions), )) } @@ -109,28 +102,19 @@ where #[inline] fn from_block_with_transactions( block_length: usize, - block_hash: B256, - block: B, + header: SealedHeader, + body: B::Body, transactions: BlockTransactions, ) -> Block> { - let withdrawals = block - .header() + let withdrawals = header 
.withdrawals_root() .is_some() - .then(|| block.body().withdrawals().cloned().map(Withdrawals::into_inner).map(Into::into)) + .then(|| body.withdrawals().cloned().map(Withdrawals::into_inner).map(Into::into)) .flatten(); - let uncles = block - .body() - .ommers() - .map(|o| o.iter().map(|h| h.hash_slow()).collect()) - .unwrap_or_default(); - let (header, _) = block.split(); - let header = Header::from_consensus( - Sealed::new_unchecked(header, block_hash), - None, - Some(U256::from(block_length)), - ); + let uncles = + body.ommers().map(|o| o.iter().map(|h| h.hash_slow()).collect()).unwrap_or_default(); + let header = Header::from_consensus(header.into(), None, Some(U256::from(block_length))); Block { header, uncles, transactions, withdrawals } } diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index 6645188f31776..0584c30872919 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -1,7 +1,6 @@ //! Standalone Conversion Functions for Handling Different Versions of Execution Payloads in //! Ethereum's Engine -use alloy_consensus::Header; use alloy_eips::{eip2718::Encodable2718, eip4895::Withdrawals, eip7685::RequestsOrHash}; use alloy_primitives::U256; use alloy_rpc_types_engine::{ @@ -9,12 +8,14 @@ use alloy_rpc_types_engine::{ CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, PraguePayloadFields, }; -use reth_primitives::{BlockBody, SealedBlock}; +use reth_primitives::{Block, SealedBlock}; use reth_primitives_traits::{BlockBody as _, SignedTransaction}; -/// Converts [`SealedBlock`] to [`ExecutionPayload`] +/// Converts [`SealedBlock`] to [`ExecutionPayload`]. +/// +/// TODO(mattsse): remove after next alloy bump pub fn block_to_payload( - value: SealedBlock>, + value: SealedBlock>, ) -> (ExecutionPayload, ExecutionPayloadSidecar) { let cancun = value.parent_beacon_block_root.map(|parent_beacon_block_root| CancunPayloadFields { @@ -47,8 +48,8 @@ pub fn block_to_payload( } /// Converts [`SealedBlock`] to [`ExecutionPayloadV1`] -pub fn block_to_payload_v1( - value: SealedBlock>, +pub fn block_to_payload_v1( + value: SealedBlock>, ) -> ExecutionPayloadV1 { let transactions = value.body().transactions.iter().map(|tx| tx.encoded_2718().into()).collect::>(); @@ -71,8 +72,8 @@ pub fn block_to_payload_v1( } /// Converts [`SealedBlock`] to [`ExecutionPayloadV2`] -pub fn block_to_payload_v2( - value: SealedBlock>, +pub fn block_to_payload_v2( + value: SealedBlock>, ) -> ExecutionPayloadV2 { ExecutionPayloadV2 { withdrawals: value.body().withdrawals.clone().unwrap_or_default().into_inner(), @@ -81,8 +82,8 @@ pub fn block_to_payload_v2( } /// Converts [`SealedBlock`] to [`ExecutionPayloadV3`], and returns the parent beacon block root. 
-pub fn block_to_payload_v3( - value: SealedBlock>, +pub fn block_to_payload_v3( + value: SealedBlock>, ) -> ExecutionPayloadV3 { ExecutionPayloadV3 { blob_gas_used: value.blob_gas_used.unwrap_or_default(), @@ -92,8 +93,8 @@ pub fn block_to_payload_v3( } /// Converts [`SealedBlock`] to [`ExecutionPayloadFieldV2`] -pub fn convert_block_to_payload_field_v2( - value: SealedBlock>, +pub fn convert_block_to_payload_field_v2( + value: SealedBlock>, ) -> ExecutionPayloadFieldV2 { // if there are withdrawals, return V2 if value.body().withdrawals.is_some() { @@ -122,7 +123,8 @@ mod tests { CancunPayloadFields, ExecutionPayload, ExecutionPayloadSidecar, ExecutionPayloadV1, ExecutionPayloadV2, ExecutionPayloadV3, }; - use reth_primitives::{Block, BlockExt, TransactionSigned}; + use reth_primitives::{Block, TransactionSigned}; + use reth_primitives_traits::Block as _; #[test] fn roundtrip_payload_to_block() { diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index b88bac816f8bf..ea79f0580dbe7 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -20,7 +20,7 @@ use reth_evm::{ execute::{BlockExecutorProvider, Executor}, ConfigureEvmEnv, }; -use reth_primitives::{BlockExt, NodePrimitives, ReceiptWithBloom, SealedBlockWithSenders}; +use reth_primitives::{NodePrimitives, ReceiptWithBloom, RecoveredBlock}; use reth_primitives_traits::{Block as _, BlockBody, SignedTransaction}; use reth_provider::{ BlockIdReader, BlockReaderIdExt, ChainSpecProvider, HeaderProvider, ProviderBlock, @@ -94,7 +94,7 @@ where /// Trace the entire block asynchronously async fn trace_block( &self, - block: Arc>>, + block: Arc>>, cfg: CfgEnvWithHandlerCfg, block_env: BlockEnv, opts: GethDebugTracingOptions, @@ -192,7 +192,7 @@ where }; self.trace_block( - Arc::new(block.with_senders_unchecked(senders).seal_slow()), + Arc::new(block.with_senders_unchecked(senders)), cfg_env_with_handler_cfg, block_env, opts, @@ -639,7 +639,7 @@ where let mut witness_record = ExecutionWitnessRecord::default(); let _ = block_executor - .execute_with_state_closure(&(*block).clone().unseal(), |statedb: &State<_>| { + .execute_with_state_closure(&(*block).clone(), |statedb: &State<_>| { witness_record.record_executed_state(statedb); }) .map_err(|err| EthApiError::Internal(err.into()))?; diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 337fbb91e06dd..e7337f6e7b234 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -9,7 +9,7 @@ use alloy_rpc_types_eth::{ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; -use reth_primitives::SealedBlockWithSenders; +use reth_primitives::RecoveredBlock; use reth_provider::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, HeaderProvider, ProviderBlock, ProviderError, ProviderReceipt, @@ -546,7 +546,7 @@ where ) -> Result< Option<( Arc>>, - Option>>>, + Option>>>, )>, EthFilterError, > { diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index d2faf0dd52e9d..c621c8b9790ca 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -17,8 +17,8 @@ use reth_consensus::{Consensus, FullConsensus, PostExecutionInput}; use reth_engine_primitives::PayloadValidator; use reth_errors::{BlockExecutionError, ConsensusError, ProviderError}; use reth_evm::execute::{BlockExecutorProvider, Executor}; -use reth_primitives::{GotExpected, NodePrimitives, SealedBlockWithSenders, SealedHeader}; 
-use reth_primitives_traits::{constants::GAS_LIMIT_BOUND_DIVISOR, Block as _, BlockBody}; +use reth_primitives::{GotExpected, NodePrimitives, RecoveredBlock, SealedHeader}; +use reth_primitives_traits::{constants::GAS_LIMIT_BOUND_DIVISOR, BlockBody, SealedBlock}; use reth_provider::{BlockExecutionOutput, BlockReaderIdExt, StateProviderFactory}; use reth_revm::{cached::CachedReads, database::StateProviderDatabase}; use reth_rpc_api::BlockSubmissionValidationApiServer; @@ -98,7 +98,7 @@ where /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( &self, - block: SealedBlockWithSenders<::Block>, + block: RecoveredBlock<::Block>, message: BidTrace, registered_gas_limit: u64, ) -> Result<(), ValidationApiError> { @@ -106,7 +106,7 @@ where self.consensus.validate_header_with_total_difficulty(block.sealed_header(), U256::MAX)?; self.consensus.validate_header(block.sealed_header())?; - self.consensus.validate_block_pre_execution(&block)?; + self.consensus.validate_block_pre_execution(block.sealed_block())?; if !self.disallow.is_empty() { if self.disallow.contains(&block.beneficiary()) { @@ -115,7 +115,7 @@ where if self.disallow.contains(&message.proposer_fee_recipient) { return Err(ValidationApiError::Blacklist(message.proposer_fee_recipient)) } - for (sender, tx) in block.senders_iter().zip(block.transactions()) { + for (sender, tx) in block.senders_iter().zip(block.body().transactions()) { if self.disallow.contains(sender) { return Err(ValidationApiError::Blacklist(*sender)) } @@ -147,7 +147,6 @@ where let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); let executor = self.executor_provider.executor(cached_db); - let block = block.unseal(); let mut accessed_blacklisted = None; let output = executor.execute_with_state_closure(&block, |state| { if !self.disallow.is_empty() { @@ -251,7 +250,7 @@ where /// to checking the latest block transaction. fn ensure_payment( &self, - block: &::Block, + block: &SealedBlock<::Block>, output: &BlockExecutionOutput<::Receipt>, message: &BidTrace, ) -> Result<(), ValidationApiError> { @@ -349,7 +348,7 @@ where versioned_hashes: self.validate_blobs_bundle(request.request.blobs_bundle)?, }), )? - .try_seal_with_senders() + .try_recover() .map_err(|_| ValidationApiError::InvalidTransactionSignature)?; self.validate_message_against_block( @@ -382,7 +381,7 @@ where }, ), )? 
- .try_seal_with_senders() + .try_recover() .map_err(|_| ValidationApiError::InvalidTransactionSignature)?; self.validate_message_against_block( diff --git a/crates/stages/stages/benches/setup/mod.rs b/crates/stages/stages/benches/setup/mod.rs index d6bf4414450f9..c98dc05cca32a 100644 --- a/crates/stages/stages/benches/setup/mod.rs +++ b/crates/stages/stages/benches/setup/mod.rs @@ -153,8 +153,10 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let cloned_second = second_block.clone(); let mut updated_header = cloned_second.header().clone(); updated_header.state_root = root; - *second_block = - SealedBlock::new(SealedHeader::seal(updated_header), cloned_second.into_body()); + *second_block = SealedBlock::from_sealed_parts( + SealedHeader::seal_slow(updated_header), + cloned_second.into_body(), + ); let offset = transitions.len() as u64; @@ -187,7 +189,10 @@ pub(crate) fn txs_testdata(num_blocks: u64) -> TestStageDB { let cloned_last = last_block.clone(); let mut updated_header = cloned_last.header().clone(); updated_header.state_root = root; - *last_block = SealedBlock::new(SealedHeader::seal(updated_header), cloned_last.into_body()); + *last_block = SealedBlock::from_sealed_parts( + SealedHeader::seal_slow(updated_header), + cloned_last.into_body(), + ); db.insert_blocks(blocks.iter(), StorageKind::Static).unwrap(); diff --git a/crates/stages/stages/src/lib.rs b/crates/stages/stages/src/lib.rs index 20c780e24c613..76b88e3918d49 100644 --- a/crates/stages/stages/src/lib.rs +++ b/crates/stages/stages/src/lib.rs @@ -34,7 +34,7 @@ //! # use reth_consensus::test_utils::TestConsensus; //! # //! # let chain_spec = MAINNET.clone(); -//! # let consensus: Arc> = Arc::new(TestConsensus::default()); +//! # let consensus: Arc> = Arc::new(TestConsensus::default()); //! # let headers_downloader = ReverseHeadersDownloaderBuilder::default().build( //! # Arc::new(TestHeadersClient::default()), //! # consensus.clone().as_header_validator() diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 7b8205e25e177..37c84be83a5b2 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -47,6 +47,7 @@ use reth_config::config::StageConfig; use reth_consensus::{Consensus, ConsensusError}; use reth_evm::execute::BlockExecutorProvider; use reth_network_p2p::{bodies::downloader::BodyDownloader, headers::downloader::HeaderDownloader}; +use reth_primitives_traits::Block; use reth_provider::HeaderSyncGapProvider; use reth_prune_types::PruneModes; use reth_stages_api::Stage; @@ -102,7 +103,7 @@ where pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc>, + consensus: Arc>, header_downloader: H, body_downloader: B, executor_factory: E, @@ -185,7 +186,7 @@ where /// The tip for the headers stage. tip: watch::Receiver, /// The consensus engine used to validate incoming data. - consensus: Arc>, + consensus: Arc>, /// The block header downloader header_downloader: H, /// The block body downloader @@ -203,7 +204,7 @@ where pub fn new( provider: Provider, tip: watch::Receiver, - consensus: Arc>, + consensus: Arc>, header_downloader: H, body_downloader: B, stages_config: StageConfig, @@ -215,7 +216,7 @@ where impl OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader + 'static, + H: HeaderDownloader
::Header> + 'static, B: BodyDownloader + 'static, { /// Create a new builder using the given headers stage. @@ -236,7 +237,7 @@ where provider: P, tip: watch::Receiver, header_downloader: H, - consensus: Arc>, + consensus: Arc>, stages_config: StageConfig, ) -> StageSetBuilder where @@ -258,7 +259,7 @@ where impl StageSet for OnlineStages where P: HeaderSyncGapProvider + 'static, - H: HeaderDownloader + 'static, + H: HeaderDownloader
::Header> + 'static, B: BodyDownloader + 'static, HeaderStage: Stage, BodyStage: Stage, diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs index 51941183953e8..7c796ec6ad108 100644 --- a/crates/stages/stages/src/stages/bodies.rs +++ b/crates/stages/stages/src/stages/bodies.rs @@ -1,11 +1,9 @@ use super::missing_static_data_error; use futures_util::TryStreamExt; -use reth_codecs::Compact; use reth_db::{tables, transaction::DbTx}; use reth_db_api::{cursor::DbCursorRO, transaction::DbTxMut}; use reth_network_p2p::bodies::{downloader::BodyDownloader, response::BlockResponse}; use reth_primitives::StaticFileSegment; -use reth_primitives_traits::{Block, BlockBody, BlockHeader}; use reth_provider::{ providers::StaticFileWriter, BlockReader, BlockWriter, DBProvider, ProviderError, StaticFileProviderFactory, StatsReader, StorageLocation, @@ -56,7 +54,7 @@ pub struct BodyStage { /// The body downloader. downloader: D, /// Block response buffer. - buffer: Option>>, + buffer: Option>>, } impl BodyStage { @@ -150,8 +148,8 @@ where + StaticFileProviderFactory + StatsReader + BlockReader - + BlockWriter>, - D: BodyDownloader>, + + BlockWriter, + D: BodyDownloader, { /// Return the id of the stage fn id(&self) -> StageId { @@ -762,8 +760,7 @@ mod tests { } impl BodyDownloader for TestBodyDownloader { - type Header = Header; - type Body = BlockBody; + type Block = reth_primitives::Block; fn set_download_range( &mut self, @@ -785,7 +782,7 @@ mod tests { } impl Stream for TestBodyDownloader { - type Item = BodyDownloaderResult; + type Item = BodyDownloaderResult; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -801,7 +798,9 @@ mod tests { } else { let body = this.responses.remove(&header.hash()).expect("requested unknown body"); - response.push(BlockResponse::Full(SealedBlock::new(header, body))); + response.push(BlockResponse::Full(SealedBlock::from_sealed_parts( + header, body, + ))); } if response.len() as u64 >= this.batch_size { diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 77b8a78df1021..5557beda519aa 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -422,11 +422,6 @@ where // Note: Since we only write to `blocks` if there are any ExExes, we don't need to perform // the `has_exexs` check here as well if !blocks.is_empty() { - let blocks = blocks.into_iter().map(|block| { - let hash = block.header().hash_slow(); - block.seal_unchecked(hash) - }); - let previous_input = self.post_execute_commit_input.replace(Chain::new(blocks, state.clone(), None)); @@ -713,18 +708,18 @@ mod tests { let provider = factory.provider_rw().unwrap(); let mut genesis_rlp = 
hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); - let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); + let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); - let block = SealedBlock::decode(&mut block_rlp).unwrap(); + let block = SealedBlock::::decode(&mut block_rlp).unwrap(); provider .insert_historical_block( genesis - .try_seal_with_senders() + .try_recover() .map_err(|_| BlockValidationError::SenderRecoveryError) .unwrap(), ) .unwrap(); - provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -761,11 +756,11 @@ mod tests { let provider = factory.provider_rw().unwrap(); let mut genesis_rlp = 
hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); - let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); + let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); - let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); - provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); + let block = SealedBlock::::decode(&mut block_rlp).unwrap(); + provider.insert_historical_block(genesis.try_recover().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -793,7 +788,7 @@ mod tests { total } }) if processed == previous_stage_checkpoint.progress.processed && - total == previous_stage_checkpoint.progress.total + block.gas_used); + total == previous_stage_checkpoint.progress.total + block.gas_used()); } #[test] @@ -802,11 +797,11 @@ mod tests { let provider = factory.provider_rw().unwrap(); let mut genesis_rlp = 
hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); - let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); + let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); - let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); - provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); + let block = SealedBlock::::decode(&mut block_rlp).unwrap(); + provider.insert_historical_block(genesis.try_recover().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -835,11 +830,11 @@ mod tests { let provider = factory.provider_rw().unwrap(); let input = ExecInput { target: Some(1), checkpoint: None }; let mut genesis_rlp = 
hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); - let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); + let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); - let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); - provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); + let block = SealedBlock::::decode(&mut block_rlp).unwrap(); + provider.insert_historical_block(genesis.try_recover().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -977,11 +972,11 @@ mod tests { let provider = factory.provider_rw().unwrap(); let input = ExecInput { target: Some(1), checkpoint: None }; let mut genesis_rlp = 
hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); - let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); + let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); - let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); - provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); + let block = SealedBlock::::decode(&mut block_rlp).unwrap(); + provider.insert_historical_block(genesis.try_recover().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) @@ -1085,11 +1080,11 @@ mod tests { let provider = test_db.factory.database_provider_rw().unwrap(); let input = ExecInput { target: Some(1), checkpoint: None }; let mut genesis_rlp = 
hex!("f901f8f901f3a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa0c9ceb8372c88cb461724d8d3d87e8b933f6fc5f679d4841800e662f4428ffd0da056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000080830f4240808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); - let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); + let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = hex!("f9025ff901f7a0c86e8cc0310ae7c531c758678ddbfd16fc51c8cef8cec650b032de9869e8b94fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa050554882fbbda2c2fd93fdc466db9946ea262a67f7a76cc169e714f105ab583da00967f09ef1dfed20c0eacfaa94d5cd4002eda3242ac47eae68972d07b106d192a0e3c8b47fbfc94667ef4cceb17e5cc21e3b1eebd442cebb27f07562b33836290db90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302000001830f42408238108203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f862f860800a83061a8094095e7baea6a6c7c4c2dfeb977efac326af552d8780801ba072ed817487b84ba367d15d2f039b5fc5f087d0a8882fbdf73e8cb49357e1ce30a0403d800545b8fc544f92ce8124e2255f8c3c6af93f28243a120585d4c4c6a2a3c0").as_slice(); - let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); - provider.insert_historical_block(block.clone().try_seal_with_senders().unwrap()).unwrap(); + let block = SealedBlock::::decode(&mut block_rlp).unwrap(); + provider.insert_historical_block(genesis.try_recover().unwrap()).unwrap(); + provider.insert_historical_block(block.clone().try_recover().unwrap()).unwrap(); provider .static_file_provider() .latest_writer(StaticFileSegment::Headers) diff --git a/crates/stages/stages/src/stages/hashing_account.rs b/crates/stages/stages/src/stages/hashing_account.rs index 976c775d1ab11..d256883e1618f 100644 --- a/crates/stages/stages/src/stages/hashing_account.rs +++ b/crates/stages/stages/src/stages/hashing_account.rs @@ -64,7 +64,7 @@ impl AccountHashingStage { ) -> Result, StageError> where N::Primitives: reth_primitives_traits::FullNodePrimitives< - BlockBody = reth_primitives::BlockBody, + Block = reth_primitives::Block, BlockHeader = reth_primitives::Header, >, { @@ -85,7 +85,7 @@ impl AccountHashingStage { ); for block in 
blocks { - provider.insert_historical_block(block.try_seal_with_senders().unwrap()).unwrap(); + provider.insert_historical_block(block.try_recover().unwrap()).unwrap(); } provider .static_file_provider() diff --git a/crates/stages/stages/src/stages/headers.rs b/crates/stages/stages/src/stages/headers.rs index f411060bcca31..cbec9c9ae4c37 100644 --- a/crates/stages/stages/src/stages/headers.rs +++ b/crates/stages/stages/src/stages/headers.rs @@ -407,7 +407,7 @@ mod tests { use alloy_primitives::B256; use assert_matches::assert_matches; use reth_execution_types::ExecutionOutcome; - use reth_primitives::{BlockBody, SealedBlock, SealedBlockWithSenders}; + use reth_primitives::{BlockBody, RecoveredBlock, SealedBlock}; use reth_provider::{BlockWriter, ProviderFactory, StaticFileProviderFactory}; use reth_stages_api::StageUnitCheckpoint; use reth_testing_utils::generators::{self, random_header, random_header_range}; @@ -521,7 +521,7 @@ mod tests { // validate the header let header = provider.header_by_number(block_num)?; assert!(header.is_some()); - let header = SealedHeader::seal(header.unwrap()); + let header = SealedHeader::seal_slow(header.unwrap()); assert_eq!(header.hash(), hash); // validate the header total difficulty @@ -535,7 +535,7 @@ mod tests { } async fn after_execution(&self, headers: Self::Seed) -> Result<(), TestRunnerError> { - self.client.extend(headers.iter().map(|h| h.clone().unseal())).await; + self.client.extend(headers.iter().map(|h| h.clone_header())).await; let tip = if headers.is_empty() { let tip = random_header(&mut generators::rng(), 0, None); self.db.insert_headers(std::iter::once(&tip))?; @@ -610,7 +610,7 @@ mod tests { let headers = runner.seed_execution(input).expect("failed to seed execution"); let rx = runner.execute(input); - runner.client.extend(headers.iter().rev().map(|h| h.clone().unseal())).await; + runner.client.extend(headers.iter().rev().map(|h| h.clone_header())).await; // skip `after_execution` hook for linear downloader let tip = headers.last().unwrap(); @@ -647,11 +647,10 @@ mod tests { let sealed_blocks = sealed_headers .iter() .map(|header| { - SealedBlockWithSenders::new( - SealedBlock::new(header.clone(), BlockBody::default()), + RecoveredBlock::new_sealed( + SealedBlock::from_sealed_parts(header.clone(), BlockBody::default()), vec![], ) - .unwrap() }) .collect(); @@ -693,7 +692,7 @@ mod tests { let headers = runner.seed_execution(input).expect("failed to seed execution"); let rx = runner.execute(input); - runner.client.extend(headers.iter().rev().map(|h| h.clone().unseal())).await; + runner.client.extend(headers.iter().rev().map(|h| h.clone_header())).await; // skip `after_execution` hook for linear downloader let tip = headers.last().unwrap(); diff --git a/crates/stages/stages/src/stages/merkle.rs b/crates/stages/stages/src/stages/merkle.rs index a2b4655835cc1..3d36964a713e0 100644 --- a/crates/stages/stages/src/stages/merkle.rs +++ b/crates/stages/stages/src/stages/merkle.rs @@ -1,5 +1,5 @@ use alloy_consensus::BlockHeader; -use alloy_primitives::{BlockNumber, B256}; +use alloy_primitives::{BlockNumber, Sealable, B256}; use reth_codecs::Compact; use reth_consensus::ConsensusError; use reth_db::tables; @@ -277,7 +277,7 @@ where // Reset the checkpoint self.save_execution_checkpoint(provider, None)?; - validate_state_root(trie_root, SealedHeader::seal(target_block), to_block)?; + validate_state_root(trie_root, SealedHeader::seal_slow(target_block), to_block)?; Ok(ExecOutput { checkpoint: StageCheckpoint::new(to_block) @@ -330,7 +330,7 @@ where 
.header_by_number(input.unwind_to)? .ok_or_else(|| ProviderError::HeaderNotFound(input.unwind_to.into()))?; - validate_state_root(block_root, SealedHeader::seal(target), input.unwind_to)?; + validate_state_root(block_root, SealedHeader::seal_slow(target), input.unwind_to)?; // Validation passed, apply unwind changes to the database. provider.write_trie_updates(&updates)?; @@ -344,7 +344,7 @@ where /// Check that the computed state root matches the root in the expected header. #[inline] -fn validate_state_root( +fn validate_state_root( got: B256, expected: SealedHeader, target_block: BlockNumber, @@ -525,7 +525,7 @@ mod tests { stage_progress, BlockParams { parent: preblocks.last().map(|b| b.hash()), ..Default::default() }, ) - .split(); + .split_sealed_header_body(); let mut header = header.unseal(); header.state_root = state_root( @@ -534,7 +534,10 @@ mod tests { .into_iter() .map(|(address, account)| (address, (account, std::iter::empty()))), ); - let sealed_head = SealedBlock::new(SealedHeader::seal(header), body); + let sealed_head = SealedBlock::::from_sealed_parts( + SealedHeader::seal_slow(header), + body, + ); let head_hash = sealed_head.hash(); let mut blocks = vec![sealed_head]; @@ -584,8 +587,8 @@ mod tests { let static_file_provider = self.db.factory.static_file_provider(); let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers).unwrap(); - let mut last_header = last_block.header().clone(); - last_header.state_root = root; + let mut last_header = last_block.clone_sealed_header(); + last_header.set_state_root(root); let hash = last_header.hash_slow(); writer.prune_headers(1).unwrap(); diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 33a4d76a11f97..cdc84e77270e3 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -82,13 +82,11 @@ mod tests { let tip = 66; let input = ExecInput { target: Some(tip), checkpoint: None }; let mut genesis_rlp = hex!("f901faf901f5a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa045571b40ae66ca7480791bbb2887286e4e4c4b1b298b191c889d6959023a32eda056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000808502540be400808000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0").as_slice(); - let genesis = SealedBlock::decode(&mut genesis_rlp).unwrap(); + let genesis = SealedBlock::::decode(&mut genesis_rlp).unwrap(); let mut block_rlp = 
hex!("f90262f901f9a075c371ba45999d87f4542326910a11af515897aebce5265d3f6acd1f1161f82fa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa098f2dcd87c8ae4083e7017a05456c14eea4b1db2032126e27b3b1563d57d7cc0a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba03f4e5c2ec5b2170b711d97ee755c160457bb58d8daa338e835ec02ae6860bbabb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8798203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0").as_slice(); - let block = SealedBlock::decode(&mut block_rlp).unwrap(); - provider_rw.insert_historical_block(genesis.try_seal_with_senders().unwrap()).unwrap(); - provider_rw - .insert_historical_block(block.clone().try_seal_with_senders().unwrap()) - .unwrap(); + let block = SealedBlock::::decode(&mut block_rlp).unwrap(); + provider_rw.insert_historical_block(genesis.try_recover().unwrap()).unwrap(); + provider_rw.insert_historical_block(block.clone().try_recover().unwrap()).unwrap(); // Fill with bogus blocks to respect PruneMode distance. 
let mut head = block.hash(); @@ -100,7 +98,7 @@ mod tests { generators::BlockParams { parent: Some(head), ..Default::default() }, ); head = nblock.hash(); - provider_rw.insert_historical_block(nblock.try_seal_with_senders().unwrap()).unwrap(); + provider_rw.insert_historical_block(nblock.try_recover().unwrap()).unwrap(); } provider_rw .static_file_provider() diff --git a/crates/stages/stages/src/test_utils/test_db.rs b/crates/stages/stages/src/test_utils/test_db.rs index 59ba08df8aa0a..9f13986d293eb 100644 --- a/crates/stages/stages/src/test_utils/test_db.rs +++ b/crates/stages/stages/src/test_utils/test_db.rs @@ -153,7 +153,7 @@ impl TestStageDB { let segment_header = writer.user_header(); if segment_header.block_end().is_none() && segment_header.expected_block_start() == 0 { for block_number in 0..header.number { - let mut prev = header.clone().unseal(); + let mut prev = header.clone_header(); prev.number = block_number; writer.append_header(&prev, U256::ZERO, &B256::ZERO)?; } diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index ab1608eddeb7a..b3d4610f2f935 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -90,6 +90,7 @@ mdbx = [ ] test-utils = [ "dep:tempfile", + "mdbx", "arbitrary", "parking_lot", "reth-primitives/test-utils", diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 4a338d206c43a..707ce2212a813 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -27,8 +27,8 @@ use reth_evm::{env::EvmEnv, ConfigureEvmEnv}; use reth_execution_types::ExecutionOutcome; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; use reth_primitives::{ - Account, Block, BlockWithSenders, EthPrimitives, NodePrimitives, Receipt, SealedBlock, - SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, TransactionSigned, + Account, Block, EthPrimitives, NodePrimitives, Receipt, RecoveredBlock, SealedBlock, + SealedHeader, StorageEntry, TransactionSigned, }; use reth_primitives_traits::BlockBody; use reth_prune_types::{PruneCheckpoint, PruneSegment}; @@ -296,19 +296,17 @@ impl BlockReader for BlockchainProvider { self.consistent_provider()?.block(id) } - fn pending_block(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block()) } - fn pending_block_with_senders( - &self, - ) -> ProviderResult>> { - Ok(self.canonical_in_memory_state.pending_block_with_senders()) + fn pending_block_with_senders(&self) -> ProviderResult>> { + Ok(self.canonical_in_memory_state.pending_recovered_block()) } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -322,7 +320,7 @@ impl BlockReader for BlockchainProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.consistent_provider()?.block_with_senders(id, transaction_kind) } @@ -330,7 +328,7 @@ impl BlockReader for BlockchainProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_block_with_senders(id, transaction_kind) } @@ -341,14 +339,14 @@ impl BlockReader for BlockchainProvider { fn block_with_senders_range( &self, range: RangeInclusive, 
- ) -> ProviderResult>> { + ) -> ProviderResult>> { self.consistent_provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.consistent_provider()?.sealed_block_with_senders_range(range) } } @@ -794,7 +792,7 @@ mod tests { use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives::{BlockExt, EthPrimitives, Receipt, SealedBlock, StaticFileSegment}; + use reth_primitives::{EthPrimitives, Receipt, SealedBlock, StaticFileSegment}; use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, @@ -907,7 +905,7 @@ mod tests { } provider_rw.insert_historical_block( - block.clone().seal_with_senders().expect("failed to seal block with senders"), + block.clone().try_recover().expect("failed to seal block with senders"), )?; } @@ -1027,7 +1025,7 @@ mod tests { let provider_rw = factory.provider_rw()?; for block in database_blocks { provider_rw.insert_historical_block( - block.clone().seal_with_senders().expect("failed to seal block with senders"), + block.clone().try_recover().expect("failed to seal block with senders"), )?; } provider_rw.commit()?; @@ -1069,21 +1067,21 @@ mod tests { // Now the block should be found in memory assert_eq!( provider.find_block_by_hash(first_in_mem_block.hash(), BlockSource::Any)?, - Some(first_in_mem_block.clone().into()) + Some(first_in_mem_block.clone().into_block()) ); assert_eq!( provider.find_block_by_hash(first_in_mem_block.hash(), BlockSource::Canonical)?, - Some(first_in_mem_block.clone().into()) + Some(first_in_mem_block.clone().into_block()) ); // Find the first block in database by hash assert_eq!( provider.find_block_by_hash(first_db_block.hash(), BlockSource::Any)?, - Some(first_db_block.clone().into()) + Some(first_db_block.clone().into_block()) ); assert_eq!( provider.find_block_by_hash(first_db_block.hash(), BlockSource::Canonical)?, - Some(first_db_block.clone().into()) + Some(first_db_block.clone().into_block()) ); // No pending block in database @@ -1101,7 +1099,7 @@ mod tests { // Now the last block should be found in memory assert_eq!( provider.find_block_by_hash(last_in_mem_block.hash(), BlockSource::Pending)?, - Some(last_in_mem_block.clone().into()) + Some(last_in_mem_block.clone().into_block()) ); Ok(()) @@ -1125,7 +1123,7 @@ mod tests { let provider_rw = factory.provider_rw()?; for block in database_blocks { provider_rw.insert_historical_block( - block.clone().seal_with_senders().expect("failed to seal block with senders"), + block.clone().try_recover().expect("failed to seal block with senders"), )?; } provider_rw.commit()?; @@ -1159,21 +1157,21 @@ mod tests { // First in memory block should be found assert_eq!( provider.block(BlockHashOrNumber::Hash(first_in_mem_block.hash()))?, - Some(first_in_mem_block.clone().into()) + Some(first_in_mem_block.clone().into_block()) ); assert_eq!( provider.block(BlockHashOrNumber::Number(first_in_mem_block.number))?, - Some(first_in_mem_block.clone().into()) + Some(first_in_mem_block.clone().into_block()) ); // First database block should be found assert_eq!( provider.block(BlockHashOrNumber::Hash(first_db_block.hash()))?, - Some(first_db_block.clone().into()) + Some(first_db_block.clone().into_block()) ); assert_eq!( provider.block(BlockHashOrNumber::Number(first_db_block.number))?, - 
Some(first_db_block.clone().into()) + Some(first_db_block.clone().into_block()) ); Ok(()) @@ -1211,7 +1209,7 @@ mod tests { assert_eq!( provider.pending_block_with_senders()?, - Some(reth_primitives::SealedBlockWithSenders::new_unchecked( + Some(reth_primitives::RecoveredBlock::new_sealed( block.clone(), block.senders().unwrap() )) @@ -1508,19 +1506,22 @@ mod tests { assert_eq!( provider.block_by_id(block_number.into()).unwrap(), - Some(database_block.clone().unseal()) + Some(database_block.clone().into_block()) + ); + assert_eq!( + provider.block_by_id(block_hash.into()).unwrap(), + Some(database_block.into_block()) ); - assert_eq!(provider.block_by_id(block_hash.into()).unwrap(), Some(database_block.unseal())); let block_number = in_memory_block.number; let block_hash = in_memory_block.hash(); assert_eq!( provider.block_by_id(block_number.into()).unwrap(), - Some(in_memory_block.clone().unseal()) + Some(in_memory_block.clone().into_block()) ); assert_eq!( provider.block_by_id(block_hash.into()).unwrap(), - Some(in_memory_block.unseal()) + Some(in_memory_block.into_block()) ); Ok(()) @@ -1806,7 +1807,7 @@ mod tests { provider_rw.append_blocks_with_state( database_blocks .into_iter() - .map(|b| b.seal_with_senders().expect("failed to seal block with senders")) + .map(|b| b.try_recover().expect("failed to seal block with senders")) .collect(), &ExecutionOutcome { bundle: BundleState::new( @@ -2326,14 +2327,12 @@ mod tests { test_by_block_range!([ (headers_range, |block: &SealedBlock| block.header().clone()), (sealed_headers_range, |block: &SealedBlock| block.clone_sealed_header()), - (block_range, |block: &SealedBlock| block.clone().unseal()), - (block_with_senders_range, |block: &SealedBlock| block - .clone() - .unseal::() - .with_senders_unchecked(vec![])), + (block_range, |block: &SealedBlock| block.clone().into_block()), + (block_with_senders_range, |block: &SealedBlock| block.clone().try_recover().unwrap()), (sealed_block_with_senders_range, |block: &SealedBlock| block .clone() - .with_senders_unchecked(vec![])), + .try_recover() + .unwrap()), (transactions_by_block_range, |block: &SealedBlock| block.body().transactions.clone()), ]); @@ -2494,7 +2493,7 @@ mod tests { block, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( BlockHashOrNumber::Hash(block.hash()), - Some(block.clone().unseal()) + Some(block.clone().into_block()) ), BlockHashOrNumber::Hash(B256::random()) ), @@ -2503,7 +2502,7 @@ mod tests { block, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( BlockHashOrNumber::Number(block.number), - Some(block.clone().unseal()) + Some(block.clone().into_block()) ), BlockHashOrNumber::Number(u64::MAX) ), @@ -2524,7 +2523,7 @@ mod tests { block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), - block.clone().unseal::().with_recovered_senders() + block.clone().try_recover().ok() ), (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) ), @@ -2533,7 +2532,7 @@ mod tests { block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), - block.clone().unseal::().with_recovered_senders() + block.clone().try_recover().ok() ), (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) ), @@ -2542,14 +2541,7 @@ mod tests { sealed_block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), - 
Some( - block - .clone() - .unseal::() - .with_recovered_senders() - .unwrap() - .seal_unchecked(block.hash()) - ) + block.clone().try_recover().ok() ), (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) ), @@ -2558,14 +2550,7 @@ mod tests { sealed_block_with_senders, |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), - Some( - block - .clone() - .unseal::() - .with_recovered_senders() - .unwrap() - .seal_unchecked(block.hash()) - ) + block.clone().try_recover().ok() ), (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) ), diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 4d6ce309fd5a0..1417be828f8b4 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -21,9 +21,7 @@ use reth_db::models::BlockNumberAddress; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; use reth_node_types::{BlockTy, HeaderTy, ReceiptTy, TxTy}; -use reth_primitives::{ - Account, BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StorageEntry, -}; +use reth_primitives::{Account, RecoveredBlock, SealedBlock, SealedHeader, StorageEntry}; use reth_primitives_traits::BlockBody; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -804,11 +802,11 @@ impl BlockReader for ConsistentProvider { self.get_in_memory_or_storage_by_block( hash.into(), |db_provider| db_provider.find_block_by_hash(hash, source), - |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), + |block_state| Ok(Some(block_state.block_ref().block().clone_block())), ) } BlockSource::Pending => { - Ok(self.canonical_in_memory_state.pending_block().map(|block| block.unseal())) + Ok(self.canonical_in_memory_state.pending_block().map(|block| block.into_block())) } } } @@ -817,23 +815,21 @@ impl BlockReader for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.block(id), - |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), + |block_state| Ok(Some(block_state.block_ref().block().clone_block())), ) } - fn pending_block(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { Ok(self.canonical_in_memory_state.pending_block()) } - fn pending_block_with_senders( - &self, - ) -> ProviderResult>> { - Ok(self.canonical_in_memory_state.pending_block_with_senders()) + fn pending_block_with_senders(&self) -> ProviderResult>> { + Ok(self.canonical_in_memory_state.pending_recovered_block()) } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -847,11 +843,11 @@ impl BlockReader for ConsistentProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.block_with_senders())), + |block_state| Ok(Some(block_state.clone_recovered_block())), ) } @@ -859,11 +855,11 @@ impl BlockReader for ConsistentProvider { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { 
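        // With `BlockWithSenders` and `SealedBlockWithSenders` merged into the
        // single `RecoveredBlock` type, the sealed and unsealed getters share a
        // return type, so both are served by the in-memory
        // `clone_recovered_block()` accessor below.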
self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.sealed_block_with_senders())), + |block_state| Ok(Some(block_state.clone_recovered_block())), ) } @@ -871,7 +867,7 @@ impl BlockReader for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.block_range(range), - |block_state, _| Some(block_state.block_ref().block().clone().unseal()), + |block_state, _| Some(block_state.block_ref().block().clone_block()), |_| true, ) } @@ -879,11 +875,11 @@ impl BlockReader for ConsistentProvider { fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.block_with_senders_range(range), - |block_state, _| Some(block_state.block_with_senders()), + |block_state, _| Some(block_state.clone_recovered_block()), |_| true, ) } @@ -891,11 +887,11 @@ impl BlockReader for ConsistentProvider { fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), - |block_state, _| Some(block_state.sealed_block_with_senders()), + |block_state, _| Some(block_state.clone_recovered_block()), |_| true, ) } @@ -1271,11 +1267,11 @@ impl BlockReaderIdExt for ConsistentProvider { BlockNumberOrTag::Safe => Ok(self.canonical_in_memory_state.get_safe_header()), BlockNumberOrTag::Earliest => self .header_by_number(0)? - .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal_slow(h)))), BlockNumberOrTag::Pending => Ok(self.canonical_in_memory_state.pending_sealed_header()), BlockNumberOrTag::Number(num) => self .header_by_number(num)? 
- .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))), + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal_slow(h)))), } } @@ -1285,7 +1281,7 @@ impl BlockReaderIdExt for ConsistentProvider { ) -> ProviderResult>>> { Ok(match id { BlockId::Number(num) => self.sealed_header_by_number_or_tag(num)?, - BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal), + BlockId::Hash(hash) => self.header(&hash.block_hash)?.map(SealedHeader::seal_slow), }) } @@ -1518,7 +1514,7 @@ mod tests { let provider_rw = factory.provider_rw()?; for block in database_blocks { provider_rw.insert_historical_block( - block.clone().seal_with_senders().expect("failed to seal block with senders"), + block.clone().try_recover().expect("failed to seal block with senders"), )?; } provider_rw.commit()?; @@ -1567,23 +1563,23 @@ mod tests { // Now the block should be found in memory assert_eq!( consistent_provider.find_block_by_hash(first_in_mem_block.hash(), BlockSource::Any)?, - Some(first_in_mem_block.clone().into()) + Some(first_in_mem_block.clone().into_block()) ); assert_eq!( consistent_provider .find_block_by_hash(first_in_mem_block.hash(), BlockSource::Canonical)?, - Some(first_in_mem_block.clone().into()) + Some(first_in_mem_block.clone().into_block()) ); // Find the first block in database by hash assert_eq!( consistent_provider.find_block_by_hash(first_db_block.hash(), BlockSource::Any)?, - Some(first_db_block.clone().into()) + Some(first_db_block.clone().into_block()) ); assert_eq!( consistent_provider .find_block_by_hash(first_db_block.hash(), BlockSource::Canonical)?, - Some(first_db_block.clone().into()) + Some(first_db_block.clone().into_block()) ); // No pending block in database @@ -1605,7 +1601,7 @@ mod tests { assert_eq!( consistent_provider .find_block_by_hash(last_in_mem_block.hash(), BlockSource::Pending)?, - Some(last_in_mem_block.clone().into()) + Some(last_in_mem_block.clone_block()) ); Ok(()) @@ -1629,7 +1625,7 @@ mod tests { let provider_rw = factory.provider_rw()?; for block in database_blocks { provider_rw.insert_historical_block( - block.clone().seal_with_senders().expect("failed to seal block with senders"), + block.clone().try_recover().expect("failed to seal block with senders"), )?; } provider_rw.commit()?; @@ -1672,21 +1668,21 @@ mod tests { // First in memory block should be found assert_eq!( consistent_provider.block(BlockHashOrNumber::Hash(first_in_mem_block.hash()))?, - Some(first_in_mem_block.clone().into()) + Some(first_in_mem_block.clone().into_block()) ); assert_eq!( consistent_provider.block(BlockHashOrNumber::Number(first_in_mem_block.number))?, - Some(first_in_mem_block.clone().into()) + Some(first_in_mem_block.clone().into_block()) ); // First database block should be found assert_eq!( consistent_provider.block(BlockHashOrNumber::Hash(first_db_block.hash()))?, - Some(first_db_block.clone().into()) + Some(first_db_block.clone().into_block()) ); assert_eq!( consistent_provider.block(BlockHashOrNumber::Number(first_db_block.number))?, - Some(first_db_block.clone().into()) + Some(first_db_block.clone().into_block()) ); Ok(()) @@ -1728,7 +1724,7 @@ mod tests { provider_rw.append_blocks_with_state( database_blocks .into_iter() - .map(|b| b.seal_with_senders().expect("failed to seal block with senders")) + .map(|b| b.try_recover().expect("failed to seal block with senders")) .collect(), &ExecutionOutcome { bundle: BundleState::new( diff --git a/crates/storage/provider/src/providers/database/mod.rs 
b/crates/storage/provider/src/providers/database/mod.rs index 57ec9c0597085..4c781b304b16e 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -16,9 +16,7 @@ use reth_db::{init_db, mdbx::DatabaseArguments, DatabaseEnv}; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_errors::{RethError, RethResult}; use reth_node_types::{BlockTy, HeaderTy, NodeTypesWithDB, ReceiptTy, TxTy}; -use reth_primitives::{ - BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, -}; +use reth_primitives::{RecoveredBlock, SealedBlock, SealedHeader, StaticFileSegment}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ @@ -382,19 +380,17 @@ impl BlockReader for ProviderFactory { self.provider()?.block(id) } - fn pending_block(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { self.provider()?.pending_block() } - fn pending_block_with_senders( - &self, - ) -> ProviderResult>> { + fn pending_block_with_senders(&self) -> ProviderResult>> { self.provider()?.pending_block_with_senders() } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { self.provider()?.pending_block_and_receipts() } @@ -402,7 +398,7 @@ impl BlockReader for ProviderFactory { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.provider()?.block_with_senders(id, transaction_kind) } @@ -410,7 +406,7 @@ impl BlockReader for ProviderFactory { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.provider()?.sealed_block_with_senders(id, transaction_kind) } @@ -421,14 +417,14 @@ impl BlockReader for ProviderFactory { fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.provider()?.block_with_senders_range(range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.provider()?.sealed_block_with_senders_range(range) } } @@ -703,10 +699,8 @@ mod tests { { let provider = factory.provider_rw().unwrap(); assert_matches!( - provider.insert_block( - block.clone().try_seal_with_senders().unwrap(), - StorageLocation::Database - ), + provider + .insert_block(block.clone().try_recover().unwrap(), StorageLocation::Database), Ok(_) ); assert_matches!( @@ -727,10 +721,8 @@ mod tests { }; let provider = factory.with_prune_modes(prune_modes).provider_rw().unwrap(); assert_matches!( - provider.insert_block( - block.clone().try_seal_with_senders().unwrap(), - StorageLocation::Database - ), + provider + .insert_block(block.clone().try_recover().unwrap(), StorageLocation::Database), Ok(_) ); assert_matches!(provider.transaction_sender(0), Ok(None)); @@ -751,10 +743,8 @@ mod tests { let provider = factory.provider_rw().unwrap(); assert_matches!( - provider.insert_block( - block.clone().try_seal_with_senders().unwrap(), - StorageLocation::Database - ), + provider + .insert_block(block.clone().try_recover().unwrap(), StorageLocation::Database), Ok(_) ); diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 34713d108ba22..4a9e5affa29d8 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ 
b/crates/storage/provider/src/providers/database/provider.rs @@ -47,8 +47,8 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_network_p2p::headers::downloader::SyncTarget; use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives::{ - Account, BlockExt, BlockWithSenders, Bytecode, GotExpected, NodePrimitives, SealedBlock, - SealedBlockFor, SealedBlockWithSenders, SealedHeader, StaticFileSegment, StorageEntry, + Account, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, + StaticFileSegment, StorageEntry, }; use reth_primitives_traits::{Block as _, BlockBody as _, SignedTransaction}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; @@ -424,14 +424,14 @@ impl< /// Inserts an historical block. **Used for setting up test environments** pub fn insert_historical_block( &self, - block: SealedBlockWithSenders<::Block>, + block: RecoveredBlock<::Block>, ) -> ProviderResult { let ttd = if block.number() == 0 { - block.difficulty() + block.header().difficulty() } else { let parent_block_number = block.number() - 1; let parent_ttd = self.header_td_by_number(parent_block_number)?.unwrap_or_default(); - parent_ttd + block.difficulty() + parent_ttd + block.header().difficulty() }; let mut writer = self.static_file_provider.latest_writer(StaticFileSegment::Headers)?; @@ -440,7 +440,7 @@ impl< let segment_header = writer.user_header(); if segment_header.block_end().is_none() && segment_header.expected_block_start() == 0 { for block_number in 0..block.number() { - let mut prev = block.clone_sealed_header().unseal(); + let mut prev = block.clone_header(); prev.number = block_number; writer.append_header(&prev, U256::ZERO, &B256::ZERO)?; } @@ -1197,19 +1197,17 @@ impl BlockReader for DatabaseProvid Ok(None) } - fn pending_block(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { Ok(None) } - fn pending_block_with_senders( - &self, - ) -> ProviderResult>> { + fn pending_block_with_senders(&self) -> ProviderResult>> { Ok(None) } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(None) } @@ -1225,7 +1223,7 @@ impl BlockReader for DatabaseProvid &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.block_with_senders( id, transaction_kind, @@ -1246,13 +1244,13 @@ impl BlockReader for DatabaseProvid &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.block_with_senders( id, transaction_kind, |block_number| self.sealed_header(block_number), |header, body, senders| { - SealedBlock::new(header, body) + Self::Block::new_sealed(header, body) // Note: we're using unchecked here because we know the block contains valid txs // wrt to its height and can ignore the s value check so pre // EIP-2 txs are allowed @@ -1274,7 +1272,7 @@ impl BlockReader for DatabaseProvid fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.block_with_senders_range( range, |range| self.headers_range(range), @@ -1289,13 +1287,14 @@ impl BlockReader for DatabaseProvid fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { self.block_with_senders_range( range, |range| self.sealed_headers_range(range), |header, body, senders| { - SealedBlockWithSenders::new(SealedBlock::new(header, body), senders) - 
.ok_or(ProviderError::SenderRecoveryError) + Self::Block::new_sealed(header, body) + .try_with_senders(senders) + .map_err(|_| ProviderError::SenderRecoveryError) }, ) } @@ -2749,7 +2748,7 @@ impl BlockWrite /// [`TransactionHashNumbers`](tables::TransactionHashNumbers). fn insert_block( &self, - block: SealedBlockWithSenders, + block: RecoveredBlock, write_to: StorageLocation, ) -> ProviderResult { let block_number = block.number(); @@ -2758,12 +2757,12 @@ impl BlockWrite // total difficulty let ttd = if block_number == 0 { - block.difficulty() + block.header().difficulty() } else { let parent_block_number = block_number - 1; let parent_ttd = self.header_td_by_number(parent_block_number)?.unwrap_or_default(); durations_recorder.record_relative(metrics::Action::GetParentTD); - parent_ttd + block.difficulty() + parent_ttd + block.header().difficulty() }; if write_to.database() { @@ -2796,12 +2795,10 @@ impl BlockWrite durations_recorder.record_relative(metrics::Action::GetNextTxNum); let first_tx_num = next_tx_num; - let tx_count = block.block.body().transactions().len() as u64; + let tx_count = block.body().transaction_count() as u64; // Ensures we have all the senders for the block's transactions. - for (transaction, sender) in - block.block.body().transactions().iter().zip(block.senders_iter()) - { + for (transaction, sender) in block.body().transactions().iter().zip(block.senders_iter()) { let hash = transaction.tx_hash(); if self.prune_modes.sender_recovery.as_ref().is_none_or(|m| !m.is_full()) { @@ -2814,7 +2811,7 @@ impl BlockWrite next_tx_num += 1; } - self.append_block_bodies(vec![(block_number, Some(block.block.into_body()))], write_to)?; + self.append_block_bodies(vec![(block_number, Some(block.into_body()))], write_to)?; debug!( target: "providers::db", @@ -2992,7 +2989,7 @@ impl BlockWrite /// TODO(joshie): this fn should be moved to `UnifiedStorageWriter` eventually fn append_blocks_with_state( &self, - blocks: Vec>, + blocks: Vec>, execution_outcome: &ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index f501d64d435c3..d601098490a46 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -33,8 +33,7 @@ use reth_primitives::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, DEFAULT_BLOCKS_PER_STATIC_FILE, }, - BlockWithSenders, Receipt, SealedBlockFor, SealedBlockWithSenders, SealedHeader, - StaticFileSegment, TransactionSigned, + Receipt, RecoveredBlock, SealedBlock, SealedHeader, StaticFileSegment, TransactionSigned, }; use reth_primitives_traits::SignedTransaction; use reth_stages_types::{PipelineTarget, StageId}; @@ -1621,21 +1620,19 @@ impl> Err(ProviderError::UnsupportedProvider) } - fn pending_block(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } - fn pending_block_with_senders( - &self, - ) -> ProviderResult>> { + fn pending_block_with_senders(&self) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1644,7 +1641,7 @@ impl> 
&self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1653,7 +1650,7 @@ impl> &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } @@ -1666,14 +1663,14 @@ impl> fn block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Err(ProviderError::UnsupportedProvider) } fn sealed_block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Err(ProviderError::UnsupportedProvider) } } diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 897da8509d594..7d98cf2a34295 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -110,7 +110,7 @@ mod tests { let hash = header.hash(); tx.put::(header.number, hash).unwrap(); - tx.put::(header.number, header.clone().unseal()).unwrap(); + tx.put::(header.number, header.clone_header()).unwrap(); tx.put::(header.number, td.into()).unwrap(); tx.put::(hash, header.number).unwrap(); } diff --git a/crates/storage/provider/src/test_utils/blocks.rs b/crates/storage/provider/src/test_utils/blocks.rs index 44773402450dc..fec124ad12ab4 100644 --- a/crates/storage/provider/src/test_utils/blocks.rs +++ b/crates/storage/provider/src/test_utils/blocks.rs @@ -12,7 +12,7 @@ use reth_db::tables; use reth_db_api::{database::Database, models::StoredBlockBodyIndices}; use reth_node_types::NodeTypes; use reth_primitives::{ - Account, BlockBody, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, + Account, BlockBody, Receipt, RecoveredBlock, SealedBlock, SealedHeader, Transaction, TransactionSigned, TxType, }; use reth_trie::root::{state_root_unhashed, storage_root_unhashed}; @@ -64,7 +64,7 @@ pub fn assert_genesis_block( } pub(crate) static TEST_BLOCK: LazyLock = LazyLock::new(|| { - SealedBlock::new( + SealedBlock::from_sealed_parts( SealedHeader::new( Header { parent_hash: hex!( @@ -127,7 +127,7 @@ pub struct BlockchainTestData { /// Genesis pub genesis: SealedBlock, /// Blocks with its execution result - pub blocks: Vec<(SealedBlockWithSenders, ExecutionOutcome)>, + pub blocks: Vec<(RecoveredBlock, ExecutionOutcome)>, } impl BlockchainTestData { @@ -163,7 +163,7 @@ impl Default for BlockchainTestData { /// Genesis block pub fn genesis() -> SealedBlock { - SealedBlock::new( + SealedBlock::from_sealed_parts( SealedHeader::new( Header { number: 0, difficulty: U256::from(1), ..Default::default() }, B256::ZERO, @@ -192,7 +192,7 @@ fn bundle_state_root(execution_outcome: &ExecutionOutcome) -> B256 { } /// Block one that points to genesis -fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { +fn block1(number: BlockNumber) -> (RecoveredBlock, ExecutionOutcome) { // block changes let account1: Address = [0x60; 20].into(); let account2: Address = [0x61; 20].into(); @@ -232,18 +232,14 @@ fn block1(number: BlockNumber) -> (SealedBlockWithSenders, ExecutionOutcome) { b256!("5d035ccb3e75a9057452ff060b773b213ec1fc353426174068edfc3971a0b6bd") ); - let (header, mut body) = TEST_BLOCK.clone().split(); + let (mut header, mut body) = TEST_BLOCK.clone().split_header_body(); 
body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); - let mut header = header.unseal(); header.number = number; header.state_root = state_root; header.parent_hash = B256::ZERO; - let block = SealedBlock::new(SealedHeader::seal(header), body); + let block = SealedBlock::seal_parts(header, body); - ( - SealedBlockWithSenders::new_unchecked(block, vec![Address::new([0x30; 20])]), - execution_outcome, - ) + (RecoveredBlock::new_sealed(block, vec![Address::new([0x30; 20])]), execution_outcome) } /// Block two that points to block 1 @@ -251,7 +247,7 @@ fn block2( number: BlockNumber, parent_hash: B256, prev_execution_outcome: &ExecutionOutcome, -) -> (SealedBlockWithSenders, ExecutionOutcome) { +) -> (RecoveredBlock, ExecutionOutcome) { // block changes let account: Address = [0x60; 20].into(); let slot = U256::from(5); @@ -297,20 +293,16 @@ fn block2( b256!("90101a13dd059fa5cca99ed93d1dc23657f63626c5b8f993a2ccbdf7446b64f8") ); - let (header, mut body) = TEST_BLOCK.clone().split(); + let (mut header, mut body) = TEST_BLOCK.clone().split_header_body(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); - let mut header = header.unseal(); header.number = number; header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let block = SealedBlock::new(SealedHeader::seal(header), body); + let block = SealedBlock::seal_parts(header, body); - ( - SealedBlockWithSenders::new_unchecked(block, vec![Address::new([0x31; 20])]), - execution_outcome, - ) + (RecoveredBlock::new_sealed(block, vec![Address::new([0x31; 20])]), execution_outcome) } /// Block three that points to block 2 @@ -318,7 +310,7 @@ fn block3( number: BlockNumber, parent_hash: B256, prev_execution_outcome: &ExecutionOutcome, -) -> (SealedBlockWithSenders, ExecutionOutcome) { +) -> (RecoveredBlock, ExecutionOutcome) { let address_range = 1..=20; let slot_range = 1..=100; @@ -365,19 +357,15 @@ fn block3( extended.extend(execution_outcome.clone()); let state_root = bundle_state_root(&extended); - let (header, mut body) = TEST_BLOCK.clone().split(); + let (mut header, mut body) = TEST_BLOCK.clone().split_header_body(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); - let mut header = header.unseal(); header.number = number; header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let block = SealedBlock::new(SealedHeader::seal(header), body); + let block = SealedBlock::seal_parts(header, body); - ( - SealedBlockWithSenders::new_unchecked(block, vec![Address::new([0x31; 20])]), - execution_outcome, - ) + (RecoveredBlock::new_sealed(block, vec![Address::new([0x31; 20])]), execution_outcome) } /// Block four that points to block 3 @@ -385,7 +373,7 @@ fn block4( number: BlockNumber, parent_hash: B256, prev_execution_outcome: &ExecutionOutcome, -) -> (SealedBlockWithSenders, ExecutionOutcome) { +) -> (RecoveredBlock, ExecutionOutcome) { let address_range = 1..=20; let slot_range = 1..=100; @@ -457,19 +445,15 @@ fn block4( extended.extend(execution_outcome.clone()); let state_root = bundle_state_root(&extended); - let (header, mut body) = TEST_BLOCK.clone().split(); + let (mut header, mut body) = TEST_BLOCK.clone().split_header_body(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); - let mut header = header.unseal(); header.number = number; header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let block = 
SealedBlock::new(SealedHeader::seal(header), body); + let block = SealedBlock::seal_parts(header, body); - ( - SealedBlockWithSenders::new_unchecked(block, vec![Address::new([0x31; 20])]), - execution_outcome, - ) + (RecoveredBlock::new_sealed(block, vec![Address::new([0x31; 20])]), execution_outcome) } /// Block five that points to block 4 @@ -477,7 +461,7 @@ fn block5( number: BlockNumber, parent_hash: B256, prev_execution_outcome: &ExecutionOutcome, -) -> (SealedBlockWithSenders, ExecutionOutcome) { +) -> (RecoveredBlock, ExecutionOutcome) { let address_range = 1..=20; let slot_range = 1..=100; @@ -546,17 +530,13 @@ fn block5( extended.extend(execution_outcome.clone()); let state_root = bundle_state_root(&extended); - let (header, mut body) = TEST_BLOCK.clone().split(); + let (mut header, mut body) = TEST_BLOCK.clone().split_header_body(); body.withdrawals = Some(Withdrawals::new(vec![Withdrawal::default()])); - let mut header = header.unseal(); header.number = number; header.state_root = state_root; // parent_hash points to block1 hash header.parent_hash = parent_hash; - let block = SealedBlock::new(SealedHeader::seal(header), body); + let block = SealedBlock::seal_parts(header, body); - ( - SealedBlockWithSenders::new_unchecked(block, vec![Address::new([0x31; 20])]), - execution_outcome, - ) + (RecoveredBlock::new_sealed(block, vec![Address::new([0x31; 20])]), execution_outcome) } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 362bbc32dfab3..8c3b6422e9a47 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -19,8 +19,8 @@ use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypes; use reth_primitives::{ - Account, Block, BlockWithSenders, Bytecode, EthPrimitives, GotExpected, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, TransactionSigned, + Account, Block, Bytecode, EthPrimitives, GotExpected, Receipt, RecoveredBlock, SealedBlock, + SealedHeader, TransactionSigned, }; use reth_primitives_traits::SignedTransaction; use reth_stages_types::{StageCheckpoint, StageId}; @@ -220,7 +220,7 @@ impl HeaderProvider for MockEthProvider { } fn sealed_header(&self, number: BlockNumber) -> ProviderResult> { - Ok(self.header_by_number(number)?.map(SealedHeader::seal)) + Ok(self.header_by_number(number)?.map(SealedHeader::seal_slow)) } fn sealed_headers_while( @@ -231,7 +231,7 @@ impl HeaderProvider for MockEthProvider { Ok(self .headers_range(range)? 
.into_iter() - .map(SealedHeader::seal) + .map(SealedHeader::seal_slow) .take_while(|h| predicate(h)) .collect()) } @@ -497,7 +497,9 @@ impl BlockReader for MockEthProvider { Ok(None) } - fn pending_block_with_senders(&self) -> ProviderResult> { + fn pending_block_with_senders( + &self, + ) -> ProviderResult>> { Ok(None) } @@ -509,7 +511,7 @@ impl BlockReader for MockEthProvider { &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { Ok(None) } @@ -517,7 +519,7 @@ impl BlockReader for MockEthProvider { &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult> { + ) -> ProviderResult>> { Ok(None) } @@ -534,14 +536,14 @@ impl BlockReader for MockEthProvider { fn block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { Ok(vec![]) } fn sealed_block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult> { + ) -> ProviderResult>> { Ok(vec![]) } } @@ -555,7 +557,7 @@ impl BlockReaderIdExt for MockEthProvider { } fn sealed_header_by_id(&self, id: BlockId) -> ProviderResult> { - self.header_by_id(id)?.map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) + self.header_by_id(id)?.map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal_slow(h)))) } fn header_by_id(&self, id: BlockId) -> ProviderResult> { diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index a0b9657e4032c..6116cfdd0434d 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -2,7 +2,7 @@ use alloy_primitives::BlockNumber; use reth_db_api::models::StoredBlockBodyIndices; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_node_types::NodePrimitives; -use reth_primitives::SealedBlockWithSenders; +use reth_primitives::RecoveredBlock; use reth_storage_api::{NodePrimitivesProvider, StorageLocation}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostStateSorted}; @@ -85,7 +85,7 @@ pub trait BlockWriter: Send + Sync { /// written. fn insert_block( &self, - block: SealedBlockWithSenders, + block: RecoveredBlock, write_to: StorageLocation, ) -> ProviderResult; @@ -124,7 +124,7 @@ pub trait BlockWriter: Send + Sync { /// /// # Parameters /// - /// - `blocks`: Vector of `SealedBlockWithSenders` instances to append. + /// - `blocks`: Vector of `RecoveredBlock` instances to append. /// - `state`: Post-state information to update after appending. /// /// # Returns @@ -132,7 +132,7 @@ pub trait BlockWriter: Send + Sync { /// Returns `Ok(())` on success, or an error if any operation fails. 
fn append_blocks_with_state( &self, - blocks: Vec>, + blocks: Vec>, execution_outcome: &ExecutionOutcome, hashed_state: HashedPostStateSorted, trie_updates: TrieUpdates, diff --git a/crates/storage/provider/src/traits/header_sync_gap.rs b/crates/storage/provider/src/traits/header_sync_gap.rs index b572750d4a23a..f9e628513e49c 100644 --- a/crates/storage/provider/src/traits/header_sync_gap.rs +++ b/crates/storage/provider/src/traits/header_sync_gap.rs @@ -1,6 +1,6 @@ use alloy_consensus::{BlockHeader, Header}; use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; +use alloy_primitives::{BlockNumber, Sealable, B256}; use reth_network_p2p::headers::downloader::SyncTarget; use reth_primitives::SealedHeader; use reth_storage_errors::provider::ProviderResult; @@ -16,7 +16,7 @@ pub struct HeaderSyncGap { pub target: SyncTarget, } -impl HeaderSyncGap { +impl HeaderSyncGap { /// Returns `true` if the gap from the head to the target was closed #[inline] pub fn is_closed(&self) -> bool { diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 5e55042a76c40..23c0f8460aa04 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -4,7 +4,7 @@ use crate::{ }; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, B256}; -use reth_primitives::{BlockWithSenders, SealedBlockFor, SealedBlockWithSenders, SealedHeader}; +use reth_primitives::{RecoveredBlock, SealedBlock, SealedHeader}; use reth_storage_errors::provider::ProviderResult; use std::ops::RangeInclusive; @@ -81,23 +81,21 @@ pub trait BlockReader: /// Returns the pending block if available /// - /// Note: This returns a [`SealedBlockFor`] because it's expected that this is sealed by the + /// Note: This returns a [`SealedBlock`] because it's expected that this is sealed by the /// provider and the caller does not know the hash. - fn pending_block(&self) -> ProviderResult>>; + fn pending_block(&self) -> ProviderResult>>; /// Returns the pending block if available /// - /// Note: This returns a [`SealedBlockWithSenders`] because it's expected that this is sealed by + /// Note: This returns a [`RecoveredBlock`] because it's expected that this is sealed by /// the provider and the caller does not know the hash. - fn pending_block_with_senders( - &self, - ) -> ProviderResult>>; + fn pending_block_with_senders(&self) -> ProviderResult>>; /// Returns the pending block and receipts if available. #[allow(clippy::type_complexity)] fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>>; + ) -> ProviderResult, Vec)>>; /// Returns the block with matching hash from the database. /// @@ -122,7 +120,7 @@ pub trait BlockReader: &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>>; + ) -> ProviderResult>>; /// Returns the sealed block with senders with matching number or hash from database. /// @@ -133,7 +131,7 @@ pub trait BlockReader: &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>>; + ) -> ProviderResult>>; /// Returns all blocks in the given inclusive range. /// @@ -145,14 +143,14 @@ pub trait BlockReader: fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>>; + ) -> ProviderResult>>; /// Returns a range of sealed blocks from the database, along with the senders of each /// transaction in the blocks. 
fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>>; + ) -> ProviderResult>>; } impl BlockReader for std::sync::Arc { @@ -168,17 +166,15 @@ impl BlockReader for std::sync::Arc { fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { T::block(self, id) } - fn pending_block(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { T::pending_block(self) } - fn pending_block_with_senders( - &self, - ) -> ProviderResult>> { + fn pending_block_with_senders(&self) -> ProviderResult>> { T::pending_block_with_senders(self) } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } fn block_by_hash(&self, hash: B256) -> ProviderResult> { @@ -191,14 +187,14 @@ impl BlockReader for std::sync::Arc { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { T::block_with_senders(self, id, transaction_kind) } fn sealed_block_with_senders( &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { T::sealed_block_with_senders(self, id, transaction_kind) } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { @@ -207,13 +203,13 @@ impl BlockReader for std::sync::Arc { fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { T::block_with_senders_range(self, range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { T::sealed_block_with_senders_range(self, range) } } @@ -231,17 +227,15 @@ impl BlockReader for &T { fn block(&self, id: BlockHashOrNumber) -> ProviderResult> { T::block(self, id) } - fn pending_block(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { T::pending_block(self) } - fn pending_block_with_senders( - &self, - ) -> ProviderResult>> { + fn pending_block_with_senders(&self) -> ProviderResult>> { T::pending_block_with_senders(self) } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } fn block_by_hash(&self, hash: B256) -> ProviderResult> { @@ -254,14 +248,14 @@ impl BlockReader for &T { &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { T::block_with_senders(self, id, transaction_kind) } fn sealed_block_with_senders( &self, id: BlockHashOrNumber, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { T::sealed_block_with_senders(self, id, transaction_kind) } fn block_range(&self, range: RangeInclusive) -> ProviderResult> { @@ -270,13 +264,13 @@ impl BlockReader for &T { fn block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { T::block_with_senders_range(self, range) } fn sealed_block_with_senders_range( &self, range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { T::sealed_block_with_senders_range(self, range) } } @@ -345,7 +339,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { &self, id: BlockId, transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { match id { BlockId::Hash(hash) => { self.block_with_senders(hash.block_hash.into(), transaction_kind) @@ -377,7 +371,7 @@ pub trait BlockReaderIdExt: BlockReader + ReceiptProviderIdExt { ) -> 
ProviderResult>> { self.convert_block_number(id)? .map_or_else(|| Ok(None), |num| self.header_by_hash_or_number(num.into()))? - .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal(h)))) + .map_or_else(|| Ok(None), |h| Ok(Some(SealedHeader::seal_slow(h)))) } /// Returns the sealed header with the matching `BlockId` from the database. diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 3e6ed7dbd52c1..a88fbbdd9ef06 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -16,7 +16,7 @@ use alloy_primitives::{ }; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, MAINNET}; use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; -use reth_primitives::{BlockWithSenders, EthPrimitives, SealedBlockFor, SealedBlockWithSenders}; +use reth_primitives::{EthPrimitives, RecoveredBlock, SealedBlock}; use reth_primitives_traits::{Account, Bytecode, NodePrimitives, SealedHeader}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; @@ -165,19 +165,17 @@ impl BlockReader for NoopProvider { Ok(None) } - fn pending_block(&self) -> ProviderResult>> { + fn pending_block(&self) -> ProviderResult>> { Ok(None) } - fn pending_block_with_senders( - &self, - ) -> ProviderResult>> { + fn pending_block_with_senders(&self) -> ProviderResult>> { Ok(None) } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(None) } @@ -185,7 +183,7 @@ impl BlockReader for NoopProvider { &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(None) } @@ -193,7 +191,7 @@ impl BlockReader for NoopProvider { &self, _id: BlockHashOrNumber, _transaction_kind: TransactionVariant, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(None) } @@ -204,14 +202,14 @@ impl BlockReader for NoopProvider { fn block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(vec![]) } fn sealed_block_with_senders_range( &self, _range: RangeInclusive, - ) -> ProviderResult>> { + ) -> ProviderResult>> { Ok(vec![]) } } diff --git a/crates/transaction-pool/src/blobstore/tracker.rs b/crates/transaction-pool/src/blobstore/tracker.rs index de51b87c825fc..fee9a13eb8f99 100644 --- a/crates/transaction-pool/src/blobstore/tracker.rs +++ b/crates/transaction-pool/src/blobstore/tracker.rs @@ -92,8 +92,7 @@ mod tests { use alloy_primitives::PrimitiveSignature as Signature; use reth_execution_types::Chain; use reth_primitives::{ - BlockBody, SealedBlock, SealedBlockWithSenders, SealedHeader, Transaction, - TransactionSigned, + BlockBody, RecoveredBlock, SealedBlock, SealedHeader, Transaction, TransactionSigned, }; use super::*; @@ -127,8 +126,8 @@ mod tests { let tx3_hash = B256::random(); // Non-EIP-4844 transaction // Creating a first block with EIP-4844 transactions - let block1 = SealedBlockWithSenders::new_unchecked( - SealedBlock::new( + let block1 = RecoveredBlock::new_sealed( + SealedBlock::from_sealed_parts( SealedHeader::new(Header { number: 10, ..Default::default() }, B256::random()), BlockBody { transactions: vec![ @@ -157,8 +156,8 @@ mod tests { // Creating a second block with EIP-1559 and EIP-2930 transactions // Note: This block does not contain any EIP-4844 transactions - let block2 = SealedBlockWithSenders::new_unchecked( - SealedBlock::new( + let block2 = RecoveredBlock::new_sealed( + SealedBlock::from_sealed_parts( 
SealedHeader::new(Header { number: 11, ..Default::default() }, B256::random()), BlockBody { transactions: vec![ diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 56fe99421d5a4..c85d84a80ab79 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -177,7 +177,7 @@ use aquamarine as _; use reth_eth_wire_types::HandleMempoolData; use reth_execution_types::ChangedAccount; use reth_primitives::RecoveredTx; -use reth_primitives_traits::{BlockBody, BlockHeader}; +use reth_primitives_traits::Block; use reth_storage_api::StateProviderFactory; use std::{collections::HashSet, sync::Arc}; use tokio::sync::mpsc::Receiver; @@ -614,10 +614,9 @@ where self.pool.set_block_info(info) } - fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, H, B>) + fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, B>) where - H: BlockHeader, - B: BlockBody, + B: Block, { self.pool.on_canonical_state_change(update); } diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index 583b291685d5d..af8e8da33c682 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -108,7 +108,7 @@ pub async fn maintain_transaction_pool( let MaintainPoolConfig { max_update_depth, max_reload_accounts, .. } = config; // ensure the pool points to latest state if let Ok(Some(latest)) = client.header_by_number_or_tag(BlockNumberOrTag::Latest) { - let latest = SealedHeader::seal(latest); + let latest = SealedHeader::seal_slow(latest); let chain_spec = client.chain_spec(); let info = BlockInfo { block_gas_limit: latest.gas_limit(), @@ -272,11 +272,13 @@ pub async fn maintain_transaction_pool( // fees for the next block: `new_tip+1` let pending_block_base_fee = new_tip + .header() .next_block_base_fee( chain_spec.base_fee_params_at_timestamp(new_tip.timestamp() + 12), ) .unwrap_or_default(); - let pending_block_blob_fee = new_tip.next_block_blob_fee(BlobParams::cancun()); + let pending_block_blob_fee = + new_tip.header().next_block_blob_fee(BlobParams::cancun()); // we know all changed account in the new chain let new_changed_accounts: HashSet<_> = @@ -346,7 +348,7 @@ pub async fn maintain_transaction_pool( // update the pool first let update = CanonicalStateUpdate { - new_tip: &new_tip.block, + new_tip: new_tip.sealed_block(), pending_block_base_fee, pending_block_blob_fee, changed_accounts, @@ -375,11 +377,12 @@ pub async fn maintain_transaction_pool( // fees for the next block: `tip+1` let pending_block_base_fee = tip + .header() .next_block_base_fee( chain_spec.base_fee_params_at_timestamp(tip.timestamp() + 12), ) .unwrap_or_default(); - let pending_block_blob_fee = tip.next_block_blob_fee(BlobParams::cancun()); + let pending_block_blob_fee = tip.header().next_block_blob_fee(BlobParams::cancun()); let first_block = blocks.first(); trace!( @@ -397,7 +400,7 @@ pub async fn maintain_transaction_pool( maintained_state = MaintainedPoolState::Drifted; debug!(target: "txpool", ?depth, "skipping deep canonical update"); let info = BlockInfo { - block_gas_limit: tip.gas_limit(), + block_gas_limit: tip.header().gas_limit(), last_seen_block_hash: tip.hash(), last_seen_block_number: tip.number(), pending_basefee: pending_block_base_fee, @@ -430,7 +433,7 @@ pub async fn maintain_transaction_pool( // Canonical update let update = CanonicalStateUpdate { - new_tip: &tip.block, + new_tip: tip.sealed_block(), pending_block_base_fee, pending_block_blob_fee, 
changed_accounts, diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index a7c16d05aa8e5..983a86306c015 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -109,7 +109,7 @@ pub use events::{FullTransactionEvent, TransactionEvent}; pub use listener::{AllTransactionsEvents, TransactionEvents}; pub use parked::{BasefeeOrd, ParkedOrd, ParkedPool, QueuedOrd}; pub use pending::PendingPool; -use reth_primitives_traits::{BlockBody, BlockHeader}; +use reth_primitives_traits::Block; mod best; mod blob; @@ -379,10 +379,9 @@ where } /// Updates the entire pool after a new block was executed. - pub fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, H, B>) + pub fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, B>) where - H: BlockHeader, - B: BlockBody, + B: Block, { trace!(target: "txpool", ?update, "updating pool on canonical state change"); diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 862ecc15a7bf4..f7e2f310e8bf5 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -23,7 +23,7 @@ use reth_primitives::{ transaction::{SignedTransactionIntoRecoveredExt, TryFromRecoveredTransactionError}, PooledTransaction, RecoveredTx, SealedBlock, Transaction, TransactionSigned, }; -use reth_primitives_traits::{BlockBody, SignedTransaction}; +use reth_primitives_traits::{Block, SignedTransaction}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use std::{ @@ -518,10 +518,9 @@ pub trait TransactionPoolExt: TransactionPool { /// sidecar must not be removed from the blob store. Only after a blob transaction is /// finalized, its sidecar is removed from the blob store. This ensures that in case of a reorg, /// the sidecar is still available. - fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, H, B>) + fn on_canonical_state_change(&self, update: CanonicalStateUpdate<'_, B>) where - H: reth_primitives_traits::BlockHeader, - B: BlockBody; + B: Block; /// Updates the accounts in the pool fn update_accounts(&self, accounts: Vec); @@ -721,9 +720,9 @@ pub enum PoolUpdateKind { /// /// This is used to update the pool state accordingly. #[derive(Clone, Debug)] -pub struct CanonicalStateUpdate<'a, H, B> { +pub struct CanonicalStateUpdate<'a, B: Block> { /// Hash of the tip block. - pub new_tip: &'a SealedBlock, + pub new_tip: &'a SealedBlock, /// EIP-1559 Base fee of the _next_ (pending) block /// /// The base fee of a block depends on the utilization of the last block and its base fee. @@ -740,9 +739,9 @@ pub struct CanonicalStateUpdate<'a, H, B> { pub update_kind: PoolUpdateKind, } -impl CanonicalStateUpdate<'_, H, B> +impl CanonicalStateUpdate<'_, B> where - H: BlockHeader, + B: Block, { /// Returns the number of the tip block. pub fn number(&self) -> u64 { @@ -750,7 +749,7 @@ where } /// Returns the hash of the tip block. 
- pub const fn hash(&self) -> B256 { + pub fn hash(&self) -> B256 { self.new_tip.hash() } @@ -771,9 +770,9 @@ where } } -impl fmt::Display for CanonicalStateUpdate<'_, H, B> +impl fmt::Display for CanonicalStateUpdate<'_, B> where - H: BlockHeader, + B: Block, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("CanonicalStateUpdate") diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index f6733a5c1aa92..ba5b38c484496 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -24,7 +24,7 @@ use alloy_eips::{ }; use reth_chainspec::{ChainSpec, EthereumHardforks}; use reth_primitives::{InvalidTransactionError, SealedBlock}; -use reth_primitives_traits::{BlockBody, GotExpected}; +use reth_primitives_traits::{Block, GotExpected}; use reth_storage_api::{StateProvider, StateProviderFactory}; use reth_tasks::TaskSpawner; use std::{ @@ -106,10 +106,9 @@ where self.validate_all(transactions) } - fn on_new_head_block(&self, new_tip_block: &SealedBlock) + fn on_new_head_block(&self, new_tip_block: &SealedBlock) where - H: reth_primitives_traits::BlockHeader, - B: BlockBody, + B: Block, { self.inner.on_new_head_block(new_tip_block.header()) } diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index cb8f98660c65e..a567bdbe5863a 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -26,7 +26,7 @@ pub use task::{TransactionValidationTaskExecutor, ValidationTask}; pub use constants::{ DEFAULT_MAX_TX_INPUT_BYTES, MAX_CODE_BYTE_SIZE, MAX_INIT_CODE_BYTE_SIZE, TX_SLOT_BYTE_SIZE, }; -use reth_primitives_traits::{BlockBody, BlockHeader}; +use reth_primitives_traits::Block; /// A Result type returned after checking a transaction's validity. #[derive(Debug)] @@ -207,10 +207,9 @@ pub trait TransactionValidator: Send + Sync { /// Invoked when the head block changes. /// /// This can be used to update fork specific values (timestamp). 
- fn on_new_head_block(&self, _new_tip_block: &SealedBlock) + fn on_new_head_block(&self, _new_tip_block: &SealedBlock) where - H: BlockHeader, - B: BlockBody, + B: Block, { } } @@ -243,10 +242,9 @@ where } } - fn on_new_head_block(&self, new_tip_block: &SealedBlock) + fn on_new_head_block(&self, new_tip_block: &SealedBlock) where - H: BlockHeader, - Body: BlockBody, + Bl: Block, { match self { Self::Left(v) => v.on_new_head_block(new_tip_block), diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs index 766752ef4bb93..22cc84bd9df16 100644 --- a/crates/transaction-pool/src/validate/task.rs +++ b/crates/transaction-pool/src/validate/task.rs @@ -9,7 +9,7 @@ use crate::{ use futures_util::{lock::Mutex, StreamExt}; use reth_chainspec::ChainSpec; use reth_primitives::SealedBlock; -use reth_primitives_traits::{BlockBody, BlockHeader}; +use reth_primitives_traits::Block; use reth_tasks::TaskSpawner; use std::{future::Future, pin::Pin, sync::Arc}; use tokio::{ @@ -206,10 +206,9 @@ where } } - fn on_new_head_block(&self, new_tip_block: &SealedBlock) + fn on_new_head_block(&self, new_tip_block: &SealedBlock) where - H: BlockHeader, - B: BlockBody, + B: Block, { self.validator.on_new_head_block(new_tip_block) } diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index c7f0083fe36be..32a4e910ae9dc 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -1,6 +1,6 @@ use crate::BeaconSidecarConfig; use alloy_consensus::{ - transaction::PooledTransaction, Signed, Transaction as _, TxEip4844WithSidecar, + transaction::PooledTransaction, BlockHeader, Signed, Transaction as _, TxEip4844WithSidecar, }; use alloy_primitives::B256; use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle, SidecarIterator}; @@ -8,7 +8,7 @@ use eyre::Result; use futures_util::{stream::FuturesUnordered, Future, Stream, StreamExt}; use reqwest::{Error, StatusCode}; use reth::{ - primitives::SealedBlockWithSenders, + primitives::RecoveredBlock, providers::CanonStateNotification, transaction_pool::{BlobStoreError, TransactionPoolExt}, }; @@ -97,10 +97,10 @@ where St: Stream + Send + Unpin + 'static, P: TransactionPoolExt + Unpin + 'static, { - fn process_block(&mut self, block: &SealedBlockWithSenders) { + fn process_block(&mut self, block: &RecoveredBlock) { let txs: Vec<_> = block + .body() .transactions() - .iter() .filter(|tx| tx.is_eip4844()) .map(|tx| (tx.clone(), tx.blob_versioned_hashes().unwrap().len())) .collect(); @@ -195,17 +195,15 @@ where // handle reorged blocks for (_, block) in old.blocks().iter() { let txs: Vec = block + .body() .transactions() - .iter() - .filter(|tx: &&reth::primitives::TransactionSigned| { - tx.is_eip4844() - }) + .filter(|tx| tx.is_eip4844()) .map(|tx| { let transaction_hash = tx.hash(); let block_metadata = BlockMetadata { - block_hash: new.tip().block.hash(), - block_number: new.tip().block.number, - gas_used: new.tip().block.gas_used, + block_hash: new.tip().hash(), + block_number: new.tip().number(), + gas_used: new.tip().gas_used(), }; BlobTransactionEvent::Reorged(ReorgedBlob { transaction_hash, @@ -231,7 +229,7 @@ where async fn fetch_blobs_for_block( client: reqwest::Client, url: String, - block: SealedBlockWithSenders, + block: RecoveredBlock, txs: Vec<(reth::primitives::TransactionSigned, usize)>, ) -> Result, SideCarError> { let response = match client.get(url).header("Accept", 
"application/json").send().await { diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index 7d20b298f7caf..7bb8a77d25982 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -3,6 +3,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +use alloy_consensus::BlockHeader; use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; use alloy_sol_macro::sol; use alloy_sol_types::SolCall; @@ -26,7 +27,7 @@ use reth_evm::execute::{ }; use reth_evm_ethereum::EthEvmConfig; use reth_node_ethereum::{node::EthereumAddOns, BasicBlockExecutorProvider, EthereumNode}; -use reth_primitives::{BlockWithSenders, EthPrimitives, Receipt}; +use reth_primitives::{EthPrimitives, Receipt, RecoveredBlock}; use std::{fmt::Display, sync::Arc}; pub const SYSTEM_ADDRESS: Address = address!("fffffffffffffffffffffffffffffffffffffffe"); @@ -125,10 +126,13 @@ where type Primitives = EthPrimitives; type Error = BlockExecutionError; - fn apply_pre_execution_changes(&mut self, block: &BlockWithSenders) -> Result<(), Self::Error> { + fn apply_pre_execution_changes( + &mut self, + block: &RecoveredBlock, + ) -> Result<(), Self::Error> { // Set state clear flag if the block is after the Spurious Dragon hardfork. let state_clear_flag = - (*self.chain_spec).is_spurious_dragon_active_at_block(block.header.number); + (*self.chain_spec).is_spurious_dragon_active_at_block(block.number()); self.state.set_state_clear_flag(state_clear_flag); Ok(()) @@ -136,19 +140,19 @@ where fn execute_transactions( &mut self, - _block: &BlockWithSenders, + _block: &RecoveredBlock, ) -> Result, Self::Error> { Ok(ExecuteOutput { receipts: vec![], gas_used: 0 }) } fn apply_post_execution_changes( &mut self, - block: &BlockWithSenders, + block: &RecoveredBlock, _receipts: &[Receipt], ) -> Result { - let mut evm = self.evm_config.evm_for_block(&mut self.state, &block.header); + let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); - if let Some(withdrawals) = block.body.withdrawals.as_ref() { + if let Some(withdrawals) = block.body().withdrawals.as_ref() { apply_withdrawals_contract_call(withdrawals, &mut evm)?; } diff --git a/examples/custom-dev-node/src/main.rs b/examples/custom-dev-node/src/main.rs index 42bb83782aa3e..7688c1ce2be67 100644 --- a/examples/custom-dev-node/src/main.rs +++ b/examples/custom-dev-node/src/main.rs @@ -50,7 +50,7 @@ async fn main() -> eyre::Result<()> { let head = notifications.next().await.unwrap(); - let tx = &head.tip().transactions()[0]; + let tx = &head.tip().body().transactions().next().unwrap(); assert_eq!(tx.hash(), hash); println!("mined transaction: {hash}"); Ok(()) diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index ce25eedaacca8..2de7d3bf5545b 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -38,7 +38,7 @@ use reth::{ }, network::NetworkHandle, payload::ExecutionPayloadValidator, - primitives::{Block, EthPrimitives, SealedBlockFor, TransactionSigned}, + primitives::{Block, EthPrimitives, SealedBlock, TransactionSigned}, providers::{CanonStateSubscriptions, EthStorage, StateProviderFactory}, rpc::{ compat::engine::payload::block_to_payload, @@ -177,7 +177,7 @@ impl EngineTypes for CustomEngineTypes { type ExecutionPayloadEnvelopeV4 = ExecutionPayloadEnvelopeV4; fn block_to_payload( - block: SealedBlockFor< + block: SealedBlock< <::Primitives as 
reth_node_api::NodePrimitives>::Block, >, ) -> (ExecutionPayload, ExecutionPayloadSidecar) { @@ -211,7 +211,7 @@ impl PayloadValidator for CustomEngineValidator { &self, payload: ExecutionPayload, sidecar: ExecutionPayloadSidecar, - ) -> Result, PayloadError> { + ) -> Result, PayloadError> { self.inner.ensure_well_formed_payload(payload, sidecar) } } diff --git a/examples/custom-payload-builder/src/generator.rs b/examples/custom-payload-builder/src/generator.rs index 6620170fe897c..76222e0ccde52 100644 --- a/examples/custom-payload-builder/src/generator.rs +++ b/examples/custom-payload-builder/src/generator.rs @@ -1,6 +1,7 @@ use crate::job::EmptyBlockPayloadJob; use alloy_eips::BlockNumberOrTag; use reth::{ + api::Block, providers::{BlockReaderIdExt, BlockSource, StateProviderFactory}, tasks::TaskSpawner, transaction_pool::TransactionPool, @@ -8,7 +9,7 @@ use reth::{ use reth_basic_payload_builder::{BasicPayloadJobGeneratorConfig, PayloadBuilder, PayloadConfig}; use reth_node_api::PayloadBuilderAttributes; use reth_payload_builder::{PayloadBuilderError, PayloadJobGenerator}; -use reth_primitives::{BlockExt, SealedHeader}; +use reth_primitives::SealedHeader; use std::sync::Arc; /// The generator type that creates new jobs that builds empty blocks. diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index 4c4aaa25704e6..bda1ea26cdb8a 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -5,7 +5,7 @@ use reth_chainspec::ChainSpecBuilder; use reth_db::{open_db_read_only, DatabaseEnv}; use reth_node_ethereum::EthereumNode; use reth_node_types::NodeTypesWithDBAdapter; -use reth_primitives::{BlockExt, SealedHeader, TransactionSigned}; +use reth_primitives::{SealedBlock, SealedHeader, TransactionSigned}; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, StateProvider, TransactionsProvider, @@ -64,7 +64,7 @@ fn header_provider_example(provider: T, number: u64) -> eyre: // We can convert a header to a sealed header which contains the hash w/o needing to re-compute // it every time. - let sealed_header = SealedHeader::seal(header); + let sealed_header = SealedHeader::seal_slow(header); // Can also query the header by hash! let header_by_hash = @@ -134,7 +134,7 @@ fn block_provider_example>( let block = provider.block(number.into())?.ok_or(eyre::eyre!("block num not found"))?; // Can seal the block to cache the hash, like the Header above. - let sealed_block = block.clone().seal_slow(); + let sealed_block = SealedBlock::seal_slow(block.clone()); // Can also query the block by hash directly let block_by_hash = provider diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs index 7d80ec6c47fa3..a3c168cb54dea 100644 --- a/testing/ef-tests/src/cases/blockchain_test.rs +++ b/testing/ef-tests/src/cases/blockchain_test.rs @@ -91,11 +91,11 @@ impl Case for BlockchainTestCase { // Insert initial test state into the provider. provider.insert_historical_block( - SealedBlock::new( + SealedBlock::::from_sealed_parts( case.genesis_block_header.clone().into(), BlockBody::default(), ) - .try_seal_with_senders() + .try_recover() .unwrap(), )?; case.pre.write_to_db(provider.tx_ref())?; @@ -111,10 +111,9 @@ impl Case for BlockchainTestCase { // Decode and insert blocks, creating a chain of blocks for the test case. 
let last_block = case.blocks.iter().try_fold(None, |_, block| { - let decoded = SealedBlock::decode(&mut block.rlp.as_ref())?; - provider.insert_historical_block( - decoded.clone().try_seal_with_senders().unwrap(), - )?; + let decoded = + SealedBlock::::decode(&mut block.rlp.as_ref())?; + provider.insert_historical_block(decoded.clone().try_recover().unwrap())?; Ok::, Error>(Some(decoded)) })?; provider diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 9cf6515eb6ce0..307ef1cda32b9 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,6 +1,6 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. -use alloy_consensus::{Header, Transaction as _, TxLegacy}; +use alloy_consensus::{Block, Header, Transaction as _, TxLegacy}; use alloy_eips::{ eip1898::BlockWithParent, eip4895::{Withdrawal, Withdrawals}, @@ -16,7 +16,7 @@ use reth_primitives::{ TransactionSigned, }; -use reth_primitives_traits::crypto::secp256k1::sign_message; +use reth_primitives_traits::{crypto::secp256k1::sign_message, Block as _}; use secp256k1::{Keypair, Secp256k1}; use std::{ cmp::{max, min}, @@ -125,7 +125,7 @@ pub fn random_header(rng: &mut R, number: u64, parent: Option) -> parent_hash: parent.unwrap_or_default(), ..Default::default() }; - SealedHeader::seal(header) + SealedHeader::seal_slow(header) } /// Generates a random legacy [Transaction]. @@ -234,10 +234,11 @@ pub fn random_block(rng: &mut R, number: u64, block_params: BlockParams) ..Default::default() }; - SealedBlock::new( - SealedHeader::seal(header), - BlockBody { transactions, ommers, withdrawals: withdrawals.map(Withdrawals::new) }, - ) + Block { + header, + body: BlockBody { transactions, ommers, withdrawals: withdrawals.map(Withdrawals::new) }, + } + .seal_slow() } /// Generate a range of random blocks. 
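
Taken together, the hunks above replace `SealedBlockWithSenders` with `RecoveredBlock` and rename the hash-computing constructors (`SealedHeader::seal` becomes `SealedHeader::seal_slow`, `SealedBlock::new` becomes `SealedBlock::from_sealed_parts`/`SealedBlock::seal_parts`, and `try_seal_with_senders` becomes `try_recover`). A minimal sketch of the call-site migration, built only from constructors that appear in the hunks above; the function name and placeholder arguments are illustrative and not part of the patch:

fn migrate_call_site(
    header: reth_primitives::Header,
    body: reth_primitives::BlockBody,
    senders: Vec<alloy_primitives::Address>,
) -> reth_primitives::RecoveredBlock<reth_primitives::Block> {
    // Old: SealedBlockWithSenders::new_unchecked(
    //          SealedBlock::new(SealedHeader::seal(header), body), senders)
    // New: seal header and body in one step (the hash is computed once),
    // then attach the already-recovered senders.
    let sealed = reth_primitives::SealedBlock::seal_parts(header, body);
    reth_primitives::RecoveredBlock::new_sealed(sealed, senders)
}
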
From 4b8714d61a8dc1ca6d0573ac6f02920891dde68d Mon Sep 17 00:00:00 2001
From: Matthias Seitz 
Date: Wed, 15 Jan 2025 03:04:06 +0100
Subject: [PATCH 073/113] fix: correct trusted peer exemptions (#13801)

---
 crates/net/network/src/peers.rs | 91 ++++++++++++++++++++++++++++++++-
 1 file changed, 89 insertions(+), 2 deletions(-)

diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs
index f8d18e1599460..a7e981b5890d7 100644
--- a/crates/net/network/src/peers.rs
+++ b/crates/net/network/src/peers.rs
@@ -252,10 +252,10 @@ impl PeersManager {
         // we still want to limit concurrent pending connections
         let max_inbound =
             self.trusted_peer_ids.len().max(self.connection_info.config.max_inbound);
 
-        if self.connection_info.num_pending_in <= max_inbound {
+        if self.connection_info.num_pending_in < max_inbound {
             self.connection_info.inc_pending_in();
+            return Ok(())
         }
-        return Ok(())
 
         // all trusted peers are either connected or connecting
@@ -1659,6 +1659,93 @@ mod tests {
         assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_err());
     }
 
+    #[tokio::test]
+    async fn test_reject_incoming_at_pending_capacity_trusted_peers() {
+        let mut peers = PeersManager::new(PeersConfig::test().with_max_inbound(2));
+        let trusted = PeerId::random();
+        peers.add_trusted_peer_id(trusted);
+
+        // connect the trusted peer
+        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 0)), 8008);
+        assert!(peers.on_incoming_pending_session(addr.ip()).is_ok());
+        peers.on_incoming_session_established(trusted, addr);
+
+        match event!(peers) {
+            PeerAction::PeerAdded(id) => {
+                assert_eq!(id, trusted);
+            }
+            _ => unreachable!(),
+        }
+
+        // saturate the remaining inbound slots with untrusted peers
+        let mut connected_untrusted_peer_ids = Vec::new();
+        for i in 0..(peers.connection_info.config.max_inbound - 1) {
+            let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, (i + 1) as u8)), 8008);
+            assert!(peers.on_incoming_pending_session(addr.ip()).is_ok());
+            let peer_id = PeerId::random();
+            peers.on_incoming_session_established(peer_id, addr);
+            connected_untrusted_peer_ids.push(peer_id);
+
+            match event!(peers) {
+                PeerAction::PeerAdded(id) => {
+                    assert_eq!(id, peer_id);
+                }
+                _ => unreachable!(),
+            }
+        }
+
+        let mut pending_addrs = Vec::new();
+
+        // saturate available slots
+        for i in 0..2 {
+            let socket_addr =
+                SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, (i + 10) as u8)), 8008);
+            assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_ok());
+
+            pending_addrs.push(socket_addr);
+        }
+
+        assert_eq!(peers.connection_info.num_pending_in, 2);
+
+        // try to handle additional incoming connections at capacity
+        for i in 0..2 {
+            let socket_addr =
+                SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, (i + 20) as u8)), 8008);
+            assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_err());
+        }
+
+        let err = PendingSessionHandshakeError::Eth(EthStreamError::P2PStreamError(
+            P2PStreamError::HandshakeError(P2PHandshakeError::Disconnected(
+                DisconnectReason::UselessPeer,
+            )),
+        ));
+
+        // Remove all pending peers
+        for pending_addr in pending_addrs {
+            peers.on_incoming_pending_session_dropped(pending_addr, &err);
+        }
+
+        println!("num_pending_in: {}", peers.connection_info.num_pending_in);
+
+        println!(
+            "num_inbound: {}, has_in_capacity: {}",
+            peers.connection_info.num_inbound,
+            peers.connection_info.has_in_capacity()
+        );
+
+        // disconnect a connected peer
+        peers.on_active_session_gracefully_closed(connected_untrusted_peer_ids[0]);
+
+        println!(
+            "num_inbound: {}, 
has_in_capacity: {}", + peers.connection_info.num_inbound, + peers.connection_info.has_in_capacity() + ); + + let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 99)), 8008); + assert!(peers.on_incoming_pending_session(socket_addr.ip()).is_ok()); + } + #[tokio::test] async fn test_closed_incoming() { let socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 1, 2)), 8008); From f2bf02413fbc6f8af18b643f7815940a6cc58c53 Mon Sep 17 00:00:00 2001 From: Moe Mahhouk Date: Wed, 15 Jan 2025 03:38:53 +0100 Subject: [PATCH 074/113] chore: add distroless minimal base image (#13788) --- Dockerfile.reproducible | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible index 12c12dd7c7d40..89b9305d0b568 100644 --- a/Dockerfile.reproducible +++ b/Dockerfile.reproducible @@ -8,9 +8,6 @@ RUN apt-get update && apt-get install -y libclang-dev=1:11.0-51+nmu5 RUN git clone https://github.com/paradigmxyz/reth /app WORKDIR /app -# Checkout the reproducible-build branch -RUN git checkout reproducible-build - # Get the latest commit timestamp and set SOURCE_DATE_EPOCH RUN SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) && \ echo "SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH" >> /etc/environment @@ -30,8 +27,10 @@ ARG FEATURES="jemalloc asm-keccak" RUN . /etc/environment && \ cargo build --bin reth --features "${FEATURES}" --profile "reproducible" --locked --target x86_64-unknown-linux-gnu -# Create a minimal final image with just the binary -FROM scratch AS binaries +RUN . /etc/environment && mv /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth -# Copy the compiled binary from the builder stage -COPY --from=builder /app/target/x86_64-unknown-linux-gnu/reproducible/reth /reth +# Create a minimal final image with just the binary +FROM gcr.io/distroless/cc-debian12:nonroot-6755e21ccd99ddead6edc8106ba03888cbeed41a +COPY --from=builder /reth /reth +EXPOSE 30303 30303/udp 9001 8545 8546 +ENTRYPOINT [ "/reth" ] From d5978a78b4202c6bd40e166d929aa858dc65a4eb Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 15 Jan 2025 20:09:03 +0400 Subject: [PATCH 075/113] refactor: use `EvmEnv` when setting up `Evm` (#13800) --- Cargo.lock | 2 + crates/ethereum/evm/src/lib.rs | 108 +++++---- crates/ethereum/payload/src/lib.rs | 46 ++-- crates/evm/src/env.rs | 13 + crates/evm/src/lib.rs | 44 ++-- crates/optimism/evm/src/lib.rs | 108 +++++---- crates/optimism/payload/src/builder.rs | 48 ++-- crates/rpc/rpc-eth-api/src/helpers/call.rs | 222 ++++++++---------- .../rpc/rpc-eth-api/src/helpers/estimate.rs | 102 ++++---- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 122 ++++------ crates/rpc/rpc/src/debug.rs | 191 ++++++++------- crates/rpc/rpc/src/eth/bundle.rs | 47 ++-- crates/rpc/rpc/src/eth/sim_bundle.rs | 41 ++-- crates/rpc/rpc/src/trace.rs | 30 +-- examples/custom-inspector/Cargo.toml | 2 + examples/custom-inspector/src/main.rs | 14 +- 16 files changed, 553 insertions(+), 587 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f095efdb8779..69b5d07900b57 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2985,7 +2985,9 @@ dependencies = [ "clap", "futures-util", "reth", + "reth-evm", "reth-node-ethereum", + "revm-primitives", ] [[package]] diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 9cec2f9a92b71..2aaa063d745b9 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -198,7 +198,7 @@ mod tests { primitives::{BlockEnv, CfgEnv, SpecId}, JournaledState, }; - use 
revm_primitives::{EnvWithHandlerCfg, HandlerCfg}; + use revm_primitives::HandlerCfg; use std::collections::HashSet; #[test] @@ -272,12 +272,13 @@ mod tests { let db = CacheDB::>::default(); - let env_with_handler = EnvWithHandlerCfg::default(); + let evm_env = EvmEnv::default(); - let evm = evm_config.evm_with_env(db, env_with_handler.clone()); + let evm = evm_config.evm_with_env(db, evm_env.clone(), Default::default()); // Check that the EVM environment - assert_eq!(evm.context.evm.env, env_with_handler.env); + assert_eq!(evm.context.evm.env.block, evm_env.block_env); + assert_eq!(evm.context.evm.env.cfg, evm_env.cfg_env_with_handler_cfg.cfg_env); // Default spec ID assert_eq!(evm.handler.spec_id(), SpecId::LATEST); @@ -296,16 +297,15 @@ mod tests { // Create a custom configuration environment with a chain ID of 111 let cfg = CfgEnv::default().with_chain_id(111); - let env_with_handler = EnvWithHandlerCfg { - env: Box::new(Env { - cfg: cfg.clone(), - block: BlockEnv::default(), - tx: TxEnv::default(), - }), - handler_cfg: Default::default(), + let evm_env = EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { + cfg_env: cfg.clone(), + handler_cfg: Default::default(), + }, + ..Default::default() }; - let evm = evm_config.evm_with_env(db, env_with_handler); + let evm = evm_config.evm_with_env(db, evm_env, Default::default()); // Check that the EVM environment is initialized with the custom environment assert_eq!(evm.context.evm.inner.env.cfg, cfg); @@ -333,16 +333,19 @@ mod tests { }; let tx = TxEnv { gas_limit: 5_000_000, gas_price: U256::from(50), ..Default::default() }; - let env_with_handler = EnvWithHandlerCfg { - env: Box::new(Env { cfg: CfgEnv::default(), block, tx }), - handler_cfg: Default::default(), + let evm_env = EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { + cfg_env: CfgEnv::default(), + handler_cfg: Default::default(), + }, + block_env: block, }; - let evm = evm_config.evm_with_env(db, env_with_handler.clone()); + let evm = evm_config.evm_with_env(db, evm_env.clone(), tx.clone()); // Verify that the block and transaction environments are set correctly - assert_eq!(evm.context.evm.env.block, env_with_handler.env.block); - assert_eq!(evm.context.evm.env.tx, env_with_handler.env.tx); + assert_eq!(evm.context.evm.env.block, evm_env.block_env); + assert_eq!(evm.context.evm.env.tx, tx); // Default spec ID assert_eq!(evm.handler.spec_id(), SpecId::LATEST); @@ -360,9 +363,15 @@ mod tests { let handler_cfg = HandlerCfg { spec_id: SpecId::CONSTANTINOPLE, ..Default::default() }; - let env_with_handler = EnvWithHandlerCfg { env: Box::new(Env::default()), handler_cfg }; + let evm_env = EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { + cfg_env: Default::default(), + handler_cfg, + }, + ..Default::default() + }; - let evm = evm_config.evm_with_env(db, env_with_handler); + let evm = evm_config.evm_with_env(db, evm_env, Default::default()); // Check that the spec ID is setup properly assert_eq!(evm.handler.spec_id(), SpecId::CONSTANTINOPLE); @@ -422,13 +431,18 @@ mod tests { let evm_config = EthEvmConfig::new(MAINNET.clone()); let db = CacheDB::>::default(); - let env_with_handler = EnvWithHandlerCfg::default(); + let evm_env = EvmEnv::default(); - let evm = - evm_config.evm_with_env_and_inspector(db, env_with_handler.clone(), NoOpInspector); + let evm = evm_config.evm_with_env_and_inspector( + db, + evm_env.clone(), + Default::default(), + NoOpInspector, + ); // Check that the EVM environment is set to default values - assert_eq!(evm.context.evm.env, 
env_with_handler.env); + assert_eq!(evm.context.evm.env.block, evm_env.block_env); + assert_eq!(evm.context.evm.env.cfg, evm_env.cfg_env_with_handler_cfg.cfg_env); assert_eq!(evm.context.external, NoOpInspector); assert_eq!(evm.handler.spec_id(), SpecId::LATEST); @@ -442,18 +456,21 @@ mod tests { let evm_config = EthEvmConfig::new(MAINNET.clone()); let db = CacheDB::>::default(); - let cfg = CfgEnv::default().with_chain_id(111); + let cfg_env = CfgEnv::default().with_chain_id(111); let block = BlockEnv::default(); let tx = TxEnv::default(); - let env_with_handler = EnvWithHandlerCfg { - env: Box::new(Env { cfg: cfg.clone(), block, tx }), - handler_cfg: Default::default(), + let evm_env = EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { + cfg_env: cfg_env.clone(), + handler_cfg: Default::default(), + }, + block_env: block, }; - let evm = evm_config.evm_with_env_and_inspector(db, env_with_handler, NoOpInspector); + let evm = evm_config.evm_with_env_and_inspector(db, evm_env, tx, NoOpInspector); // Check that the EVM environment is set with custom configuration - assert_eq!(evm.context.evm.env.cfg, cfg); + assert_eq!(evm.context.evm.env.cfg, cfg_env); assert_eq!(evm.context.external, NoOpInspector); assert_eq!(evm.handler.spec_id(), SpecId::LATEST); @@ -475,17 +492,14 @@ mod tests { ..Default::default() }; let tx = TxEnv { gas_limit: 5_000_000, gas_price: U256::from(50), ..Default::default() }; - let env_with_handler = EnvWithHandlerCfg { - env: Box::new(Env { cfg: CfgEnv::default(), block, tx }), - handler_cfg: Default::default(), - }; + let evm_env = EvmEnv { block_env: block, ..Default::default() }; let evm = - evm_config.evm_with_env_and_inspector(db, env_with_handler.clone(), NoOpInspector); + evm_config.evm_with_env_and_inspector(db, evm_env.clone(), tx.clone(), NoOpInspector); // Verify that the block and transaction environments are set correctly - assert_eq!(evm.context.evm.env.block, env_with_handler.env.block); - assert_eq!(evm.context.evm.env.tx, env_with_handler.env.tx); + assert_eq!(evm.context.evm.env.block, evm_env.block_env); + assert_eq!(evm.context.evm.env.tx, tx); assert_eq!(evm.context.external, NoOpInspector); assert_eq!(evm.handler.spec_id(), SpecId::LATEST); @@ -500,14 +514,26 @@ mod tests { let db = CacheDB::>::default(); let handler_cfg = HandlerCfg { spec_id: SpecId::CONSTANTINOPLE, ..Default::default() }; - let env_with_handler = EnvWithHandlerCfg { env: Box::new(Env::default()), handler_cfg }; + let evm_env = EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { + handler_cfg, + cfg_env: Default::default(), + }, + ..Default::default() + }; - let evm = - evm_config.evm_with_env_and_inspector(db, env_with_handler.clone(), NoOpInspector); + let evm = evm_config.evm_with_env_and_inspector( + db, + evm_env.clone(), + Default::default(), + NoOpInspector, + ); // Check that the spec ID is set properly assert_eq!(evm.handler.spec_id(), SpecId::CONSTANTINOPLE); - assert_eq!(evm.context.evm.env, env_with_handler.env); + assert_eq!(evm.context.evm.env.block, evm_env.block_env); + assert_eq!(evm.context.evm.env.cfg, evm_env.cfg_env_with_handler_cfg.cfg_env); + assert_eq!(evm.context.evm.env.tx, Default::default()); assert_eq!(evm.context.external, NoOpInspector); // No Optimism diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 4f5fe530161a3..7d49570ff8d4d 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -41,10 +41,7 @@ use reth_transaction_pool::{ }; use revm::{ 
db::{states::bundle_state::BundleRetention, State}, - primitives::{ - BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, InvalidTransaction, - ResultAndState, TxEnv, - }, + primitives::{EVMError, InvalidTransaction, ResultAndState}, DatabaseCommit, }; use std::sync::Arc; @@ -77,7 +74,7 @@ impl EthereumPayloadBuilder where EvmConfig: ConfigureEvm
, { - /// Returns the configured [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the targeted payload + /// Returns the configured [`EvmEnv`] for the targeted payload /// (that has the `parent` as its parent). fn cfg_and_block_env( &self, @@ -108,7 +105,7 @@ where &self, args: BuildArguments, ) -> Result, PayloadBuilderError> { - let EvmEnv { cfg_env_with_handler_cfg, block_env } = self + let evm_env = self .cfg_and_block_env(&args.config, &args.config.parent_header) .map_err(PayloadBuilderError::other)?; @@ -117,8 +114,7 @@ where self.evm_config.clone(), self.builder_config.clone(), args, - cfg_env_with_handler_cfg, - block_env, + evm_env, |attributes| pool.best_transactions_with_attributes(attributes), ) } @@ -138,7 +134,7 @@ where None, ); - let EvmEnv { cfg_env_with_handler_cfg, block_env } = self + let evm_env = self .cfg_and_block_env(&args.config, &args.config.parent_header) .map_err(PayloadBuilderError::other)?; @@ -148,8 +144,7 @@ where self.evm_config.clone(), self.builder_config.clone(), args, - cfg_env_with_handler_cfg, - block_env, + evm_env, |attributes| pool.best_transactions_with_attributes(attributes), )? .into_payload() @@ -167,8 +162,7 @@ pub fn default_ethereum_payload( evm_config: EvmConfig, builder_config: EthereumBuilderConfig, args: BuildArguments, - initialized_cfg: CfgEnvWithHandlerCfg, - initialized_block_env: BlockEnv, + evm_env: EvmEnv, best_txs: F, ) -> Result, PayloadBuilderError> where @@ -189,19 +183,20 @@ where debug!(target: "payload_builder", id=%attributes.id, parent_header = ?parent_header.hash(), parent_number = parent_header.number, "building new payload"); let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; - let block_gas_limit: u64 = initialized_block_env.gas_limit.to::(); - let base_fee = initialized_block_env.basefee.to::(); + let block_gas_limit: u64 = evm_env.block_env.gas_limit.to::(); + let base_fee = evm_env.block_env.basefee.to::(); let mut executed_txs = Vec::new(); let mut executed_senders = Vec::new(); let mut best_txs = best_txs(BestTransactionsAttributes::new( base_fee, - initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), + evm_env.block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), )); let mut total_fees = U256::ZERO; - let block_number = initialized_block_env.number.to::(); + let block_number = evm_env.block_env.number.to::(); + let beneficiary = evm_env.block_env.coinbase; let mut system_caller = SystemCaller::new(evm_config.clone(), chain_spec.clone()); @@ -209,8 +204,8 @@ where system_caller .pre_block_beacon_root_contract_call( &mut db, - &initialized_cfg, - &initialized_block_env, + evm_env.cfg_env_with_handler_cfg(), + evm_env.block_env(), attributes.parent_beacon_block_root, ) .map_err(|err| { @@ -225,8 +220,8 @@ where // apply eip-2935 blockhashes update system_caller.pre_block_blockhashes_contract_call( &mut db, - &initialized_cfg, - &initialized_block_env, + evm_env.cfg_env_with_handler_cfg(), + evm_env.block_env(), parent_header.hash(), ) .map_err(|err| { @@ -234,12 +229,7 @@ where PayloadBuilderError::Internal(err.into()) })?; - let env = EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - TxEnv::default(), - ); - let mut evm = evm_config.evm_with_env(&mut db, env); + let mut evm = evm_config.evm_with_env(&mut db, evm_env, Default::default()); let mut receipts = Vec::new(); while let Some(pool_tx) = best_txs.next() { @@ -458,7 +448,7 @@ where let header = Header { parent_hash: parent_header.hash(), ommers_hash: 
EMPTY_OMMER_ROOT_HASH,
-            beneficiary: initialized_block_env.coinbase,
+            beneficiary,
             state_root,
             transactions_root,
             receipts_root,
diff --git a/crates/evm/src/env.rs b/crates/evm/src/env.rs
index 7d6583a879ec2..df69d7bdd83b3 100644
--- a/crates/evm/src/env.rs
+++ b/crates/evm/src/env.rs
@@ -9,6 +9,19 @@ pub struct EvmEnv {
     pub block_env: BlockEnv,
 }
 
+impl Default for EvmEnv {
+    fn default() -> Self {
+        Self {
+            cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg {
+                cfg_env: Default::default(),
+                // Will set `is_optimism` if `revm/optimism-default-handler` is enabled.
+                handler_cfg: Default::default(),
+            },
+            block_env: BlockEnv::default(),
+        }
+    }
+}
+
 impl EvmEnv {
     /// Create a new `EvmEnv` from its components.
     ///
diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs
index cd49785efd45b..2b5c98adcbb33 100644
--- a/crates/evm/src/lib.rs
+++ b/crates/evm/src/lib.rs
@@ -18,19 +18,18 @@ extern crate alloc;
 
 use crate::builder::RethEvmBuilder;
-use alloc::boxed::Box;
 use alloy_consensus::BlockHeader as _;
 use alloy_primitives::{Address, Bytes, B256, U256};
 use reth_primitives_traits::{BlockHeader, SignedTransaction};
 use revm::{Database, Evm, GetInspector};
-use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, SpecId, TxEnv};
+use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, SpecId, TxEnv};
 
 pub mod builder;
 pub mod either;
 /// EVM environment configuration.
 pub mod env;
 pub mod execute;
-use env::EvmEnv;
+pub use env::EvmEnv;
 
 #[cfg(feature = "std")]
 pub mod metrics;
@@ -53,13 +52,14 @@ pub trait ConfigureEvm: ConfigureEvmEnv {
     }
 
     /// Returns a new EVM with the given database configured with the given environment settings,
-    /// including the spec id.
+    /// including the spec id and transaction environment.
     ///
     /// This will preserve any handler modifications
-    fn evm_with_env(&self, db: DB, env: EnvWithHandlerCfg) -> Evm<'_, (), DB> {
+    fn evm_with_env(&self, db: DB, evm_env: EvmEnv, tx: TxEnv) -> Evm<'_, (), DB> {
        let mut evm = self.evm(db);
-        evm.modify_spec_id(env.spec_id());
-        evm.context.evm.env = env.env;
+        evm.modify_spec_id(evm_env.cfg_env_with_handler_cfg.handler_cfg.spec_id);
+        evm.context.evm.env =
+            Env::boxed(evm_env.cfg_env_with_handler_cfg.cfg_env, evm_env.block_env, tx);
         evm
     }
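For reviewers, a minimal sketch of how a caller drives the new `evm_with_env` signature; the `EthEvmConfig`/`MAINNET` setup mirrors the tests earlier in this patch, while the chain id, spec id, and gas limit values are illustrative assumptions:

    // Spec id now travels inside `cfg_env_with_handler_cfg.handler_cfg`
    // instead of being read off an `EnvWithHandlerCfg` wrapper.
    let evm_config = EthEvmConfig::new(MAINNET.clone());
    let evm_env = EvmEnv {
        cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg {
            cfg_env: CfgEnv::default().with_chain_id(1),
            handler_cfg: HandlerCfg { spec_id: SpecId::CANCUN, ..Default::default() },
        },
        ..Default::default()
    };
    // The transaction environment is a separate third argument rather than
    // being boxed into `Env` by the caller.
    let tx = TxEnv { gas_limit: 30_000_000, ..Default::default() };
    let evm = evm_config.evm_with_env(db, evm_env, tx);
    assert_eq!(evm.handler.spec_id(), SpecId::CANCUN);

Here `db` is any `revm` database, e.g. the in-memory `CacheDB` used by the tests above.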
 
@@ -71,17 +71,8 @@ pub trait ConfigureEvm: ConfigureEvmEnv {
     ///
     /// This does not initialize the tx environment.
     fn evm_for_block(&self, db: DB, header: &Self::Header) -> Evm<'_, (), DB> {
-        let EvmEnv {
-            cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { cfg_env, handler_cfg },
-            block_env,
-        } = self.cfg_and_block_env(header);
-        self.evm_with_env(
-            db,
-            EnvWithHandlerCfg {
-                env: Box::new(Env { cfg: cfg_env, block: block_env, tx: Default::default() }),
-                handler_cfg,
-            },
-        )
+        let evm_env = self.cfg_and_block_env(header);
+        self.evm_with_env(db, evm_env, Default::default())
     }
 
     /// Returns a new EVM with the given database configured with the given environment settings,
@@ -93,7 +84,8 @@ pub trait ConfigureEvm: ConfigureEvmEnv {
     fn evm_with_env_and_inspector(
         &self,
         db: DB,
-        env: EnvWithHandlerCfg,
+        evm_env: EvmEnv,
+        tx: TxEnv,
         inspector: I,
     ) -> Evm<'_, I, DB>
     where
         DB: Database,
         I: GetInspector,
     {
         let mut evm = self.evm_with_inspector(db, inspector);
-        evm.modify_spec_id(env.spec_id());
-        evm.context.evm.env = env.env;
+        evm.modify_spec_id(evm_env.cfg_env_with_handler_cfg.handler_cfg.spec_id);
+        evm.context.evm.env =
+            Env::boxed(evm_env.cfg_env_with_handler_cfg.cfg_env, evm_env.block_env, tx);
         evm
     }
 
@@ -133,21 +126,22 @@ where
         (*self).evm_for_block(db, header)
     }
 
-    fn evm_with_env(&self, db: DB, env: EnvWithHandlerCfg) -> Evm<'_, (), DB> {
-        (*self).evm_with_env(db, env)
+    fn evm_with_env(&self, db: DB, evm_env: EvmEnv, tx: TxEnv) -> Evm<'_, (), DB> {
+        (*self).evm_with_env(db, evm_env, tx)
     }
 
     fn evm_with_env_and_inspector(
         &self,
         db: DB,
-        env: EnvWithHandlerCfg,
+        evm_env: EvmEnv,
+        tx_env: TxEnv,
         inspector: I,
     ) -> Evm<'_, I, DB>
     where
         DB: Database,
         I: GetInspector,
     {
-        (*self).evm_with_env_and_inspector(db, env, inspector)
+        (*self).evm_with_env_and_inspector(db, evm_env, tx_env, inspector)
     }
 
     fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB>
diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs
index a9a88f67295d5..dbadd23b03171 100644
--- a/crates/optimism/evm/src/lib.rs
+++ b/crates/optimism/evm/src/lib.rs
@@ -211,7 +211,7 @@ mod tests {
         primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId},
         JournaledState,
     };
-    use revm_primitives::{EnvWithHandlerCfg, HandlerCfg};
+    use revm_primitives::HandlerCfg;
     use std::sync::Arc;
 
     fn test_evm_config() -> OpEvmConfig {
@@ -291,12 +291,12 @@ mod tests {
         let db = CacheDB::>::default();
 
-        let env_with_handler = EnvWithHandlerCfg::default();
+        let evm_env = EvmEnv::default();
 
-        let evm = evm_config.evm_with_env(db, env_with_handler.clone());
+        let evm = evm_config.evm_with_env(db, evm_env.clone(), Default::default());
 
         // Check that the EVM environment is initialized correctly
-        assert_eq!(evm.context.evm.env, env_with_handler.env);
+        assert_eq!(evm.context.evm.env.cfg, evm_env.cfg_env_with_handler_cfg.cfg_env);
 
         // Default spec ID
         assert_eq!(evm.handler.spec_id(), SpecId::LATEST);
@@ -314,16 +314,15 @@ mod tests {
 
         // Create a custom configuration environment with a chain ID of 111
         let cfg = CfgEnv::default().with_chain_id(111);
 
-        let env_with_handler = EnvWithHandlerCfg {
-            env: Box::new(Env {
-                cfg: cfg.clone(),
-                block: BlockEnv::default(),
-                tx: TxEnv::default(),
-            }),
-            handler_cfg: Default::default(),
+        let evm_env = EvmEnv {
+            cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg {
+                cfg_env: cfg.clone(),
+                handler_cfg: Default::default(),
+            },
+            ..Default::default()
         };
 
-        let evm = evm_config.evm_with_env(db, env_with_handler);
+        let evm = evm_config.evm_with_env(db, evm_env, Default::default());
 
         // Check that the EVM environment is initialized with the custom environment
assert_eq!(evm.context.evm.inner.env.cfg, cfg); @@ -350,16 +349,13 @@ mod tests { }; let tx = TxEnv { gas_limit: 5_000_000, gas_price: U256::from(50), ..Default::default() }; - let env_with_handler = EnvWithHandlerCfg { - env: Box::new(Env { cfg: CfgEnv::default(), block, tx }), - handler_cfg: Default::default(), - }; + let evm_env = EvmEnv { block_env: block, ..Default::default() }; - let evm = evm_config.evm_with_env(db, env_with_handler.clone()); + let evm = evm_config.evm_with_env(db, evm_env.clone(), tx.clone()); // Verify that the block and transaction environments are set correctly - assert_eq!(evm.context.evm.env.block, env_with_handler.env.block); - assert_eq!(evm.context.evm.env.tx, env_with_handler.env.tx); + assert_eq!(evm.context.evm.env.block, evm_env.block_env); + assert_eq!(evm.context.evm.env.tx, tx); // Default spec ID assert_eq!(evm.handler.spec_id(), SpecId::LATEST); @@ -376,9 +372,15 @@ mod tests { let handler_cfg = HandlerCfg { spec_id: SpecId::ECOTONE, ..Default::default() }; - let env_with_handler = EnvWithHandlerCfg { env: Box::new(Env::default()), handler_cfg }; + let evm_env = EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { + handler_cfg, + cfg_env: Default::default(), + }, + ..Default::default() + }; - let evm = evm_config.evm_with_env(db, env_with_handler); + let evm = evm_config.evm_with_env(db, evm_env, Default::default()); // Check that the spec ID is setup properly assert_eq!(evm.handler.spec_id(), SpecId::ECOTONE); @@ -436,13 +438,25 @@ mod tests { let evm_config = test_evm_config(); let db = CacheDB::>::default(); - let env_with_handler = EnvWithHandlerCfg::default(); + let evm_env = EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { + cfg_env: Default::default(), + handler_cfg: HandlerCfg { is_optimism: true, ..Default::default() }, + }, + ..Default::default() + }; - let evm = - evm_config.evm_with_env_and_inspector(db, env_with_handler.clone(), NoOpInspector); + let evm = evm_config.evm_with_env_and_inspector( + db, + evm_env.clone(), + Default::default(), + NoOpInspector, + ); // Check that the EVM environment is set to default values - assert_eq!(evm.context.evm.env, env_with_handler.env); + assert_eq!(evm.context.evm.env.block, evm_env.block_env); + assert_eq!(evm.context.evm.env.cfg, evm_env.cfg_env_with_handler_cfg.cfg_env); + assert_eq!(evm.context.evm.env.tx, Default::default()); assert_eq!(evm.context.external, NoOpInspector); assert_eq!(evm.handler.spec_id(), SpecId::LATEST); @@ -458,15 +472,21 @@ mod tests { let cfg = CfgEnv::default().with_chain_id(111); let block = BlockEnv::default(); let tx = TxEnv::default(); - let env_with_handler = EnvWithHandlerCfg { - env: Box::new(Env { cfg: cfg.clone(), block, tx }), - handler_cfg: Default::default(), + let evm_env = EvmEnv { + block_env: block, + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { + cfg_env: cfg.clone(), + handler_cfg: Default::default(), + }, }; - let evm = evm_config.evm_with_env_and_inspector(db, env_with_handler, NoOpInspector); + let evm = + evm_config.evm_with_env_and_inspector(db, evm_env.clone(), tx.clone(), NoOpInspector); // Check that the EVM environment is set with custom configuration assert_eq!(evm.context.evm.env.cfg, cfg); + assert_eq!(evm.context.evm.env.block, evm_env.block_env); + assert_eq!(evm.context.evm.env.tx, tx); assert_eq!(evm.context.external, NoOpInspector); assert_eq!(evm.handler.spec_id(), SpecId::LATEST); @@ -487,17 +507,14 @@ mod tests { ..Default::default() }; let tx = TxEnv { gas_limit: 5_000_000, gas_price: U256::from(50), 
..Default::default() }; - let env_with_handler = EnvWithHandlerCfg { - env: Box::new(Env { cfg: CfgEnv::default(), block, tx }), - handler_cfg: Default::default(), - }; + let evm_env = EvmEnv { block_env: block, ..Default::default() }; let evm = - evm_config.evm_with_env_and_inspector(db, env_with_handler.clone(), NoOpInspector); + evm_config.evm_with_env_and_inspector(db, evm_env.clone(), tx.clone(), NoOpInspector); // Verify that the block and transaction environments are set correctly - assert_eq!(evm.context.evm.env.block, env_with_handler.env.block); - assert_eq!(evm.context.evm.env.tx, env_with_handler.env.tx); + assert_eq!(evm.context.evm.env.block, evm_env.block_env); + assert_eq!(evm.context.evm.env.tx, tx); assert_eq!(evm.context.external, NoOpInspector); assert_eq!(evm.handler.spec_id(), SpecId::LATEST); @@ -511,14 +528,25 @@ mod tests { let db = CacheDB::>::default(); let handler_cfg = HandlerCfg { spec_id: SpecId::ECOTONE, ..Default::default() }; - let env_with_handler = EnvWithHandlerCfg { env: Box::new(Env::default()), handler_cfg }; + let evm_env = EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { + cfg_env: Default::default(), + handler_cfg, + }, + ..Default::default() + }; - let evm = - evm_config.evm_with_env_and_inspector(db, env_with_handler.clone(), NoOpInspector); + let evm = evm_config.evm_with_env_and_inspector( + db, + evm_env.clone(), + Default::default(), + NoOpInspector, + ); // Check that the spec ID is set properly assert_eq!(evm.handler.spec_id(), SpecId::ECOTONE); - assert_eq!(evm.context.evm.env, env_with_handler.env); + assert_eq!(evm.context.evm.env.cfg, evm_env.cfg_env_with_handler_cfg.cfg_env); + assert_eq!(evm.context.evm.env.block, evm_env.block_env); assert_eq!(evm.context.external, NoOpInspector); // Check that the spec ID is setup properly diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 505843370a141..12dbd0b5bca22 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -38,10 +38,7 @@ use reth_transaction_pool::{ }; use revm::{ db::{states::bundle_state::BundleRetention, State}, - primitives::{ - BlockEnv, CfgEnvWithHandlerCfg, EVMError, EnvWithHandlerCfg, InvalidTransaction, - ResultAndState, TxEnv, - }, + primitives::{EVMError, InvalidTransaction, ResultAndState}, Database, DatabaseCommit, }; use std::{fmt::Display, sync::Arc}; @@ -127,7 +124,6 @@ where let evm_env = self .cfg_and_block_env(&args.config.attributes, &args.config.parent_header) .map_err(PayloadBuilderError::other)?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; let BuildArguments { client, pool: _, mut cached_reads, config, cancel, best_payload } = args; @@ -136,8 +132,7 @@ where evm_config: self.evm_config.clone(), chain_spec: client.chain_spec(), config, - initialized_cfg: cfg_env_with_handler_cfg, - initialized_block_env: block_env, + evm_env, cancel, best_payload, }; @@ -192,15 +187,13 @@ where let evm_env = self.cfg_and_block_env(&attributes, &parent).map_err(PayloadBuilderError::other)?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; let config = PayloadConfig { parent_header: Arc::new(parent), attributes }; let ctx = OpPayloadBuilderCtx { evm_config: self.evm_config.clone(), chain_spec: client.chain_spec(), config, - initialized_cfg: cfg_env_with_handler_cfg, - initialized_block_env: block_env, + evm_env, cancel: Default::default(), best_payload: Default::default(), }; @@ -402,7 +395,7 @@ where let header = Header { parent_hash: 
ctx.parent().hash(), ommers_hash: EMPTY_OMMER_ROOT_HASH, - beneficiary: ctx.initialized_block_env.coinbase, + beneficiary: ctx.evm_env.block_env.coinbase, state_root, transactions_root, receipts_root, @@ -557,9 +550,7 @@ pub struct OpPayloadBuilderCtx { /// How to build the payload. pub config: PayloadConfig, /// Evm Settings - pub initialized_cfg: CfgEnvWithHandlerCfg, - /// Block config - pub initialized_block_env: BlockEnv, + pub evm_env: EvmEnv, /// Marker to check whether the job has been cancelled. pub cancel: Cancelled, /// The currently best payload. @@ -588,22 +579,22 @@ impl OpPayloadBuilderCtx { pub fn block_gas_limit(&self) -> u64 { self.attributes() .gas_limit - .unwrap_or_else(|| self.initialized_block_env.gas_limit.saturating_to()) + .unwrap_or_else(|| self.evm_env.block_env.gas_limit.saturating_to()) } /// Returns the block number for the block. pub fn block_number(&self) -> u64 { - self.initialized_block_env.number.to() + self.evm_env.block_env.number.to() } /// Returns the current base fee pub fn base_fee(&self) -> u64 { - self.initialized_block_env.basefee.to() + self.evm_env.block_env.basefee.to() } /// Returns the current blob gas price. pub fn get_blob_gasprice(&self) -> Option { - self.initialized_block_env.get_blob_gasprice().map(|gasprice| gasprice as u64) + self.evm_env.block_env.get_blob_gasprice().map(|gasprice| gasprice as u64) } /// Returns the blob fields for the header. @@ -722,8 +713,8 @@ where SystemCaller::new(self.evm_config.clone(), self.chain_spec.clone()) .pre_block_beacon_root_contract_call( db, - &self.initialized_cfg, - &self.initialized_block_env, + &self.evm_env.cfg_env_with_handler_cfg, + &self.evm_env.block_env, self.attributes().payload_attributes.parent_beacon_block_root, ) .map_err(|err| { @@ -747,13 +738,8 @@ where DB: Database, { let mut info = ExecutionInfo::with_capacity(self.attributes().transactions.len()); - - let env = EnvWithHandlerCfg::new_with_cfg_env( - self.initialized_cfg.clone(), - self.initialized_block_env.clone(), - TxEnv::default(), - ); - let mut evm = self.evm_config.evm_with_env(&mut *db, env); + let mut evm = + self.evm_config.evm_with_env(&mut *db, self.evm_env.clone(), Default::default()); for sequencer_tx in &self.attributes().transactions { // A sequencer's block should never contain blob transactions. @@ -862,12 +848,8 @@ where let block_gas_limit = self.block_gas_limit(); let base_fee = self.base_fee(); - let env = EnvWithHandlerCfg::new_with_cfg_env( - self.initialized_cfg.clone(), - self.initialized_block_env.clone(), - TxEnv::default(), - ); - let mut evm = self.evm_config.evm_with_env(&mut *db, env); + let mut evm = + self.evm_config.evm_with_env(&mut *db, self.evm_env.clone(), Default::default()); while let Some(tx) = best_txs.next(()) { // ensure we still have capacity for this transaction diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 91bedbeb532ea..538c980801491 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -41,6 +41,7 @@ use reth_rpc_eth_types::{ }; use revm::{Database, DatabaseCommit, GetInspector}; use revm_inspectors::{access_list::AccessListInspector, transfer::TransferInspector}; +use revm_primitives::Env; use tracing::trace; /// Result type for `eth_simulateV1` RPC method. @@ -86,8 +87,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } // Build cfg and block env, we'll reuse those. 
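The `eth_simulateV1` hunk that follows stops destructuring the env into separate `cfg`/`block` bindings and instead mutates one `EvmEnv` in place across simulated blocks. A condensed sketch of the per-block bookkeeping it performs; `next_base_fee` stands in for the EIP-1559 calculation done by the real code:

    // Each simulated block bumps number and timestamp on the shared env...
    evm_env.block_env.number += U256::from(1);
    evm_env.block_env.timestamp += U256::from(1);
    // ...and either derives the base fee (validation mode) or zeroes it out.
    evm_env.block_env.basefee =
        if validation { U256::from(next_base_fee) } else { U256::ZERO };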
- let (evm_env, block) = self.evm_env_at(block.unwrap_or_default()).await?; - let EvmEnv { mut cfg_env_with_handler_cfg, mut block_env } = evm_env; + let (mut evm_env, block) = self.evm_env_at(block.unwrap_or_default()).await?; // Gas cap for entire operation let total_gas_limit = self.call_gas_limit(); @@ -97,9 +97,9 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let mut parent_hash = base_block.hash(); // Only enforce base fee if validation is enabled - cfg_env_with_handler_cfg.disable_base_fee = !validation; + evm_env.cfg_env_with_handler_cfg.disable_base_fee = !validation; // Always disable EIP-3607 - cfg_env_with_handler_cfg.disable_eip3607 = true; + evm_env.cfg_env_with_handler_cfg.disable_eip3607 = true; let this = self.clone(); self.spawn_with_state_at_block(block, move |state| { @@ -110,13 +110,13 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let mut block_state_calls = block_state_calls.into_iter().peekable(); while let Some(block) = block_state_calls.next() { // Increase number and timestamp for every new block - block_env.number += U256::from(1); - block_env.timestamp += U256::from(1); + evm_env.block_env.number += U256::from(1); + evm_env.block_env.timestamp += U256::from(1); if validation { let chain_spec = RpcNodeCore::provider(&this).chain_spec(); - let base_fee_params = - chain_spec.base_fee_params_at_timestamp(block_env.timestamp.to()); + let base_fee_params = chain_spec + .base_fee_params_at_timestamp(evm_env.block_env.timestamp.to()); let base_fee = if let Some(latest) = blocks.last() { let header = &latest.inner.header; calc_next_block_base_fee( @@ -128,21 +128,21 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } else { base_block.next_block_base_fee(base_fee_params).unwrap_or_default() }; - block_env.basefee = U256::from(base_fee); + evm_env.block_env.basefee = U256::from(base_fee); } else { - block_env.basefee = U256::ZERO; + evm_env.block_env.basefee = U256::ZERO; } let SimBlock { block_overrides, state_overrides, calls } = block; if let Some(block_overrides) = block_overrides { - apply_block_overrides(block_overrides, &mut db, &mut block_env); + apply_block_overrides(block_overrides, &mut db, &mut evm_env.block_env); } if let Some(state_overrides) = state_overrides { apply_state_overrides(state_overrides, &mut db)?; } - if (total_gas_limit - gas_used) < block_env.gas_limit.to() { + if (total_gas_limit - gas_used) < evm_env.block_env.gas_limit.to() { return Err( EthApiError::Other(Box::new(EthSimulateError::GasLimitReached)).into() ) @@ -153,7 +153,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let txs_without_gas_limit = calls.iter().filter(|tx| tx.gas.is_none()).count(); - if total_specified_gas > block_env.gas_limit.to() { + if total_specified_gas > evm_env.block_env.gas_limit.to() { return Err(EthApiError::Other(Box::new( EthSimulateError::BlockGasLimitExceeded, )) @@ -161,7 +161,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } if txs_without_gas_limit > 0 { - (block_env.gas_limit.to::() - total_specified_gas) / + (evm_env.block_env.gas_limit.to::() - total_specified_gas) / txs_without_gas_limit as u64 } else { 0 @@ -182,27 +182,23 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA call, validation, default_gas_limit, - cfg_env_with_handler_cfg.chain_id, + evm_env.cfg_env_with_handler_cfg.chain_id, &mut db, this.tx_resp_builder(), )?; let 
tx_env = this.evm_config().tx_env(&tx, sender); - let env = EnvWithHandlerCfg::new_with_cfg_env( - cfg_env_with_handler_cfg.clone(), - block_env.clone(), - tx_env, - ); - let (res, env) = { + let (res, (_, tx_env)) = { if trace_transfers { this.transact_with_inspector( &mut db, - env, + evm_env.clone(), + tx_env, TransferInspector::new(false).with_logs(true), )? } else { - this.transact(&mut db, env)? + this.transact(&mut db, evm_env.clone(), tx_env.clone())? } }; @@ -213,12 +209,12 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } transactions.push(tx); - senders.push(env.tx.caller); + senders.push(tx_env.caller); results.push(res.result); } let (block, _) = this.assemble_block_and_receipts( - &block_env, + &evm_env.block_env, parent_hash, // state root calculation is skipped for performance reasons B256::ZERO, @@ -300,7 +296,6 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA self.evm_env_at(target_block), self.block_with_senders(target_block) )?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; let block = block.ok_or(EthApiError::HeaderNotFound(target_block))?; @@ -330,12 +325,8 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA // to be replayed let transactions = block.transactions_with_sender().take(num_txs); for (signer, tx) in transactions { - let env = EnvWithHandlerCfg::new_with_cfg_env( - cfg_env_with_handler_cfg.clone(), - block_env.clone(), - RpcNodeCore::evm_config(&this).tx_env(tx, *signer), - ); - let (res, _) = this.transact(&mut db, env)?; + let tx_env = RpcNodeCore::evm_config(&this).tx_env(tx, *signer); + let (res, _) = this.transact(&mut db, evm_env.clone(), tx_env)?; db.commit(res.state); } } @@ -348,14 +339,9 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let state_overrides = state_override.take(); let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); - let env = this.prepare_call_env( - cfg_env_with_handler_cfg.clone(), - block_env.clone(), - tx, - &mut db, - overrides, - )?; - let (res, _) = this.transact(&mut db, env)?; + let (evm_env, tx) = + this.prepare_call_env(evm_env.clone(), tx, &mut db, overrides)?; + let (res, _) = this.transact(&mut db, evm_env, tx)?; match ensure_success(res.result) { Ok(output) => { @@ -395,12 +381,9 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA async move { let block_id = block_number.unwrap_or_default(); let (evm_env, at) = self.evm_env_at(block_id).await?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; - self.spawn_blocking_io(move |this| { - this.create_access_list_with(cfg_env_with_handler_cfg, block_env, at, request) - }) - .await + self.spawn_blocking_io(move |this| this.create_access_list_with(evm_env, at, request)) + .await } } @@ -408,8 +391,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// [`BlockId`]. 
fn create_access_list_with( &self, - cfg: CfgEnvWithHandlerCfg, - block: BlockEnv, + mut evm_env: EvmEnv, at: BlockId, mut request: TransactionRequest, ) -> Result @@ -418,23 +400,23 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA { let state = self.state_at_block_id(at)?; - let mut env = self.build_call_evm_env(cfg, block, request.clone())?; + let mut tx_env = self.create_txn_env(&evm_env.block_env, request.clone())?; // we want to disable this in eth_createAccessList, since this is common practice used by // other node impls and providers - env.cfg.disable_block_gas_limit = true; + evm_env.cfg_env_with_handler_cfg.disable_block_gas_limit = true; // The basefee should be ignored for eth_createAccessList // See: // - env.cfg.disable_base_fee = true; + evm_env.cfg_env_with_handler_cfg.disable_base_fee = true; let mut db = CacheDB::new(StateProviderDatabase::new(state)); - if request.gas.is_none() && env.tx.gas_price > U256::ZERO { - let cap = caller_gas_allowance(&mut db, &env.tx)?; + if request.gas.is_none() && tx_env.gas_price > U256::ZERO { + let cap = caller_gas_allowance(&mut db, &tx_env)?; // no gas limit was provided in the request, so we need to cap the request's gas limit - env.tx.gas_limit = cap.min(env.block.gas_limit).saturating_to(); + tx_env.gas_limit = cap.min(evm_env.block_env.gas_limit).saturating_to(); } let from = request.from.unwrap_or_default(); @@ -449,16 +431,17 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA // can consume the list since we're not using the request anymore let initial = request.access_list.take().unwrap_or_default(); - let precompiles = get_precompiles(env.handler_cfg.spec_id); + let precompiles = get_precompiles(evm_env.cfg_env_with_handler_cfg.handler_cfg.spec_id); let mut inspector = AccessListInspector::new(initial, from, to, precompiles); - let (result, mut env) = self.inspect(&mut db, env, &mut inspector)?; + let (result, (evm_env, mut tx_env)) = + self.inspect(&mut db, evm_env, tx_env, &mut inspector)?; let access_list = inspector.into_access_list(); - env.tx.access_list = access_list.to_vec(); + tx_env.access_list = access_list.to_vec(); match result.result { ExecutionResult::Halt { reason, gas_used } => { let error = - Some(RpcInvalidTransactionError::halt(reason, env.tx.gas_limit).to_string()); + Some(RpcInvalidTransactionError::halt(reason, tx_env.gas_limit).to_string()); return Ok(AccessListResult { access_list, gas_used: U256::from(gas_used), error }) } ExecutionResult::Revert { output, gas_used } => { @@ -469,11 +452,11 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA }; // transact again to get the exact gas used - let (result, env) = self.transact(&mut db, env)?; + let (result, (_, tx_env)) = self.transact(&mut db, evm_env, tx_env)?; let res = match result.result { ExecutionResult::Halt { reason, gas_used } => { let error = - Some(RpcInvalidTransactionError::halt(reason, env.tx.gas_limit).to_string()); + Some(RpcInvalidTransactionError::halt(reason, tx_env.gas_limit).to_string()); AccessListResult { access_list, gas_used: U256::from(gas_used), error } } ExecutionResult::Revert { output, gas_used } => { @@ -515,16 +498,25 @@ pub trait Call: fn transact( &self, db: DB, - env: EnvWithHandlerCfg, - ) -> Result<(ResultAndState, EnvWithHandlerCfg), Self::Error> + evm_env: EvmEnv, + tx_env: TxEnv, + ) -> Result<(ResultAndState, (EvmEnv, TxEnv)), Self::Error> where DB: Database, EthApiError: From, { - let mut evm = 
self.evm_config().evm_with_env(db, env); + let mut evm = self.evm_config().evm_with_env(db, evm_env, tx_env); let res = evm.transact().map_err(Self::Error::from_evm_err)?; let (_, env) = evm.into_db_and_env_with_handler_cfg(); - Ok((res, env)) + + let EnvWithHandlerCfg { env, handler_cfg } = env; + let Env { cfg, block, tx } = *env; + let evm_env = EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { cfg_env: cfg, handler_cfg }, + block_env: block, + }; + + Ok((res, (evm_env, tx))) } /// Executes the [`EnvWithHandlerCfg`] against the given [Database] without committing state @@ -532,17 +524,26 @@ pub trait Call: fn transact_with_inspector( &self, db: DB, - env: EnvWithHandlerCfg, + evm_env: EvmEnv, + tx_env: TxEnv, inspector: impl GetInspector, - ) -> Result<(ResultAndState, EnvWithHandlerCfg), Self::Error> + ) -> Result<(ResultAndState, (EvmEnv, TxEnv)), Self::Error> where DB: Database, EthApiError: From, { - let mut evm = self.evm_config().evm_with_env_and_inspector(db, env, inspector); + let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, tx_env, inspector); let res = evm.transact().map_err(Self::Error::from_evm_err)?; let (_, env) = evm.into_db_and_env_with_handler_cfg(); - Ok((res, env)) + + let EnvWithHandlerCfg { env, handler_cfg } = env; + let Env { cfg, block, tx } = *env; + let evm_env = EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { cfg_env: cfg, handler_cfg }, + block_env: block, + }; + + Ok((res, (evm_env, tx))) } /// Executes the call request at the given [`BlockId`]. @@ -551,12 +552,14 @@ pub trait Call: request: TransactionRequest, at: BlockId, overrides: EvmOverrides, - ) -> impl Future> + Send + ) -> impl Future> + Send where Self: LoadPendingBlock, { let this = self.clone(); - self.spawn_with_call_at(request, at, overrides, move |db, env| this.transact(db, env)) + self.spawn_with_call_at(request, at, overrides, move |db, evm_env, tx_env| { + this.transact(db, evm_env, tx_env) + }) } /// Executes the closure with the state that corresponds to the given [`BlockId`] on a new task @@ -599,29 +602,23 @@ pub trait Call: ) -> impl Future> + Send where Self: LoadPendingBlock, - F: FnOnce(StateCacheDbRefMutWrapper<'_, '_>, EnvWithHandlerCfg) -> Result + F: FnOnce(StateCacheDbRefMutWrapper<'_, '_>, EvmEnv, TxEnv) -> Result + Send + 'static, R: Send + 'static, { async move { let (evm_env, at) = self.evm_env_at(at).await?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; let this = self.clone(); self.spawn_blocking_io(move |_| { let state = this.state_at_block_id(at)?; let mut db = CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); - let env = this.prepare_call_env( - cfg_env_with_handler_cfg, - block_env, - request, - &mut db, - overrides, - )?; + let (evm_env, tx_env) = + this.prepare_call_env(evm_env, request, &mut db, overrides)?; - f(StateCacheDbRefMutWrapper(&mut db), env) + f(StateCacheDbRefMutWrapper(&mut db), evm_env, tx_env) }) .await } @@ -656,7 +653,6 @@ pub trait Call: let (tx, tx_info) = transaction.split(); let (evm_env, _) = self.evm_env_at(block.hash().into()).await?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; // we need to get the state of the parent block because we're essentially replaying the // block the transaction is included in @@ -668,21 +664,11 @@ pub trait Call: let block_txs = block.transactions_with_sender(); // replay all transactions prior to the targeted transaction - this.replay_transactions_until( - &mut db, - cfg_env_with_handler_cfg.clone(), - 
block_env.clone(), - block_txs, - *tx.tx_hash(), - )?; - - let env = EnvWithHandlerCfg::new_with_cfg_env( - cfg_env_with_handler_cfg, - block_env, - RpcNodeCore::evm_config(&this).tx_env(tx.tx(), tx.signer()), - ); - - let (res, _) = this.transact(&mut db, env)?; + this.replay_transactions_until(&mut db, evm_env.clone(), block_txs, *tx.tx_hash())?; + + let tx_env = RpcNodeCore::evm_config(&this).tx_env(tx.tx(), tx.signer()); + + let (res, _) = this.transact(&mut db, evm_env, tx_env)?; f(tx_info, res, db) }) .await @@ -700,8 +686,7 @@ pub trait Call: fn replay_transactions_until<'a, DB, I>( &self, db: &mut DB, - cfg: CfgEnvWithHandlerCfg, - block_env: BlockEnv, + evm_env: EvmEnv, transactions: I, target_tx_hash: B256, ) -> Result @@ -711,9 +696,7 @@ pub trait Call: I: IntoIterator::Transaction)>, ::Transaction: SignedTransaction, { - let env = EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()); - - let mut evm = self.evm_config().evm_with_env(db, env); + let mut evm = self.evm_config().evm_with_env(db, evm_env, Default::default()); let mut index = 0; for (sender, tx) in transactions { if *tx.tx_hash() == target_tx_hash { @@ -807,20 +790,6 @@ pub trait Call: Ok(env) } - /// Creates a new [`EnvWithHandlerCfg`] to be used for executing the [`TransactionRequest`] in - /// `eth_call`. - /// - /// Note: this does _not_ access the Database to check the sender. - fn build_call_evm_env( - &self, - cfg: CfgEnvWithHandlerCfg, - block: BlockEnv, - request: TransactionRequest, - ) -> Result { - let tx = self.create_txn_env(&block, request)?; - Ok(EnvWithHandlerCfg::new_with_cfg_env(cfg, block, tx)) - } - /// Prepares the [`EnvWithHandlerCfg`] for execution of calls. /// /// Does not commit any changes to the underlying database. @@ -836,12 +805,11 @@ pub trait Call: /// In addition, this changes the block's gas limit to the configured [`Self::call_gas_limit`]. 
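The helper below now returns the prepared `(EvmEnv, TxEnv)` pair instead of a boxed `EnvWithHandlerCfg`, so a caller threads both values straight into `transact`. A hedged sketch of the round trip, with error handling elided:

    // `prepare_call_env` applies gas caps and overrides, then yields the pair...
    let (evm_env, tx_env) = this.prepare_call_env(evm_env, request, &mut db, overrides)?;
    // ...and `transact` consumes it, returning the (possibly modified) pair so
    // that a retry can reuse it alongside the execution result.
    let (res, (evm_env, tx_env)) = this.transact(&mut db, evm_env, tx_env)?;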
fn prepare_call_env( &self, - mut cfg: CfgEnvWithHandlerCfg, - mut block: BlockEnv, + mut evm_env: EvmEnv, mut request: TransactionRequest, db: &mut CacheDB, overrides: EvmOverrides, - ) -> Result + ) -> Result<(EvmEnv, TxEnv), Self::Error> where DB: DatabaseRef, EthApiError: From<::Error>, @@ -854,41 +822,41 @@ pub trait Call: } // apply configured gas cap - block.gas_limit = U256::from(self.call_gas_limit()); + evm_env.block_env.gas_limit = U256::from(self.call_gas_limit()); // Disabled because eth_call is sometimes used with eoa senders // See - cfg.disable_eip3607 = true; + evm_env.cfg_env_with_handler_cfg.disable_eip3607 = true; // The basefee should be ignored for eth_call // See: // - cfg.disable_base_fee = true; + evm_env.cfg_env_with_handler_cfg.disable_base_fee = true; // set nonce to None so that the correct nonce is chosen by the EVM request.nonce = None; if let Some(block_overrides) = overrides.block { - apply_block_overrides(*block_overrides, db, &mut block); + apply_block_overrides(*block_overrides, db, &mut evm_env.block_env); } if let Some(state_overrides) = overrides.state { apply_state_overrides(state_overrides, db)?; } let request_gas = request.gas; - let mut env = self.build_call_evm_env(cfg, block, request)?; + let mut tx_env = self.create_txn_env(&evm_env.block_env, request)?; if request_gas.is_none() { // No gas limit was provided in the request, so we need to cap the transaction gas limit - if env.tx.gas_price > U256::ZERO { + if tx_env.gas_price > U256::ZERO { // If gas price is specified, cap transaction gas limit with caller allowance - trace!(target: "rpc::eth::call", ?env, "Applying gas limit cap with caller allowance"); - let cap = caller_gas_allowance(db, &env.tx)?; + trace!(target: "rpc::eth::call", ?tx_env, "Applying gas limit cap with caller allowance"); + let cap = caller_gas_allowance(db, &tx_env)?; // ensure we cap gas_limit to the block's - env.tx.gas_limit = cap.min(env.block.gas_limit).saturating_to(); + tx_env.gas_limit = cap.min(evm_env.block_env.gas_limit).saturating_to(); } } - Ok(env) + Ok((evm_env, tx_env)) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index fe2fa482d54d7..f8fc02ce3625d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -11,14 +11,14 @@ use reth_provider::StateProvider; use reth_revm::{ database::StateProviderDatabase, db::CacheDB, - primitives::{BlockEnv, CfgEnvWithHandlerCfg, ExecutionResult, HaltReason, TransactTo}, + primitives::{ExecutionResult, HaltReason, TransactTo}, }; use reth_rpc_eth_types::{ revm_utils::{apply_state_overrides, caller_gas_allowance}, EthApiError, RevertError, RpcInvalidTransactionError, }; use reth_rpc_server_types::constants::gas_oracle::{CALL_STIPEND_GAS, ESTIMATE_GAS_ERROR_RATIO}; -use revm_primitives::{db::Database, EnvWithHandlerCfg}; +use revm_primitives::{db::Database, TxEnv}; use tracing::trace; /// Gas execution estimates @@ -36,8 +36,7 @@ pub trait EstimateCall: Call { /// - `nonce` is set to `None` fn estimate_gas_with( &self, - mut cfg: CfgEnvWithHandlerCfg, - block: BlockEnv, + mut evm_env: EvmEnv, mut request: TransactionRequest, state: S, state_override: Option, @@ -47,12 +46,12 @@ pub trait EstimateCall: Call { { // Disabled because eth_estimateGas is sometimes used with eoa senders // See - cfg.disable_eip3607 = true; + evm_env.cfg_env_with_handler_cfg.disable_eip3607 = true; // The basefee should be ignored for eth_estimateGas and similar // See: // - 
cfg.disable_base_fee = true; + evm_env.cfg_env_with_handler_cfg.disable_base_fee = true; // set nonce to None so that the correct nonce is chosen by the EVM request.nonce = None; @@ -61,7 +60,7 @@ pub trait EstimateCall: Call { let tx_request_gas_limit = request.gas.map(U256::from); let tx_request_gas_price = request.gas_price; // the gas limit of the corresponding block - let block_env_gas_limit = block.gas_limit; + let block_env_gas_limit = evm_env.block_env.gas_limit; // Determine the highest possible gas limit, considering both the request's specified limit // and the block's limit. @@ -76,7 +75,7 @@ pub trait EstimateCall: Call { .unwrap_or(block_env_gas_limit); // Configure the evm env - let mut env = self.build_call_evm_env(cfg, block, request)?; + let mut tx_env = self.create_txn_env(&evm_env.block_env, request)?; let mut db = CacheDB::new(StateProviderDatabase::new(state)); // Apply any state overrides if specified. @@ -85,8 +84,8 @@ pub trait EstimateCall: Call { } // Optimize for simple transfer transactions, potentially reducing the gas estimate. - if env.tx.data.is_empty() { - if let TransactTo::Call(to) = env.tx.transact_to { + if tx_env.data.is_empty() { + if let TransactTo::Call(to) = tx_env.transact_to { if let Ok(code) = db.db.account_code(&to) { let no_code_callee = code.map(|code| code.is_empty()).unwrap_or(true); if no_code_callee { @@ -95,9 +94,9 @@ pub trait EstimateCall: Call { // `MIN_TRANSACTION_GAS` is dangerous because there might be additional // field combos that bump the price up, so we try executing the function // with the minimum gas limit to make sure. - let mut env = env.clone(); - env.tx.gas_limit = MIN_TRANSACTION_GAS; - if let Ok((res, _)) = self.transact(&mut db, env) { + let mut tx_env = tx_env.clone(); + tx_env.gas_limit = MIN_TRANSACTION_GAS; + if let Ok((res, _)) = self.transact(&mut db, evm_env.clone(), tx_env) { if res.result.is_success() { return Ok(U256::from(MIN_TRANSACTION_GAS)) } @@ -110,35 +109,41 @@ pub trait EstimateCall: Call { // Check funds of the sender (only useful to check if transaction gas price is more than 0). // // The caller allowance is check by doing `(account.balance - tx.value) / tx.gas_price` - if env.tx.gas_price > U256::ZERO { + if tx_env.gas_price > U256::ZERO { // cap the highest gas limit by max gas caller can afford with given gas price highest_gas_limit = highest_gas_limit - .min(caller_gas_allowance(&mut db, &env.tx).map_err(Self::Error::from_eth_err)?); + .min(caller_gas_allowance(&mut db, &tx_env).map_err(Self::Error::from_eth_err)?); } // We can now normalize the highest gas limit to a u64 let mut highest_gas_limit = highest_gas_limit.saturating_to::(); // If the provided gas limit is less than computed cap, use that - env.tx.gas_limit = env.tx.gas_limit.min(highest_gas_limit); + tx_env.gas_limit = tx_env.gas_limit.min(highest_gas_limit); - trace!(target: "rpc::eth::estimate", ?env, "Starting gas estimation"); + trace!(target: "rpc::eth::estimate", ?evm_env, ?tx_env, "Starting gas estimation"); // Execute the transaction with the highest possible gas limit. - let (mut res, mut env) = match self.transact(&mut db, env.clone()) { - // Handle the exceptional case where the transaction initialization uses too much gas. - // If the gas price or gas limit was specified in the request, retry the transaction - // with the block's gas limit to determine if the failure was due to - // insufficient gas. 
- Err(err) - if err.is_gas_too_high() && - (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => - { - return Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) - } - // Propagate other results (successful or other errors). - ethres => ethres?, - }; + let (mut res, (mut evm_env, mut tx_env)) = + match self.transact(&mut db, evm_env.clone(), tx_env.clone()) { + // Handle the exceptional case where the transaction initialization uses too much + // gas. If the gas price or gas limit was specified in the request, + // retry the transaction with the block's gas limit to determine if + // the failure was due to insufficient gas. + Err(err) + if err.is_gas_too_high() && + (tx_request_gas_limit.is_some() || tx_request_gas_price.is_some()) => + { + return Err(self.map_out_of_gas_err( + block_env_gas_limit, + evm_env, + tx_env, + &mut db, + )) + } + // Propagate other results (successful or other errors). + ethres => ethres?, + }; let gas_refund = match res.result { ExecutionResult::Success { gas_refunded, .. } => gas_refunded, @@ -151,7 +156,7 @@ pub trait EstimateCall: Call { // if price or limit was included in the request then we can execute the request // again with the block's gas limit to check if revert is gas related or not return if tx_request_gas_limit.is_some() || tx_request_gas_price.is_some() { - Err(self.map_out_of_gas_err(block_env_gas_limit, env, &mut db)) + Err(self.map_out_of_gas_err(block_env_gas_limit, evm_env, tx_env, &mut db)) } else { // the transaction did revert Err(RpcInvalidTransactionError::Revert(RevertError::new(output)).into_eth_err()) @@ -164,7 +169,7 @@ pub trait EstimateCall: Call { // we know the tx succeeded with the configured gas limit, so we can use that as the // highest, in case we applied a gas cap due to caller allowance above - highest_gas_limit = env.tx.gas_limit; + highest_gas_limit = tx_env.gas_limit; // NOTE: this is the gas the transaction used, which is less than the // transaction requires to succeed. @@ -181,10 +186,10 @@ pub trait EstimateCall: Call { let optimistic_gas_limit = (gas_used + gas_refund + CALL_STIPEND_GAS) * 64 / 63; if optimistic_gas_limit < highest_gas_limit { // Set the transaction's gas limit to the calculated optimistic gas limit. - env.tx.gas_limit = optimistic_gas_limit; + tx_env.gas_limit = optimistic_gas_limit; // Re-execute the transaction with the new gas limit and update the result and // environment. - (res, env) = self.transact(&mut db, env)?; + (res, (evm_env, tx_env)) = self.transact(&mut db, evm_env, tx_env)?; // Update the gas used based on the new result. gas_used = res.result.gas_used(); // Update the gas limit estimates (highest and lowest) based on the execution result. @@ -202,7 +207,7 @@ pub trait EstimateCall: Call { ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64, ); - trace!(target: "rpc::eth::estimate", ?env, ?highest_gas_limit, ?lowest_gas_limit, ?mid_gas_limit, "Starting binary search for gas"); + trace!(target: "rpc::eth::estimate", ?evm_env, ?tx_env, ?highest_gas_limit, ?lowest_gas_limit, ?mid_gas_limit, "Starting binary search for gas"); // Binary search narrows the range to find the minimum gas limit needed for the transaction // to succeed. @@ -216,10 +221,10 @@ pub trait EstimateCall: Call { break }; - env.tx.gas_limit = mid_gas_limit; + tx_env.gas_limit = mid_gas_limit; // Execute transaction and handle potential gas errors, adjusting limits accordingly. 
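The search below keeps halving the window between `lowest_gas_limit` and `highest_gas_limit` until it brackets the smallest limit that still succeeds. A simplified sketch of one iteration under the new API; the real loop additionally applies an error-ratio cutoff and a richer error taxonomy:

    let mid_gas_limit = ((highest_gas_limit as u128 + lowest_gas_limit as u128) / 2) as u64;
    tx_env.gas_limit = mid_gas_limit;
    match self.transact(&mut db, evm_env.clone(), tx_env.clone()) {
        // The mid-point was rejected for exceeding gas constraints: answer is lower.
        Err(err) if err.is_gas_too_high() => highest_gas_limit = mid_gas_limit,
        // Otherwise unpack the result and let the shared helper move the bounds.
        ethres => {
            let (res, _) = ethres?;
            update_estimated_gas_range(
                res.result,
                mid_gas_limit,
                &mut highest_gas_limit,
                &mut lowest_gas_limit,
            )?;
        }
    }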
- match self.transact(&mut db, env.clone()) { + match self.transact(&mut db, evm_env.clone(), tx_env.clone()) { Err(err) if err.is_gas_too_high() => { // Decrease the highest gas limit if gas is too high highest_gas_limit = mid_gas_limit; @@ -231,7 +236,7 @@ pub trait EstimateCall: Call { // Handle other cases, including successful transactions. ethres => { // Unpack the result and environment if the transaction was successful. - (res, env) = ethres?; + (res, (evm_env, tx_env)) = ethres?; // Update the estimated gas range based on the transaction result. update_estimated_gas_range( res.result, @@ -261,18 +266,10 @@ pub trait EstimateCall: Call { { async move { let (evm_env, at) = self.evm_env_at(at).await?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; self.spawn_blocking_io(move |this| { let state = this.state_at_block_id(at)?; - EstimateCall::estimate_gas_with( - &this, - cfg_env_with_handler_cfg, - block_env, - request, - state, - state_override, - ) + EstimateCall::estimate_gas_with(&this, evm_env, request, state, state_override) }) .await } @@ -284,16 +281,17 @@ pub trait EstimateCall: Call { fn map_out_of_gas_err( &self, env_gas_limit: U256, - mut env: EnvWithHandlerCfg, + evm_env: EvmEnv, + mut tx_env: TxEnv, db: &mut DB, ) -> Self::Error where DB: Database, EthApiError: From, { - let req_gas_limit = env.tx.gas_limit; - env.tx.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); - let (res, _) = match self.transact(db, env) { + let req_gas_limit = tx_env.gas_limit; + tx_env.gas_limit = env_gas_limit.try_into().unwrap_or(u64::MAX); + let (res, _) = match self.transact(db, evm_env, tx_env) { Ok(res) => res, Err(err) => return err, }; diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index ccb0cd84fec2c..810a49f07ac3f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -1,7 +1,7 @@ //! Loads a pending block from database. Helper trait for `eth_` call and trace RPC methods. use super::{Call, LoadBlock, LoadPendingBlock, LoadState, LoadTransaction}; -use crate::{FromEvmError, RpcNodeCore}; +use crate::FromEvmError; use alloy_consensus::BlockHeader; use alloy_primitives::B256; use alloy_rpc_types_eth::{BlockId, TransactionInfo}; @@ -19,7 +19,7 @@ use reth_rpc_eth_types::{ use revm::{db::CacheDB, Database, DatabaseCommit, GetInspector, Inspector}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use revm_primitives::{ - BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState, + CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg, EvmState, ExecutionResult, ResultAndState, TxEnv, }; use std::{fmt::Display, sync::Arc}; @@ -38,38 +38,25 @@ pub trait Trace: fn inspect( &self, db: DB, - env: EnvWithHandlerCfg, + evm_env: EvmEnv, + tx_env: TxEnv, inspector: I, - ) -> Result<(ResultAndState, EnvWithHandlerCfg), Self::Error> + ) -> Result<(ResultAndState, (EvmEnv, TxEnv)), Self::Error> where DB: Database, EthApiError: From, I: GetInspector, { - self.inspect_and_return_db(db, env, inspector).map(|(res, env, _)| (res, env)) - } - - /// Same as [`inspect`](Self::inspect) but also returns the database again. 
- /// - /// Even though [Database] is also implemented on `&mut` - /// this is still useful if there are certain trait bounds on the Inspector's database generic - /// type - fn inspect_and_return_db( - &self, - db: DB, - env: EnvWithHandlerCfg, - inspector: I, - ) -> Result<(ResultAndState, EnvWithHandlerCfg, DB), Self::Error> - where - DB: Database, - EthApiError: From, - - I: GetInspector, - { - let mut evm = self.evm_config().evm_with_env_and_inspector(db, env, inspector); + let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, tx_env, inspector); let res = evm.transact().map_err(Self::Error::from_evm_err)?; - let (db, env) = evm.into_db_and_env_with_handler_cfg(); - Ok((res, env, db)) + let (_, env) = evm.into_db_and_env_with_handler_cfg(); + let EnvWithHandlerCfg { env, handler_cfg } = env; + let Env { cfg, block, tx } = *env; + let evm_env = EvmEnv { + cfg_env_with_handler_cfg: CfgEnvWithHandlerCfg { cfg_env: cfg, handler_cfg }, + block_env: block, + }; + Ok((res, (evm_env, tx))) } /// Executes the transaction on top of the given [`BlockId`] with a tracer configured by the @@ -81,7 +68,8 @@ pub trait Trace: /// Caution: this is blocking fn trace_at( &self, - env: EnvWithHandlerCfg, + evm_env: EvmEnv, + tx_env: TxEnv, config: TracingInspectorConfig, at: BlockId, f: F, @@ -93,7 +81,7 @@ pub trait Trace: self.with_state_at_block(at, |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); - let (res, _) = self.inspect(&mut db, env, &mut inspector)?; + let (res, _) = self.inspect(&mut db, evm_env, tx_env, &mut inspector)?; f(inspector, res) }) } @@ -107,7 +95,8 @@ pub trait Trace: /// the configured [`EnvWithHandlerCfg`] was inspected. fn spawn_trace_at_with_state( &self, - env: EnvWithHandlerCfg, + evm_env: EvmEnv, + tx_env: TxEnv, config: TracingInspectorConfig, at: BlockId, f: F, @@ -123,7 +112,7 @@ pub trait Trace: self.spawn_with_state_at_block(at, move |state| { let mut db = CacheDB::new(StateProviderDatabase::new(state)); let mut inspector = TracingInspector::new(config); - let (res, _) = this.inspect(&mut db, env, &mut inspector)?; + let (res, _) = this.inspect(&mut db, evm_env, tx_env, &mut inspector)?; f(inspector, res, db) }) } @@ -194,7 +183,6 @@ pub trait Trace: let (tx, tx_info) = transaction.split(); let (evm_env, _) = self.evm_env_at(block.hash().into()).await?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; // we need to get the state of the parent block because we're essentially replaying the // block the transaction is included in @@ -205,29 +193,18 @@ pub trait Trace: let mut db = CacheDB::new(StateProviderDatabase::new(state)); let block_txs = block.transactions_with_sender(); - this.apply_pre_execution_changes( - &block, - &mut db, - &cfg_env_with_handler_cfg, - &block_env, - )?; + this.apply_pre_execution_changes(&block, &mut db, &evm_env)?; // replay all transactions prior to the targeted transaction - this.replay_transactions_until( - &mut db, - cfg_env_with_handler_cfg.clone(), - block_env.clone(), - block_txs, - *tx.tx_hash(), + this.replay_transactions_until(&mut db, evm_env.clone(), block_txs, *tx.tx_hash())?; + + let tx_env = this.evm_config().tx_env(tx.tx(), tx.signer()); + let (res, _) = this.inspect( + StateCacheDbRefMutWrapper(&mut db), + evm_env, + tx_env, + &mut inspector, )?; - - let env = EnvWithHandlerCfg::new_with_cfg_env( - cfg_env_with_handler_cfg, - block_env, - RpcNodeCore::evm_config(&this).tx_env(tx.tx(), tx.signer()), - ); - let (res, _) = - 
this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?; f(tx_info, inspector, res, db) }) .await @@ -314,8 +291,6 @@ pub trait Trace: let ((evm_env, _), block) = futures::try_join!(self.evm_env_at(block_id), block)?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; - let Some(block) = block else { return Ok(None) }; if block.body().transactions().is_empty() { @@ -330,20 +305,15 @@ pub trait Trace: let state_at = block.parent_hash(); let block_hash = block.hash(); - let block_number = block_env.number.saturating_to::(); - let base_fee = block_env.basefee.saturating_to::(); + let block_number = evm_env.block_env.number.saturating_to::(); + let base_fee = evm_env.block_env.basefee.saturating_to::(); // now get the state let state = this.state_at_block_id(state_at.into())?; let mut db = CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); - this.apply_pre_execution_changes( - &block, - &mut db, - &cfg_env_with_handler_cfg, - &block_env, - )?; + this.apply_pre_execution_changes(&block, &mut db, &evm_env)?; // prepare transactions, we do everything upfront to reduce time spent with open // state @@ -372,15 +342,13 @@ pub trait Trace: .peekable(); while let Some((tx_info, tx)) = transactions.next() { - let env = EnvWithHandlerCfg::new_with_cfg_env( - cfg_env_with_handler_cfg.clone(), - block_env.clone(), - tx, - ); - let mut inspector = inspector_setup(); - let (res, _) = - this.inspect(StateCacheDbRefMutWrapper(&mut db), env, &mut inspector)?; + let (res, _) = this.inspect( + StateCacheDbRefMutWrapper(&mut db), + evm_env.clone(), + tx, + &mut inspector, + )?; let ResultAndState { result, state } = res; results.push(f(tx_info, inspector, result, &state, &db)?); @@ -483,8 +451,7 @@ pub trait Trace: &self, block: &RecoveredBlock>, db: &mut DB, - cfg: &CfgEnvWithHandlerCfg, - block_env: &BlockEnv, + evm_env: &EvmEnv, ) -> Result<(), Self::Error> { let mut system_caller = SystemCaller::new(self.evm_config().clone(), self.provider().chain_spec()); @@ -492,14 +459,19 @@ pub trait Trace: system_caller .pre_block_beacon_root_contract_call( db, - cfg, - block_env, + evm_env.cfg_env_with_handler_cfg(), + evm_env.block_env(), block.parent_beacon_block_root(), ) .map_err(|_| EthApiError::EvmCustom("failed to apply 4788 system call".to_string()))?; system_caller - .pre_block_blockhashes_contract_call(db, cfg, block_env, block.parent_hash()) + .pre_block_blockhashes_contract_call( + db, + evm_env.cfg_env_with_handler_cfg(), + evm_env.block_env(), + block.parent_hash(), + ) .map_err(|_| { EthApiError::EvmCustom("failed to apply blockhashes system call".to_string()) })?; diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index ea79f0580dbe7..efcb040fb3d7f 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -37,11 +37,12 @@ use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_tasks::pool::BlockingTaskGuard; use revm::{ db::{CacheDB, State}, - primitives::{db::DatabaseCommit, BlockEnv, CfgEnvWithHandlerCfg, Env, EnvWithHandlerCfg}, + primitives::{db::DatabaseCommit, Env}, }; use revm_inspectors::tracing::{ FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext, }; +use revm_primitives::TxEnv; use std::sync::Arc; use tokio::sync::{AcquireError, OwnedSemaphorePermit}; @@ -95,8 +96,7 @@ where async fn trace_block( &self, block: Arc>>, - cfg: CfgEnvWithHandlerCfg, - block_env: BlockEnv, + evm_env: EvmEnv, opts: GethDebugTracingOptions, ) -> Result, 
Eth::Error> { // replay all transactions of the block @@ -106,24 +106,19 @@ where let mut results = Vec::with_capacity(block.body().transactions().len()); let mut db = CacheDB::new(StateProviderDatabase::new(state)); - this.eth_api().apply_pre_execution_changes(&block, &mut db, &cfg, &block_env)?; + this.eth_api().apply_pre_execution_changes(&block, &mut db, &evm_env)?; let mut transactions = block.transactions_with_sender().enumerate().peekable(); let mut inspector = None; while let Some((index, (signer, tx))) = transactions.next() { let tx_hash = *tx.tx_hash(); - let env = EnvWithHandlerCfg { - env: Env::boxed( - cfg.cfg_env.clone(), - block_env.clone(), - this.eth_api().evm_config().tx_env(tx, *signer), - ), - handler_cfg: cfg.handler_cfg, - }; + let tx_env = this.eth_api().evm_config().tx_env(tx, *signer); + let (result, state_changes) = this.trace_transaction( &opts, - env, + evm_env.clone(), + tx_env, &mut db, Some(TransactionContext { block_hash: Some(block.hash()), @@ -162,8 +157,7 @@ where .map_err(BlockError::RlpDecodeRawBlock) .map_err(Eth::Error::from_eth_err)?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = - self.eth_api().evm_config().cfg_and_block_env(block.header()); + let evm_env = self.eth_api().evm_config().cfg_and_block_env(block.header()); // Depending on EIP-2 we need to recover the transactions differently let senders = @@ -191,13 +185,7 @@ where .collect::, Eth::Error>>()? }; - self.trace_block( - Arc::new(block.with_senders_unchecked(senders)), - cfg_env_with_handler_cfg, - block_env, - opts, - ) - .await + self.trace_block(Arc::new(block.with_senders_unchecked(senders)), evm_env, opts).await } /// Replays a block and returns the trace of each transaction. @@ -216,11 +204,10 @@ where self.eth_api().evm_env_at(block_hash.into()), self.eth_api().block_with_senders(block_hash.into()), )?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; let block = block.ok_or(EthApiError::HeaderNotFound(block_id))?; - self.trace_block(block, cfg_env_with_handler_cfg, block_env, opts).await + self.trace_block(block, evm_env, opts).await } /// Trace the transaction according to the provided options. 
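With `trace_block` now taking a single `EvmEnv` (shown above), each transaction builds its own `TxEnv` while the block-level env is cloned per call. A reduced sketch of that loop, with the inspector wiring and per-transaction `TransactionContext` omitted:

    for (signer, tx) in block.transactions_with_sender() {
        // Only the transaction environment changes between iterations.
        let tx_env = this.eth_api().evm_config().tx_env(tx, *signer);
        let (result, state_changes) =
            this.trace_transaction(&opts, evm_env.clone(), tx_env, &mut db, None)?;
        results.push(result);
        // Commit so the next transaction sees this one's state changes.
        db.commit(state_changes);
    }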
@@ -236,7 +223,6 @@ where Some(res) => res, }; let (evm_env, _) = self.eth_api().evm_env_at(block.hash().into()).await?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; // we need to get the state of the parent block because we're essentially replaying the // block the transaction is included in @@ -253,34 +239,22 @@ where let mut db = CacheDB::new(StateProviderDatabase::new(state)); - this.eth_api().apply_pre_execution_changes( - &block, - &mut db, - &cfg_env_with_handler_cfg, - &block_env, - )?; + this.eth_api().apply_pre_execution_changes(&block, &mut db, &evm_env)?; // replay all transactions prior to the targeted transaction let index = this.eth_api().replay_transactions_until( &mut db, - cfg_env_with_handler_cfg.clone(), - block_env.clone(), + evm_env.clone(), block_txs, *tx.tx_hash(), )?; - let env = EnvWithHandlerCfg { - env: Env::boxed( - cfg_env_with_handler_cfg.cfg_env.clone(), - block_env, - this.eth_api().evm_config().tx_env(tx.tx(), tx.signer()), - ), - handler_cfg: cfg_env_with_handler_cfg.handler_cfg, - }; + let tx_env = this.eth_api().evm_config().tx_env(tx.tx(), tx.signer()); this.trace_transaction( &opts, - env, + evm_env, + tx_env, &mut db, Some(TransactionContext { block_hash: Some(block_hash), @@ -319,8 +293,8 @@ where let mut inspector = FourByteInspector::default(); let inspector = self .eth_api() - .spawn_with_call_at(call, at, overrides, move |db, env| { - this.eth_api().inspect(db, env, &mut inspector)?; + .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { + this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; Ok(inspector) }) .await?; @@ -337,10 +311,11 @@ where let frame = self .eth_api() - .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, env) = this.eth_api().inspect(db, env, &mut inspector)?; + .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { + let (res, (_, tx_env)) = + this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) + .with_transaction_gas_limit(tx_env.gas_limit) .into_geth_builder() .geth_call_traces(call_config, res.result.gas_used()); Ok(frame.into()) @@ -358,15 +333,19 @@ where let frame = self .eth_api() - .spawn_with_call_at(call, at, overrides, move |db, env| { + .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { // wrapper is hack to get around 'higher-ranked lifetime error', // see let db = db.0; - let (res, env) = - this.eth_api().inspect(&mut *db, env, &mut inspector)?; + let (res, (_, tx_env)) = this.eth_api().inspect( + &mut *db, + evm_env, + tx_env, + &mut inspector, + )?; let frame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) + .with_transaction_gas_limit(tx_env.gas_limit) .into_geth_builder() .geth_prestate_traces(&res, &prestate_config, db) .map_err(Eth::Error::from_eth_err)?; @@ -387,25 +366,29 @@ where let frame = self .inner .eth_api - .spawn_with_call_at(call, at, overrides, move |db, env| { + .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { // wrapper is hack to get around 'higher-ranked lifetime error', see // let db = db.0; let tx_info = TransactionInfo { block_number: Some( - env.block.number.try_into().unwrap_or_default(), + evm_env.block_env.number.try_into().unwrap_or_default(), ), base_fee: Some( - env.block.basefee.try_into().unwrap_or_default(), + evm_env.block_env.basefee.try_into().unwrap_or_default(), ), hash: None, block_hash: None, index: None, }; - let (res, _) = - this.eth_api().inspect(&mut *db, 
env, &mut inspector)?; + let (res, _) = this.eth_api().inspect( + &mut *db, + evm_env, + tx_env, + &mut inspector, + )?; let frame = inspector .try_into_mux_frame(&res, db, tx_info) .map_err(Eth::Error::from_eth_err)?; @@ -426,12 +409,12 @@ where let frame: FlatCallFrame = self .inner .eth_api - .spawn_with_call_at(call, at, overrides, move |db, env| { - let (_res, env) = - this.eth_api().inspect(db, env, &mut inspector)?; + .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { + let (_res, (_, tx_env)) = + this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; let tx_info = TransactionInfo::default(); let frame: FlatCallFrame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) + .with_transaction_gas_limit(tx_env.gas_limit) .into_parity_builder() .into_localized_transaction_traces(tx_info); Ok(frame) @@ -453,7 +436,7 @@ where let res = self .eth_api() - .spawn_with_call_at(call, at, overrides, move |db, env| { + .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { // wrapper is hack to get around 'higher-ranked lifetime error', see // let db = db.0; @@ -461,8 +444,17 @@ where let mut inspector = revm_inspectors::tracing::js::JsInspector::new(code, config) .map_err(Eth::Error::from_eth_err)?; - let (res, _) = - this.eth_api().inspect(&mut *db, env.clone(), &mut inspector)?; + let (res, _) = this.eth_api().inspect( + &mut *db, + evm_env.clone(), + tx_env.clone(), + &mut inspector, + )?; + let env = Env::boxed( + evm_env.cfg_env_with_handler_cfg.cfg_env, + evm_env.block_env, + tx_env, + ); inspector.json_result(res, &env, db).map_err(Eth::Error::from_eth_err) }) .await?; @@ -479,9 +471,10 @@ where let (res, tx_gas_limit, inspector) = self .eth_api() - .spawn_with_call_at(call, at, overrides, move |db, env| { - let (res, env) = this.eth_api().inspect(db, env, &mut inspector)?; - Ok((res, env.tx.gas_limit, inspector)) + .spawn_with_call_at(call, at, overrides, move |db, evm_env, tx_env| { + let (res, (_, tx_env)) = + this.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; + Ok((res, tx_env.gas_limit, inspector)) }) .await?; let gas_used = res.result.gas_used(); @@ -511,11 +504,10 @@ where let transaction_index = transaction_index.unwrap_or_default(); let target_block = block_number.unwrap_or_default(); - let ((evm_env, _), block) = futures::try_join!( + let ((mut evm_env, _), block) = futures::try_join!( self.eth_api().evm_env_at(target_block), self.eth_api().block_with_senders(target_block), )?; - let EvmEnv { cfg_env_with_handler_cfg, mut block_env } = evm_env; let opts = opts.unwrap_or_default(); let block = block.ok_or(EthApiError::HeaderNotFound(target_block))?; @@ -552,15 +544,8 @@ where // Execute all transactions until index for (signer, tx) in transactions { - let env = EnvWithHandlerCfg { - env: Env::boxed( - cfg_env_with_handler_cfg.cfg_env.clone(), - block_env.clone(), - this.eth_api().evm_config().tx_env(tx, *signer), - ), - handler_cfg: cfg_env_with_handler_cfg.handler_cfg, - }; - let (res, _) = this.eth_api().transact(&mut db, env)?; + let tx_env = this.eth_api().evm_config().tx_env(tx, *signer); + let (res, _) = this.eth_api().transact(&mut db, evm_env.clone(), tx_env)?; db.commit(res.state); } } @@ -580,9 +565,8 @@ where let state_overrides = state_overrides.take(); let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); - let env = this.eth_api().prepare_call_env( - cfg_env_with_handler_cfg.clone(), - block_env.clone(), + let (evm_env, tx_env) = this.eth_api().prepare_call_env( + evm_env.clone(), tx, 
&mut db, overrides, @@ -590,7 +574,8 @@ where let (trace, state) = this.trace_transaction( &tracing_options, - env, + evm_env, + tx_env, &mut db, None, &mut inspector, @@ -606,8 +591,8 @@ where results.push(trace); } // Increment block_env number and timestamp for the next bundle - block_env.number += U256::from(1); - block_env.timestamp += U256::from(12); + evm_env.block_env.number += U256::from(1); + evm_env.block_env.timestamp += U256::from(12); all_bundles.push(results); } @@ -670,7 +655,8 @@ where fn trace_transaction( &self, opts: &GethDebugTracingOptions, - env: EnvWithHandlerCfg, + evm_env: EvmEnv, + tx_env: TxEnv, db: &mut StateCacheDb<'_>, transaction_context: Option, fused_inspector: &mut Option, @@ -684,8 +670,8 @@ where .map(|c| c.tx_index.map(|i| i as u64)) .unwrap_or_default(), block_hash: transaction_context.as_ref().map(|c| c.block_hash).unwrap_or_default(), - block_number: Some(env.block.number.try_into().unwrap_or_default()), - base_fee: Some(env.block.basefee.try_into().unwrap_or_default()), + block_number: Some(evm_env.block_env.number.try_into().unwrap_or_default()), + base_fee: Some(evm_env.block_env.basefee.try_into().unwrap_or_default()), }; if let Some(tracer) = tracer { @@ -693,7 +679,8 @@ where GethDebugTracerType::BuiltInTracer(tracer) => match tracer { GethDebugBuiltInTracerType::FourByteTracer => { let mut inspector = FourByteInspector::default(); - let (res, _) = self.eth_api().inspect(db, env, &mut inspector)?; + let (res, _) = + self.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; return Ok((FourByteFrame::from(&inspector).into(), res.state)) } GethDebugBuiltInTracerType::CallTracer => { @@ -708,9 +695,10 @@ where )) }); - let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; + let (res, (_, tx_env)) = + self.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; - inspector.set_transaction_gas_limit(env.tx.gas_limit); + inspector.set_transaction_gas_limit(tx_env.gas_limit); let frame = inspector .geth_builder() @@ -729,9 +717,10 @@ where TracingInspectorConfig::from_geth_prestate_config(&prestate_config), ) }); - let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; + let (res, (_, tx_env)) = + self.eth_api().inspect(&mut *db, evm_env, tx_env, &mut inspector)?; - inspector.set_transaction_gas_limit(env.tx.gas_limit); + inspector.set_transaction_gas_limit(tx_env.gas_limit); let frame = inspector .geth_builder() .geth_prestate_traces(&res, &prestate_config, db) @@ -751,7 +740,8 @@ where let mut inspector = MuxInspector::try_from_config(mux_config) .map_err(Eth::Error::from_eth_err)?; - let (res, _) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; + let (res, _) = + self.eth_api().inspect(&mut *db, evm_env, tx_env, &mut inspector)?; let frame = inspector .try_into_mux_frame(&res, db, tx_info) .map_err(Eth::Error::from_eth_err)?; @@ -767,9 +757,10 @@ where TracingInspectorConfig::from_flat_call_config(&flat_call_config), ); - let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; + let (res, (_, tx_env)) = + self.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; let frame: FlatCallFrame = inspector - .with_transaction_gas_limit(env.tx.gas_limit) + .with_transaction_gas_limit(tx_env.gas_limit) .into_parity_builder() .into_localized_transaction_traces(tx_info); @@ -790,9 +781,15 @@ where transaction_context.unwrap_or_default(), ) .map_err(Eth::Error::from_eth_err)?; - let (res, env) = self.eth_api().inspect(&mut *db, env, &mut inspector)?; + let (res, (evm_env, tx_env)) = + 
self.eth_api().inspect(&mut *db, evm_env, tx_env, &mut inspector)?; let state = res.state.clone(); + let env = Env::boxed( + evm_env.cfg_env_with_handler_cfg.cfg_env, + evm_env.block_env, + tx_env, + ); let result = inspector.json_result(res, &env, db).map_err(Eth::Error::from_eth_err)?; Ok((GethTrace::JS(result), state)) @@ -805,10 +802,10 @@ where let inspector_config = TracingInspectorConfig::from_geth_config(config); TracingInspector::new(inspector_config) }); - let (res, env) = self.eth_api().inspect(db, env, &mut inspector)?; + let (res, (_, tx_env)) = self.eth_api().inspect(db, evm_env, tx_env, &mut inspector)?; let gas_used = res.result.gas_used(); let return_value = res.result.into_output().unwrap_or_default(); - inspector.set_transaction_gas_limit(env.tx.gas_limit); + inspector.set_transaction_gas_limit(tx_env.gas_limit); let frame = inspector.geth_builder().geth_traces(gas_used, return_value, *config); Ok((frame.into(), res.state)) diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 3cc1c1391638e..e44c5634d10bd 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -6,7 +6,7 @@ use alloy_primitives::{Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; -use reth_evm::{env::EvmEnv, ConfigureEvm, ConfigureEvmEnv}; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_primitives_traits::SignedTransaction; use reth_provider::{ChainSpecProvider, HeaderProvider}; use reth_revm::database::StateProviderDatabase; @@ -21,9 +21,9 @@ use reth_transaction_pool::{ }; use revm::{ db::{CacheDB, DatabaseCommit, DatabaseRef}, - primitives::{ResultAndState, TxEnv}, + primitives::ResultAndState, }; -use revm_primitives::{EnvKzgSettings, EnvWithHandlerCfg, SpecId}; +use revm_primitives::{EnvKzgSettings, SpecId}; use std::sync::Arc; /// `Eth` bundle implementation. 
@@ -101,40 +101,40 @@ where let block_id: alloy_rpc_types_eth::BlockId = state_block_number.into(); // Note: the block number is considered the `parent` block: - let (evm_env, at) = self.eth_api().evm_env_at(block_id).await?; - let EvmEnv { cfg_env_with_handler_cfg, mut block_env } = evm_env; + let (mut evm_env, at) = self.eth_api().evm_env_at(block_id).await?; if let Some(coinbase) = coinbase { - block_env.coinbase = coinbase; + evm_env.block_env.coinbase = coinbase; } // need to adjust the timestamp for the next block if let Some(timestamp) = timestamp { - block_env.timestamp = U256::from(timestamp); + evm_env.block_env.timestamp = U256::from(timestamp); } else { - block_env.timestamp += U256::from(12); + evm_env.block_env.timestamp += U256::from(12); } if let Some(difficulty) = difficulty { - block_env.difficulty = U256::from(difficulty); + evm_env.block_env.difficulty = U256::from(difficulty); } // default to call gas limit unless user requests a smaller limit - block_env.gas_limit = U256::from(self.inner.eth_api.call_gas_limit()); + evm_env.block_env.gas_limit = U256::from(self.inner.eth_api.call_gas_limit()); if let Some(gas_limit) = gas_limit { let gas_limit = U256::from(gas_limit); - if gas_limit > block_env.gas_limit { + if gas_limit > evm_env.block_env.gas_limit { return Err( EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into() ) } - block_env.gas_limit = gas_limit; + evm_env.block_env.gas_limit = gas_limit; } if let Some(base_fee) = base_fee { - block_env.basefee = U256::from(base_fee); - } else if cfg_env_with_handler_cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) { - let parent_block = block_env.number.saturating_to::(); + evm_env.block_env.basefee = U256::from(base_fee); + } else if evm_env.cfg_env_with_handler_cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) + { + let parent_block = evm_env.block_env.number.saturating_to::(); // here we need to fetch the _next_ block's basefee based on the parent block let parent = RpcNodeCore::provider(self.eth_api()) .header_by_number(parent_block) @@ -145,25 +145,20 @@ where .chain_spec() .base_fee_params_at_block(parent_block), ) { - block_env.basefee = U256::from(base_fee); + evm_env.block_env.basefee = U256::from(base_fee); } } - let state_block_number = block_env.number; + let state_block_number = evm_env.block_env.number; // use the block number of the request - block_env.number = U256::from(block_number); + evm_env.block_env.number = U256::from(block_number); let eth_api = self.eth_api().clone(); self.eth_api() .spawn_with_state_at_block(at, move |state| { - let coinbase = block_env.coinbase; - let basefee = Some(block_env.basefee.to::()); - let env = EnvWithHandlerCfg::new_with_cfg_env( - cfg_env_with_handler_cfg, - block_env, - TxEnv::default(), - ); + let coinbase = evm_env.block_env.coinbase; + let basefee = Some(evm_env.block_env.basefee.to::()); let db = CacheDB::new(StateProviderDatabase::new(state)); let initial_coinbase = db @@ -177,7 +172,7 @@ where let mut total_gas_fess = U256::ZERO; let mut hasher = Keccak256::new(); - let mut evm = eth_api.evm_config().evm_with_env(db, env); + let mut evm = eth_api.evm_config().evm_with_env(db, evm_env, Default::default()); let mut results = Vec::with_capacity(transactions.len()); let mut transactions = transactions.into_iter().peekable(); diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index de2597b64a3fd..1c24a5f4a4b8e 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ 
b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -10,7 +10,7 @@ use alloy_rpc_types_mev::{ }; use jsonrpsee::core::RpcResult; use reth_chainspec::EthChainSpec; -use reth_evm::{env::EvmEnv, ConfigureEvm, ConfigureEvmEnv}; +use reth_evm::{ConfigureEvm, ConfigureEvmEnv}; use reth_provider::{ChainSpecProvider, HeaderProvider, ProviderTx}; use reth_revm::database::StateProviderDatabase; use reth_rpc_api::MevSimApiServer; @@ -23,7 +23,7 @@ use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{PoolConsensusTx, PoolPooledTx, PoolTransaction, TransactionPool}; use revm::{ db::CacheDB, - primitives::{Address, EnvWithHandlerCfg, ResultAndState, SpecId, TxEnv}, + primitives::{Address, ResultAndState, SpecId}, DatabaseCommit, DatabaseRef, }; use std::{sync::Arc, time::Duration}; @@ -244,42 +244,44 @@ where let flattened_bundle = self.parse_and_flatten_bundle(&request)?; let block_id = parent_block.unwrap_or(BlockId::Number(BlockNumberOrTag::Pending)); - let (evm_env, current_block) = self.eth_api().evm_env_at(block_id).await?; - let EvmEnv { cfg_env_with_handler_cfg, mut block_env } = evm_env; + let (mut evm_env, current_block) = self.eth_api().evm_env_at(block_id).await?; let parent_header = RpcNodeCore::provider(&self.inner.eth_api) - .header_by_number(block_env.number.saturating_to::()) + .header_by_number(evm_env.block_env.number.saturating_to::()) .map_err(EthApiError::from_eth_err)? // Explicitly map the error .ok_or_else(|| { - EthApiError::HeaderNotFound((block_env.number.saturating_to::()).into()) + EthApiError::HeaderNotFound( + (evm_env.block_env.number.saturating_to::()).into(), + ) })?; // apply overrides if let Some(block_number) = block_number { - block_env.number = U256::from(block_number); + evm_env.block_env.number = U256::from(block_number); } if let Some(coinbase) = coinbase { - block_env.coinbase = coinbase; + evm_env.block_env.coinbase = coinbase; } if let Some(timestamp) = timestamp { - block_env.timestamp = U256::from(timestamp); + evm_env.block_env.timestamp = U256::from(timestamp); } if let Some(gas_limit) = gas_limit { - block_env.gas_limit = U256::from(gas_limit); + evm_env.block_env.gas_limit = U256::from(gas_limit); } if let Some(base_fee) = base_fee { - block_env.basefee = U256::from(base_fee); - } else if cfg_env_with_handler_cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) { + evm_env.block_env.basefee = U256::from(base_fee); + } else if evm_env.cfg_env_with_handler_cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) + { if let Some(base_fee) = parent_header.next_block_base_fee( RpcNodeCore::provider(&self.inner.eth_api) .chain_spec() - .base_fee_params_at_block(block_env.number.saturating_to::()), + .base_fee_params_at_block(evm_env.block_env.number.saturating_to::()), ) { - block_env.basefee = U256::from(base_fee); + evm_env.block_env.basefee = U256::from(base_fee); } } @@ -291,13 +293,8 @@ where .spawn_with_state_at_block(current_block, move |state| { // Setup environment let current_block_number = current_block.as_u64().unwrap(); - let coinbase = block_env.coinbase; - let basefee = block_env.basefee; - let env = EnvWithHandlerCfg::new_with_cfg_env( - cfg_env_with_handler_cfg, - block_env, - TxEnv::default(), - ); + let coinbase = evm_env.block_env.coinbase; + let basefee = evm_env.block_env.basefee; let db = CacheDB::new(StateProviderDatabase::new(state)); let initial_coinbase_balance = DatabaseRef::basic_ref(&db, coinbase) @@ -311,7 +308,7 @@ where let mut refundable_value = U256::ZERO; let mut body_logs: Vec = Vec::new(); - let mut evm = 
eth_api.evm_config().evm_with_env(db, env); + let mut evm = eth_api.evm_config().evm_with_env(db, evm_env, Default::default()); for item in &flattened_bundle { // Check inclusion constraints diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index d906419021b90..b7bf38eb608d6 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -16,7 +16,7 @@ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::{EthChainSpec, EthereumHardfork, MAINNET, SEPOLIA}; use reth_consensus_common::calc::{base_block_reward_pre_merge, block_reward, ommer_reward}; -use reth_evm::{env::EvmEnv, ConfigureEvmEnv}; +use reth_evm::ConfigureEvmEnv; use reth_primitives_traits::{BlockBody, BlockHeader}; use reth_provider::{BlockNumReader, BlockReader, ChainSpecProvider}; use reth_revm::database::StateProviderDatabase; @@ -25,10 +25,7 @@ use reth_rpc_eth_api::{helpers::TraceExt, FromEthApiError, RpcNodeCore}; use reth_rpc_eth_types::{error::EthApiError, utils::recover_raw_transaction}; use reth_tasks::pool::BlockingTaskGuard; use reth_transaction_pool::{PoolPooledTx, PoolTransaction, TransactionPool}; -use revm::{ - db::{CacheDB, DatabaseCommit}, - primitives::EnvWithHandlerCfg, -}; +use revm::db::{CacheDB, DatabaseCommit}; use revm_inspectors::{ opcode::OpcodeGasInspector, tracing::{parity::populate_state_diff, TracingInspector, TracingInspectorConfig}, @@ -90,12 +87,12 @@ where let mut inspector = TracingInspector::new(config); let this = self.clone(); self.eth_api() - .spawn_with_call_at(trace_request.call, at, overrides, move |db, env| { + .spawn_with_call_at(trace_request.call, at, overrides, move |db, evm_env, tx_env| { // wrapper is hack to get around 'higher-ranked lifetime error', see // let db = db.0; - let (res, _) = this.eth_api().inspect(&mut *db, env, &mut inspector)?; + let (res, _) = this.eth_api().inspect(&mut *db, evm_env, tx_env, &mut inspector)?; let trace_res = inspector .into_parity_builder() .into_trace_results_with_state(&res, &trace_request.trace_types, &db) @@ -116,18 +113,12 @@ where .map_transaction(::Transaction::pooled_into_consensus); let (evm_env, at) = self.eth_api().evm_env_at(block_id.unwrap_or_default()).await?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; - - let env = EnvWithHandlerCfg::new_with_cfg_env( - cfg_env_with_handler_cfg, - block_env, - self.eth_api().evm_config().tx_env(tx.tx(), tx.signer()), - ); + let tx_env = self.eth_api().evm_config().tx_env(tx.tx(), tx.signer()); let config = TracingInspectorConfig::from_parity_config(&trace_types); self.eth_api() - .spawn_trace_at_with_state(env, config, at, move |inspector, res, db| { + .spawn_trace_at_with_state(evm_env, tx_env, config, at, move |inspector, res, db| { inspector .into_parity_builder() .into_trace_results_with_state(&res, &trace_types, &db) @@ -147,7 +138,6 @@ where ) -> Result, Eth::Error> { let at = block_id.unwrap_or(BlockId::pending()); let (evm_env, at) = self.eth_api().evm_env_at(at).await?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = evm_env; let this = self.clone(); // execute all transactions on top of each other and record the traces @@ -159,16 +149,16 @@ where let mut calls = calls.into_iter().peekable(); while let Some((call, trace_types)) = calls.next() { - let env = this.eth_api().prepare_call_env( - cfg_env_with_handler_cfg.clone(), - block_env.clone(), + let (evm_env, tx_env) = this.eth_api().prepare_call_env( + evm_env.clone(), call, &mut db, Default::default(), )?; let config = 
TracingInspectorConfig::from_parity_config(&trace_types); let mut inspector = TracingInspector::new(config); - let (res, _) = this.eth_api().inspect(&mut db, env, &mut inspector)?; + let (res, _) = + this.eth_api().inspect(&mut db, evm_env, tx_env, &mut inspector)?; let trace_res = inspector .into_parity_builder() diff --git a/examples/custom-inspector/Cargo.toml b/examples/custom-inspector/Cargo.toml index ee6f887e64c0d..5d9753da7253a 100644 --- a/examples/custom-inspector/Cargo.toml +++ b/examples/custom-inspector/Cargo.toml @@ -7,7 +7,9 @@ license.workspace = true [dependencies] reth.workspace = true +reth-evm.workspace = true reth-node-ethereum.workspace = true +revm-primitives.workspace = true alloy-rpc-types-eth.workspace = true clap = { workspace = true, features = ["derive"] } futures-util.workspace = true diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index 6b25c46b76ca3..5df076008bdd1 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -22,12 +22,15 @@ use reth::{ revm::{ inspector_handle_register, interpreter::{Interpreter, OpCode}, + primitives::{Env, EnvWithHandlerCfg}, Database, Evm, EvmContext, Inspector, }, rpc::{api::eth::helpers::Call, compat::transaction::transaction_to_call_request}, transaction_pool::TransactionPool, }; +use reth_evm::EvmEnv; use reth_node_ethereum::node::EthereumNode; +use revm_primitives::CfgEnvWithHandlerCfg; fn main() { Cli::::parse() @@ -61,7 +64,16 @@ fn main() { call_request, BlockNumberOrTag::Latest.into(), EvmOverrides::default(), - move |db, env| { + move |db, evm_env, tx_env| { + let EvmEnv { + cfg_env_with_handler_cfg: + CfgEnvWithHandlerCfg { handler_cfg, cfg_env }, + block_env, + } = evm_env; + let env = EnvWithHandlerCfg { + handler_cfg, + env: Env::boxed(cfg_env, block_env, tx_env), + }; let mut dummy_inspector = DummyInspector::default(); { // configure the evm with the custom inspector From fd52b634ed255ddd2294d4cdd1b0dfe6f629050f Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 15 Jan 2025 22:31:16 +0400 Subject: [PATCH 076/113] chore: trim ConfigureEvm trait (#13807) --- crates/ethereum/evm/src/lib.rs | 133 +++++++-------------- crates/evm/src/builder.rs | 141 ----------------------- crates/evm/src/lib.rs | 53 +-------- crates/optimism/evm/src/lib.rs | 124 +++++--------------- examples/custom-evm/src/main.rs | 20 +++- examples/stateful-precompile/src/main.rs | 20 +++- 6 files changed, 105 insertions(+), 386 deletions(-) delete mode 100644 crates/evm/src/builder.rs diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 2aaa063d745b9..50907223a1a2f 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -26,6 +26,7 @@ use reth_chainspec::ChainSpec; use reth_evm::{env::EvmEnv, ConfigureEvm, ConfigureEvmEnv, NextBlockEnvAttributes}; use reth_primitives::TransactionSigned; use reth_primitives_traits::transaction::execute::FillTxEnv; +use reth_revm::{inspector_handle_register, EvmBuilder}; use revm_primitives::{ AnalysisKind, BlobExcessGasAndPrice, BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, Env, SpecId, TxEnv, }; @@ -182,24 +183,57 @@ impl ConfigureEvmEnv for EthEvmConfig { } } -impl ConfigureEvm for EthEvmConfig {} +impl ConfigureEvm for EthEvmConfig { + fn evm_with_env( + &self, + db: DB, + evm_env: EvmEnv, + tx: TxEnv, + ) -> reth_revm::Evm<'_, (), DB> { + EvmBuilder::default() + .with_db(db) + .with_cfg_env_with_handler_cfg(evm_env.cfg_env_with_handler_cfg) + 
.with_block_env(evm_env.block_env) + .with_tx_env(tx) + .build() + } + + fn evm_with_env_and_inspector( + &self, + db: DB, + evm_env: EvmEnv, + tx: TxEnv, + inspector: I, + ) -> reth_revm::Evm<'_, I, DB> + where + DB: reth_revm::Database, + I: reth_revm::GetInspector, + { + EvmBuilder::default() + .with_db(db) + .with_external_context(inspector) + .with_cfg_env_with_handler_cfg(evm_env.cfg_env_with_handler_cfg) + .with_block_env(evm_env.block_env) + .with_tx_env(tx) + .append_handler_register(inspector_handle_register) + .build() + } +} #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{constants::KECCAK_EMPTY, Header}; + use alloy_consensus::Header; use alloy_genesis::Genesis; - use alloy_primitives::{B256, U256}; + use alloy_primitives::U256; use reth_chainspec::{Chain, ChainSpec, MAINNET}; use reth_evm::{env::EvmEnv, execute::ProviderError}; use reth_revm::{ db::{CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, primitives::{BlockEnv, CfgEnv, SpecId}, - JournaledState, }; use revm_primitives::HandlerCfg; - use std::collections::HashSet; #[test] fn test_fill_cfg_and_block_env() { @@ -226,45 +260,6 @@ mod tests { assert_eq!(cfg_env_with_handler_cfg.chain_id, chain_spec.chain().id()); } - #[test] - #[allow(clippy::needless_update)] - fn test_evm_configure() { - // Create a default `EthEvmConfig` - let evm_config = EthEvmConfig::new(MAINNET.clone()); - - // Initialize an empty database wrapped in CacheDB - let db = CacheDB::>::default(); - - // Create an EVM instance using the configuration and the database - let evm = evm_config.evm(db); - - // Check that the EVM environment is initialized with default values - assert_eq!(evm.context.evm.inner.env, Box::default()); - - // Latest spec ID and no warm preloaded addresses - assert_eq!( - evm.context.evm.inner.journaled_state, - JournaledState::new(SpecId::LATEST, HashSet::default()) - ); - - // Ensure that the accounts database is empty - assert!(evm.context.evm.inner.db.accounts.is_empty()); - - // Ensure that the block hashes database is empty - assert!(evm.context.evm.inner.db.block_hashes.is_empty()); - - // Verify that there are two default contracts in the contracts database - assert_eq!(evm.context.evm.inner.db.contracts.len(), 2); - assert!(evm.context.evm.inner.db.contracts.contains_key(&KECCAK_EMPTY)); - assert!(evm.context.evm.inner.db.contracts.contains_key(&B256::ZERO)); - - // Ensure that the logs database is empty - assert!(evm.context.evm.inner.db.logs.is_empty()); - - // No Optimism - assert_eq!(evm.handler.cfg, HandlerCfg { spec_id: SpecId::LATEST, ..Default::default() }); - } - #[test] #[allow(clippy::needless_update)] fn test_evm_with_env_default_spec() { @@ -374,57 +369,15 @@ mod tests { let evm = evm_config.evm_with_env(db, evm_env, Default::default()); // Check that the spec ID is setup properly - assert_eq!(evm.handler.spec_id(), SpecId::CONSTANTINOPLE); + assert_eq!(evm.handler.spec_id(), SpecId::PETERSBURG); // No Optimism assert_eq!( evm.handler.cfg, - HandlerCfg { spec_id: SpecId::CONSTANTINOPLE, ..Default::default() } + HandlerCfg { spec_id: SpecId::PETERSBURG, ..Default::default() } ); } - #[test] - #[allow(clippy::needless_update)] - fn test_evm_with_inspector() { - let evm_config = EthEvmConfig::new(MAINNET.clone()); - - let db = CacheDB::>::default(); - - // No operation inspector - let noop = NoOpInspector; - - let evm = evm_config.evm_with_inspector(db, noop); - - // Check that the inspector is set correctly - assert_eq!(evm.context.external, noop); - - // Check that the EVM environment is 
initialized with default values - assert_eq!(evm.context.evm.inner.env, Box::default()); - - // Latest spec ID and no warm preloaded addresses - assert_eq!( - evm.context.evm.inner.journaled_state, - JournaledState::new(SpecId::LATEST, HashSet::default()) - ); - - // Ensure that the accounts database is empty - assert!(evm.context.evm.inner.db.accounts.is_empty()); - - // Ensure that the block hashes database is empty - assert!(evm.context.evm.inner.db.block_hashes.is_empty()); - - // Verify that there are two default contracts in the contracts database - assert_eq!(evm.context.evm.inner.db.contracts.len(), 2); - assert!(evm.context.evm.inner.db.contracts.contains_key(&KECCAK_EMPTY)); - assert!(evm.context.evm.inner.db.contracts.contains_key(&B256::ZERO)); - - // Ensure that the logs database is empty - assert!(evm.context.evm.inner.db.logs.is_empty()); - - // No Optimism - assert_eq!(evm.handler.cfg, HandlerCfg { spec_id: SpecId::LATEST, ..Default::default() }); - } - #[test] #[allow(clippy::needless_update)] fn test_evm_with_env_and_default_inspector() { @@ -530,7 +483,7 @@ mod tests { ); // Check that the spec ID is set properly - assert_eq!(evm.handler.spec_id(), SpecId::CONSTANTINOPLE); + assert_eq!(evm.handler.spec_id(), SpecId::PETERSBURG); assert_eq!(evm.context.evm.env.block, evm_env.block_env); assert_eq!(evm.context.evm.env.cfg, evm_env.cfg_env_with_handler_cfg.cfg_env); assert_eq!(evm.context.evm.env.tx, Default::default()); @@ -539,7 +492,7 @@ mod tests { // No Optimism assert_eq!( evm.handler.cfg, - HandlerCfg { spec_id: SpecId::CONSTANTINOPLE, ..Default::default() } + HandlerCfg { spec_id: SpecId::PETERSBURG, ..Default::default() } ); } } diff --git a/crates/evm/src/builder.rs b/crates/evm/src/builder.rs deleted file mode 100644 index 94531dd0ff011..0000000000000 --- a/crates/evm/src/builder.rs +++ /dev/null @@ -1,141 +0,0 @@ -//! Builder for creating an EVM with a database and environment. - -use alloc::boxed::Box; -use revm::{inspector_handle_register, Database, Evm, EvmBuilder, GetInspector}; -use revm_primitives::EnvWithHandlerCfg; - -/// Builder for creating an EVM with a database and environment. -/// -/// Wrapper around [`EvmBuilder`] that allows for setting the database and environment for the EVM. -/// -/// This is useful for creating an EVM with a custom database and environment without having to -/// necessarily rely on Revm inspector. -#[derive(Debug)] -pub struct RethEvmBuilder { - /// The database to use for the EVM. - db: DB, - /// The environment to use for the EVM. - env: Option>, - /// The external context for the EVM. - external_context: EXT, -} - -impl RethEvmBuilder { - /// Create a new EVM builder with the given database. - pub const fn new(db: DB) -> Self { - Self { db, env: None, external_context: () } - } -} - -impl RethEvmBuilder -where - DB: Database, -{ - /// Set the environment for the EVM. - pub fn with_env(mut self, env: Box) -> Self { - self.env = Some(env); - self - } - - /// Set the external context for the EVM. - pub fn with_external_context(self, external_context: EXT1) -> RethEvmBuilder { - RethEvmBuilder { db: self.db, env: self.env, external_context } - } - - /// Build the EVM with the given database and environment. 
- pub fn build<'a>(self) -> Evm<'a, EXT, DB> { - let mut builder = - EvmBuilder::default().with_db(self.db).with_external_context(self.external_context); - if let Some(env) = self.env { - builder = builder.with_spec_id(env.spec_id()); - builder = builder.with_env(env.env); - } - - builder.build() - } - - /// Build the EVM with the given database and environment, using the given inspector. - pub fn build_with_inspector<'a, I>(self, inspector: I) -> Evm<'a, I, DB> - where - I: GetInspector, - EXT: 'a, - { - let mut builder = - EvmBuilder::default().with_db(self.db).with_external_context(self.external_context); - if let Some(env) = self.env { - builder = builder.with_spec_id(env.spec_id()); - builder = builder.with_env(env.env); - } - builder - .with_external_context(inspector) - .append_handler_register(inspector_handle_register) - .build() - } -} - -/// Trait for configuring an EVM builder. -pub trait ConfigureEvmBuilder { - /// The type of EVM builder that this trait can configure. - type Builder<'a, DB: Database>: EvmFactory; -} - -/// Trait for configuring the EVM for executing full blocks. -pub trait EvmFactory { - /// Returns new EVM with the given database - /// - /// This does not automatically configure the EVM with [`crate::ConfigureEvmEnv`] methods. It is - /// up to the caller to call an appropriate method to fill the transaction and block - /// environment before executing any transactions using the provided EVM. - fn evm(self, db: DB) -> Evm<'static, (), DB> - where - Self: Sized, - { - RethEvmBuilder::new(db).build() - } - - /// Returns a new EVM with the given database configured with the given environment settings, - /// including the spec id. - /// - /// This will preserve any handler modifications - fn evm_with_env<'a, DB: Database + 'a>( - &self, - db: DB, - env: EnvWithHandlerCfg, - ) -> Evm<'a, (), DB> { - RethEvmBuilder::new(db).with_env(env.into()).build() - } - - /// Returns a new EVM with the given database configured with the given environment settings, - /// including the spec id. - /// - /// This will use the given external inspector as the EVM external context. - /// - /// This will preserve any handler modifications - fn evm_with_env_and_inspector( - &self, - db: DB, - env: EnvWithHandlerCfg, - inspector: I, - ) -> Evm<'_, I, DB> - where - DB: Database, - I: GetInspector, - { - RethEvmBuilder::new(db).with_env(env.into()).build_with_inspector(inspector) - } - - /// Returns a new EVM with the given inspector. - /// - /// Caution: This does not automatically configure the EVM with [`crate::ConfigureEvmEnv`] - /// methods. It is up to the caller to call an appropriate method to fill the transaction - /// and block environment before executing any transactions using the provided EVM. - fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> - where - DB: Database, - I: GetInspector, - { - RethEvmBuilder::new(db).build_with_inspector(inspector) - } -} - -impl EvmFactory for RethEvmBuilder {} diff --git a/crates/evm/src/lib.rs b/crates/evm/src/lib.rs index 2b5c98adcbb33..1271bb14b77b0 100644 --- a/crates/evm/src/lib.rs +++ b/crates/evm/src/lib.rs @@ -17,14 +17,12 @@ extern crate alloc; -use crate::builder::RethEvmBuilder; use alloy_consensus::BlockHeader as _; use alloy_primitives::{Address, Bytes, B256, U256}; use reth_primitives_traits::{BlockHeader, SignedTransaction}; use revm::{Database, Evm, GetInspector}; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, Env, SpecId, TxEnv}; -pub mod builder; pub mod either; /// EVM environment configuration. 
pub mod env; @@ -42,26 +40,11 @@ pub mod test_utils; /// Trait for configuring the EVM for executing full blocks. pub trait ConfigureEvm: ConfigureEvmEnv { - /// Returns new EVM with the given database - /// - /// This does not automatically configure the EVM with [`ConfigureEvmEnv`] methods. It is up to - /// the caller to call an appropriate method to fill the transaction and block environment - /// before executing any transactions using the provided EVM. - fn evm(&self, db: DB) -> Evm<'_, (), DB> { - RethEvmBuilder::new(db).build() - } - /// Returns a new EVM with the given database configured with the given environment settings, /// including the spec id and transaction environment. /// /// This will preserve any handler modifications - fn evm_with_env(&self, db: DB, evm_env: EvmEnv, tx: TxEnv) -> Evm<'_, (), DB> { - let mut evm = self.evm(db); - evm.modify_spec_id(evm_env.cfg_env_with_handler_cfg.handler_cfg.spec_id); - evm.context.evm.env = - Env::boxed(evm_env.cfg_env_with_handler_cfg.cfg_env, evm_env.block_env, tx); - evm - } + fn evm_with_env(&self, db: DB, evm_env: EvmEnv, tx: TxEnv) -> Evm<'_, (), DB>; /// Returns a new EVM with the given database configured with `cfg` and `block_env` /// configuration derived from the given header. Relies on @@ -90,27 +73,7 @@ pub trait ConfigureEvm: ConfigureEvmEnv { ) -> Evm<'_, I, DB> where DB: Database, - I: GetInspector, - { - let mut evm = self.evm_with_inspector(db, inspector); - evm.modify_spec_id(evm_env.cfg_env_with_handler_cfg.handler_cfg.spec_id); - evm.context.evm.env = - Env::boxed(evm_env.cfg_env_with_handler_cfg.cfg_env, evm_env.block_env, tx); - evm - } - - /// Returns a new EVM with the given inspector. - /// - /// Caution: This does not automatically configure the EVM with [`ConfigureEvmEnv`] methods. It - /// is up to the caller to call an appropriate method to fill the transaction and block - /// environment before executing any transactions using the provided EVM. - fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> - where - DB: Database, - I: GetInspector, - { - RethEvmBuilder::new(db).build_with_inspector(inspector) - } + I: GetInspector; } impl<'b, T> ConfigureEvm for &'b T @@ -118,10 +81,6 @@ where T: ConfigureEvm, &'b T: ConfigureEvmEnv
, { - fn evm(&self, db: DB) -> Evm<'_, (), DB> { - (*self).evm(db) - } - fn evm_for_block(&self, db: DB, header: &Self::Header) -> Evm<'_, (), DB> { (*self).evm_for_block(db, header) } @@ -143,14 +102,6 @@ where { (*self).evm_with_env_and_inspector(db, evm_env, tx_env, inspector) } - - fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> - where - DB: Database, - I: GetInspector, - { - (*self).evm_with_inspector(db, inspector) - } } /// This represents the set of methods used to configure the EVM's environment before block diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index dbadd23b03171..ad172c7644113 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -168,19 +168,41 @@ impl ConfigureEvmEnv for OpEvmConfig { } impl ConfigureEvm for OpEvmConfig { - fn evm(&self, db: DB) -> Evm<'_, (), DB> { - EvmBuilder::default().with_db(db).optimism().build() + fn evm_with_env( + &self, + db: DB, + mut evm_env: EvmEnv, + tx: TxEnv, + ) -> Evm<'_, (), DB> { + evm_env.cfg_env_with_handler_cfg.handler_cfg.is_optimism = true; + + EvmBuilder::default() + .with_db(db) + .with_cfg_env_with_handler_cfg(evm_env.cfg_env_with_handler_cfg) + .with_block_env(evm_env.block_env) + .with_tx_env(tx) + .build() } - fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> + fn evm_with_env_and_inspector( + &self, + db: DB, + mut evm_env: EvmEnv, + tx: TxEnv, + inspector: I, + ) -> Evm<'_, I, DB> where DB: Database, I: GetInspector, { + evm_env.cfg_env_with_handler_cfg.handler_cfg.is_optimism = true; + EvmBuilder::default() .with_db(db) .with_external_context(inspector) - .optimism() + .with_cfg_env_with_handler_cfg(evm_env.cfg_env_with_handler_cfg) + .with_block_env(evm_env.block_env) + .with_tx_env(tx) .append_handler_register(inspector_handle_register) .build() } @@ -189,14 +211,10 @@ impl ConfigureEvm for OpEvmConfig { #[cfg(test)] mod tests { use super::*; - use alloy_consensus::{constants::KECCAK_EMPTY, Header, Receipt}; + use alloy_consensus::{Header, Receipt}; use alloy_eips::eip7685::Requests; use alloy_genesis::Genesis; - use alloy_primitives::{ - bytes, - map::{HashMap, HashSet}, - Address, LogData, B256, U256, - }; + use alloy_primitives::{bytes, map::HashMap, Address, LogData, B256, U256}; use reth_chainspec::ChainSpec; use reth_evm::execute::ProviderError; use reth_execution_types::{ @@ -209,7 +227,6 @@ mod tests { db::{BundleState, CacheDB, EmptyDBTyped}, inspectors::NoOpInspector, primitives::{AccountInfo, BlockEnv, CfgEnv, SpecId}, - JournaledState, }; use revm_primitives::HandlerCfg; use std::sync::Arc; @@ -244,47 +261,6 @@ mod tests { assert_eq!(cfg_env_with_handler_cfg.chain_id, chain_spec.chain().id()); } - #[test] - fn test_evm_configure() { - // Create a default `OpEvmConfig` - let evm_config = test_evm_config(); - - // Initialize an empty database wrapped in CacheDB - let db = CacheDB::>::default(); - - // Create an EVM instance using the configuration and the database - let evm = evm_config.evm(db); - - // Check that the EVM environment is initialized with default values - assert_eq!(evm.context.evm.inner.env, Box::default()); - - // Latest spec ID and no warm preloaded addresses - assert_eq!( - evm.context.evm.inner.journaled_state, - JournaledState::new(SpecId::LATEST, HashSet::default()) - ); - - // Ensure that the accounts database is empty - assert!(evm.context.evm.inner.db.accounts.is_empty()); - - // Ensure that the block hashes database is empty - 
assert!(evm.context.evm.inner.db.block_hashes.is_empty()); - - // Verify that there are two default contracts in the contracts database - assert_eq!(evm.context.evm.inner.db.contracts.len(), 2); - assert!(evm.context.evm.inner.db.contracts.contains_key(&KECCAK_EMPTY)); - assert!(evm.context.evm.inner.db.contracts.contains_key(&B256::ZERO)); - - // Ensure that the logs database is empty - assert!(evm.context.evm.inner.db.logs.is_empty()); - - // Optimism in handler - assert_eq!(evm.handler.cfg, HandlerCfg { spec_id: SpecId::LATEST, is_optimism: true }); - - // Default spec ID - assert_eq!(evm.handler.spec_id(), SpecId::LATEST); - } - #[test] fn test_evm_with_env_default_spec() { let evm_config = test_evm_config(); @@ -389,50 +365,6 @@ mod tests { assert_eq!(evm.handler.cfg, HandlerCfg { spec_id: SpecId::ECOTONE, is_optimism: true }); } - #[test] - fn test_evm_with_inspector() { - let evm_config = test_evm_config(); - - let db = CacheDB::>::default(); - - // No operation inspector - let noop = NoOpInspector; - - let evm = evm_config.evm_with_inspector(db, noop); - - // Check that the inspector is set correctly - assert_eq!(evm.context.external, noop); - - // Check that the EVM environment is initialized with default values - assert_eq!(evm.context.evm.inner.env, Box::default()); - - // Latest spec ID and no warm preloaded addresses - assert_eq!( - evm.context.evm.inner.journaled_state, - JournaledState::new(SpecId::LATEST, HashSet::default()) - ); - - // Ensure that the accounts database is empty - assert!(evm.context.evm.inner.db.accounts.is_empty()); - - // Ensure that the block hashes database is empty - assert!(evm.context.evm.inner.db.block_hashes.is_empty()); - - // Verify that there are two default contracts in the contracts database - assert_eq!(evm.context.evm.inner.db.contracts.len(), 2); - assert!(evm.context.evm.inner.db.contracts.contains_key(&KECCAK_EMPTY)); - assert!(evm.context.evm.inner.db.contracts.contains_key(&B256::ZERO)); - - // Ensure that the logs database is empty - assert!(evm.context.evm.inner.db.logs.is_empty()); - - // Default spec ID - assert_eq!(evm.handler.spec_id(), SpecId::LATEST); - - // Optimism in handler - assert_eq!(evm.handler.cfg, HandlerCfg { spec_id: SpecId::LATEST, is_optimism: true }); - } - #[test] fn test_evm_with_env_and_default_inspector() { let evm_config = test_evm_config(); diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index 3490914b67bce..ac534d43c6770 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -55,8 +55,8 @@ impl MyEvmConfig { impl MyEvmConfig { /// Sets the precompiles to the EVM handler /// - /// This will be invoked when the EVM is created via [ConfigureEvm::evm] or - /// [ConfigureEvm::evm_with_inspector] + /// This will be invoked when the EVM is created via [ConfigureEvm::evm_with_env] or + /// [ConfigureEvm::evm_with_env_and_inspector] /// /// This will use the default mainnet precompiles and add additional precompiles. 
pub fn set_precompiles(handler: &mut EvmHandler) @@ -117,15 +117,24 @@ impl ConfigureEvmEnv for MyEvmConfig { } impl ConfigureEvm for MyEvmConfig { - fn evm(&self, db: DB) -> Evm<'_, (), DB> { + fn evm_with_env(&self, db: DB, evm_env: EvmEnv, tx: TxEnv) -> Evm<'_, (), DB> { EvmBuilder::default() .with_db(db) + .with_cfg_env_with_handler_cfg(evm_env.cfg_env_with_handler_cfg) + .with_block_env(evm_env.block_env) + .with_tx_env(tx) // add additional precompiles .append_handler_register(MyEvmConfig::set_precompiles) .build() } - fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> + fn evm_with_env_and_inspector( + &self, + db: DB, + evm_env: EvmEnv, + tx: TxEnv, + inspector: I, + ) -> Evm<'_, I, DB> where DB: Database, I: GetInspector, @@ -133,6 +142,9 @@ impl ConfigureEvm for MyEvmConfig { EvmBuilder::default() .with_db(db) .with_external_context(inspector) + .with_cfg_env_with_handler_cfg(evm_env.cfg_env_with_handler_cfg) + .with_block_env(evm_env.block_env) + .with_tx_env(tx) // add additional precompiles .append_handler_register(MyEvmConfig::set_precompiles) .append_handler_register(inspector_handle_register) diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index 1fb4dbefb3a99..2cdebfe5602c7 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -68,8 +68,8 @@ impl MyEvmConfig { /// Sets the precompiles to the EVM handler /// - /// This will be invoked when the EVM is created via [ConfigureEvm::evm] or - /// [ConfigureEvm::evm_with_inspector] + /// This will be invoked when the EVM is created via [ConfigureEvm::evm_with_env] or + /// [ConfigureEvm::evm_with_env_and_inspector] /// /// This will use the default mainnet precompiles and wrap them with a cache. 
pub fn set_precompiles( @@ -179,10 +179,13 @@ impl ConfigureEvmEnv for MyEvmConfig { } impl ConfigureEvm for MyEvmConfig { - fn evm(&self, db: DB) -> Evm<'_, (), DB> { + fn evm_with_env(&self, db: DB, evm_env: EvmEnv, tx: TxEnv) -> Evm<'_, (), DB> { let new_cache = self.precompile_cache.clone(); EvmBuilder::default() .with_db(db) + .with_cfg_env_with_handler_cfg(evm_env.cfg_env_with_handler_cfg) + .with_block_env(evm_env.block_env) + .with_tx_env(tx) // add additional precompiles .append_handler_register_box(Box::new(move |handler| { MyEvmConfig::set_precompiles(handler, new_cache.clone()) @@ -190,7 +193,13 @@ impl ConfigureEvm for MyEvmConfig { .build() } - fn evm_with_inspector(&self, db: DB, inspector: I) -> Evm<'_, I, DB> + fn evm_with_env_and_inspector( + &self, + db: DB, + evm_env: EvmEnv, + tx: TxEnv, + inspector: I, + ) -> Evm<'_, I, DB> where DB: Database, I: GetInspector, @@ -199,6 +208,9 @@ impl ConfigureEvm for MyEvmConfig { EvmBuilder::default() .with_db(db) .with_external_context(inspector) + .with_cfg_env_with_handler_cfg(evm_env.cfg_env_with_handler_cfg) + .with_block_env(evm_env.block_env) + .with_tx_env(tx) // add additional precompiles .append_handler_register_box(Box::new(move |handler| { MyEvmConfig::set_precompiles(handler, new_cache.clone()) From 11bd9dded36c694afbc8b1b550e690976f8d25f0 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 15 Jan 2025 19:32:13 +0100 Subject: [PATCH 077/113] fix(builder): prague blob params activation (#13810) --- crates/ethereum/payload/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 7d49570ff8d4d..7f529aadadc9b 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -429,7 +429,7 @@ where .map_err(PayloadBuilderError::other)?; excess_blob_gas = if chain_spec.is_cancun_active_at_timestamp(parent_header.timestamp) { - let blob_params = if chain_spec.is_prague_active_at_timestamp(parent_header.timestamp) { + let blob_params = if chain_spec.is_prague_active_at_timestamp(attributes.timestamp) { BlobParams::prague() } else { // cancun From ac73b520794faa3ea0e428274ebbeffc2719bb53 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 16 Jan 2025 08:47:14 +0000 Subject: [PATCH 078/113] fix(trie): remove branch nodes from updates if it was deleted (#13813) --- crates/trie/sparse/src/trie.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 21a64c0d6c210..b00b7f4ab967e 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1113,6 +1113,7 @@ impl RevealedSparseTrie
<P>
{ } if let Some(updates) = self.updates.as_mut() { + updates.updated_nodes.remove(&removed_path); updates.removed_nodes.insert(removed_path.clone()); } From bbc592c5bf08aeaf35b1d14b10f6c149abd1fd9a Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 16 Jan 2025 08:47:17 +0000 Subject: [PATCH 079/113] fix(trie): remove branch nodes in sparse trie that shouldn't be stored (#13808) --- crates/trie/sparse/src/state.rs | 4 +-- crates/trie/sparse/src/trie.rs | 51 ++++++++++++++++++++------------- 2 files changed, 33 insertions(+), 22 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 505a326c0bf45..4b47217f98936 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -246,7 +246,7 @@ impl SparseStateTrie { }; trace!(target: "trie::sparse", ?path, ?node, ?hash_mask, ?tree_mask, "Revealing account node"); - trie.reveal_node(path, node, hash_mask, tree_mask)?; + trie.reveal_node(path, node, tree_mask, hash_mask)?; } } @@ -277,7 +277,7 @@ impl SparseStateTrie { }; trace!(target: "trie::sparse", ?account, ?path, ?node, ?hash_mask, ?tree_mask, "Revealing storage node"); - trie.reveal_node(path, node, hash_mask, tree_mask)?; + trie.reveal_node(path, node, tree_mask, hash_mask)?; } } } diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index b00b7f4ab967e..b5bed53b53ef9 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -170,10 +170,10 @@ pub struct RevealedSparseTrie
<P = DefaultBlindedProvider>
{ provider: P, /// All trie nodes. nodes: HashMap, - /// All branch node hash masks. - branch_node_hash_masks: HashMap, /// All branch node tree masks. branch_node_tree_masks: HashMap, + /// All branch node hash masks. + branch_node_hash_masks: HashMap, /// All leaf values. values: HashMap>, /// Prefix set. @@ -188,8 +188,8 @@ impl
<P> fmt::Debug for RevealedSparseTrie<P>
{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RevealedSparseTrie") .field("nodes", &self.nodes) - .field("branch_hash_masks", &self.branch_node_hash_masks) .field("branch_tree_masks", &self.branch_node_tree_masks) + .field("branch_hash_masks", &self.branch_node_hash_masks) .field("values", &self.values) .field("prefix_set", &self.prefix_set) .field("updates", &self.updates) @@ -203,8 +203,8 @@ impl Default for RevealedSparseTrie { Self { provider: Default::default(), nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), - branch_node_hash_masks: HashMap::default(), branch_node_tree_masks: HashMap::default(), + branch_node_hash_masks: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), updates: None, @@ -224,15 +224,15 @@ impl RevealedSparseTrie { let mut this = Self { provider: Default::default(), nodes: HashMap::default(), - branch_node_hash_masks: HashMap::default(), branch_node_tree_masks: HashMap::default(), + branch_node_hash_masks: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), rlp_buf: Vec::new(), updates: None, } .with_updates(retain_updates); - this.reveal_node(Nibbles::default(), node, hash_mask, tree_mask)?; + this.reveal_node(Nibbles::default(), node, tree_mask, hash_mask)?; Ok(this) } } @@ -249,15 +249,15 @@ impl
<P> RevealedSparseTrie<P>
{ let mut this = Self { provider, nodes: HashMap::default(), - branch_node_hash_masks: HashMap::default(), branch_node_tree_masks: HashMap::default(), + branch_node_hash_masks: HashMap::default(), values: HashMap::default(), prefix_set: PrefixSetMut::default(), rlp_buf: Vec::new(), updates: None, } .with_updates(retain_updates); - this.reveal_node(Nibbles::default(), node, hash_mask, tree_mask)?; + this.reveal_node(Nibbles::default(), node, tree_mask, hash_mask)?; Ok(this) } @@ -266,8 +266,8 @@ impl
<P> RevealedSparseTrie<P>
{ RevealedSparseTrie { provider, nodes: self.nodes, - branch_node_hash_masks: self.branch_node_hash_masks, branch_node_tree_masks: self.branch_node_tree_masks, + branch_node_hash_masks: self.branch_node_hash_masks, values: self.values, prefix_set: self.prefix_set, updates: self.updates, @@ -303,20 +303,20 @@ impl
<P> RevealedSparseTrie<P>
{ &mut self, path: Nibbles, node: TrieNode, - hash_mask: Option, tree_mask: Option, + hash_mask: Option, ) -> SparseTrieResult<()> { // If the node is already revealed and it's not a hash node, do nothing. if self.nodes.get(&path).is_some_and(|node| !node.is_hash()) { return Ok(()) } - if let Some(hash_mask) = hash_mask { - self.branch_node_hash_masks.insert(path.clone(), hash_mask); - } if let Some(tree_mask) = tree_mask { self.branch_node_tree_masks.insert(path.clone(), tree_mask); } + if let Some(hash_mask) = hash_mask { + self.branch_node_hash_masks.insert(path.clone(), hash_mask); + } match node { TrieNode::EmptyRoot => { @@ -807,10 +807,10 @@ impl
<P> RevealedSparseTrie<P>
{ let store_in_db_trie_value = if let Some(updates) = self.updates.as_mut().filter(|_| retain_updates && !path.is_empty()) { - // Store in DB trie if there are either any children that are stored in the - // DB trie, or any children represent hashed values let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty(); if store_in_db_trie { + // Store in DB trie if there are either any children that are stored in + // the DB trie, or any children represent hashed values hashes.reverse(); let branch_node = BranchNodeCompact::new( *state_mask, @@ -820,6 +820,17 @@ impl
<P> RevealedSparseTrie<P>
{ hash.filter(|_| path.is_empty()), ); updates.updated_nodes.insert(path.clone(), branch_node); + } else if self + .branch_node_tree_masks + .get(&path) + .is_some_and(|mask| !mask.is_empty()) || + self.branch_node_hash_masks + .get(&path) + .is_some_and(|mask| !mask.is_empty()) + { + // If new tree and hash masks are empty, but previously they weren't, we + // need to remove the node. + updates.removed_nodes.insert(path.clone()); } store_in_db_trie @@ -2111,7 +2122,7 @@ mod tests { let hash_mask = branch_node_hash_masks.get(&path).copied(); let tree_mask = branch_node_tree_masks.get(&path).copied(); sparse - .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask, tree_mask) + .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), tree_mask, hash_mask) .unwrap(); } @@ -2137,7 +2148,7 @@ mod tests { let hash_mask = branch_node_hash_masks.get(&path).copied(); let tree_mask = branch_node_tree_masks.get(&path).copied(); sparse - .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask, tree_mask) + .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), tree_mask, hash_mask) .unwrap(); } @@ -2202,7 +2213,7 @@ mod tests { let hash_mask = branch_node_hash_masks.get(&path).copied(); let tree_mask = branch_node_tree_masks.get(&path).copied(); sparse - .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask, tree_mask) + .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), tree_mask, hash_mask) .unwrap(); } @@ -2232,7 +2243,7 @@ mod tests { let hash_mask = branch_node_hash_masks.get(&path).copied(); let tree_mask = branch_node_tree_masks.get(&path).copied(); sparse - .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask, tree_mask) + .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), tree_mask, hash_mask) .unwrap(); } @@ -2300,7 +2311,7 @@ mod tests { let hash_mask = branch_node_hash_masks.get(&path).copied(); let tree_mask = branch_node_tree_masks.get(&path).copied(); sparse - .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), hash_mask, tree_mask) + .reveal_node(path, TrieNode::decode(&mut &node[..]).unwrap(), tree_mask, hash_mask) .unwrap(); } From 1948e0f79cd7af50f484d7a6972e8232cd504bbd Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Thu, 16 Jan 2025 10:30:53 +0100 Subject: [PATCH 080/113] feat(trie): sparse trie accessors (#13815) --- crates/trie/sparse/src/state.rs | 13 +++++++++++++ crates/trie/sparse/src/trie.rs | 8 ++++++-- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 4b47217f98936..2def53e57aa59 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -105,6 +105,19 @@ impl SparseStateTrie { self.storages.get(account)?.as_revealed_ref()?.get_leaf_value(&Nibbles::unpack(slot)) } + /// Returns reference to state trie if it was revealed. + pub const fn state_trie_ref(&self) -> Option<&RevealedSparseTrie> { + self.state.as_revealed_ref() + } + + /// Returns reference to storage trie if it was revealed. + pub fn storage_trie_ref( + &self, + address: &B256, + ) -> Option<&RevealedSparseTrie> { + self.storages.get(address).and_then(|e| e.as_revealed_ref()) + } + /// Returns mutable reference to storage sparse trie if it was revealed. 
pub fn storage_trie_mut( &mut self, diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index b5bed53b53ef9..bb1f247a2a4ce 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -288,6 +288,11 @@ impl
<P> RevealedSparseTrie<P>
{ self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) } + /// Returns reference to all trie nodes. + pub const fn nodes_ref(&self) -> &HashMap<Nibbles, SparseNode> { + &self.nodes + } + /// Returns a reference to the leaf value if present. pub fn get_leaf_value(&self, path: &Nibbles) -> Option<&Vec<u8>> { self.values.get(path) @@ -1130,8 +1135,7 @@ impl<P> RevealedSparseTrie<P>

{ new_node } - // If more than one child is left set in the branch, we just re-insert it - // as-is. + // If more than one child is left set in the branch, we just re-insert it as-is. else { SparseNode::new_branch(state_mask) } From f1f9d5a652555516b0202d13ae071b47d05a2aca Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 16 Jan 2025 11:09:37 +0000 Subject: [PATCH 081/113] fix(trie): delete removed node from updated nodes in sparse trie (#13822) --- crates/trie/sparse/src/trie.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index bb1f247a2a4ce..26f1b0dd9cbc9 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -835,6 +835,7 @@ impl
<P> RevealedSparseTrie<P>
{ { // If new tree and hash masks are empty, but previously they weren't, we // need to remove the node. + updates.updated_nodes.remove(&path); updates.removed_nodes.insert(path.clone()); } From 265f783c226cc5261c9f5d1c3e9702117e885750 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 16 Jan 2025 15:40:45 +0400 Subject: [PATCH 082/113] refactor: always create Evm through ConfigureEvm (#13812) --- Cargo.lock | 1 + crates/ethereum/payload/src/lib.rs | 10 +--- crates/evm/src/system_calls/mod.rs | 55 +++++++------------ crates/optimism/payload/src/builder.rs | 3 +- .../rpc-eth-api/src/helpers/pending_block.rs | 50 ++++++----------- crates/rpc/rpc-eth-api/src/helpers/state.rs | 5 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 14 +---- crates/rpc/rpc-eth-types/Cargo.toml | 1 + crates/rpc/rpc-eth-types/src/pending_block.rs | 10 ++-- 9 files changed, 49 insertions(+), 100 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 69b5d07900b57..09258315cf791 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9004,6 +9004,7 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-errors", + "reth-evm", "reth-execution-types", "reth-metrics", "reth-primitives", diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 7f529aadadc9b..d236eac8649f0 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -202,12 +202,7 @@ where // apply eip-4788 pre block contract call system_caller - .pre_block_beacon_root_contract_call( - &mut db, - evm_env.cfg_env_with_handler_cfg(), - evm_env.block_env(), - attributes.parent_beacon_block_root, - ) + .pre_block_beacon_root_contract_call(&mut db, &evm_env, attributes.parent_beacon_block_root) .map_err(|err| { warn!(target: "payload_builder", parent_hash=%parent_header.hash(), @@ -220,8 +215,7 @@ where // apply eip-2935 blockhashes update system_caller.pre_block_blockhashes_contract_call( &mut db, - evm_env.cfg_env_with_handler_cfg(), - evm_env.block_env(), + &evm_env, parent_header.hash(), ) .map_err(|err| { diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index 4d0fc8041d457..63527b29b2198 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -1,6 +1,6 @@ //! System contract call functions. 
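// A condensed sketch of the pattern this refactor (#13812) converges on: instead of
// hand-assembling an `Evm` from separate `CfgEnvWithHandlerCfg`/`BlockEnv` values
// (the `initialize_evm` helper deleted below), system calls obtain the EVM from the
// node's `ConfigureEvm`, so cfg and block settings cannot drift from block execution.
// Illustrative only: the helper name `configured_system_call_evm` is hypothetical and
// the bounds are trimmed to what the hunks below actually use.
use crate::{ConfigureEvm, EvmEnv};
use revm::{Database, DatabaseCommit};

fn configured_system_call_evm<EvmConfig, DB>(
    evm_config: &EvmConfig,
    db: &mut DB,
    evm_env: &EvmEnv,
) where
    EvmConfig: ConfigureEvm,
    DB: Database + DatabaseCommit,
{
    // Block context is read from the same env the EVM is built with.
    let _timestamp: u64 = evm_env.block_env.timestamp.to();
    let _number: u64 = evm_env.block_env.number.to();
    // One shared `EvmEnv` replaces the two env parameters; the tx env is defaulted,
    // exactly as at the converted call sites. The EIP-4788 / EIP-2935 system calls
    // then run against the returned EVM.
    let _evm = evm_config.evm_with_env(db, evm_env.clone(), Default::default());
}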
-use crate::ConfigureEvm; +use crate::{ConfigureEvm, EvmEnv}; use alloc::{boxed::Box, sync::Arc}; use alloy_consensus::BlockHeader; use alloy_eips::{ @@ -11,7 +11,7 @@ use core::fmt::Display; use reth_chainspec::EthereumHardforks; use reth_execution_errors::BlockExecutionError; use revm::{Database, DatabaseCommit, Evm}; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, EvmState, B256}; +use revm_primitives::{EvmState, B256}; mod eip2935; mod eip4788; @@ -70,24 +70,6 @@ impl SystemCaller { pub fn finish(self) {} } -fn initialize_evm<'a, DB>( - db: &'a mut DB, - initialized_cfg: &'a CfgEnvWithHandlerCfg, - initialized_block_env: &'a BlockEnv, -) -> Evm<'a, (), &'a mut DB> -where - DB: Database, -{ - Evm::builder() - .with_db(db) - .with_env_with_handler_cfg(EnvWithHandlerCfg::new_with_cfg_env( - initialized_cfg.clone(), - initialized_block_env.clone(), - Default::default(), - )) - .build() -} - impl SystemCaller where EvmConfig: ConfigureEvm, @@ -149,18 +131,19 @@ where pub fn pre_block_blockhashes_contract_call( &mut self, db: &mut DB, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, + evm_env: &EvmEnv, parent_block_hash: B256, ) -> Result<(), BlockExecutionError> where DB: Database + DatabaseCommit, DB::Error: Display, { - let mut evm = initialize_evm(db, initialized_cfg, initialized_block_env); + let evm_config = self.evm_config.clone(); + let mut evm = evm_config.evm_with_env(db, evm_env.clone(), Default::default()); + self.apply_blockhashes_contract_call( - initialized_block_env.timestamp.to(), - initialized_block_env.number.to(), + evm_env.block_env.timestamp.to(), + evm_env.block_env.number.to(), parent_block_hash, &mut evm, )?; @@ -203,19 +186,19 @@ where pub fn pre_block_beacon_root_contract_call( &mut self, db: &mut DB, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, + evm_env: &EvmEnv, parent_beacon_block_root: Option, ) -> Result<(), BlockExecutionError> where DB: Database + DatabaseCommit, DB::Error: Display, { - let mut evm = initialize_evm(db, initialized_cfg, initialized_block_env); + let evm_config = self.evm_config.clone(); + let mut evm = evm_config.evm_with_env(db, evm_env.clone(), Default::default()); self.apply_beacon_root_contract_call( - initialized_block_env.timestamp.to(), - initialized_block_env.number.to(), + evm_env.block_env.timestamp.to(), + evm_env.block_env.number.to(), parent_beacon_block_root, &mut evm, )?; @@ -258,14 +241,14 @@ where pub fn post_block_withdrawal_requests_contract_call( &mut self, db: &mut DB, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, + evm_env: &EvmEnv, ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, { - let mut evm = initialize_evm(db, initialized_cfg, initialized_block_env); + let evm_config = self.evm_config.clone(); + let mut evm = evm_config.evm_with_env(db, evm_env.clone(), Default::default()); let result = self.apply_withdrawal_requests_contract_call(&mut evm)?; @@ -296,14 +279,14 @@ where pub fn post_block_consolidation_requests_contract_call( &mut self, db: &mut DB, - initialized_cfg: &CfgEnvWithHandlerCfg, - initialized_block_env: &BlockEnv, + evm_env: &EvmEnv, ) -> Result where DB: Database + DatabaseCommit, DB::Error: Display, { - let mut evm = initialize_evm(db, initialized_cfg, initialized_block_env); + let evm_config = self.evm_config.clone(); + let mut evm = evm_config.evm_with_env(db, evm_env.clone(), Default::default()); let res = self.apply_consolidation_requests_contract_call(&mut 
evm)?; diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index 12dbd0b5bca22..fd2bcffe596b0 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -713,8 +713,7 @@ where SystemCaller::new(self.evm_config.clone(), self.chain_spec.clone()) .pre_block_beacon_root_contract_call( db, - &self.evm_env.cfg_env_with_handler_cfg, - &self.evm_env.block_env, + &self.evm_env, self.attributes().payload_attributes.parent_beacon_block_root, ) .map_err(|err| { diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index e947a0d0a6d7c..968ef379fced3 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -23,10 +23,7 @@ use reth_provider::{ }; use reth_revm::{ database::StateProviderDatabase, - primitives::{ - BlockEnv, CfgEnvWithHandlerCfg, EVMError, Env, ExecutionResult, InvalidTransaction, - ResultAndState, - }, + primitives::{BlockEnv, EVMError, ExecutionResult, InvalidTransaction, ResultAndState}, }; use reth_rpc_eth_types::{EthApiError, PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; use reth_transaction_pool::{ @@ -65,7 +62,7 @@ pub trait LoadPendingBlock: &self, ) -> &Mutex, ProviderReceipt>>>; - /// Configures the [`CfgEnvWithHandlerCfg`] and [`BlockEnv`] for the pending block + /// Configures the [`EvmEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block #[expect(clippy::type_complexity)] @@ -86,12 +83,10 @@ pub trait LoadPendingBlock: // Note: for the PENDING block we assume it is past the known merge block and // thus this will not fail when looking up the total // difficulty value for the blockenv. - let EvmEnv { cfg_env_with_handler_cfg, block_env } = - self.evm_config().cfg_and_block_env(block.header()); + let evm_env = self.evm_config().cfg_and_block_env(block.header()); return Ok(PendingBlockEnv::new( - cfg_env_with_handler_cfg, - block_env, + evm_env, PendingBlockEnvOrigin::ActualPending(block, receipts), )); } @@ -105,7 +100,7 @@ pub trait LoadPendingBlock: .map_err(Self::Error::from_eth_err)? 
.ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; - let EvmEnv { cfg_env_with_handler_cfg, block_env } = self + let evm_env = self .evm_config() .next_cfg_and_block_env( &latest, @@ -119,11 +114,7 @@ pub trait LoadPendingBlock: .map_err(RethError::other) .map_err(Self::Error::from_eth_err)?; - Ok(PendingBlockEnv::new( - cfg_env_with_handler_cfg, - block_env, - PendingBlockEnvOrigin::DerivedFromLatest(latest.hash()), - )) + Ok(PendingBlockEnv::new(evm_env, PendingBlockEnvOrigin::DerivedFromLatest(latest.hash()))) } /// Returns the locally built pending block @@ -159,7 +150,7 @@ pub trait LoadPendingBlock: // check if the block is still good if let Some(pending_block) = lock.as_ref() { // this is guaranteed to be the `latest` header - if pending.block_env.number.to::() == pending_block.block.number() && + if pending.evm_env.block_env.number.to::() == pending_block.block.number() && parent_hash == pending_block.block.parent_hash() && now <= pending_block.expires_at { @@ -171,7 +162,7 @@ pub trait LoadPendingBlock: let (sealed_block, receipts) = match self .spawn_blocking_io(move |this| { // we rebuild the block - this.build_block(pending.cfg, pending.block_env, parent_hash) + this.build_block(pending.evm_env, parent_hash) }) .await { @@ -243,8 +234,7 @@ pub trait LoadPendingBlock: #[expect(clippy::type_complexity)] fn build_block( &self, - cfg: CfgEnvWithHandlerCfg, - block_env: BlockEnv, + evm_env: EvmEnv, parent_hash: B256, ) -> Result< (RecoveredBlock>, Vec>), @@ -262,15 +252,15 @@ pub trait LoadPendingBlock: let mut cumulative_gas_used = 0; let mut sum_blob_gas_used = 0; - let block_gas_limit: u64 = block_env.gas_limit.to::(); - let base_fee = block_env.basefee.to::(); + let block_gas_limit: u64 = evm_env.block_env.gas_limit.to::(); + let base_fee = evm_env.block_env.basefee.to::(); let mut executed_txs = Vec::new(); let mut senders = Vec::new(); let mut best_txs = self.pool().best_transactions_with_attributes(BestTransactionsAttributes::new( base_fee, - block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), + evm_env.block_env.get_blob_gasprice().map(|gasprice| gasprice as u64), )); let chain_spec = self.provider().chain_spec(); @@ -278,7 +268,7 @@ pub trait LoadPendingBlock: let mut system_caller = SystemCaller::new(self.evm_config().clone(), chain_spec.clone()); system_caller - .pre_block_blockhashes_contract_call(&mut db, &cfg, &block_env, parent_hash) + .pre_block_blockhashes_contract_call(&mut db, &evm_env, parent_hash) .map_err(|err| EthApiError::Internal(err.into()))?; let mut results = Vec::new(); @@ -334,14 +324,8 @@ pub trait LoadPendingBlock: } } - // Configure the environment for the block. - let env = Env::boxed( - cfg.cfg_env.clone(), - block_env.clone(), - Self::evm_config(self).tx_env(tx.tx(), tx.signer()), - ); - - let mut evm = revm::Evm::builder().with_env(env).with_db(&mut db).build(); + let tx_env = self.evm_config().tx_env(tx.tx(), tx.signer()); + let mut evm = self.evm_config().evm_with_env(&mut db, evm_env.clone(), tx_env); let ResultAndState { result, state } = match evm.transact() { Ok(res) => res, @@ -399,7 +383,7 @@ pub trait LoadPendingBlock: // executes the withdrawals and commits them to the Database and BundleState. 
let balance_increments = post_block_withdrawals_balance_increments( chain_spec.as_ref(), - block_env.timestamp.try_into().unwrap_or(u64::MAX), + evm_env.block_env.timestamp.try_into().unwrap_or(u64::MAX), &[], ); @@ -416,7 +400,7 @@ pub trait LoadPendingBlock: let state_root = db.database.state_root(hashed_state).map_err(Self::Error::from_eth_err)?; let (block, receipts) = self.assemble_block_and_receipts( - &block_env, + &evm_env.block_env, parent_hash, state_root, executed_txs, diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index fe068ec4d1e6a..2f33ab1122a2f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -219,9 +219,8 @@ pub trait LoadState: { async move { if at.is_pending() { - let PendingBlockEnv { cfg, block_env, origin } = - self.pending_block_env_and_cfg()?; - Ok(((cfg, block_env).into(), origin.state_block_id())) + let PendingBlockEnv { evm_env, origin } = self.pending_block_env_and_cfg()?; + Ok((evm_env, origin.state_block_id())) } else { // Use cached values if there is no pending block let block_hash = RpcNodeCore::provider(self) diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 810a49f07ac3f..5e99ba134d29f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -457,21 +457,11 @@ pub trait Trace: SystemCaller::new(self.evm_config().clone(), self.provider().chain_spec()); // apply relevant system calls system_caller - .pre_block_beacon_root_contract_call( - db, - evm_env.cfg_env_with_handler_cfg(), - evm_env.block_env(), - block.parent_beacon_block_root(), - ) + .pre_block_beacon_root_contract_call(db, evm_env, block.parent_beacon_block_root()) .map_err(|_| EthApiError::EvmCustom("failed to apply 4788 system call".to_string()))?; system_caller - .pre_block_blockhashes_contract_call( - db, - evm_env.cfg_env_with_handler_cfg(), - evm_env.block_env(), - block.parent_hash(), - ) + .pre_block_blockhashes_contract_call(db, evm_env, block.parent_hash()) .map_err(|_| { EthApiError::EvmCustom("failed to apply blockhashes system call".to_string()) })?; diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 72b153ab08457..11bf6c6231d21 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -15,6 +15,7 @@ workspace = true reth-chainspec.workspace = true reth-chain-state.workspace = true reth-errors.workspace = true +reth-evm.workspace = true reth-execution-types.workspace = true reth-metrics.workspace = true reth-primitives = { workspace = true, features = ["secp256k1"] } diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 62d5954cd9a5c..bfd6da9927291 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -8,17 +8,15 @@ use alloy_consensus::BlockHeader; use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::B256; use derive_more::Constructor; +use reth_evm::EvmEnv; use reth_primitives::{Receipt, RecoveredBlock}; use reth_primitives_traits::Block; -use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg}; -/// Configured [`BlockEnv`] and [`CfgEnvWithHandlerCfg`] for a pending block. +/// Configured [`EvmEnv`] for a pending block. #[derive(Debug, Clone, Constructor)] pub struct PendingBlockEnv { - /// Configured [`CfgEnvWithHandlerCfg`] for the pending block. 
- pub cfg: CfgEnvWithHandlerCfg, - /// Configured [`BlockEnv`] for the pending block. - pub block_env: BlockEnv, + /// Configured [`EvmEnv`] for the pending block. + pub evm_env: EvmEnv, /// Origin block for the config pub origin: PendingBlockEnvOrigin, } From 590496d0e42f0c17fa54cd11db177b7cee3e6425 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 16 Jan 2025 13:47:18 +0000 Subject: [PATCH 083/113] chore(trie): less logs for sparse branch node updates (#13811) --- crates/trie/sparse/src/trie.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 26f1b0dd9cbc9..9c168fd77bdb4 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -774,15 +774,6 @@ impl
<P> RevealedSparseTrie<P>
{ hash_mask.set_bit(last_child_nibble); hashes.push(hash); } - - trace!( - target: "trie::sparse", - ?path, - ?child_path, - tree_mask_bit_set = should_set_tree_mask_bit, - hash_mask_bit_set = hash.is_some(), - "Updating branch node child masks" - ); } // Insert children in the resulting buffer in a normal order, @@ -801,6 +792,14 @@ impl
<P> RevealedSparseTrie<P>
{ } } + trace!( + target: "trie::sparse", + ?path, + ?tree_mask, + ?hash_mask, + "Branch node masks" + ); + self.rlp_buf.clear(); let branch_node_ref = BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask); From 2c21edfed5d89e3c5181a06aef05e78d2ad06630 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 16 Jan 2025 15:10:09 +0000 Subject: [PATCH 084/113] fix(trie): empty sparse trie branch node masks (#13825) --- crates/trie/sparse/src/trie.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 9c168fd77bdb4..28cdb423ee726 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -833,9 +833,21 @@ impl
<P> RevealedSparseTrie<P>
{ .is_some_and(|mask| !mask.is_empty()) { // If new tree and hash masks are empty, but previously they weren't, we - // need to remove the node. + // need to remove the node update and add the node itself to the list of + // removed nodes. updates.updated_nodes.remove(&path); updates.removed_nodes.insert(path.clone()); + } else if self + .branch_node_tree_masks + .get(&path) + .is_none_or(|mask| mask.is_empty()) && + self.branch_node_hash_masks + .get(&path) + .is_none_or(|mask| mask.is_empty()) + { + // If new tree and hash masks are empty, and they were previously empty + // as well, we need to remove the node update. + updates.updated_nodes.remove(&path); + } store_in_db_trie From 7df983802eeb7e924afe8e6fb64843382831f334 Mon Sep 17 00:00:00 2001 From: Debjit Bhowal Date: Thu, 16 Jan 2025 21:14:15 +0530 Subject: [PATCH 085/113] Higher limit for total_difficulty.bit_len (#13820) --- crates/net/eth-wire/src/ethstream.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/net/eth-wire/src/ethstream.rs b/crates/net/eth-wire/src/ethstream.rs index ccc80594b60e0..212fd8e4694e3 100644 --- a/crates/net/eth-wire/src/ethstream.rs +++ b/crates/net/eth-wire/src/ethstream.rs @@ -158,12 +158,12 @@ where } // TD at mainnet block #7753254 is 76 bits. If it becomes 100 million times - // larger, it will still fit within 100 bits - if status.total_difficulty.bit_len() > 100 { + // larger, it will still fit within 160 bits + if status.total_difficulty.bit_len() > 160 { self.inner.disconnect(DisconnectReason::ProtocolBreach).await?; return Err(EthHandshakeError::TotalDifficultyBitLenTooLarge { got: status.total_difficulty.bit_len(), - maximum: 100, + maximum: 160, } .into()) } @@ -498,7 +498,7 @@ mod tests { let status = Status { version: EthVersion::Eth67, chain: NamedChain::Mainnet.into(), - total_difficulty: U256::from(2).pow(U256::from(100)), + total_difficulty: U256::from(2).pow(U256::from(164)), blockhash: B256::random(), genesis, // Pass the current fork id.
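// Worked check of the new bound, using the figures from the comment above:
// mainnet TD at block #7753254 is 76 bits and log2(10^8) ≈ 26.6, so a value
// 100 million times larger needs about 76 + 27 = 103 bits — already over the
// previous `> 100` limit. The `> 160` threshold keeps roughly 2^57 of further
// headroom while still rejecting absurd `Status` payloads.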
@@ -522,7 +522,7 @@ mod tests { assert!(matches!( handshake_res, Err(EthStreamError::EthHandshakeError( - EthHandshakeError::TotalDifficultyBitLenTooLarge { got: 101, maximum: 100 } + EthHandshakeError::TotalDifficultyBitLenTooLarge { got: 165, maximum: 160 } )) )); }); @@ -539,7 +539,7 @@ mod tests { assert!(matches!( handshake_res, Err(EthStreamError::EthHandshakeError( - EthHandshakeError::TotalDifficultyBitLenTooLarge { got: 101, maximum: 100 } + EthHandshakeError::TotalDifficultyBitLenTooLarge { got: 165, maximum: 160 } )) )); From 13ecd6afa1f129efa14dfdee5147243a5954442c Mon Sep 17 00:00:00 2001 From: Ashish Thapa Date: Thu, 16 Jan 2025 21:41:16 +0545 Subject: [PATCH 086/113] Nit: replace block and sender with RecoveredBlock in ExecutedBlock (#13804) Co-authored-by: Matthias Seitz --- crates/chain-state/src/in_memory.rs | 256 ++++++------- crates/chain-state/src/memory_overlay.rs | 10 +- crates/chain-state/src/test_utils.rs | 5 +- crates/engine/tree/src/engine.rs | 2 +- crates/engine/tree/src/persistence.rs | 12 +- crates/engine/tree/src/tree/mod.rs | 346 +++++++++--------- crates/ethereum/payload/src/lib.rs | 9 +- crates/node/builder/src/launch/engine.rs | 2 +- crates/optimism/payload/src/builder.rs | 9 +- .../src/providers/blockchain_provider.rs | 54 +-- .../provider/src/providers/consistent.rs | 106 ++++-- crates/storage/provider/src/writer/mod.rs | 12 +- 12 files changed, 443 insertions(+), 380 deletions(-) diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 59e01055f7a43..6358370e2b5de 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -6,7 +6,7 @@ use crate::{ }; use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_eips::{eip2718::Encodable2718, BlockHashOrNumber, BlockNumHash}; -use alloy_primitives::{map::HashMap, Address, TxHash, B256}; +use alloy_primitives::{map::HashMap, TxHash, B256}; use parking_lot::RwLock; use reth_chainspec::ChainInfo; use reth_execution_types::{Chain, ExecutionOutcome}; @@ -181,7 +181,7 @@ impl CanonicalInMemoryState { ) -> Self { let in_memory_state = InMemoryState::new(blocks, numbers, pending); let header = in_memory_state.head_state().map_or_else(SealedHeader::default, |state| { - state.block_ref().block().clone_sealed_header() + state.block_ref().recovered_block().clone_sealed_header() }); let chain_info_tracker = ChainInfoTracker::new(header, finalized, safe); let (canon_state_notification_sender, _) = @@ -228,7 +228,8 @@ impl CanonicalInMemoryState { /// Returns the header corresponding to the given hash. pub fn header_by_hash(&self, hash: B256) -> Option> { - self.state_by_hash(hash).map(|block| block.block_ref().block.clone_sealed_header()) + self.state_by_hash(hash) + .map(|block| block.block_ref().recovered_block().clone_sealed_header()) } /// Clears all entries in the in memory state. @@ -241,7 +242,7 @@ impl CanonicalInMemoryState { /// Note: This assumes that the parent block of the pending block is canonical. 
pub fn set_pending_block(&self, pending: ExecutedBlock) { // fetch the state of the pending block's parent block - let parent = self.state_by_hash(pending.block().parent_hash()); + let parent = self.state_by_hash(pending.recovered_block().parent_hash()); let pending = BlockState::with_parent(pending, parent); self.inner.in_memory_state.pending.send_modify(|p| { p.replace(pending); @@ -264,15 +265,15 @@ impl CanonicalInMemoryState { // we first remove the blocks from the reorged chain for block in reorged { - let hash = block.block().hash(); - let number = block.block().number(); + let hash = block.recovered_block().hash(); + let number = block.recovered_block().number(); blocks.remove(&hash); numbers.remove(&number); } // insert the new blocks for block in new_blocks { - let parent = blocks.get(&block.block().parent_hash()).cloned(); + let parent = blocks.get(&block.recovered_block().parent_hash()).cloned(); let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -332,16 +333,16 @@ impl CanonicalInMemoryState { // height) let mut old_blocks = blocks .drain() - .filter(|(_, b)| b.block_ref().block().number() > persisted_height) + .filter(|(_, b)| b.block_ref().recovered_block().number() > persisted_height) .map(|(_, b)| b.block.clone()) .collect::>(); // sort the blocks by number so we can insert them back in natural order (low -> high) - old_blocks.sort_unstable_by_key(|block| block.block().number()); + old_blocks.sort_unstable_by_key(|block| block.recovered_block().number()); // re-insert the blocks in natural order and connect them to their parent blocks for block in old_blocks { - let parent = blocks.get(&block.block().parent_hash()).cloned(); + let parent = blocks.get(&block.recovered_block().parent_hash()).cloned(); let block_state = BlockState::with_parent(block, parent); let hash = block_state.hash(); let number = block_state.number(); @@ -354,7 +355,7 @@ impl CanonicalInMemoryState { // also shift the pending state if it exists self.inner.in_memory_state.pending.send_modify(|p| { if let Some(p) = p.as_mut() { - p.parent = blocks.get(&p.block_ref().block.parent_hash()).cloned(); + p.parent = blocks.get(&p.block_ref().recovered_block().parent_hash()).cloned(); } }); } @@ -461,7 +462,7 @@ impl CanonicalInMemoryState { /// Returns the `SealedHeader` corresponding to the pending state. pub fn pending_sealed_header(&self) -> Option> { - self.pending_state().map(|h| h.block_ref().block().clone_sealed_header()) + self.pending_state().map(|h| h.block_ref().recovered_block().clone_sealed_header()) } /// Returns the `Header` corresponding to the pending state. @@ -471,7 +472,8 @@ impl CanonicalInMemoryState { /// Returns the `SealedBlock` corresponding to the pending state. pub fn pending_block(&self) -> Option> { - self.pending_state().map(|block_state| block_state.block_ref().block().clone()) + self.pending_state() + .map(|block_state| block_state.block_ref().recovered_block().sealed_block().clone()) } /// Returns the `RecoveredBlock` corresponding to the pending state. @@ -479,15 +481,17 @@ impl CanonicalInMemoryState { where N::SignedTx: SignedTransaction, { - self.pending_state() - .and_then(|block_state| block_state.block_ref().block().clone().try_recover().ok()) + self.pending_state().map(|block_state| block_state.block_ref().recovered_block().clone()) } /// Returns a tuple with the `SealedBlock` corresponding to the pending /// state and a vector of its `Receipt`s. 
pub fn pending_block_and_receipts(&self) -> Option> { self.pending_state().map(|block_state| { - (block_state.block_ref().block().clone(), block_state.executed_block_receipts()) + ( + block_state.block_ref().recovered_block().sealed_block().clone(), + block_state.executed_block_receipts(), + ) }) } @@ -547,7 +551,7 @@ impl CanonicalInMemoryState { for block_state in self.canonical_chain() { if let Some(tx) = block_state .block_ref() - .block() + .recovered_block() .body() .transactions() .iter() @@ -571,7 +575,7 @@ impl CanonicalInMemoryState { for block_state in self.canonical_chain() { if let Some((index, tx)) = block_state .block_ref() - .block() + .recovered_block() .body() .transactions() .iter() @@ -582,10 +586,10 @@ impl CanonicalInMemoryState { tx_hash, index: index as u64, block_hash: block_state.hash(), - block_number: block_state.block_ref().block.number(), - base_fee: block_state.block_ref().block.base_fee_per_gas(), - timestamp: block_state.block_ref().block.timestamp(), - excess_blob_gas: block_state.block_ref().block.excess_blob_gas(), + block_number: block_state.block_ref().recovered_block().number(), + base_fee: block_state.block_ref().recovered_block().base_fee_per_gas(), + timestamp: block_state.block_ref().recovered_block().timestamp(), + excess_blob_gas: block_state.block_ref().recovered_block().excess_blob_gas(), }; return Some((tx.clone(), meta)) } @@ -621,7 +625,7 @@ impl BlockState { if let Some(parent) = &self.parent { parent.anchor() } else { - self.block.block().parent_num_hash() + self.block.recovered_block().parent_num_hash() } } @@ -635,27 +639,20 @@ impl BlockState { &self.block } - /// Returns a clone of the block with recovered senders for the state. - pub fn clone_recovered_block(&self) -> RecoveredBlock { - let block = self.block.block().clone(); - let senders = self.block.senders().clone(); - RecoveredBlock::new_sealed(block, senders) - } - /// Returns the hash of executed block that determines the state. pub fn hash(&self) -> B256 { - self.block.block().hash() + self.block.recovered_block().hash() } /// Returns the block number of executed block that determines the state. pub fn number(&self) -> u64 { - self.block.block().number() + self.block.recovered_block().number() } /// Returns the state root after applying the executed block that determines /// the state. pub fn state_root(&self) -> B256 { - self.block.block().state_root() + self.block.recovered_block().state_root() } /// Returns the `Receipts` of executed block that determines the state. 
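// A minimal sketch of the accessor flow after this change (#13804), with imports as
// in the `in_memory.rs` hunks above and an `N: NodePrimitives` bound assumed: the
// former `block` + `senders` pair is folded into one `RecoveredBlock`, so callers
// reach header data and the sealed block through `recovered_block()`.
fn executed_block_ids<N: NodePrimitives>(executed: &ExecutedBlock<N>) -> (B256, B256) {
    let block = executed.recovered_block();
    // `hash()` / `parent_hash()` replace the old `executed.block().hash()` pattern;
    // `block.sealed_block()` still yields the sender-free `SealedBlock` when needed.
    (block.hash(), block.parent_hash())
}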
@@ -748,7 +745,7 @@ impl BlockState { self.chain().find_map(|block_state| { block_state .block_ref() - .block() + .recovered_block() .body() .transactions() .iter() @@ -768,7 +765,7 @@ impl BlockState { self.chain().find_map(|block_state| { block_state .block_ref() - .block() + .recovered_block() .body() .transactions() .iter() @@ -779,10 +776,13 @@ impl BlockState { tx_hash, index: index as u64, block_hash: block_state.hash(), - block_number: block_state.block_ref().block.number(), - base_fee: block_state.block_ref().block.base_fee_per_gas(), - timestamp: block_state.block_ref().block.timestamp(), - excess_blob_gas: block_state.block_ref().block.excess_blob_gas(), + block_number: block_state.block_ref().recovered_block().number(), + base_fee: block_state.block_ref().recovered_block().base_fee_per_gas(), + timestamp: block_state.block_ref().recovered_block().timestamp(), + excess_blob_gas: block_state + .block_ref() + .recovered_block() + .excess_blob_gas(), }; (tx.clone(), meta) }) @@ -793,10 +793,8 @@ impl BlockState { /// Represents an executed block stored in-memory. #[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct ExecutedBlock { - /// Sealed block the rest of fields refer to. - pub block: Arc>, - /// Block's senders. - pub senders: Arc>, + /// Recovered Block + pub recovered_block: Arc>, /// Block's execution outcome. pub execution_output: Arc>, /// Block's hashed state. @@ -808,30 +806,17 @@ pub struct ExecutedBlock { impl ExecutedBlock { /// [`ExecutedBlock`] constructor. pub const fn new( - block: Arc>, - senders: Arc>, + recovered_block: Arc>, execution_output: Arc>, hashed_state: Arc, trie: Arc, ) -> Self { - Self { block, senders, execution_output, hashed_state, trie } + Self { recovered_block, execution_output, hashed_state, trie } } - /// Returns a reference to the executed block. - pub fn block(&self) -> &SealedBlock { - &self.block - } - - /// Returns a reference to the block's senders - pub fn senders(&self) -> &Vec
<Address>
{ - &self.senders - } - - /// Returns a [`RecoveredBlock`] - /// - /// Note: this clones the block and senders. - pub fn clone_recovered_block(&self) -> RecoveredBlock { - RecoveredBlock::new_sealed((*self.block).clone(), (*self.senders).clone()) + /// Returns a reference to [`RecoveredBlock`] + pub fn recovered_block(&self) -> &RecoveredBlock { + &self.recovered_block } /// Returns a reference to the block's execution outcome @@ -890,7 +875,7 @@ impl> NewCanonicalChain { Self::Commit { new } => { let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| { chain.append_block( - exec.clone_recovered_block(), + exec.recovered_block().clone(), exec.execution_outcome().clone(), ); chain @@ -900,14 +885,14 @@ impl> NewCanonicalChain { Self::Reorg { new, old } => { let new = Arc::new(new.iter().fold(Chain::default(), |mut chain, exec| { chain.append_block( - exec.clone_recovered_block(), + exec.recovered_block().clone(), exec.execution_outcome().clone(), ); chain })); let old = Arc::new(old.iter().fold(Chain::default(), |mut chain, exec| { chain.append_block( - exec.clone_recovered_block(), + exec.recovered_block().clone(), exec.execution_outcome().clone(), ); chain @@ -924,7 +909,7 @@ impl> NewCanonicalChain { pub fn tip(&self) -> &SealedBlock { match self { Self::Commit { new } | Self::Reorg { new, .. } => { - new.last().expect("non empty blocks").block() + new.last().expect("non empty blocks").recovered_block() } } } @@ -935,7 +920,9 @@ mod tests { use super::*; use crate::test_utils::TestBlockBuilder; use alloy_eips::eip7685::Requests; - use alloy_primitives::{map::B256HashMap, BlockNumber, Bytes, StorageKey, StorageValue}; + use alloy_primitives::{ + map::B256HashMap, Address, BlockNumber, Bytes, StorageKey, StorageValue, + }; use rand::Rng; use reth_errors::ProviderResult; use reth_primitives::{Account, Bytecode, EthPrimitives, Receipt}; @@ -1168,8 +1155,8 @@ mod tests { let result = in_memory_state.pending_state(); assert!(result.is_some()); let actual_pending_state = result.unwrap(); - assert_eq!(actual_pending_state.block.block().hash(), pending_hash); - assert_eq!(actual_pending_state.block.block().number, pending_number); + assert_eq!(actual_pending_state.block.recovered_block().hash(), pending_hash); + assert_eq!(actual_pending_state.block.recovered_block().number, pending_number); } #[test] @@ -1210,7 +1197,7 @@ mod tests { let state = BlockState::new(block.clone()); - assert_eq!(state.hash(), block.block.hash()); + assert_eq!(state.hash(), block.recovered_block().hash()); } #[test] @@ -1232,7 +1219,7 @@ mod tests { let state = BlockState::new(block.clone()); - assert_eq!(state.state_root(), block.block().state_root); + assert_eq!(state.state_root(), block.recovered_block().state_root); } #[test] @@ -1255,18 +1242,24 @@ mod tests { let block2 = test_block_builder.get_executed_block_with_number(0, B256::random()); let chain = NewCanonicalChain::Commit { new: vec![block1.clone()] }; state.update_chain(chain); - assert_eq!(state.head_state().unwrap().block_ref().block().hash(), block1.block().hash()); assert_eq!( - state.state_by_number(0).unwrap().block_ref().block().hash(), - block1.block().hash() + state.head_state().unwrap().block_ref().recovered_block().hash(), + block1.recovered_block().hash() + ); + assert_eq!( + state.state_by_number(0).unwrap().block_ref().recovered_block().hash(), + block1.recovered_block().hash() ); let chain = NewCanonicalChain::Reorg { new: vec![block2.clone()], old: vec![block1] }; state.update_chain(chain); - 
assert_eq!(state.head_state().unwrap().block_ref().block().hash(), block2.block().hash()); assert_eq!( - state.state_by_number(0).unwrap().block_ref().block().hash(), - block2.block().hash() + state.head_state().unwrap().block_ref().recovered_block().hash(), + block2.recovered_block().hash() + ); + assert_eq!( + state.state_by_number(0).unwrap().block_ref().recovered_block().hash(), + block2.recovered_block().hash() ); assert_eq!(state.inner.in_memory_state.block_count(), 1); @@ -1281,7 +1274,8 @@ mod tests { let block1 = test_block_builder.get_executed_block_with_number(0, B256::random()); // Second block with parent hash of the first block - let block2 = test_block_builder.get_executed_block_with_number(1, block1.block().hash()); + let block2 = + test_block_builder.get_executed_block_with_number(1, block1.recovered_block().hash()); // Commit the two blocks let chain = NewCanonicalChain::Commit { new: vec![block1.clone(), block2.clone()] }; @@ -1300,69 +1294,75 @@ mod tests { ); // Check the pending block - assert_eq!(state.pending_block().unwrap(), block2.block().clone()); + assert_eq!(state.pending_block().unwrap(), block2.recovered_block().sealed_block().clone()); // Check the pending block number and hash assert_eq!( state.pending_block_num_hash().unwrap(), - BlockNumHash { number: 1, hash: block2.block().hash() } + BlockNumHash { number: 1, hash: block2.recovered_block().hash() } ); // Check the pending header - assert_eq!(state.pending_header().unwrap(), block2.block().header().clone()); + assert_eq!(state.pending_header().unwrap(), block2.recovered_block().header().clone()); // Check the pending sealed header - assert_eq!(state.pending_sealed_header().unwrap(), block2.block().clone_sealed_header()); - - // Check the pending block with senders assert_eq!( - state.pending_recovered_block().unwrap(), - block2.block().clone().try_recover().unwrap() + state.pending_sealed_header().unwrap(), + block2.recovered_block().clone_sealed_header() ); + // Check the pending block with senders + assert_eq!(state.pending_recovered_block().unwrap(), block2.recovered_block().clone()); + // Check the pending block and receipts - assert_eq!(state.pending_block_and_receipts().unwrap(), (block2.block().clone(), vec![])); + assert_eq!( + state.pending_block_and_receipts().unwrap(), + (block2.recovered_block().sealed_block().clone(), vec![]) + ); } #[test] fn test_canonical_in_memory_state_state_provider() { let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block1 = test_block_builder.get_executed_block_with_number(1, B256::random()); - let block2 = test_block_builder.get_executed_block_with_number(2, block1.block().hash()); - let block3 = test_block_builder.get_executed_block_with_number(3, block2.block().hash()); + let block2 = + test_block_builder.get_executed_block_with_number(2, block1.recovered_block().hash()); + let block3 = + test_block_builder.get_executed_block_with_number(3, block2.recovered_block().hash()); let state1 = Arc::new(BlockState::new(block1.clone())); let state2 = Arc::new(BlockState::with_parent(block2.clone(), Some(state1.clone()))); let state3 = Arc::new(BlockState::with_parent(block3.clone(), Some(state2.clone()))); let mut blocks = HashMap::default(); - blocks.insert(block1.block().hash(), state1); - blocks.insert(block2.block().hash(), state2); - blocks.insert(block3.block().hash(), state3); + blocks.insert(block1.recovered_block().hash(), state1); + blocks.insert(block2.recovered_block().hash(), state2); + 
blocks.insert(block3.recovered_block().hash(), state3); let mut numbers = BTreeMap::new(); - numbers.insert(1, block1.block().hash()); - numbers.insert(2, block2.block().hash()); - numbers.insert(3, block3.block().hash()); + numbers.insert(1, block1.recovered_block().hash()); + numbers.insert(2, block2.recovered_block().hash()); + numbers.insert(3, block3.recovered_block().hash()); let canonical_state = CanonicalInMemoryState::new(blocks, numbers, None, None, None); let historical: StateProviderBox = Box::new(MockStateProvider); - let overlay_provider = canonical_state.state_provider(block3.block().hash(), historical); + let overlay_provider = + canonical_state.state_provider(block3.recovered_block().hash(), historical); assert_eq!(overlay_provider.in_memory.len(), 3); - assert_eq!(overlay_provider.in_memory[0].block().number, 3); - assert_eq!(overlay_provider.in_memory[1].block().number, 2); - assert_eq!(overlay_provider.in_memory[2].block().number, 1); + assert_eq!(overlay_provider.in_memory[0].recovered_block().number, 3); + assert_eq!(overlay_provider.in_memory[1].recovered_block().number, 2); + assert_eq!(overlay_provider.in_memory[2].recovered_block().number, 1); assert_eq!( - overlay_provider.in_memory[0].block().parent_hash, - overlay_provider.in_memory[1].block().hash() + overlay_provider.in_memory[0].recovered_block().parent_hash, + overlay_provider.in_memory[1].recovered_block().hash() ); assert_eq!( - overlay_provider.in_memory[1].block().parent_hash, - overlay_provider.in_memory[2].block().hash() + overlay_provider.in_memory[1].recovered_block().parent_hash, + overlay_provider.in_memory[2].recovered_block().hash() ); let unknown_hash = B256::random(); @@ -1381,7 +1381,7 @@ mod tests { #[test] fn test_canonical_in_memory_state_canonical_chain_single_block() { let block = TestBlockBuilder::eth().get_executed_block_with_number(1, B256::random()); - let hash = block.block().hash(); + let hash = block.recovered_block().hash(); let mut blocks = HashMap::default(); blocks.insert(hash, Arc::new(BlockState::new(block))); let mut numbers = BTreeMap::new(); @@ -1403,7 +1403,7 @@ mod tests { for i in 1..=3 { let block = block_builder.get_executed_block_with_number(i, parent_hash); - let hash = block.block().hash(); + let hash = block.recovered_block().hash(); state.update_blocks(Some(block), None); parent_hash = hash; } @@ -1425,7 +1425,7 @@ mod tests { for i in 1..=2 { let block = block_builder.get_executed_block_with_number(i, parent_hash); - let hash = block.block().hash(); + let hash = block.recovered_block().hash(); state.update_blocks(Some(block), None); parent_hash = hash; } @@ -1446,14 +1446,14 @@ mod tests { let parents = chain[3].parent_state_chain(); assert_eq!(parents.len(), 3); - assert_eq!(parents[0].block().block.number, 3); - assert_eq!(parents[1].block().block.number, 2); - assert_eq!(parents[2].block().block.number, 1); + assert_eq!(parents[0].block().recovered_block().number, 3); + assert_eq!(parents[1].block().recovered_block().number, 2); + assert_eq!(parents[2].block().recovered_block().number, 1); let parents = chain[2].parent_state_chain(); assert_eq!(parents.len(), 2); - assert_eq!(parents[0].block().block.number, 2); - assert_eq!(parents[1].block().block.number, 1); + assert_eq!(parents[0].block().recovered_block().number, 2); + assert_eq!(parents[1].block().recovered_block().number, 1); let parents = chain[0].parent_state_chain(); assert_eq!(parents.len(), 0); @@ -1465,15 +1465,15 @@ mod tests { let mut test_block_builder: TestBlockBuilder = 
TestBlockBuilder::default(); let single_block = create_mock_state(&mut test_block_builder, single_block_number, B256::random()); - let single_block_hash = single_block.block().block.hash(); + let single_block_hash = single_block.block().recovered_block().hash(); let parents = single_block.parent_state_chain(); assert_eq!(parents.len(), 0); let block_state_chain = single_block.chain().collect::>(); assert_eq!(block_state_chain.len(), 1); - assert_eq!(block_state_chain[0].block().block.number, single_block_number); - assert_eq!(block_state_chain[0].block().block.hash(), single_block_hash); + assert_eq!(block_state_chain[0].block().recovered_block().number, single_block_number); + assert_eq!(block_state_chain[0].block().recovered_block().hash(), single_block_hash); } #[test] @@ -1483,18 +1483,18 @@ mod tests { let block_state_chain = chain[2].chain().collect::>(); assert_eq!(block_state_chain.len(), 3); - assert_eq!(block_state_chain[0].block().block.number, 3); - assert_eq!(block_state_chain[1].block().block.number, 2); - assert_eq!(block_state_chain[2].block().block.number, 1); + assert_eq!(block_state_chain[0].block().recovered_block().number, 3); + assert_eq!(block_state_chain[1].block().recovered_block().number, 2); + assert_eq!(block_state_chain[2].block().recovered_block().number, 1); let block_state_chain = chain[1].chain().collect::>(); assert_eq!(block_state_chain.len(), 2); - assert_eq!(block_state_chain[0].block().block.number, 2); - assert_eq!(block_state_chain[1].block().block.number, 1); + assert_eq!(block_state_chain[0].block().recovered_block().number, 2); + assert_eq!(block_state_chain[1].block().recovered_block().number, 1); let block_state_chain = chain[0].chain().collect::>(); assert_eq!(block_state_chain.len(), 1); - assert_eq!(block_state_chain[0].block().block.number, 1); + assert_eq!(block_state_chain[0].block().recovered_block().number, 1); } #[test] @@ -1502,10 +1502,14 @@ mod tests { // Generate 4 blocks let mut test_block_builder: TestBlockBuilder = TestBlockBuilder::default(); let block0 = test_block_builder.get_executed_block_with_number(0, B256::random()); - let block1 = test_block_builder.get_executed_block_with_number(1, block0.block.hash()); - let block1a = test_block_builder.get_executed_block_with_number(1, block0.block.hash()); - let block2 = test_block_builder.get_executed_block_with_number(2, block1.block.hash()); - let block2a = test_block_builder.get_executed_block_with_number(2, block1.block.hash()); + let block1 = + test_block_builder.get_executed_block_with_number(1, block0.recovered_block.hash()); + let block1a = + test_block_builder.get_executed_block_with_number(1, block0.recovered_block.hash()); + let block2 = + test_block_builder.get_executed_block_with_number(2, block1.recovered_block.hash()); + let block2a = + test_block_builder.get_executed_block_with_number(2, block1.recovered_block.hash()); let sample_execution_outcome = ExecutionOutcome { receipts: Receipts::from_iter([vec![], vec![]]), @@ -1520,7 +1524,7 @@ mod tests { chain_commit.to_chain_notification(), CanonStateNotification::Commit { new: Arc::new(Chain::new( - vec![block0.clone_recovered_block(), block1.clone_recovered_block()], + vec![block0.recovered_block().clone(), block1.recovered_block().clone()], sample_execution_outcome.clone(), None )) @@ -1537,12 +1541,12 @@ mod tests { chain_reorg.to_chain_notification(), CanonStateNotification::Reorg { old: Arc::new(Chain::new( - vec![block1.clone_recovered_block(), block2.clone_recovered_block()], + 
vec![block1.recovered_block().clone(), block2.recovered_block().clone()], sample_execution_outcome.clone(), None )), new: Arc::new(Chain::new( - vec![block1a.clone_recovered_block(), block2a.clone_recovered_block()], + vec![block1a.recovered_block().clone(), block2a.recovered_block().clone()], sample_execution_outcome, None )) diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 880c95ab3c37e..51594890b8ac1 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -65,8 +65,8 @@ impl<'a, N: NodePrimitives> MemoryOverlayStateProviderRef<'a, N> { impl BlockHashReader for MemoryOverlayStateProviderRef<'_, N> { fn block_hash(&self, number: BlockNumber) -> ProviderResult> { for block in &self.in_memory { - if block.block.number() == number { - return Ok(Some(block.block.hash())); + if block.recovered_block().number() == number { + return Ok(Some(block.recovered_block().hash())); } } @@ -82,9 +82,9 @@ impl BlockHashReader for MemoryOverlayStateProviderRef<'_, N> let mut earliest_block_number = None; let mut in_memory_hashes = Vec::new(); for block in &self.in_memory { - if range.contains(&block.block.number()) { - in_memory_hashes.insert(0, block.block.hash()); - earliest_block_number = Some(block.block.number()); + if range.contains(&block.recovered_block().number()) { + in_memory_hashes.insert(0, block.recovered_block().hash()); + earliest_block_number = Some(block.recovered_block().number()); } } diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index cce88d713612e..9f8135b2a09aa 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -210,8 +210,7 @@ impl TestBlockBuilder { let (block, senders) = block_with_senders.split_sealed(); ExecutedBlock::new( - Arc::new(block), - Arc::new(senders), + Arc::new(RecoveredBlock::new_sealed(block, senders)), Arc::new(ExecutionOutcome::new( BundleState::default(), receipts, @@ -251,7 +250,7 @@ impl TestBlockBuilder { range.map(move |number| { let current_parent_hash = parent_hash; let block = self.get_executed_block_with_number(number, current_parent_hash); - parent_hash = block.block.hash(); + parent_hash = block.recovered_block().hash(); block }) } diff --git a/crates/engine/tree/src/engine.rs b/crates/engine/tree/src/engine.rs index 238846d4f0d62..1e721627becfa 100644 --- a/crates/engine/tree/src/engine.rs +++ b/crates/engine/tree/src/engine.rs @@ -253,7 +253,7 @@ impl Display for EngineApiRequest { match self { Self::Beacon(msg) => msg.fmt(f), Self::InsertExecutedBlock(block) => { - write!(f, "InsertExecutedBlock({:?})", block.block().num_hash()) + write!(f, "InsertExecutedBlock({:?})", block.recovered_block().num_hash()) } } } diff --git a/crates/engine/tree/src/persistence.rs b/crates/engine/tree/src/persistence.rs index 787120292452b..5c9d1357d2c7f 100644 --- a/crates/engine/tree/src/persistence.rs +++ b/crates/engine/tree/src/persistence.rs @@ -142,11 +142,11 @@ where &self, blocks: Vec>, ) -> Result, PersistenceError> { - debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.block.num_hash()), last=?blocks.last().map(|b| b.block.num_hash()), "Saving range of blocks"); + debug!(target: "engine::persistence", first=?blocks.first().map(|b| b.recovered_block.num_hash()), last=?blocks.last().map(|b| b.recovered_block.num_hash()), "Saving range of blocks"); let start_time = Instant::now(); let last_block_hash_num = blocks.last().map(|block| BlockNumHash { - hash: 
block.block().hash(), - number: block.block().header().number(), + hash: block.recovered_block().hash(), + number: block.recovered_block().header().number(), }); if last_block_hash_num.is_some() { @@ -339,7 +339,7 @@ mod tests { let mut test_block_builder = TestBlockBuilder::eth(); let executed = test_block_builder.get_executed_block_with_number(block_number, B256::random()); - let block_hash = executed.block().hash(); + let block_hash = executed.recovered_block().hash(); let blocks = vec![executed]; let (tx, rx) = oneshot::channel(); @@ -363,7 +363,7 @@ mod tests { let mut test_block_builder = TestBlockBuilder::eth(); let blocks = test_block_builder.get_executed_blocks(0..5).collect::>(); - let last_hash = blocks.last().unwrap().block().hash(); + let last_hash = blocks.last().unwrap().recovered_block().hash(); let (tx, rx) = oneshot::channel(); persistence_handle.save_blocks(blocks, tx).unwrap(); @@ -380,7 +380,7 @@ mod tests { let mut test_block_builder = TestBlockBuilder::eth(); for range in ranges { let blocks = test_block_builder.get_executed_blocks(range).collect::>(); - let last_hash = blocks.last().unwrap().block().hash(); + let last_hash = blocks.last().unwrap().recovered_block().hash(); let (tx, rx) = oneshot::channel(); persistence_handle.save_blocks(blocks, tx).unwrap(); diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index cff2dfdebbd31..059fde3d74145 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -147,7 +147,7 @@ impl TreeState { /// Returns the block by hash. fn block_by_hash(&self, hash: B256) -> Option>> { - self.blocks_by_hash.get(&hash).map(|b| b.block.clone()) + self.blocks_by_hash.get(&hash).map(|b| Arc::new(b.recovered_block().sealed_block().clone())) } /// Returns all available blocks for the given hash that lead back to the canonical chain, from @@ -156,10 +156,10 @@ impl TreeState { /// Returns `None` if the block for the given hash is not found. fn blocks_by_hash(&self, hash: B256) -> Option<(B256, Vec>)> { let block = self.blocks_by_hash.get(&hash).cloned()?; - let mut parent_hash = block.block().parent_hash(); + let mut parent_hash = block.recovered_block().parent_hash(); let mut blocks = vec![block]; while let Some(executed) = self.blocks_by_hash.get(&parent_hash) { - parent_hash = executed.block.parent_hash(); + parent_hash = executed.recovered_block().parent_hash(); blocks.push(executed.clone()); } @@ -168,9 +168,9 @@ impl TreeState { /// Insert executed block into the state. fn insert_executed(&mut self, executed: ExecutedBlock) { - let hash = executed.block.hash(); - let parent_hash = executed.block.parent_hash(); - let block_number = executed.block.number(); + let hash = executed.recovered_block().hash(); + let parent_hash = executed.recovered_block().parent_hash(); + let block_number = executed.recovered_block().number(); if self.blocks_by_hash.contains_key(&hash) { return; @@ -202,7 +202,7 @@ impl TreeState { let executed = self.blocks_by_hash.remove(&hash)?; // Remove this block from collection of children of its parent block. - let parent_entry = self.parent_to_child.entry(executed.block.parent_hash()); + let parent_entry = self.parent_to_child.entry(executed.recovered_block().parent_hash()); if let hash_map::Entry::Occupied(mut entry) = parent_entry { entry.get_mut().remove(&hash); @@ -215,10 +215,11 @@ impl TreeState { let children = self.parent_to_child.remove(&hash).unwrap_or_default(); // Remove this block from `blocks_by_number`. 
- let block_number_entry = self.blocks_by_number.entry(executed.block.number()); + let block_number_entry = self.blocks_by_number.entry(executed.recovered_block().number()); if let btree_map::Entry::Occupied(mut entry) = block_number_entry { // We have to find the index of the block since it exists in a vec - if let Some(index) = entry.get().iter().position(|b| b.block.hash() == hash) { + if let Some(index) = entry.get().iter().position(|b| b.recovered_block().hash() == hash) + { entry.get_mut().swap_remove(index); // If there are no blocks left then remove the entry for this block @@ -239,7 +240,7 @@ impl TreeState { } while let Some(executed) = self.blocks_by_hash.get(¤t_block) { - current_block = executed.block.parent_hash(); + current_block = executed.recovered_block().parent_hash(); if current_block == hash { return true } @@ -267,14 +268,16 @@ impl TreeState { // upper bound let mut current_block = self.current_canonical_head.hash; while let Some(executed) = self.blocks_by_hash.get(¤t_block) { - current_block = executed.block.parent_hash(); - if executed.block.number() <= upper_bound { - debug!(target: "engine::tree", num_hash=?executed.block.num_hash(), "Attempting to remove block walking back from the head"); - if let Some((removed, _)) = self.remove_by_hash(executed.block.hash()) { - debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed block walking back from the head"); + current_block = executed.recovered_block().parent_hash(); + if executed.recovered_block().number() <= upper_bound { + debug!(target: "engine::tree", num_hash=?executed.recovered_block().num_hash(), "Attempting to remove block walking back from the head"); + if let Some((removed, _)) = self.remove_by_hash(executed.recovered_block().hash()) { + debug!(target: "engine::tree", num_hash=?removed.recovered_block().num_hash(), "Removed block walking back from the head"); // finally, move the trie updates - self.persisted_trie_updates - .insert(removed.block.hash(), (removed.block.number(), removed.trie)); + self.persisted_trie_updates.insert( + removed.recovered_block().hash(), + (removed.recovered_block().number(), removed.trie), + ); } } } @@ -297,11 +300,11 @@ impl TreeState { let blocks_to_remove = self .blocks_by_number .range((Bound::Unbounded, Bound::Excluded(finalized_num))) - .flat_map(|(_, blocks)| blocks.iter().map(|b| b.block.hash())) + .flat_map(|(_, blocks)| blocks.iter().map(|b| b.recovered_block().hash())) .collect::>(); for hash in blocks_to_remove { if let Some((removed, _)) = self.remove_by_hash(hash) { - debug!(target: "engine::tree", num_hash=?removed.block.num_hash(), "Removed finalized sidechain block"); + debug!(target: "engine::tree", num_hash=?removed.recovered_block().num_hash(), "Removed finalized sidechain block"); } } @@ -318,17 +321,19 @@ impl TreeState { // re-insert the finalized hash if we removed it if let Some(position) = - blocks_to_remove.iter().position(|b| b.block.hash() == finalized_hash) + blocks_to_remove.iter().position(|b| b.recovered_block().hash() == finalized_hash) { let finalized_block = blocks_to_remove.swap_remove(position); self.blocks_by_number.insert(finalized_num, vec![finalized_block]); } - let mut blocks_to_remove = - blocks_to_remove.into_iter().map(|e| e.block.hash()).collect::>(); + let mut blocks_to_remove = blocks_to_remove + .into_iter() + .map(|e| e.recovered_block().hash()) + .collect::>(); while let Some(block) = blocks_to_remove.pop_front() { if let Some((removed, children)) = self.remove_by_hash(block) { - debug!(target: 
"engine::tree", num_hash=?removed.block.num_hash(), "Removed finalized sidechain child block"); + debug!(target: "engine::tree", num_hash=?removed.recovered_block().num_hash(), "Removed finalized sidechain child block"); blocks_to_remove.extend(children); } } @@ -900,11 +905,11 @@ where return Ok(None) }; - let new_head_number = new_head_block.block.number(); + let new_head_number = new_head_block.recovered_block().number(); let mut current_canonical_number = self.state.tree_state.current_canonical_head.number; let mut new_chain = vec![new_head_block.clone()]; - let mut current_hash = new_head_block.block.parent_hash(); + let mut current_hash = new_head_block.recovered_block().parent_hash(); let mut current_number = new_head_number - 1; // Walk back the new chain until we reach a block we know about @@ -913,7 +918,7 @@ where // that are _above_ the current canonical head. while current_number > current_canonical_number { if let Some(block) = self.executed_block_by_hash(current_hash)? { - current_hash = block.block.parent_hash(); + current_hash = block.recovered_block().parent_hash(); current_number -= 1; new_chain.push(block); } else { @@ -942,7 +947,7 @@ where while current_canonical_number > current_number { if let Some(block) = self.executed_block_by_hash(old_hash)? { old_chain.push(block.clone()); - old_hash = block.block.parent_hash(); + old_hash = block.recovered_block().parent_hash(); current_canonical_number -= 1; } else { // This shouldn't happen as we're walking back the canonical chain @@ -958,7 +963,7 @@ where // a common ancestor (fork block) is reached. while old_hash != current_hash { if let Some(block) = self.executed_block_by_hash(old_hash)? { - old_hash = block.block.parent_hash(); + old_hash = block.recovered_block().parent_hash(); old_chain.push(block); } else { // This shouldn't happen as we're walking back the canonical chain @@ -967,7 +972,7 @@ where } if let Some(block) = self.executed_block_by_hash(current_hash)? 
{ - current_hash = block.block.parent_hash(); + current_hash = block.recovered_block().parent_hash(); new_chain.push(block); } else { // This shouldn't happen as we've already walked this path @@ -1203,7 +1208,7 @@ where if blocks_to_persist.is_empty() { debug!(target: "engine::tree", "Returned empty set of blocks to persist"); } else { - debug!(target: "engine::tree", blocks = ?blocks_to_persist.iter().map(|block| block.block.num_hash()).collect::<Vec<_>>(), "Persisting blocks"); + debug!(target: "engine::tree", blocks = ?blocks_to_persist.iter().map(|block| block.recovered_block().num_hash()).collect::<Vec<_>>(), "Persisting blocks"); let (tx, rx) = oneshot::channel(); let _ = self.persistence.save_blocks(blocks_to_persist, tx); self.persistence_state.start(rx); @@ -1262,9 +1267,9 @@ where FromEngine::Request(request) => { match request { EngineApiRequest::InsertExecutedBlock(block) => { - debug!(target: "engine::tree", block=?block.block().num_hash(), "inserting already executed block"); + debug!(target: "engine::tree", block=?block.recovered_block().num_hash(), "inserting already executed block"); let now = Instant::now(); - let sealed_block = block.block.clone(); + let sealed_block = Arc::new(block.recovered_block().sealed_block().clone()); self.state.tree_state.insert_executed(block); self.metrics.engine.inserted_already_executed_blocks.increment(1); @@ -1544,15 +1549,15 @@ where debug!(target: "engine::tree", ?last_persisted_number, ?canonical_head_number, ?target_number, ?current_hash, "Returning canonical blocks to persist"); while let Some(block) = self.state.tree_state.blocks_by_hash.get(&current_hash) { - if block.block.number() <= last_persisted_number { + if block.recovered_block().number() <= last_persisted_number { break; } - if block.block.number() <= target_number { + if block.recovered_block().number() <= target_number { blocks_to_persist.push(block.clone()); } - current_hash = block.block.parent_hash(); + current_hash = block.recovered_block().parent_hash(); } // reverse the order so that the oldest block comes first @@ -1610,8 +1615,7 @@ where let hashed_state = self.provider.hashed_post_state(execution_output.state()); Ok(Some(ExecutedBlock { - block: Arc::new(block), - senders: Arc::new(senders), + recovered_block: Arc::new(RecoveredBlock::new_sealed(block, senders)), trie: updates.clone(), execution_output: Arc::new(execution_output), hashed_state: Arc::new(hashed_state), @@ -2003,7 +2007,7 @@ where let NewCanonicalChain::Reorg { new, old: _ } = chain_update else { return None }; let BlockNumHash { number: new_num, hash: new_hash } = - new.first().map(|block| block.block.num_hash())?; + new.first().map(|block| block.recovered_block().num_hash())?; match new_num.cmp(&self.persistence_state.last_persisted_block.number) { Ordering::Greater => { @@ -2045,8 +2049,8 @@ where // reinsert any missing reorged blocks if let NewCanonicalChain::Reorg { new, old } = &chain_update { - let new_first = new.first().map(|first| first.block.num_hash()); - let old_first = old.first().map(|first| first.block.num_hash()); + let new_first = new.first().map(|first| first.recovered_block().num_hash()); + let old_first = old.first().map(|first| first.recovered_block().num_hash()); trace!(target: "engine::tree", ?new_first, ?old_first, "Reorg detected, new and old first blocks"); self.update_reorg_metrics(old.len()); @@ -2080,8 +2084,13 @@ where /// This reinserts any blocks in the new chain that do not already exist in the tree fn reinsert_reorged_blocks(&mut self, new_chain: Vec<ExecutedBlock>) { for block in new_chain { -
if self.state.tree_state.executed_block_by_hash(block.block.hash()).is_none() { - trace!(target: "engine::tree", num=?block.block.number(), hash=?block.block.hash(), "Reinserting block into tree state"); + if self + .state + .tree_state + .executed_block_by_hash(block.recovered_block().hash()) + .is_none() + { + trace!(target: "engine::tree", num=?block.recovered_block().number(), hash=?block.recovered_block().hash(), "Reinserting block into tree state"); self.state.tree_state.insert_executed(block); } } @@ -2464,15 +2473,18 @@ where debug!(target: "engine::tree", ?root_elapsed, block=?sealed_block.num_hash(), "Calculated state root"); let executed: ExecutedBlock = ExecutedBlock { - block: sealed_block.clone(), - senders: Arc::new(block.senders().to_vec()), + recovered_block: Arc::new(RecoveredBlock::new_sealed( + sealed_block.as_ref().clone(), + block.senders().to_vec(), + )), execution_output: Arc::new(ExecutionOutcome::from((output, block_number))), hashed_state: Arc::new(hashed_state), trie: Arc::new(trie_output), }; - if self.state.tree_state.canonical_block_hash() == executed.block().parent_hash() { - debug!(target: "engine::tree", pending = ?executed.block().num_hash() ,"updating pending block"); + if self.state.tree_state.canonical_block_hash() == executed.recovered_block().parent_hash() + { + debug!(target: "engine::tree", pending = ?executed.recovered_block().num_hash() ,"updating pending block"); // if the parent is the canonical head, we can insert the block as the pending block self.canonical_in_memory_state.set_pending_block(executed.clone()); } @@ -2988,7 +3000,7 @@ mod tests { let mut parent_hash = B256::ZERO; for block in &blocks { - let sealed_block = block.block(); + let sealed_block = block.recovered_block(); let hash = sealed_block.hash(); let number = sealed_block.number; blocks_by_hash.insert(hash, block.clone()); @@ -3002,7 +3014,7 @@ mod tests { self.tree.state.tree_state = TreeState { blocks_by_hash, blocks_by_number, - current_canonical_head: blocks.last().unwrap().block().num_hash(), + current_canonical_head: blocks.last().unwrap().recovered_block().num_hash(), parent_to_child, persisted_trie_updates: HashMap::default(), }; @@ -3013,12 +3025,11 @@ mod tests { CanonicalInMemoryState::new(state_by_hash, hash_by_number, pending, None, None); self.blocks = blocks.clone(); - self.persist_blocks( - blocks - .into_iter() - .map(|b| RecoveredBlock::new_sealed(b.block().clone(), b.senders().clone())) - .collect(), - ); + + let recovered_blocks = + blocks.iter().map(|b| b.recovered_block().clone()).collect::<Vec<_>>(); + + self.persist_blocks(recovered_blocks); self } @@ -3311,7 +3322,7 @@ mod tests { let test_harness = TestHarness::new(MAINNET.clone()).with_blocks(blocks.clone()); for executed_block in blocks { - let sealed_block = executed_block.block(); + let sealed_block = executed_block.recovered_block(); let expected_state = BlockState::new(executed_block.clone()); @@ -3441,21 +3452,21 @@ mod tests { tree_state.insert_executed(blocks[1].clone()); assert_eq!( - tree_state.parent_to_child.get(&blocks[0].block.hash()), - Some(&HashSet::from_iter([blocks[1].block.hash()])) + tree_state.parent_to_child.get(&blocks[0].recovered_block().hash()), + Some(&HashSet::from_iter([blocks[1].recovered_block().hash()])) ); - assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash())); tree_state.insert_executed(blocks[2].clone()); assert_eq!( -
tree_state.parent_to_child.get(&blocks[1].block.hash()), - Some(&HashSet::from_iter([blocks[2].block.hash()])) + tree_state.parent_to_child.get(&blocks[1].recovered_block().hash()), + Some(&HashSet::from_iter([blocks[2].recovered_block().hash()])) ); - assert!(tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash())); - assert!(!tree_state.parent_to_child.contains_key(&blocks[2].block.hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[2].recovered_block().hash())); } #[tokio::test] @@ -3469,12 +3480,12 @@ mod tests { } assert_eq!(tree_state.blocks_by_hash.len(), 5); - let fork_block_3 = - test_block_builder.get_executed_block_with_number(3, blocks[1].block.hash()); - let fork_block_4 = - test_block_builder.get_executed_block_with_number(4, fork_block_3.block.hash()); - let fork_block_5 = - test_block_builder.get_executed_block_with_number(5, fork_block_4.block.hash()); + let fork_block_3 = test_block_builder + .get_executed_block_with_number(3, blocks[1].recovered_block().hash()); + let fork_block_4 = test_block_builder + .get_executed_block_with_number(4, fork_block_3.recovered_block().hash()); + let fork_block_5 = test_block_builder + .get_executed_block_with_number(5, fork_block_4.recovered_block().hash()); tree_state.insert_executed(fork_block_3.clone()); tree_state.insert_executed(fork_block_4.clone()); @@ -3482,16 +3493,16 @@ mod tests { assert_eq!(tree_state.blocks_by_hash.len(), 8); assert_eq!(tree_state.blocks_by_number[&3].len(), 2); // two blocks at height 3 (original and fork) - assert_eq!(tree_state.parent_to_child[&blocks[1].block.hash()].len(), 2); // block 2 should have two children + assert_eq!(tree_state.parent_to_child[&blocks[1].recovered_block().hash()].len(), 2); // block 2 should have two children // verify that we can insert the same block again without issues tree_state.insert_executed(fork_block_4.clone()); assert_eq!(tree_state.blocks_by_hash.len(), 8); - assert!(tree_state.parent_to_child[&fork_block_3.block.hash()] - .contains(&fork_block_4.block.hash())); - assert!(tree_state.parent_to_child[&fork_block_4.block.hash()] - .contains(&fork_block_5.block.hash())); + assert!(tree_state.parent_to_child[&fork_block_3.recovered_block().hash()] + .contains(&fork_block_4.recovered_block().hash())); + assert!(tree_state.parent_to_child[&fork_block_4.recovered_block().hash()] + .contains(&fork_block_5.recovered_block().hash())); assert_eq!(tree_state.blocks_by_number[&4].len(), 2); assert_eq!(tree_state.blocks_by_number[&5].len(), 2); @@ -3510,40 +3521,40 @@ mod tests { let last = blocks.last().unwrap(); // set the canonical head - tree_state.set_canonical_head(last.block.num_hash()); + tree_state.set_canonical_head(last.recovered_block().num_hash()); // inclusive bound, so we should remove anything up to and including 2 tree_state.remove_until( - BlockNumHash::new(2, blocks[1].block.hash()), + BlockNumHash::new(2, blocks[1].recovered_block().hash()), start_num_hash.hash, - Some(blocks[1].block.num_hash()), + Some(blocks[1].recovered_block().num_hash()), ); - assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].block.hash())); - assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].block.hash())); + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].recovered_block().hash())); + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].recovered_block().hash())); assert!(!tree_state.blocks_by_number.contains_key(&1)); 
assert!(!tree_state.blocks_by_number.contains_key(&2)); - assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].block.hash())); - assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].block.hash())); - assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].block.hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].recovered_block().hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].recovered_block().hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].recovered_block().hash())); assert!(tree_state.blocks_by_number.contains_key(&3)); assert!(tree_state.blocks_by_number.contains_key(&4)); assert!(tree_state.blocks_by_number.contains_key(&5)); - assert!(!tree_state.parent_to_child.contains_key(&blocks[0].block.hash())); - assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); - assert!(tree_state.parent_to_child.contains_key(&blocks[2].block.hash())); - assert!(tree_state.parent_to_child.contains_key(&blocks[3].block.hash())); - assert!(!tree_state.parent_to_child.contains_key(&blocks[4].block.hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[0].recovered_block().hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[2].recovered_block().hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[3].recovered_block().hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[4].recovered_block().hash())); assert_eq!( - tree_state.parent_to_child.get(&blocks[2].block.hash()), - Some(&HashSet::from_iter([blocks[3].block.hash()])) + tree_state.parent_to_child.get(&blocks[2].recovered_block().hash()), + Some(&HashSet::from_iter([blocks[3].recovered_block().hash()])) ); assert_eq!( - tree_state.parent_to_child.get(&blocks[3].block.hash()), - Some(&HashSet::from_iter([blocks[4].block.hash()])) + tree_state.parent_to_child.get(&blocks[3].recovered_block().hash()), + Some(&HashSet::from_iter([blocks[4].recovered_block().hash()])) ); } @@ -3560,40 +3571,40 @@ mod tests { let last = blocks.last().unwrap(); // set the canonical head - tree_state.set_canonical_head(last.block.num_hash()); + tree_state.set_canonical_head(last.recovered_block().num_hash()); // we should still remove everything up to and including 2 tree_state.remove_until( - BlockNumHash::new(2, blocks[1].block.hash()), + BlockNumHash::new(2, blocks[1].recovered_block().hash()), start_num_hash.hash, None, ); - assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].block.hash())); - assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].block.hash())); + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].recovered_block().hash())); + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].recovered_block().hash())); assert!(!tree_state.blocks_by_number.contains_key(&1)); assert!(!tree_state.blocks_by_number.contains_key(&2)); - assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].block.hash())); - assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].block.hash())); - assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].block.hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].recovered_block().hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].recovered_block().hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].recovered_block().hash())); assert!(tree_state.blocks_by_number.contains_key(&3)); 
assert!(tree_state.blocks_by_number.contains_key(&4)); assert!(tree_state.blocks_by_number.contains_key(&5)); - assert!(!tree_state.parent_to_child.contains_key(&blocks[0].block.hash())); - assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); - assert!(tree_state.parent_to_child.contains_key(&blocks[2].block.hash())); - assert!(tree_state.parent_to_child.contains_key(&blocks[3].block.hash())); - assert!(!tree_state.parent_to_child.contains_key(&blocks[4].block.hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[0].recovered_block().hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[2].recovered_block().hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[3].recovered_block().hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[4].recovered_block().hash())); assert_eq!( - tree_state.parent_to_child.get(&blocks[2].block.hash()), - Some(&HashSet::from_iter([blocks[3].block.hash()])) + tree_state.parent_to_child.get(&blocks[2].recovered_block().hash()), + Some(&HashSet::from_iter([blocks[3].recovered_block().hash()])) ); assert_eq!( - tree_state.parent_to_child.get(&blocks[3].block.hash()), - Some(&HashSet::from_iter([blocks[4].block.hash()])) + tree_state.parent_to_child.get(&blocks[3].recovered_block().hash()), + Some(&HashSet::from_iter([blocks[4].recovered_block().hash()])) ); } @@ -3610,40 +3621,40 @@ mod tests { let last = blocks.last().unwrap(); // set the canonical head - tree_state.set_canonical_head(last.block.num_hash()); + tree_state.set_canonical_head(last.recovered_block().num_hash()); // we have no forks so we should still remove anything up to and including 2 tree_state.remove_until( - BlockNumHash::new(2, blocks[1].block.hash()), + BlockNumHash::new(2, blocks[1].recovered_block().hash()), start_num_hash.hash, - Some(blocks[0].block.num_hash()), + Some(blocks[0].recovered_block().num_hash()), ); - assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].block.hash())); - assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].block.hash())); + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[0].recovered_block().hash())); + assert!(!tree_state.blocks_by_hash.contains_key(&blocks[1].recovered_block().hash())); assert!(!tree_state.blocks_by_number.contains_key(&1)); assert!(!tree_state.blocks_by_number.contains_key(&2)); - assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].block.hash())); - assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].block.hash())); - assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].block.hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[2].recovered_block().hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[3].recovered_block().hash())); + assert!(tree_state.blocks_by_hash.contains_key(&blocks[4].recovered_block().hash())); assert!(tree_state.blocks_by_number.contains_key(&3)); assert!(tree_state.blocks_by_number.contains_key(&4)); assert!(tree_state.blocks_by_number.contains_key(&5)); - assert!(!tree_state.parent_to_child.contains_key(&blocks[0].block.hash())); - assert!(!tree_state.parent_to_child.contains_key(&blocks[1].block.hash())); - assert!(tree_state.parent_to_child.contains_key(&blocks[2].block.hash())); - assert!(tree_state.parent_to_child.contains_key(&blocks[3].block.hash())); - assert!(!tree_state.parent_to_child.contains_key(&blocks[4].block.hash())); + 
assert!(!tree_state.parent_to_child.contains_key(&blocks[0].recovered_block().hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[1].recovered_block().hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[2].recovered_block().hash())); + assert!(tree_state.parent_to_child.contains_key(&blocks[3].recovered_block().hash())); + assert!(!tree_state.parent_to_child.contains_key(&blocks[4].recovered_block().hash())); assert_eq!( - tree_state.parent_to_child.get(&blocks[2].block.hash()), - Some(&HashSet::from_iter([blocks[3].block.hash()])) + tree_state.parent_to_child.get(&blocks[2].recovered_block().hash()), + Some(&HashSet::from_iter([blocks[3].recovered_block().hash()])) ); assert_eq!( - tree_state.parent_to_child.get(&blocks[3].block.hash()), - Some(&HashSet::from_iter([blocks[4].block.hash()])) + tree_state.parent_to_child.get(&blocks[3].recovered_block().hash()), + Some(&HashSet::from_iter([blocks[4].recovered_block().hash()])) ); } @@ -3660,40 +3671,44 @@ mod tests { } // set block 3 as the current canonical head - test_harness.tree.state.tree_state.set_canonical_head(blocks[2].block.num_hash()); + test_harness + .tree + .state + .tree_state + .set_canonical_head(blocks[2].recovered_block().num_hash()); // create a fork from block 2 - let fork_block_3 = - test_block_builder.get_executed_block_with_number(3, blocks[1].block.hash()); - let fork_block_4 = - test_block_builder.get_executed_block_with_number(4, fork_block_3.block.hash()); - let fork_block_5 = - test_block_builder.get_executed_block_with_number(5, fork_block_4.block.hash()); + let fork_block_3 = test_block_builder + .get_executed_block_with_number(3, blocks[1].recovered_block().hash()); + let fork_block_4 = test_block_builder + .get_executed_block_with_number(4, fork_block_3.recovered_block().hash()); + let fork_block_5 = test_block_builder + .get_executed_block_with_number(5, fork_block_4.recovered_block().hash()); test_harness.tree.state.tree_state.insert_executed(fork_block_3.clone()); test_harness.tree.state.tree_state.insert_executed(fork_block_4.clone()); test_harness.tree.state.tree_state.insert_executed(fork_block_5.clone()); // normal (non-reorg) case - let result = test_harness.tree.on_new_head(blocks[4].block.hash()).unwrap(); + let result = test_harness.tree.on_new_head(blocks[4].recovered_block().hash()).unwrap(); assert!(matches!(result, Some(NewCanonicalChain::Commit { .. }))); if let Some(NewCanonicalChain::Commit { new }) = result { assert_eq!(new.len(), 2); - assert_eq!(new[0].block.hash(), blocks[3].block.hash()); - assert_eq!(new[1].block.hash(), blocks[4].block.hash()); + assert_eq!(new[0].recovered_block().hash(), blocks[3].recovered_block().hash()); + assert_eq!(new[1].recovered_block().hash(), blocks[4].recovered_block().hash()); } // reorg case - let result = test_harness.tree.on_new_head(fork_block_5.block.hash()).unwrap(); + let result = test_harness.tree.on_new_head(fork_block_5.recovered_block().hash()).unwrap(); assert!(matches!(result, Some(NewCanonicalChain::Reorg { .. 
}))); if let Some(NewCanonicalChain::Reorg { new, old }) = result { assert_eq!(new.len(), 3); - assert_eq!(new[0].block.hash(), fork_block_3.block.hash()); - assert_eq!(new[1].block.hash(), fork_block_4.block.hash()); - assert_eq!(new[2].block.hash(), fork_block_5.block.hash()); + assert_eq!(new[0].recovered_block().hash(), fork_block_3.recovered_block().hash()); + assert_eq!(new[1].recovered_block().hash(), fork_block_4.recovered_block().hash()); + assert_eq!(new[2].recovered_block().hash(), fork_block_5.recovered_block().hash()); assert_eq!(old.len(), 1); - assert_eq!(old[0].block.hash(), blocks[2].block.hash()); + assert_eq!(old[0].recovered_block().hash(), blocks[2].recovered_block().hash()); } } @@ -3712,7 +3727,7 @@ mod tests { } // set last block as the current canonical head - let last_block = blocks.last().unwrap().block.clone(); + let last_block = blocks.last().unwrap().recovered_block().clone(); test_harness.tree.state.tree_state.set_canonical_head(last_block.num_hash()); @@ -3722,8 +3737,7 @@ mod tests { for block in &chain_a { test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { - block: Arc::new(block.clone_sealed_block()), - senders: Arc::new(block.senders().to_vec()), + recovered_block: Arc::new(block.clone()), execution_output: Arc::new(ExecutionOutcome::default()), hashed_state: Arc::new(HashedPostState::default()), trie: Arc::new(TrieUpdates::default()), @@ -3733,8 +3747,7 @@ mod tests { for block in &chain_b { test_harness.tree.state.tree_state.insert_executed(ExecutedBlock { - block: Arc::new(block.clone_sealed_block()), - senders: Arc::new(block.senders().to_vec()), + recovered_block: Arc::new(block.clone()), execution_output: Arc::new(ExecutionOutcome::default()), hashed_state: Arc::new(HashedPostState::default()), trie: Arc::new(TrieUpdates::default()), @@ -3752,12 +3765,12 @@ mod tests { if let Some(NewCanonicalChain::Reorg { new, old }) = result { assert_eq!(new.len(), expected_new.len()); for (index, block) in expected_new.iter().enumerate() { - assert_eq!(new[index].block.hash(), block.hash()); + assert_eq!(new[index].recovered_block().hash(), block.hash()); } assert_eq!(old.len(), chain_a.len()); for (index, block) in chain_a.iter().enumerate() { - assert_eq!(old[index].block.hash(), block.hash()); + assert_eq!(old[index].recovered_block().hash(), block.hash()); } } @@ -3798,12 +3811,12 @@ mod tests { for (i, item) in blocks_to_persist.iter().enumerate().take(expected_blocks_to_persist_length) { - assert_eq!(item.block.number, last_persisted_block_number + i as u64 + 1); + assert_eq!(item.recovered_block().number, last_persisted_block_number + i as u64 + 1); } // make sure only canonical blocks are included let fork_block = test_block_builder.get_executed_block_with_number(4, B256::random()); - let fork_block_hash = fork_block.block.hash(); + let fork_block_hash = fork_block.recovered_block().hash(); test_harness.tree.state.tree_state.insert_executed(fork_block); assert!(test_harness.tree.state.tree_state.block_by_hash(fork_block_hash).is_some()); @@ -3812,12 +3825,11 @@ mod tests { assert_eq!(blocks_to_persist.len(), expected_blocks_to_persist_length); // check that the fork block is not included in the blocks to persist - assert!(!blocks_to_persist.iter().any(|b| b.block.hash() == fork_block_hash)); + assert!(!blocks_to_persist.iter().any(|b| b.recovered_block().hash() == fork_block_hash)); // check that the original block 4 is still included - assert!(blocks_to_persist - .iter() - .any(|b| b.block.number == 4 && b.block.hash() == 
blocks[4].block.hash())); + assert!(blocks_to_persist.iter().any(|b| b.recovered_block().number == 4 && + b.recovered_block().hash() == blocks[4].recovered_block().hash())); } #[tokio::test] @@ -3831,7 +3843,7 @@ mod tests { test_harness = test_harness.with_blocks(blocks); let missing_block = test_block_builder - .generate_random_block(6, test_harness.blocks.last().unwrap().block().hash()); + .generate_random_block(6, test_harness.blocks.last().unwrap().recovered_block().hash()); test_harness.fcu_to(missing_block.hash(), PayloadStatusEnum::Syncing).await; @@ -3855,11 +3867,11 @@ mod tests { test_harness = test_harness.with_blocks(base_chain.clone()); test_harness - .fcu_to(base_chain.last().unwrap().block().hash(), ForkchoiceStatus::Valid) + .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) .await; // extend main chain - let main_chain = test_harness.block_builder.create_fork(base_chain[0].block(), 3); + let main_chain = test_harness.block_builder.create_fork(base_chain[0].recovered_block(), 3); test_harness.insert_chain(main_chain).await; } @@ -3872,7 +3884,7 @@ mod tests { let main_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..5).collect(); test_harness = test_harness.with_blocks(main_chain.clone()); - let fork_chain = test_harness.block_builder.create_fork(main_chain[2].block(), 3); + let fork_chain = test_harness.block_builder.create_fork(main_chain[2].recovered_block(), 3); let fork_chain_last_hash = fork_chain.last().unwrap().hash(); // add fork blocks to the tree @@ -3905,13 +3917,13 @@ mod tests { test_harness = test_harness.with_blocks(base_chain.clone()); test_harness - .fcu_to(base_chain.last().unwrap().block().hash(), ForkchoiceStatus::Valid) + .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) .await; // extend main chain with enough blocks to trigger pipeline run but don't insert them let main_chain = test_harness .block_builder - .create_fork(base_chain[0].block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10); + .create_fork(base_chain[0].recovered_block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10); let main_chain_last_hash = main_chain.last().unwrap().hash(); test_harness.send_fcu(main_chain_last_hash, ForkchoiceStatus::Syncing).await; @@ -3972,14 +3984,14 @@ mod tests { // fcu to the tip of base chain test_harness - .fcu_to(base_chain.last().unwrap().block().hash(), ForkchoiceStatus::Valid) + .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) .await; // create main chain, extension of base chain, with enough blocks to // trigger backfill sync let main_chain = test_harness .block_builder - .create_fork(base_chain[0].block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10); + .create_fork(base_chain[0].recovered_block(), MIN_BLOCKS_FOR_PIPELINE_RUN + 10); let main_chain_last = main_chain.last().unwrap(); let main_chain_last_hash = main_chain_last.hash(); @@ -4099,11 +4111,12 @@ mod tests { // fcu to the tip of base chain test_harness - .fcu_to(base_chain.last().unwrap().block().hash(), ForkchoiceStatus::Valid) + .fcu_to(base_chain.last().unwrap().recovered_block().hash(), ForkchoiceStatus::Valid) .await; // create main chain, extension of base chain - let main_chain = test_harness.block_builder.create_fork(base_chain[0].block(), 10); + let main_chain = + test_harness.block_builder.create_fork(base_chain[0].recovered_block(), 10); // determine target in the middle of main hain let target = main_chain.get(5).unwrap(); let target_hash = target.hash(); @@ -4138,7 +4151,7 @@ mod tests { 
let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); test_harness = test_harness.with_blocks(base_chain.clone()); - let old_head = base_chain.first().unwrap().block(); + let old_head = base_chain.first().unwrap().recovered_block(); // extend base chain let extension_chain = test_harness.block_builder.create_fork(old_head, 5); @@ -4198,7 +4211,7 @@ mod tests { // side chain consisting of two blocks, the last will be inserted first // so that we force it to be buffered let side_chain = - test_harness.block_builder.create_fork(base_chain.last().unwrap().block(), 2); + test_harness.block_builder.create_fork(base_chain.last().unwrap().recovered_block(), 2); // buffer last block of side chain let buffered_block = side_chain.last().unwrap(); @@ -4236,7 +4249,7 @@ mod tests { let base_chain: Vec<_> = test_harness.block_builder.get_executed_blocks(0..1).collect(); test_harness = test_harness.with_blocks(base_chain.clone()); - let old_head = base_chain.first().unwrap().block(); + let old_head = base_chain.first().unwrap().recovered_block(); // extend base chain let extension_chain = test_harness.block_builder.create_fork(old_head, 5); @@ -4300,8 +4313,9 @@ mod tests { test_harness = test_harness.with_blocks(base_chain.clone()); // create a side chain with an invalid block - let side_chain = - test_harness.block_builder.create_fork(base_chain.last().unwrap().block(), 15); + let side_chain = test_harness + .block_builder + .create_fork(base_chain.last().unwrap().recovered_block(), 15); let invalid_index = 9; test_harness.setup_range_insertion_for_invalid_chain(side_chain.clone(), invalid_index); diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index d236eac8649f0..35c612e649ada 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -30,7 +30,8 @@ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_primitives::{ proofs::{self}, - Block, BlockBody, EthereumHardforks, InvalidTransactionError, Receipt, TransactionSigned, + Block, BlockBody, EthereumHardforks, InvalidTransactionError, Receipt, RecoveredBlock, + TransactionSigned, }; use reth_primitives_traits::Block as _; use reth_revm::database::StateProviderDatabase; @@ -478,8 +479,10 @@ where // create the executed block data let executed = ExecutedBlock { - block: sealed_block.clone(), - senders: Arc::new(executed_senders), + recovered_block: Arc::new(RecoveredBlock::new_sealed( + sealed_block.as_ref().clone(), + executed_senders, + )), execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), trie: Arc::new(trie_output), diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 31968197d8185..b008b00d2020a 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -344,7 +344,7 @@ where tokio::select! 
{ payload = built_payloads.select_next_some() => { if let Some(executed_block) = payload.executed_block() { - debug!(target: "reth::cli", block=?executed_block.block().num_hash(), "inserting built payload"); + debug!(target: "reth::cli", block=?executed_block.recovered_block().num_hash(), "inserting built payload"); if let Either::Right(eth_service) = &mut engine_service { eth_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block).into()); } diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index fd2bcffe596b0..fe2aa5bb2e222 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -25,7 +25,8 @@ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_payload_util::{NoopPayloadTransactions, PayloadTransactions}; use reth_primitives::{ - proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, SealedHeader, TxType, + proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, RecoveredBlock, + SealedHeader, TxType, }; use reth_primitives_traits::block::Block as _; use reth_provider::{ @@ -431,8 +432,10 @@ where // create the executed block data let executed: ExecutedBlock = ExecutedBlock { - block: sealed_block.clone(), - senders: Arc::new(info.executed_senders), + recovered_block: Arc::new(RecoveredBlock::new_sealed( + sealed_block.as_ref().clone(), + info.executed_senders, + )), execution_output: Arc::new(execution_outcome), hashed_state: Arc::new(hashed_state), trie: Arc::new(trie_output), diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 707ce2212a813..8b8e486e1de54 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -792,7 +792,7 @@ mod tests { use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; - use reth_primitives::{EthPrimitives, Receipt, SealedBlock, StaticFileSegment}; + use reth_primitives::{EthPrimitives, Receipt, RecoveredBlock, SealedBlock, StaticFileSegment}; use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, @@ -925,8 +925,7 @@ mod tests { ExecutionOutcome { receipts: block_receipts.into(), ..Default::default() }; ExecutedBlock::new( - Arc::new(block.clone()), - Arc::new(senders), + Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)), execution_outcome.into(), Default::default(), Default::default(), @@ -987,10 +986,10 @@ mod tests { if state.anchor().number + 1 == block_number { let mut lowest_memory_block = state.parent_state_chain().last().expect("qed").block(); - let num_hash = lowest_memory_block.block().num_hash(); + let num_hash = lowest_memory_block.recovered_block().num_hash(); let mut execution_output = (*lowest_memory_block.execution_output).clone(); - execution_output.first_block = lowest_memory_block.block().number; + execution_output.first_block = lowest_memory_block.recovered_block().number; lowest_memory_block.execution_output = Arc::new(execution_output); // Push to disk @@ -1055,8 +1054,10 @@ mod tests { first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { new: 
vec![ExecutedBlock::new( - Arc::new(first_in_mem_block.clone()), - Arc::new(in_memory_block_senders), + Arc::new(RecoveredBlock::new_sealed( + first_in_mem_block.clone(), + in_memory_block_senders, + )), Default::default(), Default::default(), Default::default(), @@ -1089,8 +1090,10 @@ mod tests { // Insert the last block into the pending state provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { - block: Arc::new(last_in_mem_block.clone()), - senders: Default::default(), + recovered_block: Arc::new(RecoveredBlock::new_sealed( + last_in_mem_block.clone(), + Default::default(), + )), execution_output: Default::default(), hashed_state: Default::default(), trie: Default::default(), @@ -1145,8 +1148,10 @@ mod tests { first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { new: vec![ExecutedBlock::new( - Arc::new(first_in_mem_block.clone()), - Arc::new(in_memory_block_senders), + Arc::new(RecoveredBlock::new_sealed( + first_in_mem_block.clone(), + in_memory_block_senders, + )), Default::default(), Default::default(), Default::default(), @@ -1197,8 +1202,10 @@ mod tests { // Set the block as pending provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { - block: Arc::new(block.clone()), - senders: Default::default(), + recovered_block: Arc::new(RecoveredBlock::new_sealed( + block.clone(), + block.senders().unwrap(), + )), execution_output: Default::default(), hashed_state: Default::default(), trie: Default::default(), @@ -1278,8 +1285,10 @@ mod tests { first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { new: vec![ExecutedBlock::new( - Arc::new(first_in_mem_block.clone()), - Arc::new(in_memory_block_senders), + Arc::new(RecoveredBlock::new_sealed( + first_in_mem_block.clone(), + in_memory_block_senders, + )), Default::default(), Default::default(), Default::default(), @@ -1841,8 +1850,7 @@ mod tests { .map(|block| { let senders = block.senders().expect("failed to recover senders"); ExecutedBlock::new( - Arc::new(block.clone()), - Arc::new(senders), + Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)), Arc::new(ExecutionOutcome { bundle: BundleState::new( in_memory_state.into_iter().map(|(address, (account, _))| { @@ -1977,8 +1985,10 @@ mod tests { // adding a pending block to state can test pending() and pending_state_by_hash() function let pending_block = database_blocks[database_blocks.len() - 1].clone(); only_database_provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { - block: Arc::new(pending_block.clone()), - senders: Default::default(), + recovered_block: Arc::new(RecoveredBlock::new_sealed( + pending_block.clone(), + Default::default(), + )), execution_output: Default::default(), hashed_state: Default::default(), trie: Default::default(), @@ -2098,8 +2108,10 @@ mod tests { // Set the pending block in memory let pending_block = in_memory_blocks.last().unwrap(); provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { - block: Arc::new(pending_block.clone()), - senders: Default::default(), + recovered_block: Arc::new(RecoveredBlock::new_sealed( + pending_block.clone(), + Default::default(), + )), execution_output: Default::default(), hashed_state: Default::default(), trie: Default::default(), diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 1417be828f8b4..d84152f88e9d4 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ 
b/crates/storage/provider/src/providers/consistent.rs @@ -442,7 +442,7 @@ impl ConsistentProvider { let (start, end) = self.convert_range_bounds(range, || { in_mem_chain .iter() - .map(|b| b.block_ref().block().body().transactions().len() as u64) + .map(|b| b.block_ref().recovered_block().body().transactions().len() as u64) .sum::<u64>() + last_block_body_index.last_tx_num() }); @@ -474,7 +474,8 @@ impl ConsistentProvider { // Iterate from the lowest block to the highest in-memory chain for block_state in in_mem_chain.iter().rev() { - let block_tx_count = block_state.block_ref().block().body().transactions().len(); + let block_tx_count = + block_state.block_ref().recovered_block().body().transactions().len(); let remaining = (tx_range.end() - tx_range.start() + 1) as usize; // If the transaction range start is equal or higher than the next block first @@ -546,7 +547,7 @@ impl ConsistentProvider { // Iterate from the lowest block to the highest for block_state in in_mem_chain.iter().rev() { let executed_block = block_state.block_ref(); - let block = executed_block.block(); + let block = executed_block.recovered_block(); for tx_index in 0..block.body().transactions().len() { match id { @@ -629,7 +630,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( (*block_hash).into(), |db_provider| db_provider.header(block_hash), - |block_state| Ok(Some(block_state.block_ref().block().header().clone())), + |block_state| Ok(Some(block_state.block_ref().recovered_block().clone_header())), ) } @@ -637,7 +638,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( num.into(), |db_provider| db_provider.header_by_number(num), - |block_state| Ok(Some(block_state.block_ref().block().header().clone())), + |block_state| Ok(Some(block_state.block_ref().recovered_block().clone_header())), ) } @@ -679,7 +680,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.headers_range(range), - |block_state, _| Some(block_state.block_ref().block().header().clone()), + |block_state, _| Some(block_state.block_ref().recovered_block().header().clone()), |_| true, ) } @@ -691,7 +692,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( number.into(), |db_provider| db_provider.sealed_header(number), - |block_state| Ok(Some(block_state.block_ref().block().clone_sealed_header())), + |block_state| Ok(Some(block_state.block_ref().recovered_block().clone_sealed_header())), ) } @@ -702,7 +703,7 @@ impl HeaderProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_headers_range(range), - |block_state, _| Some(block_state.block_ref().block().clone_sealed_header()), + |block_state, _| Some(block_state.block_ref().recovered_block().clone_sealed_header()), |_| true, ) } @@ -716,7 +717,7 @@ range, |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), |block_state, predicate| { - let header = block_state.block_ref().block().sealed_header(); + let header = block_state.block_ref().recovered_block().sealed_header(); predicate(header).then(|| header.clone()) }, predicate, ) } @@ -802,7 +803,7 @@ impl BlockReader for ConsistentProvider { self.get_in_memory_or_storage_by_block( hash.into(), |db_provider| db_provider.find_block_by_hash(hash, source), - |block_state|
Ok(Some(block_state.block_ref().block().clone_block())), + |block_state| Ok(Some(block_state.block_ref().recovered_block().clone_block())), ) } BlockSource::Pending => { @@ -815,7 +816,7 @@ impl BlockReader for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.block(id), - |block_state| Ok(Some(block_state.block_ref().block().clone_block())), + |block_state| Ok(Some(block_state.block_ref().recovered_block().clone_block())), ) } @@ -847,7 +848,7 @@ impl BlockReader for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.clone_recovered_block())), + |block_state| Ok(Some(block_state.block().recovered_block().clone())), ) } @@ -859,7 +860,7 @@ impl BlockReader for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.sealed_block_with_senders(id, transaction_kind), - |block_state| Ok(Some(block_state.clone_recovered_block())), + |block_state| Ok(Some(block_state.block().recovered_block().clone())), ) } @@ -867,7 +868,7 @@ impl BlockReader for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.block_range(range), - |block_state, _| Some(block_state.block_ref().block().clone_block()), + |block_state, _| Some(block_state.block_ref().recovered_block().clone_block()), |_| true, ) } @@ -879,7 +880,7 @@ impl BlockReader for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.block_with_senders_range(range), - |block_state, _| Some(block_state.clone_recovered_block()), + |block_state, _| Some(block_state.block().recovered_block().clone()), |_| true, ) } @@ -891,7 +892,7 @@ impl BlockReader for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_block_with_senders_range(range), - |block_state, _| Some(block_state.clone_recovered_block()), + |block_state, _| Some(block_state.block().recovered_block().clone()), |_| true, ) } @@ -913,7 +914,13 @@ impl TransactionsProvider for ConsistentProvider { id.into(), |provider| provider.transaction_by_id(id), |tx_index, _, block_state| { - Ok(block_state.block_ref().block().body().transactions().get(tx_index).cloned()) + Ok(block_state + .block_ref() + .recovered_block() + .body() + .transactions() + .get(tx_index) + .cloned()) }, ) } @@ -926,7 +933,13 @@ impl TransactionsProvider for ConsistentProvider { id.into(), |provider| provider.transaction_by_id_unhashed(id), |tx_index, _, block_state| { - Ok(block_state.block_ref().block().body().transactions().get(tx_index).cloned()) + Ok(block_state + .block_ref() + .recovered_block() + .body() + .transactions() + .get(tx_index) + .cloned()) }, ) } @@ -956,7 +969,7 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.transaction_block(id), - |_, _, block_state| Ok(Some(block_state.block_ref().block().number())), + |_, _, block_state| Ok(Some(block_state.block_ref().recovered_block().number())), ) } @@ -967,7 +980,9 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |provider| provider.transactions_by_block(id), - |block_state| Ok(Some(block_state.block_ref().block().body().transactions().to_vec())), + |block_state| { + Ok(Some(block_state.block_ref().recovered_block().body().transactions().to_vec())) + }, ) } @@ -978,7 
+993,9 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.transactions_by_block_range(range), - |block_state, _| Some(block_state.block_ref().block().body().transactions().to_vec()), + |block_state, _| { + Some(block_state.block_ref().recovered_block().body().transactions().to_vec()) + }, |_| true, ) } @@ -991,7 +1008,8 @@ impl TransactionsProvider for ConsistentProvider { range, |db_provider, db_range| db_provider.transactions_by_tx_range(db_range), |index_range, block_state| { - Ok(block_state.block_ref().block().body().transactions()[index_range].to_vec()) + Ok(block_state.block_ref().recovered_block().body().transactions()[index_range] + .to_vec()) }, ) } @@ -1003,7 +1021,9 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_tx_range( range, |db_provider, db_range| db_provider.senders_by_tx_range(db_range), - |index_range, block_state| Ok(block_state.block_ref().senders[index_range].to_vec()), + |index_range, block_state| { + Ok(block_state.block_ref().recovered_block.senders()[index_range].to_vec()) + }, ) } @@ -1011,7 +1031,9 @@ impl TransactionsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.transaction_sender(id), - |tx_index, _, block_state| Ok(block_state.block_ref().senders.get(tx_index).copied()), + |tx_index, _, block_state| { + Ok(block_state.block_ref().recovered_block.senders().get(tx_index).copied()) + }, ) } } @@ -1032,7 +1054,7 @@ impl ReceiptProvider for ConsistentProvider { fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { for block_state in self.head_block.iter().flat_map(|b| b.chain()) { let executed_block = block_state.block_ref(); - let block = executed_block.block(); + let block = executed_block.recovered_block(); let receipts = block_state.executed_block_receipts(); // assuming 1:1 correspondence between transactions and receipts @@ -1124,7 +1146,9 @@ impl WithdrawalsProvider for ConsistentProvider { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.withdrawals_by_block(id, timestamp), - |block_state| Ok(block_state.block_ref().block().body().withdrawals().cloned()), + |block_state| { + Ok(block_state.block_ref().recovered_block().body().withdrawals().cloned()) + }, ) } } @@ -1139,7 +1163,7 @@ impl OmmersProvider for ConsistentProvider { return Ok(Some(Vec::new())) } - Ok(block_state.block_ref().block().body().ommers().map(|o| o.to_vec())) + Ok(block_state.block_ref().recovered_block().body().ommers().map(|o| o.to_vec())) }, ) } @@ -1167,8 +1191,9 @@ impl BlockBodyIndicesProvider for ConsistentProvider { // Iterate from the lowest block in memory until our target block for state in block_state.chain().collect::<Vec<_>>().into_iter().rev() { - let block_tx_count = state.block_ref().block.body().transactions().len() as u64; - if state.block_ref().block().number() == number { + let block_tx_count = + state.block_ref().recovered_block().body().transactions().len() as u64; + if state.block_ref().recovered_block().number() == number { stored_indices.tx_count = block_tx_count; } else { stored_indices.first_tx_num += block_tx_count; } @@ -1450,7 +1475,7 @@ mod tests { use reth_chain_state::{ExecutedBlock, NewCanonicalChain}; use reth_db::models::AccountBeforeTx; use reth_execution_types::ExecutionOutcome; - use reth_primitives::SealedBlock; + use reth_primitives::{RecoveredBlock, SealedBlock}; use reth_storage_api::{BlockReader, BlockSource, ChangeSetReader}; use
reth_testing_utils::generators::{ self, random_block_range, random_changeset_range, random_eoa_accounts, BlockRangeParams, @@ -1550,8 +1575,10 @@ mod tests { first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { new: vec![ExecutedBlock::new( - Arc::new(first_in_mem_block.clone()), - Arc::new(in_memory_block_senders), + Arc::new(RecoveredBlock::new_sealed( + first_in_mem_block.clone(), + in_memory_block_senders, + )), Default::default(), Default::default(), Default::default(), @@ -1590,8 +1617,10 @@ mod tests { // Insert the last block into the pending state provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { - block: Arc::new(last_in_mem_block.clone()), - senders: Default::default(), + recovered_block: Arc::new(RecoveredBlock::new_sealed( + last_in_mem_block.clone(), + Default::default(), + )), execution_output: Default::default(), hashed_state: Default::default(), trie: Default::default(), @@ -1654,8 +1683,10 @@ mod tests { first_in_mem_block.senders().expect("failed to recover senders"); let chain = NewCanonicalChain::Commit { new: vec![ExecutedBlock::new( - Arc::new(first_in_mem_block.clone()), - Arc::new(in_memory_block_senders), + Arc::new(RecoveredBlock::new_sealed( + first_in_mem_block.clone(), + in_memory_block_senders, + )), Default::default(), Default::default(), Default::default(), @@ -1758,8 +1789,7 @@ mod tests { .map(|block| { let senders = block.senders().expect("failed to recover senders"); ExecutedBlock::new( - Arc::new(block.clone()), - Arc::new(senders), + Arc::new(RecoveredBlock::new_sealed(block.clone(), senders)), Arc::new(ExecutionOutcome { bundle: BundleState::new( in_memory_state.into_iter().map(|(address, (account, _))| { diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index 022c71f81c447..d1cc61600db89 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -143,9 +143,9 @@ where } // NOTE: checked non-empty above - let first_block = blocks.first().unwrap().block(); + let first_block = blocks.first().unwrap().recovered_block(); - let last_block = blocks.last().unwrap().block(); + let last_block = blocks.last().unwrap().recovered_block(); let first_number = first_block.number(); let last_block_number = last_block.number(); @@ -160,11 +160,9 @@ where // * trie updates (cannot naively extend, need helper) // * indices (already done basically) // Insert the blocks - for ExecutedBlock { block, senders, execution_output, hashed_state, trie } in blocks { - let sealed_block = Arc::unwrap_or_clone(block) - .try_with_senders_unchecked(Arc::unwrap_or_clone(senders)) - .unwrap(); - self.database().insert_block(sealed_block, StorageLocation::Both)?; + for ExecutedBlock { recovered_block, execution_output, hashed_state, trie } in blocks { + self.database() + .insert_block(Arc::unwrap_or_clone(recovered_block), StorageLocation::Both)?; // Write state and changesets to the database. // Must be written after blocks because of the receipt lookup. 
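The refactor above is mechanical but wide-reaching: `ExecutedBlock`'s two parallel fields, `block: Arc<SealedBlock>` and `senders: Arc<Vec<...>>`, are merged into a single `recovered_block: Arc<RecoveredBlock<...>>`, and every call site moves from direct field access to accessor methods. The following is a minimal, self-contained sketch of the shape of that change; the types here are simplified stand-ins for illustration only, not the real reth definitions.

```rust
use std::sync::Arc;

// Simplified stand-ins for the reth types touched by this refactor.
#[derive(Clone, Debug)]
struct SealedBlock {
    number: u64,
    hash: [u8; 32],
}

#[derive(Clone, Debug)]
struct RecoveredBlock {
    block: SealedBlock,
    senders: Vec<[u8; 20]>,
}

impl RecoveredBlock {
    // Pairs an already-sealed block with its recovered senders, mirroring
    // the `RecoveredBlock::new_sealed` calls in the diff.
    fn new_sealed(block: SealedBlock, senders: Vec<[u8; 20]>) -> Self {
        Self { block, senders }
    }

    fn number(&self) -> u64 {
        self.block.number
    }
}

// Before: two independently shared fields that had to be kept in sync.
// After: one shared handle that keeps the block and its senders together.
struct ExecutedBlock {
    recovered_block: Arc<RecoveredBlock>,
}

impl ExecutedBlock {
    fn recovered_block(&self) -> &RecoveredBlock {
        &self.recovered_block
    }
}

fn main() {
    let sealed = SealedBlock { number: 1, hash: [0; 32] };
    let executed = ExecutedBlock {
        recovered_block: Arc::new(RecoveredBlock::new_sealed(sealed, vec![[0; 20]])),
    };
    // Call sites change from `executed.block.number()` to the accessor form.
    let rb = executed.recovered_block();
    println!("block {} with {} senders, hash {:?}", rb.number(), rb.senders.len(), rb.block.hash);
}
```

One `Arc` instead of two also means a block and its senders can no longer drift apart behind separate reference counts, which is why the writer code above can simply `Arc::unwrap_or_clone(recovered_block)` and insert it in one step.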
From b10b438765a186317b6fa86cefb59c6590a50f2e Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Thu, 16 Jan 2025 17:04:08 +0100 Subject: [PATCH 087/113] ci: use reusable cargo update workflow (#13824) --- .github/workflows/dependencies.yml | 53 ++++-------------------------- 1 file changed, 6 insertions(+), 47 deletions(-) diff --git a/.github/workflows/dependencies.yml b/.github/workflows/dependencies.yml index 4716486e688bc..3268d8ff695f5 100644 --- a/.github/workflows/dependencies.yml +++ b/.github/workflows/dependencies.yml @@ -9,53 +9,12 @@ on: workflow_dispatch: # Needed so we can run it manually -env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - BRANCH: cargo-update - TITLE: "chore(deps): weekly `cargo update`" - BODY: | - Automation to keep dependencies in `Cargo.lock` current. - -
<details><summary><b>cargo update log</b></summary> - <p> - - ```log - $cargo_update_log - ``` - - </p> - </details> -
+permissions: + contents: write + pull-requests: write jobs: update: - name: Update - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@nightly - - - name: cargo update - # Remove first line that always just says "Updating crates.io index" - run: cargo update --color never 2>&1 | sed '/crates.io index/d' | tee -a cargo_update.log - - - name: craft commit message and PR body - id: msg - run: | - export cargo_update_log="$(cat cargo_update.log)" - - echo "commit_message<<EOF" >> $GITHUB_OUTPUT - printf "$TITLE\n\n$cargo_update_log\n" >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - echo "body<<EOF" >> $GITHUB_OUTPUT - echo "$BODY" | envsubst >> $GITHUB_OUTPUT - echo "EOF" >> $GITHUB_OUTPUT - - - name: Create Pull Request - uses: peter-evans/create-pull-request@v7 - with: - add-paths: ./Cargo.lock - commit-message: ${{ steps.msg.outputs.commit_message }} - title: ${{ env.TITLE }} - body: ${{ steps.msg.outputs.body }} - branch: ${{ env.BRANCH }} + uses: ithacaxyz/ci/.github/workflows/cargo-update-pr.yml@main + secrets: + token: ${{ secrets.GITHUB_TOKEN }} From 9b68cf88c79d6a76ad2c65f726793b52e60f1465 Mon Sep 17 00:00:00 2001 From: youyyytrok Date: Thu, 16 Jan 2025 17:05:38 +0100 Subject: [PATCH 088/113] chore: rm broken link in hooks README.md (#13814) --- crates/static-file/static-file/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/static-file/static-file/README.md b/crates/static-file/static-file/README.md index 3e41a184e61e4..a414ded0c67bf 100644 --- a/crates/static-file/static-file/README.md +++ b/crates/static-file/static-file/README.md @@ -104,7 +104,7 @@ graph TD; ### Glossary In descending order of abstraction hierarchy: -[`StaticFileProducer`](../../static-file/static-file/src/static_file_producer.rs#L25): A `reth` [hook](../../consensus/beacon/src/engine/hooks/static_file.rs) service that when triggered, **copies** finalized data from the database to the latest static file. Upon completion, it updates the internal index at `StaticFileProvider` with the new highest block and transaction on each specific segment. +[`StaticFileProducer`](../../static-file/static-file/src/static_file_producer.rs#L25): A `reth` hook service that when triggered, **copies** finalized data from the database to the latest static file. Upon completion, it updates the internal index at `StaticFileProvider` with the new highest block and transaction on each specific segment. [`StaticFileProvider`](../../storage/provider/src/providers/static_file/manager.rs#L44) A provider similar to `DatabaseProvider`, **managing all existing static_file files** and selecting the optimal one (by range and segment type) to fulfill a request. **A single instance is shared across all components and should be instantiated only once within `ProviderFactory`**. An immutable reference is given every time `ProviderFactory` creates a new `DatabaseProvider`.
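The glossary entry above describes `StaticFileProvider` as a single shared instance: the factory constructs it once and every provider it creates receives a reference to the same object. A rough sketch of that sharing pattern follows; the names are stand-ins for illustration, not the real reth types.

```rust
use std::sync::Arc;

// One manager instance is created up front; every provider the factory
// hands out shares it via a reference-counted pointer, so no second
// manager is ever built.
struct StaticFileManager;

struct ProviderFactory {
    static_files: Arc<StaticFileManager>,
}

struct DatabaseProvider {
    static_files: Arc<StaticFileManager>,
}

impl ProviderFactory {
    fn provider(&self) -> DatabaseProvider {
        // Cloning the `Arc` only bumps a reference count.
        DatabaseProvider { static_files: Arc::clone(&self.static_files) }
    }
}

fn main() {
    let factory = ProviderFactory { static_files: Arc::new(StaticFileManager) };
    let p1 = factory.provider();
    let p2 = factory.provider();
    // Factory plus two providers: three handles to one shared instance.
    assert_eq!(Arc::strong_count(&p1.static_files), 3);
    drop(p2);
    assert_eq!(Arc::strong_count(&p1.static_files), 2);
}
```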
From a90ecd90577986428853c13163feb371bf121495 Mon Sep 17 00:00:00 2001 From: Dhruv Agarwal <91938348+Dhruv-2003@users.noreply.github.com> Date: Thu, 16 Jan 2025 21:54:12 +0530 Subject: [PATCH 089/113] feat(cli): added header request retry in stages run command (#13816) --- crates/cli/commands/src/stage/run.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/crates/cli/commands/src/stage/run.rs b/crates/cli/commands/src/stage/run.rs index 1fb2e2886ce95..254b5fe6483e1 100644 --- a/crates/cli/commands/src/stage/run.rs +++ b/crates/cli/commands/src/stage/run.rs @@ -189,9 +189,15 @@ impl let fetch_client = Arc::new(network.fetch_client().await?); // Use `to` as the tip for the stage - let tip: P::BlockHeader = fetch_client - .get_header(BlockHashOrNumber::Number(self.to)) - .await? + let tip: P::BlockHeader = loop { + match fetch_client.get_header(BlockHashOrNumber::Number(self.to)).await { + Ok(header) => break header, + Err(error) if error.is_retryable() => { + warn!(target: "reth::cli", "Error requesting header: {error}. Retrying...") + } + Err(error) => return Err(error.into()), + } + } .into_data() .ok_or(StageError::MissingSyncGap)?; let (_, rx) = watch::channel(tip.hash_slow()); From 7e972ea23dc31bb1cb97fd50b96f9a30201f0cb2 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 16 Jan 2025 16:45:23 +0000 Subject: [PATCH 090/113] fix(trie): use correct `store_in_db_trie` value for sparse extension nodes (#13826) --- crates/trie/sparse/src/trie.rs | 47 +++++++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 12 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 28cdb423ee726..80b0e31985ca7 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -381,6 +381,7 @@ impl

<P> RevealedSparseTrie<P>
{ // Memoize the hash of a previously blinded node in a new extension // node. hash: Some(*hash), + store_in_db_trie: None, }); self.reveal_node_or_hash(child_path, &ext.child)?; } @@ -602,14 +603,14 @@ impl

<P> RevealedSparseTrie<P>
{ while let Some((mut path, level)) = paths.pop() { match self.nodes.get(&path).unwrap() { SparseNode::Empty | SparseNode::Hash(_) => {} - SparseNode::Leaf { hash, .. } => { + SparseNode::Leaf { key: _, hash } => { if hash.is_some() && !prefix_set.contains(&path) { continue } targets.push(path); } - SparseNode::Extension { key, hash } => { + SparseNode::Extension { key, hash, store_in_db_trie: _ } => { if hash.is_some() && !prefix_set.contains(&path) { continue } @@ -621,7 +622,7 @@ impl

<P> RevealedSparseTrie<P>
{ paths.push((path, level + 1)); } } - SparseNode::Branch { state_mask, hash, .. } => { + SparseNode::Branch { state_mask, hash, store_in_db_trie: _ } => { if hash.is_some() && !prefix_set.contains(&path) { continue } @@ -673,26 +674,37 @@ impl

<P> RevealedSparseTrie<P>
{ (rlp_node, SparseNodeType::Leaf) } } - SparseNode::Extension { key, hash } => { + SparseNode::Extension { key, hash, store_in_db_trie } => { let mut child_path = path.clone(); child_path.extend_from_slice_unchecked(key); - if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - ( - RlpNode::word_rlp(&hash), - SparseNodeType::Extension { store_in_db_trie: true }, - ) + if let Some((hash, store_in_db_trie)) = + hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) + { + (RlpNode::word_rlp(&hash), SparseNodeType::Extension { store_in_db_trie }) } else if buffers.rlp_node_stack.last().is_some_and(|e| e.0 == child_path) { let (_, child, child_node_type) = buffers.rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); let rlp_node = ExtensionNodeRef::new(key, &child).rlp(&mut self.rlp_buf); *hash = rlp_node.as_hash(); + let store_in_db_trie_value = child_node_type.store_in_db_trie(); + + trace!( + target: "trie::sparse", + ?path, + ?child_path, + ?child_node_type, + "Extension node" + ); + + *store_in_db_trie = Some(store_in_db_trie_value); + ( rlp_node, SparseNodeType::Extension { // Inherit the `store_in_db_trie` flag from the child node, which is // always the branch node - store_in_db_trie: child_node_type.store_in_db_trie(), + store_in_db_trie: store_in_db_trie_value, }, ) } else { @@ -1228,7 +1240,14 @@ pub enum SparseNode { key: Nibbles, /// Pre-computed hash of the sparse node. /// Can be reused unless this trie path has been updated. + /// + /// If [`None`], then the value is not known and should be calculated from scratch. hash: Option<B256>, + /// Pre-computed flag indicating whether the trie node should be stored in the database. + /// Can be reused unless this trie path has been updated. + /// + /// If [`None`], then the value is not known and should be calculated from scratch. + store_in_db_trie: Option<bool>, }, /// Sparse branch node with state mask. Branch { /// The bitmask representing children present in the branch node. state_mask: TrieMask, /// Pre-computed hash of the sparse node. /// Can be reused unless this trie path has been updated. + /// + /// If [`None`], then the value is not known and should be calculated from scratch. hash: Option<B256>, /// Pre-computed flag indicating whether the trie node should be stored in the database. /// Can be reused unless this trie path has been updated. + /// + /// If [`None`], then the value is not known and should be calculated from scratch. store_in_db_trie: Option<bool>, }, } @@ -1270,7 +1293,7 @@ impl SparseNode { /// Create new [`SparseNode::Extension`] from the key slice. pub const fn new_ext(key: Nibbles) -> Self { - Self::Extension { key, hash: None } + Self::Extension { key, hash: None, store_in_db_trie: None } } /// Create new [`SparseNode::Leaf`] from leaf key and value.
@@ -2308,7 +2331,7 @@ mod tests { // Check that the root extension node exists assert_matches!( sparse.nodes.get(&Nibbles::default()), - Some(SparseNode::Extension { key, hash: None }) if *key == Nibbles::from_nibbles([0x00]) + Some(SparseNode::Extension { key, hash: None, store_in_db_trie: None }) if *key == Nibbles::from_nibbles([0x00]) ); // Insert the leaf with a different prefix From 8efe441cc0226abb4a374d022a57a1dd2bc7b396 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 17 Jan 2025 04:22:21 +0400 Subject: [PATCH 091/113] feat: use `reth-ethereum-primitives` (#13830) --- Cargo.lock | 25 +- bin/reth-bench/Cargo.toml | 2 +- .../src/commands/debug_cmd/build_block.rs | 11 +- crates/chain-state/src/test_utils.rs | 7 +- crates/engine/util/Cargo.toml | 1 - crates/engine/util/src/reorg.rs | 8 +- crates/ethereum/evm/src/execute.rs | 2 +- crates/ethereum/payload/src/lib.rs | 15 +- crates/ethereum/primitives/Cargo.toml | 26 +- .../ethereum/primitives/src/alloy_compat.rs | 44 + crates/ethereum/primitives/src/lib.rs | 3 + crates/ethereum/primitives/src/receipt.rs | 136 ++ crates/ethereum/primitives/src/transaction.rs | 329 +++- crates/evm/execution-types/Cargo.toml | 5 +- crates/net/downloaders/Cargo.toml | 1 - .../downloaders/src/receipt_file_client.rs | 11 +- crates/net/network/src/transactions/mod.rs | 2 +- .../network/src/transactions/validation.rs | 5 +- .../network/tests/it/big_pooled_txs_req.rs | 1 + crates/net/network/tests/it/txgossip.rs | 3 +- crates/node/core/Cargo.toml | 2 +- crates/optimism/cli/Cargo.toml | 1 - crates/optimism/cli/src/ovm_file_codec.rs | 76 +- crates/optimism/cli/src/receipt_file_codec.rs | 101 +- crates/optimism/consensus/Cargo.toml | 2 +- crates/optimism/evm/Cargo.toml | 1 - crates/optimism/evm/src/l1.rs | 8 +- crates/optimism/node/Cargo.toml | 1 - crates/optimism/payload/Cargo.toml | 1 - crates/optimism/payload/src/builder.rs | 7 +- crates/optimism/primitives/Cargo.toml | 1 + crates/optimism/primitives/src/receipt.rs | 193 ++ crates/optimism/rpc/Cargo.toml | 1 - crates/optimism/storage/Cargo.toml | 1 - crates/optimism/storage/src/lib.rs | 4 +- crates/primitives/Cargo.toml | 86 +- crates/primitives/src/alloy_compat.rs | 189 -- crates/primitives/src/block.rs | 2 +- crates/primitives/src/lib.rs | 3 - crates/primitives/src/proofs.rs | 11 +- crates/primitives/src/receipt.rs | 523 +---- crates/primitives/src/transaction/mod.rs | 1701 +---------------- crates/primitives/src/transaction/pooled.rs | 3 +- crates/primitives/src/transaction/tx_type.rs | 303 +-- .../src/segments/user/transaction_lookup.rs | 3 +- crates/rpc/rpc/src/debug.rs | 6 +- crates/rpc/rpc/src/eth/bundle.rs | 4 +- .../rpc/rpc/src/eth/helpers/pending_block.rs | 2 +- crates/rpc/rpc/src/eth/helpers/types.rs | 3 +- .../stages/src/stages/hashing_storage.rs | 3 +- crates/stages/stages/src/stages/tx_lookup.rs | 7 +- .../codecs/src/alloy/transaction/optimism.rs | 2 +- crates/storage/db-api/Cargo.toml | 1 - crates/storage/db/Cargo.toml | 2 +- crates/storage/provider/Cargo.toml | 1 - .../src/providers/blockchain_provider.rs | 6 +- .../provider/src/providers/database/mod.rs | 7 +- .../storage/provider/src/test_utils/mock.rs | 6 +- crates/transaction-pool/src/test_utils/gen.rs | 10 +- .../transaction-pool/src/test_utils/mock.rs | 4 +- crates/transaction-pool/src/traits.rs | 10 +- .../src/mined_sidecar.rs | 6 +- examples/custom-beacon-withdrawals/Cargo.toml | 4 +- examples/custom-dev-node/src/main.rs | 4 +- examples/db-access/Cargo.toml | 1 + examples/db-access/src/main.rs | 14 +- 
testing/testing-utils/src/generators.rs | 2 +- 67 files changed, 941 insertions(+), 3025 deletions(-) create mode 100644 crates/ethereum/primitives/src/alloy_compat.rs delete mode 100644 crates/primitives/src/alloy_compat.rs diff --git a/Cargo.lock b/Cargo.lock index 09258315cf791..1aa711563b322 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3050,6 +3050,7 @@ dependencies = [ "reth-node-ethereum", "reth-node-types", "reth-primitives", + "reth-primitives-traits", "reth-provider", ] @@ -5453,7 +5454,6 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", - "arbitrary", "derive_more", "op-alloy-consensus", "serde", @@ -7444,9 +7444,13 @@ version = "1.1.5" dependencies = [ "alloy-consensus", "alloy-eips", + "alloy-network", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types", + "alloy-serde", "arbitrary", + "bincode", "derive_more", "modular-bitfield", "once_cell", @@ -7455,7 +7459,9 @@ dependencies = [ "rand 0.8.5", "reth-codecs", "reth-primitives-traits", + "reth-testing-utils", "reth-zstd-compressors", + "revm-primitives", "secp256k1", "serde", "test-fuzz", @@ -8552,42 +8558,27 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-genesis", - "alloy-network", "alloy-primitives", "alloy-rlp", - "alloy-rpc-types", - "alloy-serde", "alloy-trie", "arbitrary", "assert_matches", - "bincode", - "bytes", "c-kzg", "codspeed-criterion-compat", "derive_more", - "modular-bitfield", "once_cell", - "op-alloy-consensus", - "op-alloy-rpc-types", "pprof", "proptest", "proptest-arbitrary-interop", - "rand 0.8.5", "reth-chainspec", "reth-codecs", "reth-ethereum-forks", + "reth-ethereum-primitives", "reth-primitives-traits", "reth-static-file-types", - "reth-testing-utils", "reth-trie-common", - "reth-zstd-compressors", - "revm-primitives", - "rstest", - "secp256k1", "serde", "serde_json", - "serde_with", - "test-fuzz", ] [[package]] diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 0182076130cb4..65ea9fb90149e 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -96,7 +96,7 @@ min-info-logs = ["tracing/release_max_level_info"] min-debug-logs = ["tracing/release_max_level_debug"] min-trace-logs = ["tracing/release_max_level_trace"] -optimism = ["reth-primitives/optimism", "reth-node-core/optimism"] +optimism = ["reth-node-core/optimism"] # no-op feature flag for switching between the `optimism` and default functionality in CI matrices ethereum = [] diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 396fe621451ff..88f2b322bca09 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -24,8 +24,11 @@ use reth_execution_types::ExecutionOutcome; use reth_fs_util as fs; use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes}; use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig, EthExecutorProvider}; -use reth_primitives::{EthPrimitives, SealedBlock, SealedHeader, Transaction, TransactionSigned}; -use reth_primitives_traits::Block as _; +use reth_primitives::{ + transaction::SignedTransactionIntoRecoveredExt, EthPrimitives, SealedBlock, SealedHeader, + Transaction, TransactionSigned, +}; +use reth_primitives_traits::{Block as _, SignedTransaction}; use reth_provider::{ providers::{BlockchainProvider, ProviderNodeTypes}, BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, @@ -163,7 +166,7 @@ impl> Command { for tx_bytes in &self.transactions { debug!(target: 
"reth::cli", bytes = ?tx_bytes, "Decoding transaction"); let transaction = TransactionSigned::decode(&mut &Bytes::from_str(tx_bytes)?[..])? - .into_ecrecovered() + .try_ecrecovered() .ok_or_else(|| eyre::eyre!("failed to recover tx"))?; let encoded_length = match &transaction.transaction { @@ -183,7 +186,7 @@ impl> Command { let encoded_length = pooled.encode_2718_len(); // insert the blob into the store - blob_store.insert(transaction.hash(), sidecar)?; + blob_store.insert(*transaction.tx_hash(), sidecar)?; encoded_length } diff --git a/crates/chain-state/src/test_utils.rs b/crates/chain-state/src/test_utils.rs index 9f8135b2a09aa..1eb68e670efff 100644 --- a/crates/chain-state/src/test_utils.rs +++ b/crates/chain-state/src/test_utils.rs @@ -4,7 +4,9 @@ use crate::{ in_memory::ExecutedBlock, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions, }; -use alloy_consensus::{Header, Transaction as _, TxEip1559, EMPTY_ROOT_HASH}; +use alloy_consensus::{ + Header, SignableTransaction, Transaction as _, TxEip1559, TxReceipt, EMPTY_ROOT_HASH, +}; use alloy_eips::{ eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, INITIAL_BASE_FEE}, eip7685::Requests, @@ -17,6 +19,7 @@ use reth_chainspec::{ChainSpec, EthereumHardfork, MIN_TRANSACTION_GAS}; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ proofs::{calculate_receipt_root, calculate_transaction_root, calculate_withdrawals_root}, + transaction::SignedTransactionIntoRecoveredExt, BlockBody, EthPrimitives, NodePrimitives, Receipt, Receipts, RecoveredBlock, RecoveredTx, SealedBlock, SealedHeader, Transaction, TransactionSigned, }; @@ -131,7 +134,7 @@ impl TestBlockBuilder { cumulative_gas_used: (idx as u64 + 1) * MIN_TRANSACTION_GAS, ..Default::default() } - .with_bloom() + .into_with_bloom() }) .collect::>(); diff --git a/crates/engine/util/Cargo.toml b/crates/engine/util/Cargo.toml index 1d1a524e2b1ab..3e5f333e8d1fb 100644 --- a/crates/engine/util/Cargo.toml +++ b/crates/engine/util/Cargo.toml @@ -52,7 +52,6 @@ tracing.workspace = true [features] optimism = [ - "reth-primitives/optimism", "reth-provider/optimism", "revm-primitives/optimism", ] diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 2136c92a014ee..4f8ed0ce33154 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -21,7 +21,7 @@ use reth_payload_validator::ExecutionPayloadValidator; use reth_primitives::{ proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, Receipt, Receipts, }; -use reth_primitives_traits::block::Block as _; +use reth_primitives_traits::{block::Block as _, SignedTransaction}; use reth_provider::{BlockReader, ExecutionOutcome, ProviderError, StateProviderFactory}; use reth_revm::{ database::StateProviderDatabase, @@ -329,19 +329,19 @@ where let exec_result = match evm.transact() { Ok(result) => result, error @ Err(EVMError::Transaction(_) | EVMError::Header(_)) => { - trace!(target: "engine::stream::reorg", hash = %tx.hash(), ?error, "Error executing transaction from next block"); + trace!(target: "engine::stream::reorg", hash = %tx.tx_hash(), ?error, "Error executing transaction from next block"); continue } // Treat error as fatal Err(error) => { return Err(RethError::Execution(BlockExecutionError::Validation( - BlockValidationError::EVM { hash: tx.hash(), error: Box::new(error) }, + BlockValidationError::EVM { hash: *tx.tx_hash(), error: Box::new(error) }, ))) } }; evm.db_mut().commit(exec_result.state); - if let Some(blob_tx) = tx.transaction.as_eip4844() 
{ + if let Some(blob_tx) = tx.as_eip4844() { sum_blob_gas_used += blob_tx.blob_gas(); versioned_hashes.extend(blob_tx.blob_versioned_hashes.clone()); } diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 99975734ea0e4..8bc3272272b26 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -22,7 +22,7 @@ use reth_evm::{ ConfigureEvm, TxEnvOverrides, }; use reth_primitives::{EthPrimitives, Receipt, RecoveredBlock}; -use reth_primitives_traits::BlockBody; +use reth_primitives_traits::{BlockBody, SignedTransaction}; use reth_revm::db::State; use revm_primitives::{ db::{Database, DatabaseCommit}, diff --git a/crates/ethereum/payload/src/lib.rs b/crates/ethereum/payload/src/lib.rs index 35c612e649ada..7d172784e9d5c 100644 --- a/crates/ethereum/payload/src/lib.rs +++ b/crates/ethereum/payload/src/lib.rs @@ -9,7 +9,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] #![allow(clippy::useless_let_if_seq)] -use alloy_consensus::{Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{Header, Transaction, Typed2718, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{ eip4844::MAX_DATA_GAS_PER_BLOCK, eip6110, eip7685::Requests, eip7840::BlobParams, merge::BEACON_NONCE, @@ -33,7 +33,7 @@ use reth_primitives::{ Block, BlockBody, EthereumHardforks, InvalidTransactionError, Receipt, RecoveredBlock, TransactionSigned, }; -use reth_primitives_traits::Block as _; +use reth_primitives_traits::{Block as _, SignedTransaction}; use reth_revm::database::StateProviderDatabase; use reth_storage_api::StateProviderFactory; use reth_transaction_pool::{ @@ -250,7 +250,7 @@ where // There's only limited amount of blob space available per block, so we need to check if // the EIP-4844 can still fit in the block - if let Some(blob_tx) = tx.transaction.as_eip4844() { + if let Some(blob_tx) = tx.as_eip4844() { let tx_blob_gas = blob_tx.blob_gas(); if sum_blob_gas_used + tx_blob_gas > MAX_DATA_GAS_PER_BLOCK { // we can't fit this _blob_ transaction into the block, so we mark it as @@ -306,7 +306,7 @@ where evm.db_mut().commit(state); // add to the total blob gas used if the transaction successfully executed - if let Some(blob_tx) = tx.transaction.as_eip4844() { + if let Some(blob_tx) = tx.as_eip4844() { let tx_blob_gas = blob_tx.blob_gas(); sum_blob_gas_used += tx_blob_gas; @@ -332,9 +332,8 @@ where })); // update add to total fees - let miner_fee = tx - .effective_tip_per_gas(Some(base_fee)) - .expect("fee is always valid; execution succeeded"); + let miner_fee = + tx.effective_tip_per_gas(base_fee).expect("fee is always valid; execution succeeded"); total_fees += U256::from(miner_fee) * U256::from(gas_used); // append sender and transaction to the respective lists @@ -419,7 +418,7 @@ where // grab the blob sidecars from the executed txs blob_sidecars = pool .get_all_blobs_exact( - executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| tx.hash()).collect(), + executed_txs.iter().filter(|tx| tx.is_eip4844()).map(|tx| *tx.tx_hash()).collect(), ) .map_err(PayloadBuilderError::other)?; diff --git a/crates/ethereum/primitives/Cargo.toml b/crates/ethereum/primitives/Cargo.toml index ede63a4499421..e4f21c95d1888 100644 --- a/crates/ethereum/primitives/Cargo.toml +++ b/crates/ethereum/primitives/Cargo.toml @@ -20,8 +20,12 @@ reth-zstd-compressors = { workspace = true, optional = true } # ethereum alloy-eips.workspace = true alloy-primitives.workspace = true +alloy-network = { workspace = true, optional = true } alloy-consensus = { workspace = true, features = 
["serde"] } +alloy-serde = { workspace = true, optional = true } alloy-rlp.workspace = true +alloy-rpc-types = { workspace = true, optional = true } +revm-primitives.workspace = true # misc arbitrary = { workspace = true, optional = true, features = ["derive"] } @@ -34,16 +38,20 @@ serde.workspace = true [dev-dependencies] arbitrary.workspace = true +bincode.workspace = true proptest.workspace = true proptest-arbitrary-interop.workspace = true rand.workspace = true -reth-codecs.workspace = true +reth-codecs = { workspace = true, features = ["test-utils"] } +reth-testing-utils.workspace = true reth-zstd-compressors.workspace = true -secp256k1.workspace = true +secp256k1 = { workspace = true, features = ["rand"] } test-fuzz.workspace = true +alloy-consensus = { workspace = true, features = ["serde", "arbitrary"] } [features] default = ["std"] +alloy-compat = ["dep:alloy-network", "dep:alloy-serde", "dep:alloy-rpc-types"] std = [ "alloy-consensus/std", "alloy-primitives/std", @@ -54,7 +62,9 @@ std = [ "alloy-eips/std", "derive_more/std", "secp256k1?/std", - "once_cell/std" + "once_cell/std", + "revm-primitives/std", + "alloy-serde?/std" ] reth-codec = [ "std", @@ -70,5 +80,13 @@ arbitrary = [ "alloy-primitives/arbitrary", "reth-codecs?/arbitrary", "reth-primitives-traits/arbitrary", - "alloy-eips/arbitrary" + "alloy-eips/arbitrary", + "revm-primitives/arbitrary", + "alloy-rpc-types?/arbitrary", + "alloy-serde?/arbitrary" +] +serde-bincode-compat = [ + "alloy-consensus/serde-bincode-compat", + "alloy-eips/serde-bincode-compat", + "reth-primitives-traits/serde-bincode-compat" ] diff --git a/crates/ethereum/primitives/src/alloy_compat.rs b/crates/ethereum/primitives/src/alloy_compat.rs new file mode 100644 index 0000000000000..6dba43025f9aa --- /dev/null +++ b/crates/ethereum/primitives/src/alloy_compat.rs @@ -0,0 +1,44 @@ +//! Common conversions from alloy types. 
+ +use crate::{Transaction, TransactionSigned}; +use alloc::string::ToString; +use alloy_consensus::TxEnvelope; +use alloy_network::{AnyRpcTransaction, AnyTxEnvelope}; +use alloy_serde::WithOtherFields; + +impl TryFrom<AnyRpcTransaction> for TransactionSigned { + type Error = alloy_rpc_types::ConversionError; + + fn try_from(tx: AnyRpcTransaction) -> Result<Self, Self::Error> { + use alloy_rpc_types::ConversionError; + + let WithOtherFields { inner: tx, other: _ } = tx; + + #[allow(unreachable_patterns)] + let (transaction, signature, hash) = match tx.inner { + AnyTxEnvelope::Ethereum(TxEnvelope::Legacy(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Legacy(tx), signature, hash) + } + AnyTxEnvelope::Ethereum(TxEnvelope::Eip2930(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip2930(tx), signature, hash) + } + AnyTxEnvelope::Ethereum(TxEnvelope::Eip1559(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip1559(tx), signature, hash) + } + AnyTxEnvelope::Ethereum(TxEnvelope::Eip4844(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip4844(tx.into()), signature, hash) + } + AnyTxEnvelope::Ethereum(TxEnvelope::Eip7702(tx)) => { + let (tx, signature, hash) = tx.into_parts(); + (Transaction::Eip7702(tx), signature, hash) + } + _ => return Err(ConversionError::Custom("unknown transaction type".to_string())), + }; + + Ok(Self { transaction, signature, hash: hash.into() }) + } +} diff --git a/crates/ethereum/primitives/src/lib.rs b/crates/ethereum/primitives/src/lib.rs index 4c0b42a517f28..79ccdf4e9d8af 100644 --- a/crates/ethereum/primitives/src/lib.rs +++ b/crates/ethereum/primitives/src/lib.rs @@ -16,3 +16,6 @@ pub use receipt::*; mod transaction; pub use transaction::*; + +#[cfg(feature = "alloy-compat")] +mod alloy_compat; diff --git a/crates/ethereum/primitives/src/receipt.rs b/crates/ethereum/primitives/src/receipt.rs index 75ae92b447bd5..491a544ef5cf0 100644 --- a/crates/ethereum/primitives/src/receipt.rs +++ b/crates/ethereum/primitives/src/receipt.rs @@ -184,3 +184,139 @@ impl InMemorySize for Receipt { } impl reth_primitives_traits::Receipt for Receipt {} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::eip2718::Encodable2718; + use alloy_primitives::{address, b256, bytes, hex_literal::hex, Bytes}; + use reth_codecs::Compact; + + #[test] + fn test_decode_receipt() { + reth_codecs::test_utils::test_decode::<Receipt>(&hex!( + "c428b52ffd23fc42696156b10200f034792b6a94c3850215c2fef7aea361a0c31b79d9a32652eefc0d4e2e730036061cff7344b6fc6132b50cda0ed810a991ae58ef013150c12b2522533cb3b3a8b19b7786a8b5ff1d3cdc84225e22b02def168c8858df" + )); + } + + // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 + #[test] + fn encode_legacy_receipt() { + let expected = hex!("f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); + + let mut data = 
Vec::with_capacity(expected.length()); + let receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 0x1u64, + logs: vec![Log::new_unchecked( + address!("0000000000000000000000000000000000000011"), + vec![ + b256!("000000000000000000000000000000000000000000000000000000000000dead"), + b256!("000000000000000000000000000000000000000000000000000000000000beef"), + ], + bytes!("0100ff"), + )], + success: false, + }, + logs_bloom: [0; 256].into(), + }; + + receipt.encode(&mut data); + + // check that the rlp length equals the length of the expected rlp + assert_eq!(receipt.length(), expected.len()); + assert_eq!(data, expected); + } + + // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 + #[test] + fn decode_legacy_receipt() { + let data = hex!("f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); + + // EIP658Receipt + let expected = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 0x1u64, + logs: vec![Log::new_unchecked( + address!("0000000000000000000000000000000000000011"), + vec![ + b256!("000000000000000000000000000000000000000000000000000000000000dead"), + b256!("000000000000000000000000000000000000000000000000000000000000beef"), + ], + bytes!("0100ff"), + )], + success: false, + }, + logs_bloom: [0; 256].into(), + }; + + let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); + assert_eq!(receipt, expected); + } + + #[test] + fn gigantic_receipt() { + let receipt = Receipt { + cumulative_gas_used: 16747627, + success: true, + tx_type: TxType::Legacy, + logs: vec![ + Log::new_unchecked( + address!("4bf56695415f725e43c3e04354b604bcfb6dfb6e"), + vec![b256!("c69dc3d7ebff79e41f525be431d5cd3cc08f80eaf0f7819054a726eeb7086eb9")], + Bytes::from(vec![1; 0xffffff]), + ), + Log::new_unchecked( + address!("faca325c86bf9c2d5b413cd7b90b209be92229c2"), + vec![b256!("8cca58667b1e9ffa004720ac99a3d61a138181963b294d270d91c53d36402ae2")], + Bytes::from(vec![1; 0xffffff]), + ), + ], + }; + + let mut data = vec![]; + receipt.to_compact(&mut data); + let (decoded, _) = Receipt::from_compact(&data[..], data.len()); + assert_eq!(decoded, receipt); + } + + #[test] + fn test_encode_2718_length() { + let receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Eip1559, + success: true, + cumulative_gas_used: 21000, + logs: vec![], + }, + logs_bloom: Bloom::default(), + }; + + let encoded = receipt.encoded_2718(); + assert_eq!( + encoded.len(), + receipt.encode_2718_len(), + "Encoded length should match the actual encoded data length" + ); + + // Test for legacy receipt as well + let legacy_receipt = ReceiptWithBloom { + receipt: Receipt { + tx_type: TxType::Legacy, + success: true, + cumulative_gas_used: 21000, + logs: vec![], + }, + logs_bloom: Bloom::default(), + }; + + let legacy_encoded = legacy_receipt.encoded_2718(); + assert_eq!( + 
legacy_encoded.len(), + legacy_receipt.encode_2718_len(), + "Encoded length for legacy receipt should match the actual encoded data length" + ); + } +} diff --git a/crates/ethereum/primitives/src/transaction.rs b/crates/ethereum/primitives/src/transaction.rs index b466ca3c3da8c..65cc894fcf866 100644 --- a/crates/ethereum/primitives/src/transaction.rs +++ b/crates/ethereum/primitives/src/transaction.rs @@ -1,7 +1,8 @@ use alloc::vec::Vec; use alloy_consensus::{ - transaction::RlpEcdsaTx, SignableTransaction, Signed, TxEip1559, TxEip2930, TxEip4844, - TxEip7702, TxLegacy, TxType, Typed2718, + transaction::{PooledTransaction, RlpEcdsaTx}, + BlobTransactionSidecar, SignableTransaction, Signed, TxEip1559, TxEip2930, TxEip4844, + TxEip4844WithSidecar, TxEip7702, TxLegacy, TxType, Typed2718, TypedTransaction, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, @@ -18,8 +19,10 @@ use once_cell as _; use once_cell::sync::OnceCell as OnceLock; use reth_primitives_traits::{ crypto::secp256k1::{recover_signer, recover_signer_unchecked}, - InMemorySize, SignedTransaction, + transaction::error::TransactionConversionError, + FillTxEnv, InMemorySize, SignedTransaction, }; +use revm_primitives::{AuthorizationList, TxEnv}; use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] use std::sync::OnceLock; @@ -101,6 +104,17 @@ impl Transaction { Self::Eip7702(_) => TxType::Eip7702, } } + + /// This sets the transaction's nonce. + pub fn set_nonce(&mut self, nonce: u64) { + match self { + Self::Legacy(tx) => tx.nonce = nonce, + Self::Eip2930(tx) => tx.nonce = nonce, + Self::Eip1559(tx) => tx.nonce = nonce, + Self::Eip4844(tx) => tx.nonce = nonce, + Self::Eip7702(tx) => tx.nonce = nonce, + } + } } impl Typed2718 for Transaction { @@ -253,6 +267,18 @@ impl reth_codecs::Compact for Transaction { } } +impl From<TypedTransaction> for Transaction { + fn from(value: TypedTransaction) -> Self { + match value { + TypedTransaction::Legacy(tx) => Self::Legacy(tx), + TypedTransaction::Eip2930(tx) => Self::Eip2930(tx), + TypedTransaction::Eip1559(tx) => Self::Eip1559(tx), + TypedTransaction::Eip4844(tx) => Self::Eip4844(tx.into()), + TypedTransaction::Eip7702(tx) => Self::Eip7702(tx), + } + } +} + /// Signed Ethereum transaction. #[derive(Debug, Clone, Eq, Serialize, Deserialize, derive_more::AsRef, derive_more::Deref)] #[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] @@ -269,6 +295,12 @@ pub struct TransactionSigned { pub transaction: Transaction, } +impl Default for TransactionSigned { + fn default() -> Self { + Self::new_unhashed(Transaction::Legacy(Default::default()), Signature::test_signature()) + } +} + impl TransactionSigned { fn recalculate_hash(&self) -> B256 { keccak256(self.encoded_2718()) @@ -291,12 +323,50 @@ impl PartialEq for TransactionSigned { } } impl TransactionSigned { + /// Creates a new signed transaction from the given transaction, signature and hash. + pub fn new(transaction: Transaction, signature: Signature, hash: B256) -> Self { + Self { hash: hash.into(), signature, transaction } + } + /// Creates a new signed transaction from the given transaction and signature without the hash. /// /// Note: this only calculates the hash on the first [`TransactionSigned::hash`] call. pub fn new_unhashed(transaction: Transaction, signature: Signature) -> Self { Self { hash: Default::default(), signature, transaction } } + + /// Converts from an EIP-4844 transaction to a [`PooledTransaction`] with the given sidecar. 
+ /// + /// Returns an `Err` containing the original `TransactionSigned` if the transaction is not + /// EIP-4844. + pub fn try_into_pooled_eip4844( + self, + sidecar: BlobTransactionSidecar, + ) -> Result<PooledTransaction, Self> { + let hash = *self.tx_hash(); + Ok(match self { + // If the transaction is an EIP-4844 transaction... + Self { transaction: Transaction::Eip4844(tx), signature, .. } => { + // Construct a pooled eip488 tx with the provided sidecar. + PooledTransaction::Eip4844(Signed::new_unchecked( + TxEip4844WithSidecar { tx, sidecar }, + signature, + hash, + )) + } + // If the transaction is not EIP-4844, return an error with the original + // transaction. + _ => return Err(self), + }) + } + + /// Returns the [`TxEip4844`] if the transaction is an EIP-4844 transaction. + pub const fn as_eip4844(&self) -> Option<&TxEip4844> { + match &self.transaction { + Transaction::Eip4844(tx) => Some(tx), + _ => None, + } + } } impl Typed2718 for TransactionSigned { @@ -561,6 +631,85 @@ impl reth_codecs::Compact for TransactionSigned { } } +impl FillTxEnv for TransactionSigned { + fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { + tx_env.caller = sender; + match self.as_ref() { + Transaction::Legacy(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = tx.chain_id; + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clear(); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip2930(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.gas_price); + tx_env.gas_priority_fee = None; + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip1559(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to; + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = None; + } + Transaction::Eip4844(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = TxKind::Call(tx.to); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); + tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clone_from(&tx.blob_versioned_hashes); + tx_env.max_fee_per_blob_gas = Some(U256::from(tx.max_fee_per_blob_gas)); + tx_env.authorization_list = None; + } + Transaction::Eip7702(tx) => { + tx_env.gas_limit = tx.gas_limit; + tx_env.gas_price = U256::from(tx.max_fee_per_gas); + tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); + tx_env.transact_to = tx.to.into(); + tx_env.value = tx.value; + tx_env.data = tx.input.clone(); + tx_env.chain_id = Some(tx.chain_id); 
tx_env.nonce = Some(tx.nonce); + tx_env.access_list.clone_from(&tx.access_list.0); + tx_env.blob_hashes.clear(); + tx_env.max_fee_per_blob_gas.take(); + tx_env.authorization_list = + Some(AuthorizationList::Signed(tx.authorization_list.clone())); + } + } + } +} + impl SignedTransaction for TransactionSigned { fn tx_hash(&self) -> &TxHash { self.hash.get_or_init(|| self.recalculate_hash()) @@ -582,6 +731,180 @@ impl SignedTransaction for TransactionSigned { } } +impl TryFrom<TransactionSigned> for PooledTransaction { + type Error = TransactionConversionError; + + fn try_from(tx: TransactionSigned) -> Result<Self, Self::Error> { + let hash = *tx.tx_hash(); + match tx { + TransactionSigned { transaction: Transaction::Legacy(tx), signature, .. } => { + Ok(Self::Legacy(Signed::new_unchecked(tx, signature, hash))) + } + TransactionSigned { transaction: Transaction::Eip2930(tx), signature, .. } => { + Ok(Self::Eip2930(Signed::new_unchecked(tx, signature, hash))) + } + TransactionSigned { transaction: Transaction::Eip1559(tx), signature, .. } => { + Ok(Self::Eip1559(Signed::new_unchecked(tx, signature, hash))) + } + TransactionSigned { transaction: Transaction::Eip7702(tx), signature, .. } => { + Ok(Self::Eip7702(Signed::new_unchecked(tx, signature, hash))) + } + // Not supported because missing blob sidecar + TransactionSigned { transaction: Transaction::Eip4844(_), .. } => { + Err(TransactionConversionError::UnsupportedForP2P) + } + } + } +} + +impl<T> From<Signed<T>> for TransactionSigned +where + T: Into<Transaction>, +{ + fn from(value: Signed<T>) -> Self { + let (tx, signature, hash) = value.into_parts(); + Self { transaction: tx.into(), signature, hash: hash.into() } + } +} + +impl From<PooledTransaction> for TransactionSigned { + fn from(value: PooledTransaction) -> Self { + match value { + PooledTransaction::Legacy(tx) => tx.into(), + PooledTransaction::Eip2930(tx) => tx.into(), + PooledTransaction::Eip1559(tx) => tx.into(), + PooledTransaction::Eip7702(tx) => tx.into(), + PooledTransaction::Eip4844(tx) => { + let (tx, signature, hash) = tx.into_parts(); + Signed::new_unchecked(tx.tx, signature, hash).into() + } + } + } +} + +/// Bincode-compatible transaction type serde implementations. +#[cfg(feature = "serde-bincode-compat")] +pub mod serde_bincode_compat { + use alloc::borrow::Cow; + use alloy_consensus::{ + transaction::serde_bincode_compat::{TxEip1559, TxEip2930, TxEip7702, TxLegacy}, + TxEip4844, + }; + use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; + use reth_primitives_traits::{serde_bincode_compat::SerdeBincodeCompat, SignedTransaction}; + use serde::{Deserialize, Serialize}; + + /// Bincode-compatible [`super::Transaction`] serde implementation. 
+ #[derive(Debug, Serialize, Deserialize)] + #[allow(missing_docs)] + pub enum Transaction<'a> { + Legacy(TxLegacy<'a>), + Eip2930(TxEip2930<'a>), + Eip1559(TxEip1559<'a>), + Eip4844(Cow<'a, TxEip4844>), + Eip7702(TxEip7702<'a>), + } + + impl<'a> From<&'a super::Transaction> for Transaction<'a> { + fn from(value: &'a super::Transaction) -> Self { + match value { + super::Transaction::Legacy(tx) => Self::Legacy(TxLegacy::from(tx)), + super::Transaction::Eip2930(tx) => Self::Eip2930(TxEip2930::from(tx)), + super::Transaction::Eip1559(tx) => Self::Eip1559(TxEip1559::from(tx)), + super::Transaction::Eip4844(tx) => Self::Eip4844(Cow::Borrowed(tx)), + super::Transaction::Eip7702(tx) => Self::Eip7702(TxEip7702::from(tx)), + } + } + } + + impl<'a> From<Transaction<'a>> for super::Transaction { + fn from(value: Transaction<'a>) -> Self { + match value { + Transaction::Legacy(tx) => Self::Legacy(tx.into()), + Transaction::Eip2930(tx) => Self::Eip2930(tx.into()), + Transaction::Eip1559(tx) => Self::Eip1559(tx.into()), + Transaction::Eip4844(tx) => Self::Eip4844(tx.into_owned()), + Transaction::Eip7702(tx) => Self::Eip7702(tx.into()), + } + } + } + + /// Bincode-compatible [`super::TransactionSigned`] serde implementation. + #[derive(Debug, Serialize, Deserialize)] + pub struct TransactionSigned<'a> { + hash: TxHash, + signature: Signature, + transaction: Transaction<'a>, + } + + impl<'a> From<&'a super::TransactionSigned> for TransactionSigned<'a> { + fn from(value: &'a super::TransactionSigned) -> Self { + Self { + hash: *value.tx_hash(), + signature: value.signature, + transaction: Transaction::from(&value.transaction), + } + } + } + + impl<'a> From<TransactionSigned<'a>> for super::TransactionSigned { + fn from(value: TransactionSigned<'a>) -> Self { + Self { + hash: value.hash.into(), + signature: value.signature, + transaction: value.transaction.into(), + } + } + } + impl SerdeBincodeCompat for super::TransactionSigned { + type BincodeRepr<'a> = TransactionSigned<'a>; + } + + #[cfg(test)] + mod tests { + use super::super::{serde_bincode_compat, Transaction, TransactionSigned}; + use arbitrary::Arbitrary; + use rand::Rng; + use reth_testing_utils::generators; + use serde::{Deserialize, Serialize}; + + #[test] + fn test_transaction_bincode_roundtrip() { + #[derive(Debug, Serialize, Deserialize)] + struct Data<'a> { + transaction: serde_bincode_compat::Transaction<'a>, + } + + let mut bytes = [0u8; 1024]; + generators::rng().fill(bytes.as_mut_slice()); + let tx = Transaction::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); + let data = Data { transaction: (&tx).into() }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data<'_> = bincode::deserialize(&encoded).unwrap(); + assert_eq!(tx, decoded.transaction.into()); + } + + #[test] + fn test_transaction_signed_bincode_roundtrip() { + #[derive(Debug, Serialize, Deserialize)] + struct Data<'a> { + transaction: serde_bincode_compat::TransactionSigned<'a>, + } + + let mut bytes = [0u8; 1024]; + generators::rng().fill(bytes.as_mut_slice()); + let tx = + TransactionSigned::arbitrary(&mut arbitrary::Unstructured::new(&bytes)).unwrap(); + let data = Data { transaction: (&tx).into() }; + + let encoded = bincode::serialize(&data).unwrap(); + let decoded: Data<'_> = bincode::deserialize(&encoded).unwrap(); + assert_eq!(tx, decoded.transaction.into()); + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/evm/execution-types/Cargo.toml b/crates/evm/execution-types/Cargo.toml index 5a415f0b88926..f7372e7f204ba 100644 --- 
a/crates/evm/execution-types/Cargo.toml +++ b/crates/evm/execution-types/Cargo.toml @@ -36,7 +36,7 @@ reth-ethereum-primitives.workspace = true [features] default = ["std"] -optimism = ["reth-primitives/optimism", "revm/optimism"] +optimism = ["revm/optimism"] serde = [ "dep:serde", "rand/serde", @@ -55,7 +55,8 @@ serde-bincode-compat = [ "reth-primitives-traits/serde-bincode-compat", "serde_with", "alloy-eips/serde-bincode-compat", - "alloy-consensus/serde-bincode-compat", + "alloy-consensus/serde-bincode-compat", + "reth-ethereum-primitives/serde-bincode-compat" ] std = [ "reth-primitives/std", diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index f4cc134ec4841..2945e9d2a57ee 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -73,7 +73,6 @@ tempfile.workspace = true [features] optimism = [ - "reth-primitives/optimism", "reth-db?/optimism", "reth-db-api?/optimism", "reth-provider/optimism" diff --git a/crates/net/downloaders/src/receipt_file_client.rs b/crates/net/downloaders/src/receipt_file_client.rs index 0cdd8bc6234f9..6f53a79cbe5b7 100644 --- a/crates/net/downloaders/src/receipt_file_client.rs +++ b/crates/net/downloaders/src/receipt_file_client.rs @@ -240,14 +240,15 @@ mod test { struct MockReceiptContainer(Option<MockReceipt>); impl TryFrom<MockReceipt> for ReceiptWithBlockNumber { - type Error = &'static str; + type Error = FileClientError; fn try_from(exported_receipt: MockReceipt) -> Result<Self, Self::Error> { let MockReceipt { tx_type, status, cumulative_gas_used, logs, block_number: number } = exported_receipt; #[allow(clippy::needless_update)] let receipt = Receipt { - tx_type: TxType::try_from(tx_type.to_be_bytes()[0])?, + tx_type: TxType::try_from(tx_type.to_be_bytes()[0]) + .map_err(|err| FileClientError::Rlp(err.into(), vec![tx_type]))?, success: status != 0, cumulative_gas_used, logs, @@ -276,11 +277,7 @@ mod test { .0; src.advance(src.len() - buf_slice.len()); - Ok(Some( - receipt - .map(|receipt| receipt.try_into().map_err(FileClientError::from)) - .transpose()?, - )) + Ok(Some(receipt.map(|receipt| receipt.try_into()).transpose()?)) } } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 9cbc8e5b0b067..a1c6ceb5d4f92 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -2181,7 +2181,7 @@ mod tests { }); assert!(transactions .transactions_by_peers - .get(&signed_tx.hash()) + .get(signed_tx.tx_hash()) .unwrap() .contains(handle1.peer_id())); diff --git a/crates/net/network/src/transactions/validation.rs b/crates/net/network/src/transactions/validation.rs index 0b5547e2c31c4..beea8677b0884 100644 --- a/crates/net/network/src/transactions/validation.rs +++ b/crates/net/network/src/transactions/validation.rs @@ -356,7 +356,7 @@ mod test { #[test] fn eth68_announcement_unrecognized_tx_type() { let types = vec![ - TxType::MAX_RESERVED_EIP as u8 + 1, // the first type isn't valid + TxType::Eip7702 as u8 + 1, // the first type isn't valid TxType::Legacy as u8, ]; let sizes = vec![MAX_MESSAGE_SIZE, MAX_MESSAGE_SIZE]; @@ -391,8 +391,7 @@ mod test { #[test] fn eth68_announcement_too_small_tx() { - let types = - vec![TxType::MAX_RESERVED_EIP as u8, TxType::Legacy as u8, TxType::Eip2930 as u8]; + let types = vec![TxType::Eip7702 as u8, TxType::Legacy as u8, TxType::Eip2930 as u8]; let sizes = vec![ 0, // the first length isn't valid 0, // neither is the second diff --git a/crates/net/network/tests/it/big_pooled_txs_req.rs 
b/crates/net/network/tests/it/big_pooled_txs_req.rs index 328229e87e149..d0c0f33684a28 100644 --- a/crates/net/network/tests/it/big_pooled_txs_req.rs +++ b/crates/net/network/tests/it/big_pooled_txs_req.rs @@ -7,6 +7,7 @@ use reth_network::{ use reth_network_api::{NetworkInfo, Peers}; use reth_network_p2p::sync::{NetworkSyncUpdater, SyncState}; use reth_primitives::TransactionSigned; +use reth_primitives_traits::SignedTransaction; use reth_provider::test_utils::MockEthProvider; use reth_transaction_pool::{ test_utils::{testing_pool, MockTransaction}, diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index f20ef6470833e..c04bb3d906ae1 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -9,6 +9,7 @@ use rand::thread_rng; use reth_network::{test_utils::Testnet, NetworkEvent, NetworkEventListenerProvider}; use reth_network_api::{events::PeerEvent, PeersInfo}; use reth_primitives::TransactionSigned; +use reth_primitives_traits::SignedTransaction; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool}; @@ -95,7 +96,7 @@ async fn test_4844_tx_gossip_penalization() { let peer0_reputation_after = peer1.peer_handle().peer_by_id(*peer0.peer_id()).await.unwrap().reputation(); assert_ne!(peer0_reputation_before, peer0_reputation_after); - assert_eq!(received, txs[1].transaction().hash()); + assert_eq!(received, *txs[1].transaction().tx_hash()); // this will return an [`Empty`] error because blob txs are disallowed to be broadcasted assert!(peer1_tx_listener.try_recv().is_err()); diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 7d4a417bed802..57762554bcd7c 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -76,7 +76,7 @@ proptest.workspace = true tokio.workspace = true [features] -optimism = ["reth-primitives/optimism", "reth-db/optimism"] +optimism = ["reth-db/optimism"] # Features for vergen to generate correct env vars jemalloc = ["reth-cli-util/jemalloc"] asm-keccak = ["reth-primitives/asm-keccak", "alloy-primitives/asm-keccak"] diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 3cd2edeecc5ab..243dc303b9823 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -83,7 +83,6 @@ optimism = [ "alloy-consensus", "dep:derive_more", "dep:serde", - "reth-primitives/optimism", "reth-optimism-evm/optimism", "reth-provider/optimism", "reth-node-core/optimism", diff --git a/crates/optimism/cli/src/ovm_file_codec.rs b/crates/optimism/cli/src/ovm_file_codec.rs index b2e1e6c4cdb7d..32e909837ca58 100644 --- a/crates/optimism/cli/src/ovm_file_codec.rs +++ b/crates/optimism/cli/src/ovm_file_codec.rs @@ -1,6 +1,6 @@ use alloy_consensus::{ transaction::{from_eip155_value, RlpEcdsaTx}, - Header, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy, + Header, TxEip1559, TxEip2930, TxEip7702, TxLegacy, }; use alloy_eips::{ eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, @@ -12,9 +12,8 @@ use alloy_primitives::{ }; use alloy_rlp::{Decodable, Error as RlpError, RlpDecodable}; use derive_more::{AsRef, Deref}; -use op_alloy_consensus::TxDeposit; +use op_alloy_consensus::{OpTxType, OpTypedTransaction, TxDeposit}; use reth_downloaders::file_client::FileClientError; -use reth_primitives::transaction::{Transaction, TxType}; use serde::{Deserialize, Serialize}; use tokio_util::codec::Decoder; @@ 
-83,17 +82,7 @@ pub struct TransactionSigned { /// Raw transaction info #[deref] #[as_ref] - pub transaction: Transaction, -} - -impl Default for TransactionSigned { - fn default() -> Self { - Self { - hash: Default::default(), - signature: Signature::test_signature(), - transaction: Default::default(), - } - } + pub transaction: OpTypedTransaction, } impl AsRef<Self> for TransactionSigned { @@ -113,7 +102,10 @@ impl TransactionSigned { /// Create a new signed transaction from a transaction and its signature. /// /// This will also calculate the transaction hash using its encoding. - pub fn from_transaction_and_signature(transaction: Transaction, signature: Signature) -> Self { + pub fn from_transaction_and_signature( + transaction: OpTypedTransaction, + signature: Signature, + ) -> Self { let mut initial_tx = Self { transaction, hash: Default::default(), signature }; initial_tx.hash = initial_tx.recalculate_hash(); initial_tx @@ -190,7 +182,7 @@ impl TransactionSigned { // so decoding methods do not need to manually advance the buffer pub fn decode_rlp_legacy_transaction(data: &mut &[u8]) -> alloy_rlp::Result<Self> { let (transaction, hash, signature) = Self::decode_rlp_legacy_transaction_tuple(data)?; - let signed = Self { transaction: Transaction::Legacy(transaction), hash, signature }; + let signed = Self { transaction: OpTypedTransaction::Legacy(transaction), hash, signature }; Ok(signed) } } @@ -229,55 +221,58 @@ impl Decodable for TransactionSigned { impl Encodable2718 for TransactionSigned { fn type_flag(&self) -> Option<u8> { match self.transaction.tx_type() { - TxType::Legacy => None, + OpTxType::Legacy => None, tx_type => Some(tx_type as u8), } } fn encode_2718_len(&self) -> usize { match &self.transaction { - Transaction::Legacy(legacy_tx) => legacy_tx.eip2718_encoded_length(&self.signature), - Transaction::Eip2930(access_list_tx) => { + OpTypedTransaction::Legacy(legacy_tx) => { + legacy_tx.eip2718_encoded_length(&self.signature) + } + OpTypedTransaction::Eip2930(access_list_tx) => { access_list_tx.eip2718_encoded_length(&self.signature) } - Transaction::Eip1559(dynamic_fee_tx) => { + OpTypedTransaction::Eip1559(dynamic_fee_tx) => { dynamic_fee_tx.eip2718_encoded_length(&self.signature) } - Transaction::Eip4844(blob_tx) => blob_tx.eip2718_encoded_length(&self.signature), - Transaction::Eip7702(set_code_tx) => { + OpTypedTransaction::Eip7702(set_code_tx) => { set_code_tx.eip2718_encoded_length(&self.signature) } - Transaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), + OpTypedTransaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), } } fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { - self.transaction.eip2718_encode(&self.signature, out) + match &self.transaction { + OpTypedTransaction::Legacy(tx) => tx.eip2718_encode(&self.signature, out), + OpTypedTransaction::Eip2930(tx) => tx.eip2718_encode(&self.signature, out), + OpTypedTransaction::Eip1559(tx) => tx.eip2718_encode(&self.signature, out), + OpTypedTransaction::Eip7702(tx) => tx.eip2718_encode(&self.signature, out), + OpTypedTransaction::Deposit(tx) => tx.encode_2718(out), + } } } impl Decodable2718 for TransactionSigned { fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result<Self> { match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? 
{ - TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), - TxType::Eip2930 => { + OpTxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), + OpTxType::Eip2930 => { let (tx, signature, hash) = TxEip2930::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip2930(tx), signature, hash }) + Ok(Self { transaction: OpTypedTransaction::Eip2930(tx), signature, hash }) } - TxType::Eip1559 => { + OpTxType::Eip1559 => { let (tx, signature, hash) = TxEip1559::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip1559(tx), signature, hash }) + Ok(Self { transaction: OpTypedTransaction::Eip1559(tx), signature, hash }) } - TxType::Eip7702 => { + OpTxType::Eip7702 => { let (tx, signature, hash) = TxEip7702::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip7702(tx), signature, hash }) - } - TxType::Eip4844 => { - let (tx, signature, hash) = TxEip4844::rlp_decode_signed(buf)?.into_parts(); - Ok(Self { transaction: Transaction::Eip4844(tx), signature, hash }) + Ok(Self { transaction: OpTypedTransaction::Eip7702(tx), signature, hash }) } - TxType::Deposit => Ok(Self::from_transaction_and_signature( - Transaction::Deposit(TxDeposit::rlp_decode(buf)?), + OpTxType::Deposit => Ok(Self::from_transaction_and_signature( + OpTypedTransaction::Deposit(TxDeposit::rlp_decode(buf)?), TxDeposit::signature(), )), } @@ -291,8 +286,9 @@ impl Decodable2718 for TransactionSigned { #[cfg(test)] mod tests { use crate::ovm_file_codec::TransactionSigned; + use alloy_consensus::Typed2718; use alloy_primitives::{address, hex, TxKind, B256, U256}; - use reth_primitives::transaction::Transaction; + use op_alloy_consensus::OpTypedTransaction; const DEPOSIT_FUNCTION_SELECTOR: [u8; 4] = [0xb6, 0xb5, 0x5f, 0x25]; use alloy_rlp::Decodable; @@ -305,7 +301,7 @@ mod tests { // Verify deposit transaction let deposit_tx = match &deposit_decoded.transaction { - Transaction::Legacy(ref tx) => tx, + OpTypedTransaction::Legacy(ref tx) => tx, _ => panic!("Expected legacy transaction for NFT deposit"), }; @@ -345,7 +341,7 @@ mod tests { assert!(system_decoded.is_legacy()); let system_tx = match &system_decoded.transaction { - Transaction::Legacy(ref tx) => tx, + OpTypedTransaction::Legacy(ref tx) => tx, _ => panic!("Expected Legacy transaction"), }; diff --git a/crates/optimism/cli/src/receipt_file_codec.rs b/crates/optimism/cli/src/receipt_file_codec.rs index e307b10ac07c9..f89b9559060a9 100644 --- a/crates/optimism/cli/src/receipt_file_codec.rs +++ b/crates/optimism/cli/src/receipt_file_codec.rs @@ -5,9 +5,9 @@ use alloy_primitives::{ Address, Bloom, Bytes, B256, }; use alloy_rlp::{Decodable, RlpDecodable}; -use op_alloy_consensus::OpDepositReceipt; +use op_alloy_consensus::{OpDepositReceipt, OpTxType}; use reth_optimism_primitives::OpReceipt; -use reth_primitives::{Log, Receipt, TxType}; +use reth_primitives::{Log, Receipt}; use tokio_util::codec::Decoder; use reth_downloaders::{file_client::FileClientError, receipt_file_client::ReceiptWithBlockNumber}; @@ -92,49 +92,27 @@ pub struct OpGethReceipt { #[rlp(trailing)] struct OpGethReceiptContainer(Option<OpGethReceipt>); -impl TryFrom<OpGethReceipt> for Receipt { - type Error = &'static str; +impl TryFrom<OpGethReceipt> for OpReceipt { + type Error = FileClientError; fn try_from(exported_receipt: OpGethReceipt) -> Result<Self, Self::Error> { let OpGethReceipt { tx_type, status, cumulative_gas_used, logs, .. 
} = exported_receipt; - #[allow(clippy::needless_update)] - Ok(Self { - tx_type: TxType::try_from(tx_type.to_be_bytes()[0])?, - success: status != 0, - cumulative_gas_used, - logs, - ..Default::default() - }) - } -} - -impl TryFrom for OpReceipt { - type Error = &'static str; - - fn try_from(exported_receipt: OpGethReceipt) -> Result { - let Receipt { - tx_type, - success, - cumulative_gas_used, - logs, - deposit_nonce, - deposit_receipt_version, - } = exported_receipt.try_into()?; + let tx_type = OpTxType::try_from(tx_type.to_be_bytes()[0]) + .map_err(|e| FileClientError::Rlp(e.into(), vec![tx_type]))?; let receipt = - alloy_consensus::Receipt { status: success.into(), cumulative_gas_used, logs }; + alloy_consensus::Receipt { status: (status != 0).into(), cumulative_gas_used, logs }; match tx_type { - TxType::Legacy => Ok(Self::Legacy(receipt)), - TxType::Eip2930 => Ok(Self::Eip2930(receipt)), - TxType::Eip1559 => Ok(Self::Eip1559(receipt)), - TxType::Eip7702 => Ok(Self::Eip7702(receipt)), - TxType::Eip4844 => Err("EIP-4844 receipts are not supported for OP"), - TxType::Deposit => Ok(Self::Deposit(OpDepositReceipt { + OpTxType::Legacy => Ok(Self::Legacy(receipt)), + OpTxType::Eip2930 => Ok(Self::Eip2930(receipt)), + OpTxType::Eip1559 => Ok(Self::Eip1559(receipt)), + OpTxType::Eip7702 => Ok(Self::Eip7702(receipt)), + OpTxType::Deposit => Ok(Self::Deposit(OpDepositReceipt { inner: receipt, - deposit_nonce, - deposit_receipt_version, + deposit_nonce: None, + deposit_receipt_version: None, })), } } @@ -142,6 +120,7 @@ impl TryFrom for OpReceipt { #[cfg(test)] pub(crate) mod test { + use alloy_consensus::{Receipt, TxReceipt}; use alloy_primitives::{hex, LogData}; use super::*; @@ -156,12 +135,12 @@ pub(crate) mod test { let receipt = receipt_block_1(); OpGethReceipt { - tx_type: receipt.receipt.tx_type as u8, + tx_type: receipt.receipt.tx_type() as u8, post_state: Bytes::default(), - status: receipt.receipt.success as u64, - cumulative_gas_used: receipt.receipt.cumulative_gas_used, + status: receipt.receipt.status() as u64, + cumulative_gas_used: receipt.receipt.cumulative_gas_used(), bloom: Bloom::from(hex!("00000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000400000000000100000000000000200000000002000000000000001000000000000000000004000000000000000000000000000040000400000100400000000000000100000000000000000000000000000020000000000000000000000000000000000000000000000001000000000000000000000100000000000000000000000000000000000000000000000000000000000000088000000080000000000010000000000000000000000000000800008000120000000000000000000000000000000002000")), - logs: receipt.receipt.logs, + logs: receipt.receipt.logs().to_vec(), tx_hash: B256::from(hex!("5e77a04531c7c107af1882d76cbff9486d0a9aa53701c30888509d4f5f2b003a")), contract_address: Address::from(hex!("0000000000000000000000000000000000000000")), gas_used: 202813, block_hash: B256::from(hex!("bee7192e575af30420cae0c7776304ac196077ee72b048970549e4f08e875453")), block_number: receipt.number, @@ -173,7 +152,7 @@ pub(crate) mod test { } } - pub(crate) fn receipt_block_1() -> ReceiptWithBlockNumber { + pub(crate) fn receipt_block_1() -> ReceiptWithBlockNumber { let log_1 = Log { address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), data: LogData::new( @@ -233,20 +212,16 @@ pub(crate) mod test { .unwrap(), }; - let mut receipt = Receipt { - tx_type: TxType::Legacy, - success: true, + let receipt = OpReceipt::Legacy(Receipt { + status: true.into(), 
cumulative_gas_used: 202813, - ..Default::default() - }; - // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism - // feature must not be brought into scope - receipt.logs = vec![log_1, log_2, log_3]; + logs: vec![log_1, log_2, log_3], + }); ReceiptWithBlockNumber { receipt, number: 1 } } - pub(crate) fn receipt_block_2() -> ReceiptWithBlockNumber { + pub(crate) fn receipt_block_2() -> ReceiptWithBlockNumber { let log_1 = Log { address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), data: LogData::new( @@ -285,20 +260,16 @@ pub(crate) mod test { .unwrap(), }; - let mut receipt = Receipt { - tx_type: TxType::Legacy, - success: true, + let receipt = OpReceipt::Legacy(Receipt { + status: true.into(), cumulative_gas_used: 116237, - ..Default::default() - }; - // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism - // feature must not be brought into scope - receipt.logs = vec![log_1, log_2]; + logs: vec![log_1, log_2], + }); ReceiptWithBlockNumber { receipt, number: 2 } } - pub(crate) fn receipt_block_3() -> ReceiptWithBlockNumber { + pub(crate) fn receipt_block_3() -> ReceiptWithBlockNumber { let log_1 = Log { address: Address::from(hex!("8ce8c13d816fe6daf12d6fd9e4952e1fc88850af")), data: LogData::new( @@ -337,15 +308,11 @@ pub(crate) mod test { .unwrap(), }; - let mut receipt = Receipt { - tx_type: TxType::Legacy, - success: true, + let receipt = OpReceipt::Legacy(Receipt { + status: true.into(), cumulative_gas_used: 116237, - ..Default::default() - }; - // #[allow(clippy::needless_update)] not recognised, ..Default::default() needed so optimism - // feature must not be brought into scope - receipt.logs = vec![log_1, log_2]; + logs: vec![log_1, log_2], + }); ReceiptWithBlockNumber { receipt, number: 3 } } diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 31925620c398d..7f2378a5b267b 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -55,4 +55,4 @@ std = [ "alloy-trie/std", "op-alloy-consensus/std", ] -optimism = ["reth-primitives/optimism", "reth-optimism-primitives/optimism"] +optimism = ["reth-optimism-primitives/optimism"] diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 19b63d9fe0336..ea3fe19bd2d5e 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -78,7 +78,6 @@ std = [ "reth-consensus-common/std", ] optimism = [ - "reth-primitives/optimism", "reth-execution-types/optimism", "reth-optimism-consensus/optimism", "revm/optimism", diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 033d632b505dc..129c4c35d650e 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -312,7 +312,8 @@ mod tests { use alloy_eips::eip2718::Decodable2718; use reth_optimism_chainspec::OP_MAINNET; use reth_optimism_forks::OpHardforks; - use reth_primitives::{Block, BlockBody, TransactionSigned}; + use reth_optimism_primitives::OpTransactionSigned; + use reth_primitives::{Block, BlockBody}; use super::*; @@ -320,10 +321,9 @@ mod tests { fn sanity_l1_block() { use alloy_consensus::Header; use alloy_primitives::{hex_literal::hex, Bytes}; - use reth_primitives::TransactionSigned; let bytes = 
Bytes::from_static(&hex!("7ef9015aa044bae9d41b8380d781187b426c6fe43df5fb2fb57bd4466ef6a701e1f01e015694deaddeaddeaddeaddeaddeaddeaddeaddead000194420000000000000000000000000000000000001580808408f0d18001b90104015d8eb900000000000000000000000000000000000000000000000000000000008057650000000000000000000000000000000000000000000000000000000063d96d10000000000000000000000000000000000000000000000000000000000009f35273d89754a1e0387b89520d989d3be9c37c1f32495a88faf1ea05c61121ab0d1900000000000000000000000000000000000000000000000000000000000000010000000000000000000000002d679b567db6187c0c8323fa982cfb88b74dbcc7000000000000000000000000000000000000000000000000000000000000083400000000000000000000000000000000000000000000000000000000000f4240")); - let l1_info_tx = TransactionSigned::decode_2718(&mut bytes.as_ref()).unwrap(); + let l1_info_tx = OpTransactionSigned::decode_2718(&mut bytes.as_ref()).unwrap(); let mock_block = Block { header: Header::default(), body: BlockBody { transactions: vec![l1_info_tx], ..Default::default() }, @@ -351,7 +351,7 @@ mod tests { // https://optimistic.etherscan.io/getRawTx?tx=0x88501da5d5ca990347c2193be90a07037af1e3820bb40774c8154871c7669150 const TX: [u8; 251] = hex!("7ef8f8a0a539eb753df3b13b7e386e147d45822b67cb908c9ddc5618e3dbaa22ed00850b94deaddeaddeaddeaddeaddeaddeaddeaddead00019442000000000000000000000000000000000000158080830f424080b8a4440a5e2000000558000c5fc50000000000000000000000006605a89f00000000012a10d90000000000000000000000000000000000000000000000000000000af39ac3270000000000000000000000000000000000000000000000000000000d5ea528d24e582fa68786f080069bdbfe06a43f8e67bfd31b8e4d8a8837ba41da9a82a54a0000000000000000000000006887246668a3b87f54deb3b94ba47a6f63f32985"); - let tx = TransactionSigned::decode_2718(&mut TX.as_slice()).unwrap(); + let tx = OpTransactionSigned::decode_2718(&mut TX.as_slice()).unwrap(); let block = Block { body: BlockBody { transactions: vec![tx], ..Default::default() }, ..Default::default() diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 25adb3d108932..8a597b9ae8a7b 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -89,7 +89,6 @@ futures.workspace = true [features] default = ["reth-codec"] optimism = [ - "reth-primitives/optimism", "reth-provider/optimism", "reth-optimism-evm/optimism", "reth-optimism-payload-builder/optimism", diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 69104acd7ce3a..2f7b20af39945 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -55,7 +55,6 @@ sha2.workspace = true [features] optimism = [ - "reth-primitives/optimism", "reth-provider/optimism", "reth-optimism-evm/optimism", "revm/optimism", diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index fe2aa5bb2e222..3f8c72ecea702 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -25,10 +25,9 @@ use reth_payload_builder_primitives::PayloadBuilderError; use reth_payload_primitives::PayloadBuilderAttributes; use reth_payload_util::{NoopPayloadTransactions, PayloadTransactions}; use reth_primitives::{ - proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, RecoveredBlock, - SealedHeader, TxType, + proofs, transaction::SignedTransactionIntoRecoveredExt, Block, BlockBody, SealedHeader, }; -use reth_primitives_traits::block::Block as _; +use reth_primitives_traits::{block::Block as _, RecoveredBlock}; use reth_provider::{ 
HashedPostStateProvider, ProviderError, StateProofProvider, StateProviderFactory, StateRootProvider, @@ -864,7 +863,7 @@ where } // A sequencer's block should never contain blob or deposit transactions from the pool. - if tx.is_eip4844() || tx.tx_type() == TxType::Deposit as u8 { + if tx.is_eip4844() || tx.tx_type() == OpTxType::Deposit { best_txs.mark_invalid(tx.signer(), tx.nonce()); continue } diff --git a/crates/optimism/primitives/Cargo.toml b/crates/optimism/primitives/Cargo.toml index 5e1fd17311a81..6632cc53be93a 100644 --- a/crates/optimism/primitives/Cargo.toml +++ b/crates/optimism/primitives/Cargo.toml @@ -49,6 +49,7 @@ rstest.workspace = true arbitrary.workspace = true secp256k1 = { workspace = true, features = ["rand"] } proptest.workspace = true +rand.workspace = true [features] default = ["std"] diff --git a/crates/optimism/primitives/src/receipt.rs b/crates/optimism/primitives/src/receipt.rs index b235dfe5fc356..74f9a4e49f31b 100644 --- a/crates/optimism/primitives/src/receipt.rs +++ b/crates/optimism/primitives/src/receipt.rs @@ -308,4 +308,197 @@ mod compact { (receipt.into(), buf) } } + + #[cfg(test)] + #[test] + fn test_ensure_backwards_compatibility() { + use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat}; + + assert_eq!(CompactOpReceipt::bitflag_encoded_bytes(), 2); + validate_bitflag_backwards_compat!(CompactOpReceipt<'_>, UnusedBits::NotZero); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_eips::eip2718::Encodable2718; + use alloy_primitives::{address, b256, bytes, hex_literal::hex, Bytes}; + use alloy_rlp::Encodable; + use reth_codecs::Compact; + + #[test] + fn test_decode_receipt() { + reth_codecs::test_utils::test_decode::(&hex!( + "c30328b52ffd23fc426961a00105007eb0042307705a97e503562eacf2b95060cce9de6de68386b6c155b73a9650021a49e2f8baad17f30faff5899d785c4c0873e45bc268bcf07560106424570d11f9a59e8f3db1efa4ceec680123712275f10d92c3411e1caaa11c7c5d591bc11487168e09934a9986848136da1b583babf3a7188e3aed007a1520f1cf4c1ca7d3482c6c28d37c298613c70a76940008816c4c95644579fd08471dc34732fd0f24" + )); + } + + // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 + #[test] + fn encode_legacy_receipt() { + let expected = hex!("f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); + + let mut data = Vec::with_capacity(expected.length()); + let receipt = ReceiptWithBloom { + receipt: OpReceipt::Legacy(Receipt { + status: Eip658Value::Eip658(false), + cumulative_gas_used: 0x1, + logs: vec![Log::new_unchecked( + address!("0000000000000000000000000000000000000011"), + vec![ + b256!("000000000000000000000000000000000000000000000000000000000000dead"), + b256!("000000000000000000000000000000000000000000000000000000000000beef"), + ], + bytes!("0100ff"), + )], + }), + logs_bloom: [0; 256].into(), + }; + + receipt.encode(&mut data); + + // check that the rlp length equals the length of 
the expected rlp + assert_eq!(receipt.length(), expected.len()); + assert_eq!(data, expected); + } + + // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 + #[test] + fn decode_legacy_receipt() { + let data = hex!("f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); + + // EIP658Receipt + let expected = ReceiptWithBloom { + receipt: OpReceipt::Legacy(Receipt { + status: Eip658Value::Eip658(false), + cumulative_gas_used: 0x1, + logs: vec![Log::new_unchecked( + address!("0000000000000000000000000000000000000011"), + vec![ + b256!("000000000000000000000000000000000000000000000000000000000000dead"), + b256!("000000000000000000000000000000000000000000000000000000000000beef"), + ], + bytes!("0100ff"), + )], + }), + logs_bloom: [0; 256].into(), + }; + + let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); + assert_eq!(receipt, expected); + } + + #[test] + fn decode_deposit_receipt_regolith_roundtrip() { + let data = hex!("b901107ef9010c0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf"); + + // Deposit Receipt (post-regolith) + let expected = ReceiptWithBloom { + receipt: OpReceipt::Deposit(OpDepositReceipt { + inner: Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 46913, + logs: vec![], + }, + deposit_nonce: Some(4012991), + deposit_receipt_version: None, + }), + logs_bloom: [0; 256].into(), + }; + + let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); + assert_eq!(receipt, expected); + + let mut buf = Vec::with_capacity(data.len()); + receipt.encode(&mut buf); + assert_eq!(buf, &data[..]); + } + + #[test] + fn decode_deposit_receipt_canyon_roundtrip() { + let data = hex!("b901117ef9010d0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf01"); + + // Deposit Receipt (post-regolith) + let expected = ReceiptWithBloom { + receipt: OpReceipt::Deposit(OpDepositReceipt { + inner: Receipt { + status: Eip658Value::Eip658(true), + 
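                    // Editorial note: post-Regolith deposit receipts trail an RLP
                    // `deposit_nonce`; the Canyon roundtrip test below additionally
                    // expects `deposit_receipt_version: Some(1)`.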
cumulative_gas_used: 46913, + logs: vec![], + }, + deposit_nonce: Some(4012991), + deposit_receipt_version: Some(1), + }), + logs_bloom: [0; 256].into(), + }; + + let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); + assert_eq!(receipt, expected); + + let mut buf = Vec::with_capacity(data.len()); + expected.encode(&mut buf); + assert_eq!(buf, &data[..]); + } + + #[test] + fn gigantic_receipt() { + let receipt = OpReceipt::Legacy(Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 16747627, + logs: vec![ + Log::new_unchecked( + address!("4bf56695415f725e43c3e04354b604bcfb6dfb6e"), + vec![b256!("c69dc3d7ebff79e41f525be431d5cd3cc08f80eaf0f7819054a726eeb7086eb9")], + Bytes::from(vec![1; 0xffffff]), + ), + Log::new_unchecked( + address!("faca325c86bf9c2d5b413cd7b90b209be92229c2"), + vec![b256!("8cca58667b1e9ffa004720ac99a3d61a138181963b294d270d91c53d36402ae2")], + Bytes::from(vec![1; 0xffffff]), + ), + ], + }); + + let mut data = vec![]; + receipt.to_compact(&mut data); + let (decoded, _) = OpReceipt::from_compact(&data[..], data.len()); + assert_eq!(decoded, receipt); + } + + #[test] + fn test_encode_2718_length() { + let receipt = ReceiptWithBloom { + receipt: OpReceipt::Eip1559(Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 21000, + logs: vec![], + }), + logs_bloom: Bloom::default(), + }; + + let encoded = receipt.encoded_2718(); + assert_eq!( + encoded.len(), + receipt.encode_2718_len(), + "Encoded length should match the actual encoded data length" + ); + + // Test for legacy receipt as well + let legacy_receipt = ReceiptWithBloom { + receipt: OpReceipt::Legacy(Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 21000, + logs: vec![], + }), + logs_bloom: Bloom::default(), + }; + + let legacy_encoded = legacy_receipt.encoded_2718(); + assert_eq!( + legacy_encoded.len(), + legacy_receipt.encode_2718_len(), + "Encoded length for legacy receipt should match the actual encoded data length" + ); + } } diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 1bc4071f16d0c..13da62a14b776 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -70,7 +70,6 @@ reth-optimism-chainspec.workspace = true [features] optimism = [ "reth-optimism-evm/optimism", - "reth-primitives/optimism", "reth-provider/optimism", "revm/optimism", "reth-optimism-consensus/optimism", diff --git a/crates/optimism/storage/Cargo.toml b/crates/optimism/storage/Cargo.toml index b72e9c287df34..ae2bf13751a06 100644 --- a/crates/optimism/storage/Cargo.toml +++ b/crates/optimism/storage/Cargo.toml @@ -21,7 +21,6 @@ reth-stages-types.workspace = true [features] optimism = [ - "reth-primitives/optimism", "reth-codecs/op", "reth-db-api/optimism" ] diff --git a/crates/optimism/storage/src/lib.rs b/crates/optimism/storage/src/lib.rs index 0db8f4e20a9d4..3f13133dd3912 100644 --- a/crates/optimism/storage/src/lib.rs +++ b/crates/optimism/storage/src/lib.rs @@ -16,7 +16,7 @@ mod tests { CompactClientVersion, CompactU256, CompactU64, StoredBlockBodyIndices, StoredBlockWithdrawals, }; - use reth_primitives::{Account, Receipt}; + use reth_primitives::Account; use reth_prune_types::{PruneCheckpoint, PruneMode, PruneSegment}; use reth_stages_types::{ AccountHashingCheckpoint, CheckpointBlockRange, EntitiesCheckpoint, ExecutionCheckpoint, @@ -39,7 +39,6 @@ mod tests { assert_eq!(PruneCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(PruneMode::bitflag_encoded_bytes(), 1); assert_eq!(PruneSegment::bitflag_encoded_bytes(), 
1); - assert_eq!(Receipt::bitflag_encoded_bytes(), 2); assert_eq!(StageCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StageUnitCheckpoint::bitflag_encoded_bytes(), 1); assert_eq!(StoredBlockBodyIndices::bitflag_encoded_bytes(), 1); @@ -62,7 +61,6 @@ mod tests { validate_bitflag_backwards_compat!(PruneCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(PruneMode, UnusedBits::Zero); validate_bitflag_backwards_compat!(PruneSegment, UnusedBits::Zero); - validate_bitflag_backwards_compat!(Receipt, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageCheckpoint, UnusedBits::NotZero); validate_bitflag_backwards_compat!(StageUnitCheckpoint, UnusedBits::Zero); validate_bitflag_backwards_compat!(StoredBlockBodyIndices, UnusedBits::Zero); diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 2ccaf4b0d0676..0206035bf117e 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -13,48 +13,24 @@ workspace = true [dependencies] # reth +reth-ethereum-primitives.workspace = true reth-primitives-traits = { workspace = true, features = ["serde"] } reth-ethereum-forks.workspace = true reth-static-file-types.workspace = true -revm-primitives = { workspace = true, features = ["serde"] } -reth-codecs = { workspace = true, optional = true } -reth-zstd-compressors = { workspace = true, optional = true } # ethereum alloy-consensus.workspace = true -alloy-network = { workspace = true, optional = true } alloy-primitives = { workspace = true, features = ["rand", "rlp"] } -alloy-rlp = { workspace = true, features = ["arrayvec"] } -alloy-rpc-types = { workspace = true, optional = true } -alloy-serde = { workspace = true, optional = true } alloy-eips = { workspace = true, features = ["serde"] } alloy-trie = { workspace = true, features = ["serde"] } -# optimism -op-alloy-rpc-types = { workspace = true, optional = true } -op-alloy-consensus = { workspace = true, features = [ - "arbitrary", - "serde", -], optional = true } - # for eip-4844 c-kzg = { workspace = true, features = ["serde"], optional = true } -# crypto -secp256k1 = { workspace = true, features = [ - "global-context", - "recovery", - "rand", -], optional = true } - # misc -bytes.workspace = true derive_more.workspace = true -modular-bitfield = { workspace = true, optional = true } once_cell.workspace = true -rand = { workspace = true, optional = true } serde.workspace = true -serde_with = { workspace = true, optional = true } # arbitrary utils arbitrary = { workspace = true, features = ["derive"], optional = true } @@ -62,30 +38,20 @@ arbitrary = { workspace = true, features = ["derive"], optional = true } [dev-dependencies] # eth reth-chainspec = { workspace = true, features = ["arbitrary"] } -reth-codecs = { workspace = true, features = ["test-utils"] } reth-primitives-traits = { workspace = true, features = ["arbitrary", "test-utils"] } -reth-testing-utils.workspace = true reth-trie-common = { workspace = true, features = ["arbitrary"] } -revm-primitives = { workspace = true, features = ["arbitrary"] } +alloy-rlp.workspace = true alloy-eips = { workspace = true, features = ["arbitrary"] } alloy-genesis.workspace = true arbitrary = { workspace = true, features = ["derive"] } -secp256k1 = { workspace = true, features = [ - "global-context", - "recovery", - "rand", -] } assert_matches.workspace = true -bincode.workspace = true proptest-arbitrary-interop.workspace = true proptest.workspace = true -rand.workspace = true serde_json.workspace = true -test-fuzz.workspace = true 
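For context on the `Receipt` bitflag assertions dropped from `crates/optimism/storage` above: a `Compact`-encoded type writes a fixed number of bitflag bytes ahead of its payload, and these tests pin that width so existing database tables keep decoding after a type moves. A minimal sketch of the pattern, reusing the `reth_codecs` test utilities that appear elsewhere in this patch; `MyValue` is a hypothetical `Compact` type, not part of this change:

```rust
use reth_codecs::{test_utils::UnusedBits, validate_bitflag_backwards_compat};

#[test]
fn my_value_bitflags_are_stable() {
    // One leading byte of flags is encoded today; growing it breaks old tables.
    assert_eq!(MyValue::bitflag_encoded_bytes(), 1);
    // `UnusedBits::Zero` asserts every spare flag bit is still unset, so a
    // future optional field can claim one without a breaking change.
    validate_bitflag_backwards_compat!(MyValue, UnusedBits::Zero);
}
```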
-rstest.workspace = true +reth-codecs.workspace = true criterion.workspace = true pprof = { workspace = true, features = [ @@ -102,50 +68,36 @@ std = [ "alloy-eips/std", "alloy-genesis/std", "alloy-primitives/std", - "alloy-serde?/std", "once_cell/std", - "revm-primitives/std", "serde/std", "alloy-trie/std", - "serde_with?/std", - "alloy-rlp/std", "reth-ethereum-forks/std", - "bytes/std", "derive_more/std", - "reth-zstd-compressors?/std", - "secp256k1?/std", "reth-trie-common/std", - "op-alloy-consensus?/std", - "op-alloy-rpc-types?/std", "serde_json/std", - "reth-chainspec/std" + "reth-chainspec/std", + "reth-ethereum-primitives/std", + "alloy-rlp/std" ] reth-codec = [ - "dep:reth-codecs", - "dep:reth-zstd-compressors", - "dep:modular-bitfield", "std", + "std", "reth-primitives-traits/reth-codec", + "reth-ethereum-primitives/reth-codec" ] -asm-keccak = ["alloy-primitives/asm-keccak", "revm-primitives/asm-keccak"] +asm-keccak = ["alloy-primitives/asm-keccak"] arbitrary = [ "dep:arbitrary", "alloy-eips/arbitrary", - "rand", "reth-codec", "reth-ethereum-forks/arbitrary", "reth-primitives-traits/arbitrary", - "revm-primitives/arbitrary", "reth-chainspec/arbitrary", "alloy-consensus/arbitrary", "alloy-primitives/arbitrary", - "alloy-rpc-types?/arbitrary", - "alloy-serde?/arbitrary", - "op-alloy-consensus?/arbitrary", - "op-alloy-rpc-types?/arbitrary", - "reth-codecs?/arbitrary", "alloy-trie/arbitrary", "reth-trie-common/arbitrary", - "dep:secp256k1" + "reth-ethereum-primitives/arbitrary", + "reth-codecs/arbitrary" ] secp256k1 = [ "reth-primitives-traits/secp256k1", @@ -154,33 +106,23 @@ c-kzg = [ "dep:c-kzg", "alloy-consensus/kzg", "alloy-eips/kzg", - "revm-primitives/c-kzg", -] -optimism = [ - "dep:op-alloy-consensus", - "reth-codecs?/op", - "revm-primitives/optimism", ] alloy-compat = [ - "dep:alloy-rpc-types", - "dep:alloy-serde", - "dep:op-alloy-rpc-types", - "dep:alloy-network", + "reth-ethereum-primitives/alloy-compat" ] test-utils = [ "reth-primitives-traits/test-utils", "reth-chainspec/test-utils", - "reth-codecs?/test-utils", "reth-trie-common/test-utils", "arbitrary", + "reth-codecs/test-utils" ] serde-bincode-compat = [ - "serde_with", "alloy-eips/serde-bincode-compat", "alloy-consensus/serde-bincode-compat", - "op-alloy-consensus?/serde-bincode-compat", "reth-primitives-traits/serde-bincode-compat", "reth-trie-common/serde-bincode-compat", + "reth-ethereum-primitives/serde-bincode-compat" ] [[bench]] diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs deleted file mode 100644 index a269ebfb725ce..0000000000000 --- a/crates/primitives/src/alloy_compat.rs +++ /dev/null @@ -1,189 +0,0 @@ -//! Common conversions from alloy types. 
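The module deleted below converted RPC `AnyRpcTransaction` values into consensus `TransactionSigned`s; per the `alloy-compat = ["reth-ethereum-primitives/alloy-compat"]` rewiring in the Cargo.toml hunk above, that conversion is now expected to come from `reth-ethereum-primitives`. A hedged sketch of a call site after the move, mirroring the deleted `TryFrom` impl rather than documenting a confirmed API:

```rust
use reth_primitives::TransactionSigned; // now re-exported from reth-ethereum-primitives

// `rpc_tx` is a placeholder value; the error type follows the deleted impl below.
fn into_consensus(
    rpc_tx: alloy_network::AnyRpcTransaction,
) -> Result<TransactionSigned, alloy_rpc_types::ConversionError> {
    // Peels the `WithOtherFields` wrapper and matches on the inner envelope,
    // as the removed code did.
    rpc_tx.try_into()
}
```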
- -use crate::{Transaction, TransactionSigned}; -use alloc::string::ToString; -use alloy_consensus::TxEnvelope; -use alloy_network::{AnyRpcTransaction, AnyTxEnvelope}; -use alloy_serde::WithOtherFields; - -use op_alloy_rpc_types as _; - -impl TryFrom for TransactionSigned { - type Error = alloy_rpc_types::ConversionError; - - fn try_from(tx: AnyRpcTransaction) -> Result { - use alloy_rpc_types::ConversionError; - - let WithOtherFields { inner: tx, other: _ } = tx; - - #[allow(unreachable_patterns)] - let (transaction, signature, hash) = match tx.inner { - AnyTxEnvelope::Ethereum(TxEnvelope::Legacy(tx)) => { - let (tx, signature, hash) = tx.into_parts(); - (Transaction::Legacy(tx), signature, hash) - } - AnyTxEnvelope::Ethereum(TxEnvelope::Eip2930(tx)) => { - let (tx, signature, hash) = tx.into_parts(); - (Transaction::Eip2930(tx), signature, hash) - } - AnyTxEnvelope::Ethereum(TxEnvelope::Eip1559(tx)) => { - let (tx, signature, hash) = tx.into_parts(); - (Transaction::Eip1559(tx), signature, hash) - } - AnyTxEnvelope::Ethereum(TxEnvelope::Eip4844(tx)) => { - let (tx, signature, hash) = tx.into_parts(); - (Transaction::Eip4844(tx.into()), signature, hash) - } - AnyTxEnvelope::Ethereum(TxEnvelope::Eip7702(tx)) => { - let (tx, signature, hash) = tx.into_parts(); - (Transaction::Eip7702(tx), signature, hash) - } - #[cfg(feature = "optimism")] - AnyTxEnvelope::Unknown(alloy_network::UnknownTxEnvelope { hash, inner }) => { - use alloy_consensus::{Transaction as _, Typed2718}; - - if inner.ty() == crate::TxType::Deposit { - let fields: op_alloy_rpc_types::OpTransactionFields = inner - .fields - .clone() - .deserialize_into::() - .map_err(|e| ConversionError::Custom(e.to_string()))?; - ( - Transaction::Deposit(op_alloy_consensus::TxDeposit { - source_hash: fields.source_hash.ok_or_else(|| { - ConversionError::Custom("MissingSourceHash".to_string()) - })?, - from: tx.from, - to: revm_primitives::TxKind::from(inner.to()), - mint: fields.mint.filter(|n| *n != 0), - value: inner.value(), - gas_limit: inner.gas_limit(), - is_system_transaction: fields.is_system_tx.unwrap_or(false), - input: inner.input().clone(), - }), - op_alloy_consensus::TxDeposit::signature(), - hash, - ) - } else { - return Err(ConversionError::Custom("unknown transaction type".to_string())) - } - } - _ => return Err(ConversionError::Custom("unknown transaction type".to_string())), - }; - - Ok(Self { transaction, signature, hash: hash.into() }) - } -} - -#[cfg(test)] -#[cfg(feature = "optimism")] -mod tests { - use super::*; - use alloy_primitives::{address, Address, B256, U256}; - use revm_primitives::TxKind; - - #[test] - fn optimism_deposit_tx_conversion_no_mint() { - let input = r#"{ - "blockHash": "0xef664d656f841b5ad6a2b527b963f1eb48b97d7889d742f6cbff6950388e24cd", - "blockNumber": "0x73a78fd", - "depositReceiptVersion": "0x1", - "from": "0x36bde71c97b33cc4729cf772ae268934f7ab70b2", - "gas": "0xc27a8", - "gasPrice": "0x0", - "hash": "0x0bf1845c5d7a82ec92365d5027f7310793d53004f3c86aa80965c67bf7e7dc80", - "input": 
-"0xd764ad0b000100000000000000000000000000000000000000000000000000000001cf5400000000000000000000000099c9fc46f92e8a1c0dec1b1747d010903e884be100000000000000000000000042000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007a12000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e40166a07a0000000000000000000000000994206dfe8de6ec6920ff4d779b0d950605fb53000000000000000000000000d533a949740bb3306d119cc777fa900ba034cd52000000000000000000000000ca74f404e0c7bfa35b13b511097df966d5a65597000000000000000000000000ca74f404e0c7bfa35b13b511097df966d5a65597000000000000000000000000000000000000000000000216614199391dbba2ba00000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" -, "mint": "0x0", - "nonce": "0x74060", - "r": "0x0", - "s": "0x0", - "sourceHash": "0x074adb22f2e6ed9bdd31c52eefc1f050e5db56eb85056450bccd79a6649520b3", - "to": "0x4200000000000000000000000000000000000007", - "transactionIndex": "0x1", - "type": "0x7e", - "v": "0x0", - "value": "0x0" - }"#; - let alloy_tx: WithOtherFields> = - serde_json::from_str(input).expect("failed to deserialize"); - - let TransactionSigned { transaction: reth_tx, .. } = - alloy_tx.try_into().expect("failed to convert"); - if let Transaction::Deposit(deposit_tx) = reth_tx { - assert_eq!( - deposit_tx.source_hash, - "0x074adb22f2e6ed9bdd31c52eefc1f050e5db56eb85056450bccd79a6649520b3" - .parse::() - .unwrap() - ); - assert_eq!( - deposit_tx.from, - "0x36bde71c97b33cc4729cf772ae268934f7ab70b2".parse::

().unwrap() - ); - assert_eq!( - deposit_tx.to, - TxKind::from(address!("4200000000000000000000000000000000000007")) - ); - assert_eq!(deposit_tx.mint, None); - assert_eq!(deposit_tx.value, U256::ZERO); - assert_eq!(deposit_tx.gas_limit, 796584); - assert!(!deposit_tx.is_system_transaction); - } else { - panic!("Expected Deposit transaction"); - } - } - - #[test] - fn optimism_deposit_tx_conversion_mint() { - let input = r#"{ - "blockHash": "0x7194f63b105e93fb1a27c50d23d62e422d4185a68536c55c96284911415699b2", - "blockNumber": "0x73a82cc", - "depositReceiptVersion": "0x1", - "from": "0x36bde71c97b33cc4729cf772ae268934f7ab70b2", - "gas": "0x7812e", - "gasPrice": "0x0", - "hash": "0xf7e83886d3c6864f78e01c453ebcd57020c5795d96089e8f0e0b90a467246ddb", - "input": -"0xd764ad0b000100000000000000000000000000000000000000000000000000000001cf5f00000000000000000000000099c9fc46f92e8a1c0dec1b1747d010903e884be100000000000000000000000042000000000000000000000000000000000000100000000000000000000000000000000000000000000000239c2e16a5ca5900000000000000000000000000000000000000000000000000000000000000030d4000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000e41635f5fd0000000000000000000000002ce910fbba65b454bbaf6a18c952a70f3bcd82990000000000000000000000002ce910fbba65b454bbaf6a18c952a70f3bcd82990000000000000000000000000000000000000000000000239c2e16a5ca590000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" -, "mint": "0x239c2e16a5ca590000", - "nonce": "0x7406b", - "r": "0x0", - "s": "0x0", - "sourceHash": "0xe0358cd2b2686d297c5c859646a613124a874fb9d9c4a2c88636a46a65c06e48", - "to": "0x4200000000000000000000000000000000000007", - "transactionIndex": "0x1", - "type": "0x7e", - "v": "0x0", - "value": "0x239c2e16a5ca590000" - }"#; - let alloy_tx: WithOtherFields> = - serde_json::from_str(input).expect("failed to deserialize"); - - let TransactionSigned { transaction: reth_tx, .. } = - alloy_tx.try_into().expect("failed to convert"); - - if let Transaction::Deposit(deposit_tx) = reth_tx { - assert_eq!( - deposit_tx.source_hash, - "0xe0358cd2b2686d297c5c859646a613124a874fb9d9c4a2c88636a46a65c06e48" - .parse::() - .unwrap() - ); - assert_eq!( - deposit_tx.from, - "0x36bde71c97b33cc4729cf772ae268934f7ab70b2".parse::
<Address>
().unwrap() - ); - assert_eq!( - deposit_tx.to, - TxKind::from(address!("4200000000000000000000000000000000000007")) - ); - assert_eq!(deposit_tx.mint, Some(656890000000000000000)); - assert_eq!(deposit_tx.value, U256::from(0x239c2e16a5ca590000_u128)); - assert_eq!(deposit_tx.gas_limit, 491822); - assert!(!deposit_tx.is_system_transaction); - } else { - panic!("Expected Deposit transaction"); - } - } -} diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 04d59400025ce..f30ce2c70aa0a 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,4 +1,4 @@ -use crate::TransactionSigned; +use reth_ethereum_primitives::TransactionSigned; #[cfg(any(test, feature = "arbitrary"))] pub use reth_primitives_traits::test_utils::{generate_valid_header, valid_header_strategy}; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index b613141242073..3d05fb9bf2370 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -21,8 +21,6 @@ extern crate alloc; -#[cfg(feature = "alloy-compat")] -mod alloy_compat; mod block; pub mod proofs; mod receipt; @@ -69,7 +67,6 @@ pub use c_kzg as kzg; /// Read more: #[cfg(feature = "serde-bincode-compat")] pub mod serde_bincode_compat { - pub use super::transaction::{serde_bincode_compat as transaction, serde_bincode_compat::*}; pub use reth_primitives_traits::serde_bincode_compat::*; } diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index 10ffb437d9a08..1d4a95e730ea7 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -1,6 +1,7 @@ //! Helper function for calculating Merkle proofs and hashes. use crate::Receipt; +use alloy_consensus::TxReceipt; use alloy_eips::eip2718::Encodable2718; use alloy_primitives::B256; pub use alloy_trie::root::ordered_trie_root_with_encoder; @@ -31,20 +32,15 @@ pub fn calculate_receipt_root_no_memo(receipts: &[&Receipt]) -> B256 { #[cfg(test)] mod tests { use super::*; - use crate::Block; + use crate::{Block, TxType}; use alloy_consensus::EMPTY_ROOT_HASH; use alloy_genesis::GenesisAccount; - use alloy_primitives::{b256, hex_literal::hex, Address, U256}; + use alloy_primitives::{b256, bloom, hex_literal::hex, Address, Log, LogData, U256}; use alloy_rlp::Decodable; use reth_chainspec::{HOLESKY, MAINNET, SEPOLIA}; use reth_trie_common::root::{state_root_ref_unhashed, state_root_unhashed}; use std::collections::HashMap; - #[cfg(not(feature = "optimism"))] - use crate::TxType; - #[cfg(not(feature = "optimism"))] - use alloy_primitives::{bloom, Log, LogData}; - #[test] fn check_transaction_root() { let data = 
&hex!("f90262f901f9a092230ce5476ae868e98c7979cfc165a93f8b6ad1922acf2df62e340916efd49da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942adc25665018aa1fe0e6bc666dac8fc2697ff9baa02307107a867056ca33b5087e77c4174f47625e48fb49f1c70ced34890ddd88f3a08151d548273f6683169524b66ca9fe338b9ce42bc3540046c828fd939ae23bcba0c598f69a5674cae9337261b669970e24abc0b46e6d284372a239ec8ccbf20b0ab901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000018502540be40082a8618203e800a00000000000000000000000000000000000000000000000000000000000000000880000000000000000f863f861800a8405f5e10094100000000000000000000000000000000000000080801ba07e09e26678ed4fac08a249ebe8ed680bf9051a5e14ad223e4b2b9d26e0208f37a05f6e3f188e3e6eab7d7d3b6568f5eac7d687b08d307d3154ccd8c87b4630509bc0"); @@ -55,7 +51,6 @@ mod tests { assert_eq!(block.transactions_root, tx_root, "Must be the same"); } - #[cfg(not(feature = "optimism"))] #[test] fn check_receipt_root_optimism() { use alloy_consensus::ReceiptWithBloom; diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 0faf361ac7baf..0d946dc45a75e 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,287 +1,13 @@ use alloc::{vec, vec::Vec}; -use reth_primitives_traits::InMemorySize; - -use alloy_consensus::{ - Eip2718EncodableReceipt, Eip658Value, ReceiptWithBloom, RlpDecodableReceipt, - RlpEncodableReceipt, TxReceipt, Typed2718, -}; -use alloy_primitives::{Bloom, Log, B256}; -use alloy_rlp::{Decodable, Encodable, Header, RlpDecodable, RlpEncodable}; -use bytes::BufMut; +use alloy_primitives::B256; use derive_more::{DerefMut, From, IntoIterator}; use serde::{Deserialize, Serialize}; -use crate::TxType; - /// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). pub use reth_primitives_traits::receipt::gas_spent_by_transactions; /// Receipt containing result of transaction execution. -#[derive( - Clone, Debug, PartialEq, Eq, Default, RlpEncodable, RlpDecodable, Serialize, Deserialize, -)] -#[cfg_attr(any(test, feature = "reth-codec"), derive(reth_codecs::CompactZstd))] -#[cfg_attr(any(test, feature = "reth-codec"), reth_zstd( - compressor = reth_zstd_compressors::RECEIPT_COMPRESSOR, - decompressor = reth_zstd_compressors::RECEIPT_DECOMPRESSOR -))] -#[rlp(trailing)] -pub struct Receipt { - /// Receipt type. - pub tx_type: TxType, - /// If transaction is executed successfully. - /// - /// This is the `statusCode` - pub success: bool, - /// Gas used - pub cumulative_gas_used: u64, - /// Log send from contracts. - pub logs: Vec, - /// Deposit nonce for Optimism deposit transactions - #[cfg(feature = "optimism")] - pub deposit_nonce: Option, - /// Deposit receipt version for Optimism deposit transactions - /// - /// - /// The deposit receipt version was introduced in Canyon to indicate an update to how - /// receipt hashes should be computed when set. The state transition process - /// ensures this is only set for post-Canyon deposit transactions. 
- #[cfg(feature = "optimism")] - pub deposit_receipt_version: Option, -} - -impl Receipt { - /// Calculates [`Log`]'s bloom filter. this is slow operation and [`ReceiptWithBloom`] can - /// be used to cache this value. - pub fn bloom_slow(&self) -> Bloom { - alloy_primitives::logs_bloom(self.logs.iter()) - } - - /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloom`] container - /// type. - pub fn with_bloom(self) -> ReceiptWithBloom { - self.into() - } - - /// Calculates the bloom filter for the receipt and returns the [`ReceiptWithBloom`] - /// container type. - pub fn with_bloom_ref(&self) -> ReceiptWithBloom<&Self> { - self.into() - } - - /// Returns length of RLP-encoded receipt fields with the given [`Bloom`] without an RLP header. - pub fn rlp_encoded_fields_length(&self, bloom: &Bloom) -> usize { - let len = self.success.length() + - self.cumulative_gas_used.length() + - bloom.length() + - self.logs.length(); - - #[cfg(feature = "optimism")] - if self.tx_type == TxType::Deposit { - let mut len = len; - - if let Some(deposit_nonce) = self.deposit_nonce { - len += deposit_nonce.length(); - } - if let Some(deposit_receipt_version) = self.deposit_receipt_version { - len += deposit_receipt_version.length(); - } - - return len - } - - len - } - - /// RLP-encodes receipt fields with the given [`Bloom`] without an RLP header. - pub fn rlp_encode_fields(&self, bloom: &Bloom, out: &mut dyn BufMut) { - self.success.encode(out); - self.cumulative_gas_used.encode(out); - bloom.encode(out); - self.logs.encode(out); - - #[cfg(feature = "optimism")] - if self.tx_type == TxType::Deposit { - if let Some(nonce) = self.deposit_nonce { - nonce.encode(out); - } - if let Some(version) = self.deposit_receipt_version { - version.encode(out); - } - } - } - - /// Returns RLP header for inner encoding. 
- pub fn rlp_header_inner(&self, bloom: &Bloom) -> Header { - Header { list: true, payload_length: self.rlp_encoded_fields_length(bloom) } - } - - fn decode_receipt_with_bloom( - buf: &mut &[u8], - tx_type: TxType, - ) -> alloy_rlp::Result> { - let b = &mut &**buf; - let rlp_head = alloy_rlp::Header::decode(b)?; - if !rlp_head.list { - return Err(alloy_rlp::Error::UnexpectedString) - } - let started_len = b.len(); - - let success = Decodable::decode(b)?; - let cumulative_gas_used = Decodable::decode(b)?; - let bloom = Decodable::decode(b)?; - let logs = Decodable::decode(b)?; - - let receipt = match tx_type { - #[cfg(feature = "optimism")] - TxType::Deposit => { - let remaining = |b: &[u8]| rlp_head.payload_length - (started_len - b.len()) > 0; - let deposit_nonce = remaining(b).then(|| Decodable::decode(b)).transpose()?; - let deposit_receipt_version = - remaining(b).then(|| Decodable::decode(b)).transpose()?; - - Self { - tx_type, - success, - cumulative_gas_used, - logs, - deposit_nonce, - deposit_receipt_version, - } - } - _ => Self { - tx_type, - success, - cumulative_gas_used, - logs, - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - }; - - let this = ReceiptWithBloom { receipt, logs_bloom: bloom }; - let consumed = started_len - b.len(); - if consumed != rlp_head.payload_length { - return Err(alloy_rlp::Error::ListLengthMismatch { - expected: rlp_head.payload_length, - got: consumed, - }) - } - *buf = *b; - Ok(this) - } -} - -impl Eip2718EncodableReceipt for Receipt { - fn eip2718_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { - self.rlp_header_inner(bloom).length_with_payload() + - !matches!(self.tx_type, TxType::Legacy) as usize // account for type prefix - } - - fn eip2718_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { - if !matches!(self.tx_type, TxType::Legacy) { - out.put_u8(self.tx_type as u8); - } - self.rlp_header_inner(bloom).encode(out); - self.rlp_encode_fields(bloom, out); - } -} - -impl RlpEncodableReceipt for Receipt { - fn rlp_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { - let mut len = self.eip2718_encoded_length_with_bloom(bloom); - if !matches!(self.tx_type, TxType::Legacy) { - len += Header { - list: false, - payload_length: self.eip2718_encoded_length_with_bloom(bloom), - } - .length(); - } - - len - } - - fn rlp_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { - if !matches!(self.tx_type, TxType::Legacy) { - Header { list: false, payload_length: self.eip2718_encoded_length_with_bloom(bloom) } - .encode(out); - } - self.eip2718_encode_with_bloom(bloom, out); - } -} - -impl RlpDecodableReceipt for Receipt { - fn rlp_decode_with_bloom(buf: &mut &[u8]) -> alloy_rlp::Result> { - let header_buf = &mut &**buf; - let header = Header::decode(header_buf)?; - - if header.list { - return Self::decode_receipt_with_bloom(buf, TxType::Legacy); - } - - *buf = *header_buf; - - let remaining = buf.len(); - let tx_type = TxType::decode(buf)?; - let this = Self::decode_receipt_with_bloom(buf, tx_type)?; - - if buf.len() + header.payload_length != remaining { - return Err(alloy_rlp::Error::UnexpectedLength); - } - - Ok(this) - } -} - -impl TxReceipt for Receipt { - type Log = Log; - - fn status_or_post_state(&self) -> Eip658Value { - self.success.into() - } - - fn status(&self) -> bool { - self.success - } - - fn bloom(&self) -> Bloom { - alloy_primitives::logs_bloom(self.logs.iter()) - } - - fn cumulative_gas_used(&self) -> u64 { - 
self.cumulative_gas_used - } - - fn logs(&self) -> &[Log] { - &self.logs - } -} - -impl Typed2718 for Receipt { - fn ty(&self) -> u8 { - self.tx_type as u8 - } -} - -impl reth_primitives_traits::Receipt for Receipt {} - -impl InMemorySize for Receipt { - /// Calculates a heuristic for the in-memory size of the [Receipt]. - #[inline] - fn size(&self) -> usize { - let total_size = self.tx_type.size() + - core::mem::size_of::() + - core::mem::size_of::() + - self.logs.capacity() * core::mem::size_of::(); - - #[cfg(feature = "optimism")] - return total_size + 2 * core::mem::size_of::>(); - #[cfg(not(feature = "optimism"))] - total_size - } -} +pub use reth_ethereum_primitives::Receipt; /// A collection of receipts organized as a two-dimensional vector. #[derive( @@ -342,248 +68,3 @@ impl Default for Receipts { Self { receipt_vec: Vec::new() } } } - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for Receipt { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let tx_type = TxType::arbitrary(u)?; - let success = bool::arbitrary(u)?; - let cumulative_gas_used = u64::arbitrary(u)?; - let logs = Vec::::arbitrary(u)?; - - // Only receipts for deposit transactions may contain a deposit nonce - #[cfg(feature = "optimism")] - let (deposit_nonce, deposit_receipt_version) = if tx_type == TxType::Deposit { - let deposit_nonce = Option::::arbitrary(u)?; - let deposit_nonce_version = - deposit_nonce.map(|_| Option::::arbitrary(u)).transpose()?.flatten(); - (deposit_nonce, deposit_nonce_version) - } else { - (None, None) - }; - - Ok(Self { - tx_type, - success, - cumulative_gas_used, - logs, - #[cfg(feature = "optimism")] - deposit_nonce, - #[cfg(feature = "optimism")] - deposit_receipt_version, - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_eips::eip2718::Encodable2718; - use alloy_primitives::{address, b256, bytes, hex_literal::hex, Bytes}; - use reth_codecs::Compact; - - #[test] - fn test_decode_receipt() { - #[cfg(not(feature = "optimism"))] - reth_codecs::test_utils::test_decode::(&hex!( - "c428b52ffd23fc42696156b10200f034792b6a94c3850215c2fef7aea361a0c31b79d9a32652eefc0d4e2e730036061cff7344b6fc6132b50cda0ed810a991ae58ef013150c12b2522533cb3b3a8b19b7786a8b5ff1d3cdc84225e22b02def168c8858df" - )); - #[cfg(feature = "optimism")] - reth_codecs::test_utils::test_decode::(&hex!( - "c30328b52ffd23fc426961a00105007eb0042307705a97e503562eacf2b95060cce9de6de68386b6c155b73a9650021a49e2f8baad17f30faff5899d785c4c0873e45bc268bcf07560106424570d11f9a59e8f3db1efa4ceec680123712275f10d92c3411e1caaa11c7c5d591bc11487168e09934a9986848136da1b583babf3a7188e3aed007a1520f1cf4c1ca7d3482c6c28d37c298613c70a76940008816c4c95644579fd08471dc34732fd0f24" - )); - } - - // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 - #[test] - fn encode_legacy_receipt() { - let expected = 
hex!("f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); - - let mut data = Vec::with_capacity(expected.length()); - let receipt = ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Legacy, - cumulative_gas_used: 0x1u64, - logs: vec![Log::new_unchecked( - address!("0000000000000000000000000000000000000011"), - vec![ - b256!("000000000000000000000000000000000000000000000000000000000000dead"), - b256!("000000000000000000000000000000000000000000000000000000000000beef"), - ], - bytes!("0100ff"), - )], - success: false, - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - logs_bloom: [0; 256].into(), - }; - - receipt.encode(&mut data); - - // check that the rlp length equals the length of the expected rlp - assert_eq!(receipt.length(), expected.len()); - assert_eq!(data, expected); - } - - // Test vector from: https://eips.ethereum.org/EIPS/eip-2481 - #[test] - fn decode_legacy_receipt() { - let data = hex!("f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"); - - // EIP658Receipt - let expected = ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Legacy, - cumulative_gas_used: 0x1u64, - logs: vec![Log::new_unchecked( - address!("0000000000000000000000000000000000000011"), - vec![ - b256!("000000000000000000000000000000000000000000000000000000000000dead"), - b256!("000000000000000000000000000000000000000000000000000000000000beef"), - ], - bytes!("0100ff"), - )], - success: false, - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - logs_bloom: [0; 256].into(), - }; - - let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); - assert_eq!(receipt, expected); - } - - #[cfg(feature = "optimism")] - #[test] - fn decode_deposit_receipt_regolith_roundtrip() { - let data = 
hex!("b901107ef9010c0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf"); - - // Deposit Receipt (post-regolith) - let expected = ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Deposit, - cumulative_gas_used: 46913, - logs: vec![], - success: true, - deposit_nonce: Some(4012991), - deposit_receipt_version: None, - }, - logs_bloom: [0; 256].into(), - }; - - let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); - assert_eq!(receipt, expected); - - let mut buf = Vec::with_capacity(data.len()); - receipt.encode(&mut buf); - assert_eq!(buf, &data[..]); - } - - #[cfg(feature = "optimism")] - #[test] - fn decode_deposit_receipt_canyon_roundtrip() { - let data = hex!("b901117ef9010d0182b741b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0833d3bbf01"); - - // Deposit Receipt (post-regolith) - let expected = ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Deposit, - cumulative_gas_used: 46913, - logs: vec![], - success: true, - deposit_nonce: Some(4012991), - deposit_receipt_version: Some(1), - }, - logs_bloom: [0; 256].into(), - }; - - let receipt = ReceiptWithBloom::decode(&mut &data[..]).unwrap(); - assert_eq!(receipt, expected); - - let mut buf = Vec::with_capacity(data.len()); - expected.encode(&mut buf); - assert_eq!(buf, &data[..]); - } - - #[test] - fn gigantic_receipt() { - let receipt = Receipt { - cumulative_gas_used: 16747627, - success: true, - tx_type: TxType::Legacy, - logs: vec![ - Log::new_unchecked( - address!("4bf56695415f725e43c3e04354b604bcfb6dfb6e"), - vec![b256!("c69dc3d7ebff79e41f525be431d5cd3cc08f80eaf0f7819054a726eeb7086eb9")], - Bytes::from(vec![1; 0xffffff]), - ), - Log::new_unchecked( - address!("faca325c86bf9c2d5b413cd7b90b209be92229c2"), - vec![b256!("8cca58667b1e9ffa004720ac99a3d61a138181963b294d270d91c53d36402ae2")], - Bytes::from(vec![1; 0xffffff]), - ), - ], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }; - - let mut data = vec![]; - receipt.to_compact(&mut data); - let (decoded, _) = Receipt::from_compact(&data[..], data.len()); - assert_eq!(decoded, receipt); - } - - #[test] - fn test_encode_2718_length() { - let receipt = ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Eip1559, - success: true, - cumulative_gas_used: 21000, - logs: vec![], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - logs_bloom: Bloom::default(), - }; - - let encoded = receipt.encoded_2718(); - assert_eq!( - encoded.len(), - receipt.encode_2718_len(), - 
"Encoded length should match the actual encoded data length" - ); - - // Test for legacy receipt as well - let legacy_receipt = ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Legacy, - success: true, - cumulative_gas_used: 21000, - logs: vec![], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, - logs_bloom: Bloom::default(), - }; - - let legacy_encoded = legacy_receipt.encoded_2718(); - assert_eq!( - legacy_encoded.len(), - legacy_receipt.encode_2718_len(), - "Encoded length for legacy receipt should match the actual encoded data length" - ); - } -} diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 6189eb10c20c0..3e5c48a4660a5 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1,30 +1,8 @@ //! Transaction types. use crate::RecoveredTx; -use alloc::vec::Vec; pub use alloy_consensus::transaction::PooledTransaction; -use alloy_consensus::{ - transaction::RlpEcdsaTx, SignableTransaction, Signed, Transaction as _, TxEip1559, TxEip2930, - TxEip4844, TxEip4844Variant, TxEip4844WithSidecar, TxEip7702, TxEnvelope, TxLegacy, Typed2718, - TypedTransaction, -}; -use alloy_eips::{ - eip2718::{Decodable2718, Eip2718Error, Eip2718Result, Encodable2718}, - eip2930::AccessList, - eip4844::BlobTransactionSidecar, - eip7702::SignedAuthorization, -}; -use alloy_primitives::{ - keccak256, Address, Bytes, ChainId, PrimitiveSignature as Signature, TxHash, TxKind, B256, U256, -}; -use alloy_rlp::{Decodable, Encodable, Header}; -use core::hash::{Hash, Hasher}; -use derive_more::{AsRef, Deref}; use once_cell as _; -#[cfg(feature = "optimism")] -use op_alloy_consensus::DepositTransaction; -#[cfg(feature = "optimism")] -use op_alloy_consensus::TxDeposit; pub use pooled::PooledTransactionsElementEcRecovered; pub use reth_primitives_traits::{ sync::{LazyLock, OnceLock}, @@ -36,9 +14,6 @@ pub use reth_primitives_traits::{ }, FillTxEnv, WithEncoded, }; -use reth_primitives_traits::{InMemorySize, SignedTransaction}; -use revm_primitives::{AuthorizationList, TxEnv}; -use serde::{Deserialize, Serialize}; pub use signature::{recover_signer, recover_signer_unchecked}; pub use tx_type::TxType; @@ -50,1680 +25,20 @@ pub mod util; mod pooled; mod tx_type; -/// A raw transaction. -/// -/// Transaction types were introduced in [EIP-2718](https://eips.ethereum.org/EIPS/eip-2718). -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, derive_more::From)] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -pub enum Transaction { - /// Legacy transaction (type `0x0`). - /// - /// Traditional Ethereum transactions, containing parameters `nonce`, `gasPrice`, `gasLimit`, - /// `to`, `value`, `data`, `v`, `r`, and `s`. - /// - /// These transactions do not utilize access lists nor do they incorporate EIP-1559 fee market - /// changes. - Legacy(TxLegacy), - /// Transaction with an [`AccessList`] ([EIP-2930](https://eips.ethereum.org/EIPS/eip-2930)), type `0x1`. - /// - /// The `accessList` specifies an array of addresses and storage keys that the transaction - /// plans to access, enabling gas savings on cross-contract calls by pre-declaring the accessed - /// contract and storage slots. - Eip2930(TxEip2930), - /// A transaction with a priority fee ([EIP-1559](https://eips.ethereum.org/EIPS/eip-1559)), type `0x2`. 
- /// - /// Unlike traditional transactions, EIP-1559 transactions use an in-protocol, dynamically - /// changing base fee per gas, adjusted at each block to manage network congestion. - /// - /// - `maxPriorityFeePerGas`, specifying the maximum fee above the base fee the sender is - /// willing to pay - /// - `maxFeePerGas`, setting the maximum total fee the sender is willing to pay. - /// - /// The base fee is burned, while the priority fee is paid to the miner who includes the - /// transaction, incentivizing miners to include transactions with higher priority fees per - /// gas. - Eip1559(TxEip1559), - /// Shard Blob Transactions ([EIP-4844](https://eips.ethereum.org/EIPS/eip-4844)), type `0x3`. - /// - /// Shard Blob Transactions introduce a new transaction type called a blob-carrying transaction - /// to reduce gas costs. These transactions are similar to regular Ethereum transactions but - /// include additional data called a blob. - /// - /// Blobs are larger (~125 kB) and cheaper than the current calldata, providing an immutable - /// and read-only memory for storing transaction data. - /// - /// EIP-4844, also known as proto-danksharding, implements the framework and logic of - /// danksharding, introducing new transaction formats and verification rules. - Eip4844(TxEip4844), - /// EOA Set Code Transactions ([EIP-7702](https://eips.ethereum.org/EIPS/eip-7702)), type `0x4`. - /// - /// EOA Set Code Transactions give the ability to temporarily set contract code for an - /// EOA for a single transaction. This allows for temporarily adding smart contract - /// functionality to the EOA. - Eip7702(TxEip7702), - /// Optimism deposit transaction. - #[cfg(feature = "optimism")] - Deposit(TxDeposit), -} - -#[cfg(feature = "optimism")] -impl DepositTransaction for Transaction { - fn source_hash(&self) -> Option { - match self { - Self::Deposit(tx) => tx.source_hash(), - _ => None, - } - } - fn mint(&self) -> Option { - match self { - Self::Deposit(tx) => tx.mint(), - _ => None, - } - } - fn is_system_transaction(&self) -> bool { - match self { - Self::Deposit(tx) => tx.is_system_transaction(), - _ => false, - } - } - fn is_deposit(&self) -> bool { - matches!(self, Self::Deposit(_)) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for Transaction { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - let mut tx = match TxType::arbitrary(u)? 
{ - TxType::Legacy => { - let tx = TxLegacy::arbitrary(u)?; - Self::Legacy(tx) - } - TxType::Eip2930 => { - let tx = TxEip2930::arbitrary(u)?; - Self::Eip2930(tx) - } - TxType::Eip1559 => { - let tx = TxEip1559::arbitrary(u)?; - Self::Eip1559(tx) - } - TxType::Eip4844 => { - let tx = TxEip4844::arbitrary(u)?; - Self::Eip4844(tx) - } - - TxType::Eip7702 => { - let tx = TxEip7702::arbitrary(u)?; - Self::Eip7702(tx) - } - #[cfg(feature = "optimism")] - TxType::Deposit => { - let tx = TxDeposit::arbitrary(u)?; - Self::Deposit(tx) - } - }; - - // Otherwise we might overflow when calculating `v` on `recalculate_hash` - if let Some(chain_id) = tx.chain_id() { - tx.set_chain_id(chain_id % (u64::MAX / 2 - 36)); - } - - Ok(tx) - } -} - -impl Typed2718 for Transaction { - fn ty(&self) -> u8 { - match self { - Self::Legacy(tx) => tx.ty(), - Self::Eip2930(tx) => tx.ty(), - Self::Eip1559(tx) => tx.ty(), - Self::Eip4844(tx) => tx.ty(), - Self::Eip7702(tx) => tx.ty(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.ty(), - } - } -} - -// === impl Transaction === - -impl Transaction { - /// Heavy operation that return signature hash over rlp encoded transaction. - /// It is only for signature signing or signer recovery. - pub fn signature_hash(&self) -> B256 { - match self { - Self::Legacy(tx) => tx.signature_hash(), - Self::Eip2930(tx) => tx.signature_hash(), - Self::Eip1559(tx) => tx.signature_hash(), - Self::Eip4844(tx) => tx.signature_hash(), - Self::Eip7702(tx) => tx.signature_hash(), - #[cfg(feature = "optimism")] - Self::Deposit(_) => B256::ZERO, - } - } - - /// Sets the transaction's chain id to the provided value. - pub fn set_chain_id(&mut self, chain_id: u64) { - match self { - Self::Legacy(TxLegacy { chain_id: ref mut c, .. }) => *c = Some(chain_id), - Self::Eip2930(TxEip2930 { chain_id: ref mut c, .. }) | - Self::Eip1559(TxEip1559 { chain_id: ref mut c, .. }) | - Self::Eip4844(TxEip4844 { chain_id: ref mut c, .. }) | - Self::Eip7702(TxEip7702 { chain_id: ref mut c, .. }) => *c = chain_id, - #[cfg(feature = "optimism")] - Self::Deposit(_) => { /* noop */ } - } - } - - /// Get the transaction's type - pub const fn tx_type(&self) -> TxType { - match self { - Self::Legacy(_) => TxType::Legacy, - Self::Eip2930(_) => TxType::Eip2930, - Self::Eip1559(_) => TxType::Eip1559, - Self::Eip4844(_) => TxType::Eip4844, - Self::Eip7702(_) => TxType::Eip7702, - #[cfg(feature = "optimism")] - Self::Deposit(_) => TxType::Deposit, - } - } - - /// Returns the blob gas used for all blobs of the EIP-4844 transaction if it is an EIP-4844 - /// transaction. - /// - /// This is the number of blobs times the - /// [`DATA_GAS_PER_BLOB`](alloy_eips::eip4844::DATA_GAS_PER_BLOB) a single blob consumes. - pub fn blob_gas_used(&self) -> Option { - self.as_eip4844().map(TxEip4844::blob_gas) - } - - /// Returns the effective miner gas tip cap (`gasTipCap`) for the given base fee: - /// `min(maxFeePerGas - baseFee, maxPriorityFeePerGas)` - /// - /// If the base fee is `None`, the `max_priority_fee_per_gas`, or gas price for non-EIP1559 - /// transactions is returned. - /// - /// Returns `None` if the basefee is higher than the [`Transaction::max_fee_per_gas`]. 
- pub fn effective_tip_per_gas(&self, base_fee: Option) -> Option { - let base_fee = match base_fee { - Some(base_fee) => base_fee as u128, - None => return Some(self.priority_fee_or_price()), - }; - - let max_fee_per_gas = self.max_fee_per_gas(); - - // Check if max_fee_per_gas is less than base_fee - if max_fee_per_gas < base_fee { - return None - } - - // Calculate the difference between max_fee_per_gas and base_fee - let fee = max_fee_per_gas - base_fee; - - // Compare the fee with max_priority_fee_per_gas (or gas price for non-EIP1559 transactions) - if let Some(priority_fee) = self.max_priority_fee_per_gas() { - Some(fee.min(priority_fee)) - } else { - Some(fee) - } - } - - /// This encodes the transaction _without_ the signature, and is only suitable for creating a - /// hash intended for signing. - pub fn encode_for_signing(&self, out: &mut dyn bytes::BufMut) { - match self { - Self::Legacy(tx) => tx.encode_for_signing(out), - Self::Eip2930(tx) => tx.encode_for_signing(out), - Self::Eip1559(tx) => tx.encode_for_signing(out), - Self::Eip4844(tx) => tx.encode_for_signing(out), - Self::Eip7702(tx) => tx.encode_for_signing(out), - #[cfg(feature = "optimism")] - Self::Deposit(_) => {} - } - } - - /// Produces EIP-2718 encoding of the transaction - pub fn eip2718_encode(&self, signature: &Signature, out: &mut dyn bytes::BufMut) { - match self { - Self::Legacy(legacy_tx) => { - // do nothing w/ with_header - legacy_tx.eip2718_encode(signature, out); - } - Self::Eip2930(access_list_tx) => { - access_list_tx.eip2718_encode(signature, out); - } - Self::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.eip2718_encode(signature, out); - } - Self::Eip4844(blob_tx) => blob_tx.eip2718_encode(signature, out), - Self::Eip7702(set_code_tx) => { - set_code_tx.eip2718_encode(signature, out); - } - #[cfg(feature = "optimism")] - Self::Deposit(deposit_tx) => deposit_tx.encode_2718(out), - } - } - - /// This sets the transaction's gas limit. - pub fn set_gas_limit(&mut self, gas_limit: u64) { - match self { - Self::Legacy(tx) => tx.gas_limit = gas_limit, - Self::Eip2930(tx) => tx.gas_limit = gas_limit, - Self::Eip1559(tx) => tx.gas_limit = gas_limit, - Self::Eip4844(tx) => tx.gas_limit = gas_limit, - Self::Eip7702(tx) => tx.gas_limit = gas_limit, - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.gas_limit = gas_limit, - } - } - - /// This sets the transaction's nonce. - pub fn set_nonce(&mut self, nonce: u64) { - match self { - Self::Legacy(tx) => tx.nonce = nonce, - Self::Eip2930(tx) => tx.nonce = nonce, - Self::Eip1559(tx) => tx.nonce = nonce, - Self::Eip4844(tx) => tx.nonce = nonce, - Self::Eip7702(tx) => tx.nonce = nonce, - #[cfg(feature = "optimism")] - Self::Deposit(_) => { /* noop */ } - } - } - - /// This sets the transaction's value. - pub fn set_value(&mut self, value: U256) { - match self { - Self::Legacy(tx) => tx.value = value, - Self::Eip2930(tx) => tx.value = value, - Self::Eip1559(tx) => tx.value = value, - Self::Eip4844(tx) => tx.value = value, - Self::Eip7702(tx) => tx.value = value, - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.value = value, - } - } - - /// This sets the transaction's input field. 
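The effective-tip rule implemented above is min(maxFeePerGas - baseFee, maxPriorityFeePerGas), falling back to the priority fee (or gas price) when no base fee is given. A minimal standalone sketch of that arithmetic, using plain integers and illustrative names rather than reth's accessors:

// Hedged sketch of the effective-tip rule: the tip a block producer receives
// is capped both by the fee headroom over the base fee and by the explicit
// priority fee. Plain integers stand in for the transaction accessors.
fn effective_tip(max_fee: u128, priority_fee: Option<u128>, base_fee: Option<u64>) -> Option<u128> {
    let Some(base_fee) = base_fee.map(u128::from) else {
        // No base fee known: fall back to the priority fee, or to the max fee
        // (standing in for the gas price of non-EIP-1559 transactions).
        return Some(priority_fee.unwrap_or(max_fee));
    };
    // A transaction whose fee cap is below the base fee pays no tip at all.
    if max_fee < base_fee {
        return None;
    }
    let headroom = max_fee - base_fee;
    Some(priority_fee.map_or(headroom, |tip| tip.min(headroom)))
}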
- pub fn set_input(&mut self, input: Bytes) { - match self { - Self::Legacy(tx) => tx.input = input, - Self::Eip2930(tx) => tx.input = input, - Self::Eip1559(tx) => tx.input = input, - Self::Eip4844(tx) => tx.input = input, - Self::Eip7702(tx) => tx.input = input, - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.input = input, - } - } - - /// Returns true if the transaction is a legacy transaction. - #[inline] - pub const fn is_legacy(&self) -> bool { - matches!(self, Self::Legacy(_)) - } - - /// Returns true if the transaction is an EIP-2930 transaction. - #[inline] - pub const fn is_eip2930(&self) -> bool { - matches!(self, Self::Eip2930(_)) - } - - /// Returns true if the transaction is an EIP-1559 transaction. - #[inline] - pub const fn is_eip1559(&self) -> bool { - matches!(self, Self::Eip1559(_)) - } - - /// Returns true if the transaction is an EIP-4844 transaction. - #[inline] - pub const fn is_eip4844(&self) -> bool { - matches!(self, Self::Eip4844(_)) - } - - /// Returns true if the transaction is an EIP-7702 transaction. - #[inline] - pub const fn is_eip7702(&self) -> bool { - matches!(self, Self::Eip7702(_)) - } - - /// Returns the [`TxLegacy`] variant if the transaction is a legacy transaction. - pub const fn as_legacy(&self) -> Option<&TxLegacy> { - match self { - Self::Legacy(tx) => Some(tx), - _ => None, - } - } - - /// Returns the [`TxEip2930`] variant if the transaction is an EIP-2930 transaction. - pub const fn as_eip2930(&self) -> Option<&TxEip2930> { - match self { - Self::Eip2930(tx) => Some(tx), - _ => None, - } - } - - /// Returns the [`TxEip1559`] variant if the transaction is an EIP-1559 transaction. - pub const fn as_eip1559(&self) -> Option<&TxEip1559> { - match self { - Self::Eip1559(tx) => Some(tx), - _ => None, - } - } - - /// Returns the [`TxEip4844`] variant if the transaction is an EIP-4844 transaction. - pub const fn as_eip4844(&self) -> Option<&TxEip4844> { - match self { - Self::Eip4844(tx) => Some(tx), - _ => None, - } - } - - /// Returns the [`TxEip7702`] variant if the transaction is an EIP-7702 transaction. - pub const fn as_eip7702(&self) -> Option<&TxEip7702> { - match self { - Self::Eip7702(tx) => Some(tx), - _ => None, - } - } -} - -impl InMemorySize for Transaction { - /// Calculates a heuristic for the in-memory size of the [Transaction]. - #[inline] - fn size(&self) -> usize { - match self { - Self::Legacy(tx) => tx.size(), - Self::Eip2930(tx) => tx.size(), - Self::Eip1559(tx) => tx.size(), - Self::Eip4844(tx) => tx.size(), - Self::Eip7702(tx) => tx.size(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.size(), - } - } -} - -#[cfg(any(test, feature = "reth-codec"))] -impl reth_codecs::Compact for Transaction { - // Serializes the TxType to the buffer if necessary, returning 2 bits of the type as an - // identifier instead of the length. - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - let identifier = self.tx_type().to_compact(buf); - match self { - Self::Legacy(tx) => { - tx.to_compact(buf); - } - Self::Eip2930(tx) => { - tx.to_compact(buf); - } - Self::Eip1559(tx) => { - tx.to_compact(buf); - } - Self::Eip4844(tx) => { - tx.to_compact(buf); - } - Self::Eip7702(tx) => { - tx.to_compact(buf); - } - #[cfg(feature = "optimism")] - Self::Deposit(tx) => { - tx.to_compact(buf); - } - } - identifier - } - - // For backwards compatibility purposes, only 2 bits of the type are encoded in the identifier - // parameter. 
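The 2-bit identifier scheme this comment describes can be illustrated with a small standalone sketch. The concrete constant values (0..=3) are assumptions for illustration only; reth's canonical constants live in reth_codecs::txtype:

// Hedged sketch: tx types 0..=2 fit the 2-bit identifier directly; anything
// larger sets the extended flag (3) and spells the type out as a full byte.
const COMPACT_LEGACY: usize = 0; // assumed values, for illustration only
const COMPACT_EIP2930: usize = 1;
const COMPACT_EIP1559: usize = 2;
const COMPACT_EXTENDED: usize = 3;

// Mirrors the (value, remaining-buffer) shape of `from_compact` below.
fn read_tx_type(identifier: usize, buf: &[u8]) -> (u8, &[u8]) {
    match identifier {
        COMPACT_LEGACY => (0x00, buf),
        COMPACT_EIP2930 => (0x01, buf),
        COMPACT_EIP1559 => (0x02, buf),
        COMPACT_EXTENDED => {
            // The concrete type byte (e.g. 0x03 for EIP-4844) follows in the buffer.
            (buf[0], &buf[1..])
        }
        other => panic!("unknown compact identifier: {other}"),
    }
}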
In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type - // is read from the buffer as a single byte. - // - // # Panics - // - // A panic will be triggered if an identifier larger than 3 is passed from the database. For - // optimism a identifier with value [`DEPOSIT_TX_TYPE_ID`] is allowed. - fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { - use bytes::Buf; - - match identifier { - reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => { - let (tx, buf) = TxLegacy::from_compact(buf, buf.len()); - (Self::Legacy(tx), buf) - } - reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => { - let (tx, buf) = TxEip2930::from_compact(buf, buf.len()); - (Self::Eip2930(tx), buf) - } - reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => { - let (tx, buf) = TxEip1559::from_compact(buf, buf.len()); - (Self::Eip1559(tx), buf) - } - reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { - // An identifier of 3 indicates that the transaction type did not fit into - // the backwards compatible 2 bit identifier, their transaction types are - // larger than 2 bits (eg. 4844 and Deposit Transactions). In this case, - // we need to read the concrete transaction type from the buffer by - // reading the full 8 bits (single byte) and match on this transaction type. - let identifier = buf.get_u8(); - match identifier { - alloy_consensus::constants::EIP4844_TX_TYPE_ID => { - let (tx, buf) = TxEip4844::from_compact(buf, buf.len()); - (Self::Eip4844(tx), buf) - } - alloy_consensus::constants::EIP7702_TX_TYPE_ID => { - let (tx, buf) = TxEip7702::from_compact(buf, buf.len()); - (Self::Eip7702(tx), buf) - } - #[cfg(feature = "optimism")] - op_alloy_consensus::DEPOSIT_TX_TYPE_ID => { - let (tx, buf) = TxDeposit::from_compact(buf, buf.len()); - (Self::Deposit(tx), buf) - } - _ => unreachable!( - "Junk data in database: unknown Transaction variant: {identifier}" - ), - } - } - _ => unreachable!("Junk data in database: unknown Transaction variant: {identifier}"), - } - } -} - -impl Default for Transaction { - fn default() -> Self { - Self::Legacy(TxLegacy::default()) - } -} - -impl alloy_consensus::Transaction for Transaction { - fn chain_id(&self) -> Option { - match self { - Self::Legacy(tx) => tx.chain_id(), - Self::Eip2930(tx) => tx.chain_id(), - Self::Eip1559(tx) => tx.chain_id(), - Self::Eip4844(tx) => tx.chain_id(), - Self::Eip7702(tx) => tx.chain_id(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.chain_id(), - } - } - - fn nonce(&self) -> u64 { - match self { - Self::Legacy(tx) => tx.nonce(), - Self::Eip2930(tx) => tx.nonce(), - Self::Eip1559(tx) => tx.nonce(), - Self::Eip4844(tx) => tx.nonce(), - Self::Eip7702(tx) => tx.nonce(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.nonce(), - } - } - - fn gas_limit(&self) -> u64 { - match self { - Self::Legacy(tx) => tx.gas_limit(), - Self::Eip2930(tx) => tx.gas_limit(), - Self::Eip1559(tx) => tx.gas_limit(), - Self::Eip4844(tx) => tx.gas_limit(), - Self::Eip7702(tx) => tx.gas_limit(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.gas_limit(), - } - } - - fn gas_price(&self) -> Option { - match self { - Self::Legacy(tx) => tx.gas_price(), - Self::Eip2930(tx) => tx.gas_price(), - Self::Eip1559(tx) => tx.gas_price(), - Self::Eip4844(tx) => tx.gas_price(), - Self::Eip7702(tx) => tx.gas_price(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.gas_price(), - } - } - - fn max_fee_per_gas(&self) -> u128 { - match self { - Self::Legacy(tx) => tx.max_fee_per_gas(), - Self::Eip2930(tx) => 
tx.max_fee_per_gas(), - Self::Eip1559(tx) => tx.max_fee_per_gas(), - Self::Eip4844(tx) => tx.max_fee_per_gas(), - Self::Eip7702(tx) => tx.max_fee_per_gas(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.max_fee_per_gas(), - } - } - - fn max_priority_fee_per_gas(&self) -> Option { - match self { - Self::Legacy(tx) => tx.max_priority_fee_per_gas(), - Self::Eip2930(tx) => tx.max_priority_fee_per_gas(), - Self::Eip1559(tx) => tx.max_priority_fee_per_gas(), - Self::Eip4844(tx) => tx.max_priority_fee_per_gas(), - Self::Eip7702(tx) => tx.max_priority_fee_per_gas(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.max_priority_fee_per_gas(), - } - } - - fn max_fee_per_blob_gas(&self) -> Option { - match self { - Self::Legacy(tx) => tx.max_fee_per_blob_gas(), - Self::Eip2930(tx) => tx.max_fee_per_blob_gas(), - Self::Eip1559(tx) => tx.max_fee_per_blob_gas(), - Self::Eip4844(tx) => tx.max_fee_per_blob_gas(), - Self::Eip7702(tx) => tx.max_fee_per_blob_gas(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.max_fee_per_blob_gas(), - } - } - - fn priority_fee_or_price(&self) -> u128 { - match self { - Self::Legacy(tx) => tx.priority_fee_or_price(), - Self::Eip2930(tx) => tx.priority_fee_or_price(), - Self::Eip1559(tx) => tx.priority_fee_or_price(), - Self::Eip4844(tx) => tx.priority_fee_or_price(), - Self::Eip7702(tx) => tx.priority_fee_or_price(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.priority_fee_or_price(), - } - } - - fn effective_gas_price(&self, base_fee: Option) -> u128 { - match self { - Self::Legacy(tx) => tx.effective_gas_price(base_fee), - Self::Eip2930(tx) => tx.effective_gas_price(base_fee), - Self::Eip1559(tx) => tx.effective_gas_price(base_fee), - Self::Eip4844(tx) => tx.effective_gas_price(base_fee), - Self::Eip7702(tx) => tx.effective_gas_price(base_fee), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.effective_gas_price(base_fee), - } - } - - fn is_dynamic_fee(&self) -> bool { - match self { - Self::Legacy(_) | Self::Eip2930(_) => false, - Self::Eip1559(_) | Self::Eip4844(_) | Self::Eip7702(_) => true, - #[cfg(feature = "optimism")] - Self::Deposit(_) => false, - } - } - - fn kind(&self) -> TxKind { - match self { - Self::Legacy(tx) => tx.kind(), - Self::Eip2930(tx) => tx.kind(), - Self::Eip1559(tx) => tx.kind(), - Self::Eip4844(tx) => tx.kind(), - Self::Eip7702(tx) => tx.kind(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.kind(), - } - } - - fn is_create(&self) -> bool { - match self { - Self::Legacy(tx) => tx.is_create(), - Self::Eip2930(tx) => tx.is_create(), - Self::Eip1559(tx) => tx.is_create(), - Self::Eip4844(tx) => tx.is_create(), - Self::Eip7702(tx) => tx.is_create(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.is_create(), - } - } - - fn value(&self) -> U256 { - match self { - Self::Legacy(tx) => tx.value(), - Self::Eip2930(tx) => tx.value(), - Self::Eip1559(tx) => tx.value(), - Self::Eip4844(tx) => tx.value(), - Self::Eip7702(tx) => tx.value(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.value(), - } - } - - fn input(&self) -> &Bytes { - match self { - Self::Legacy(tx) => tx.input(), - Self::Eip2930(tx) => tx.input(), - Self::Eip1559(tx) => tx.input(), - Self::Eip4844(tx) => tx.input(), - Self::Eip7702(tx) => tx.input(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.input(), - } - } - - fn access_list(&self) -> Option<&AccessList> { - match self { - Self::Legacy(tx) => tx.access_list(), - Self::Eip2930(tx) => tx.access_list(), - Self::Eip1559(tx) => tx.access_list(), - 
Self::Eip4844(tx) => tx.access_list(), - Self::Eip7702(tx) => tx.access_list(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.access_list(), - } - } - - fn blob_versioned_hashes(&self) -> Option<&[B256]> { - match self { - Self::Legacy(tx) => tx.blob_versioned_hashes(), - Self::Eip2930(tx) => tx.blob_versioned_hashes(), - Self::Eip1559(tx) => tx.blob_versioned_hashes(), - Self::Eip4844(tx) => tx.blob_versioned_hashes(), - Self::Eip7702(tx) => tx.blob_versioned_hashes(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.blob_versioned_hashes(), - } - } - - fn authorization_list(&self) -> Option<&[SignedAuthorization]> { - match self { - Self::Legacy(tx) => tx.authorization_list(), - Self::Eip2930(tx) => tx.authorization_list(), - Self::Eip1559(tx) => tx.authorization_list(), - Self::Eip4844(tx) => tx.authorization_list(), - Self::Eip7702(tx) => tx.authorization_list(), - #[cfg(feature = "optimism")] - Self::Deposit(tx) => tx.authorization_list(), - } - } -} - -impl From for Transaction { - fn from(value: TxEip4844Variant) -> Self { - match value { - TxEip4844Variant::TxEip4844(tx) => tx.into(), - TxEip4844Variant::TxEip4844WithSidecar(tx) => tx.tx.into(), - } - } -} - -impl From for Transaction { - fn from(value: TypedTransaction) -> Self { - match value { - TypedTransaction::Legacy(tx) => tx.into(), - TypedTransaction::Eip2930(tx) => tx.into(), - TypedTransaction::Eip1559(tx) => tx.into(), - TypedTransaction::Eip4844(tx) => tx.into(), - TypedTransaction::Eip7702(tx) => tx.into(), - } - } -} - /// Signed transaction. -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(rlp))] -#[derive(Debug, Clone, Eq, AsRef, Deref, Serialize, Deserialize)] -pub struct TransactionSigned { - /// Transaction hash - #[serde(skip)] - pub hash: OnceLock, - /// The transaction signature values - pub signature: Signature, - /// Raw transaction info - #[deref] - #[as_ref] - pub transaction: Transaction, -} - -impl Default for TransactionSigned { - fn default() -> Self { - Self { - hash: Default::default(), - signature: Signature::test_signature(), - transaction: Default::default(), - } - } -} - -impl AsRef for TransactionSigned { - fn as_ref(&self) -> &Self { - self - } -} - -impl Hash for TransactionSigned { - fn hash(&self, state: &mut H) { - self.signature.hash(state); - self.transaction.hash(state); - } -} - -impl PartialEq for TransactionSigned { - fn eq(&self, other: &Self) -> bool { - self.signature == other.signature && - self.transaction == other.transaction && - self.tx_hash() == other.tx_hash() - } -} - -impl Typed2718 for TransactionSigned { - fn ty(&self) -> u8 { - self.deref().ty() - } -} - -// === impl TransactionSigned === - -impl TransactionSigned { - /// Creates a new signed transaction from the given parts. - pub fn new(transaction: Transaction, signature: Signature, hash: B256) -> Self { - Self { hash: hash.into(), signature, transaction } - } - - /// Creates a new signed transaction from the given transaction and signature without the hash. - /// - /// Note: this only calculates the hash on the first [`TransactionSigned::hash`] call. - pub fn new_unhashed(transaction: Transaction, signature: Signature) -> Self { - Self { hash: Default::default(), signature, transaction } - } - - /// Transaction - pub const fn transaction(&self) -> &Transaction { - &self.transaction - } - - /// Tries to convert a [`TransactionSigned`] into a [`PooledTransaction`]. 
-    ///
-    /// This function is used as a helper to convert from a decoded p2p broadcast message to
-    /// [`PooledTransaction`]. Since EIP-4844 variants are disallowed from being broadcast on
-    /// p2p, this returns an error if `tx` is [`Transaction::Eip4844`].
-    pub fn try_into_pooled(self) -> Result<PooledTransaction, Self> {
-        let hash = self.hash();
-        match self {
-            Self { transaction: Transaction::Legacy(tx), signature, .. } => {
-                Ok(PooledTransaction::Legacy(Signed::new_unchecked(tx, signature, hash)))
-            }
-            Self { transaction: Transaction::Eip2930(tx), signature, .. } => {
-                Ok(PooledTransaction::Eip2930(Signed::new_unchecked(tx, signature, hash)))
-            }
-            Self { transaction: Transaction::Eip1559(tx), signature, .. } => {
-                Ok(PooledTransaction::Eip1559(Signed::new_unchecked(tx, signature, hash)))
-            }
-            Self { transaction: Transaction::Eip7702(tx), signature, .. } => {
-                Ok(PooledTransaction::Eip7702(Signed::new_unchecked(tx, signature, hash)))
-            }
-            // Not supported because the blob sidecar is missing
-            tx @ Self { transaction: Transaction::Eip4844(_), .. } => Err(tx),
-            #[cfg(feature = "optimism")]
-            // Not supported because deposit transactions are never pooled
-            tx @ Self { transaction: Transaction::Deposit(_), .. } => Err(tx),
-        }
-    }
-
-    /// Converts from an EIP-4844 transaction to a [`PooledTransaction`] with the given sidecar.
-    ///
-    /// Returns an `Err` containing the original `TransactionSigned` if the transaction is not
-    /// EIP-4844.
-    pub fn try_into_pooled_eip4844(
-        self,
-        sidecar: BlobTransactionSidecar,
-    ) -> Result<PooledTransaction, Self> {
-        let hash = self.hash();
-        Ok(match self {
-            // If the transaction is an EIP-4844 transaction...
-            Self { transaction: Transaction::Eip4844(tx), signature, .. } => {
-                // Construct a pooled EIP-4844 tx with the provided sidecar.
-                PooledTransaction::Eip4844(Signed::new_unchecked(
-                    TxEip4844WithSidecar { tx, sidecar },
-                    signature,
-                    hash,
-                ))
-            }
-            // If the transaction is not EIP-4844, return an error with the original
-            // transaction.
-            _ => return Err(self),
-        })
-    }
-
-    /// Transaction hash. Used to identify the transaction.
-    pub fn hash(&self) -> TxHash {
-        *self.tx_hash()
-    }
-
-    /// Returns the [`RecoveredTx`] transaction with the given sender.
-    #[inline]
-    pub const fn with_signer(self, signer: Address) -> RecoveredTx {
-        RecoveredTx::new_unchecked(self, signer)
-    }
-
-    /// Consumes the type, recovers the signer and returns the [`RecoveredTx`].
-    ///
-    /// Returns `None` if the transaction's signature is invalid, see also [`Self::recover_signer`].
-    pub fn into_ecrecovered(self) -> Option<RecoveredTx> {
-        let signer = self.recover_signer()?;
-        Some(RecoveredTx::new_unchecked(self, signer))
-    }
-
-    /// Consumes the type, recovers the signer and returns the [`RecoveredTx`] _without
-    /// ensuring that the signature has a low `s` value_ (EIP-2).
-    ///
-    /// Returns `None` if the transaction's signature is invalid, see also
-    /// [`Self::recover_signer_unchecked`].
-    pub fn into_ecrecovered_unchecked(self) -> Option<RecoveredTx> {
-        let signer = self.recover_signer_unchecked()?;
-        Some(RecoveredTx::new_unchecked(self, signer))
-    }
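A hedged usage sketch of the recovery helpers above: recovery is fallible, so callers get `None` (or `Err` with the original transaction handed back) instead of a panic on a bad signature. This assumes a `TransactionSigned` in scope and the `RecoveredTx::signer` accessor used elsewhere in this patch:

// Illustrative only; `sender_of` is not a reth API.
fn sender_of(tx: TransactionSigned) -> Option<Address> {
    let recovered = tx.into_ecrecovered()?; // None on an invalid signature
    Some(recovered.signer())
}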
-
-    /// Tries to recover the signer and return the [`RecoveredTx`], _without ensuring that
-    /// the signature has a low `s` value_ (EIP-2).
-    ///
-    /// Returns `Err(Self)` if the transaction's signature is invalid, see also
-    /// [`Self::recover_signer_unchecked`].
-    pub fn try_into_ecrecovered_unchecked(self) -> Result<RecoveredTx, Self> {
-        match self.recover_signer_unchecked() {
-            None => Err(self),
-            Some(signer) => Ok(RecoveredTx::new_unchecked(self, signer)),
-        }
-    }
-
-    /// Calculates the transaction hash. An EIP-2718 transaction does not contain an RLP header
-    /// and starts with the tx type.
-    pub fn recalculate_hash(&self) -> B256 {
-        keccak256(self.encoded_2718())
-    }
-
-    /// Splits the transaction into parts.
-    pub fn into_parts(self) -> (Transaction, Signature, B256) {
-        let hash = self.hash();
-        (self.transaction, self.signature, hash)
-    }
-}
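The hashing rule in `recalculate_hash` above is simply keccak256 over the EIP-2718 envelope. A minimal sketch using alloy_primitives, a real dependency of this crate:

use alloy_primitives::{keccak256, B256};

// Sketch: the canonical transaction hash is keccak256 of the EIP-2718 encoding
// (type byte plus payload for typed transactions, bare RLP for legacy ones).
fn hash_of_encoded(encoded_2718: &[u8]) -> B256 {
    keccak256(encoded_2718)
}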
-
-impl SignedTransaction for TransactionSigned {
-    fn tx_hash(&self) -> &TxHash {
-        self.hash.get_or_init(|| self.recalculate_hash())
-    }
-
-    fn signature(&self) -> &Signature {
-        &self.signature
-    }
-
-    fn recover_signer(&self) -> Option<Address> {
-        // Optimism's Deposit transaction does not have a signature. Directly return the
-        // `from` address.
-        #[cfg(feature = "optimism")]
-        if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction {
-            return Some(from)
-        }
-        let signature_hash = self.signature_hash();
-        recover_signer(&self.signature, signature_hash)
-    }
-
-    fn recover_signer_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Option<Address>
{ - // Optimism's Deposit transaction does not have a signature. Directly return the - // `from` address. - #[cfg(feature = "optimism")] - if let Transaction::Deposit(TxDeposit { from, .. }) = self.transaction { - return Some(from) - } - self.encode_for_signing(buf); - let signature_hash = keccak256(buf); - recover_signer_unchecked(&self.signature, signature_hash) - } -} - -impl reth_primitives_traits::FillTxEnv for TransactionSigned { - fn fill_tx_env(&self, tx_env: &mut TxEnv, sender: Address) { - #[cfg(feature = "optimism")] - let envelope = alloy_eips::eip2718::Encodable2718::encoded_2718(self); - - tx_env.caller = sender; - match self.as_ref() { - Transaction::Legacy(tx) => { - tx_env.gas_limit = tx.gas_limit; - tx_env.gas_price = U256::from(tx.gas_price); - tx_env.gas_priority_fee = None; - tx_env.transact_to = tx.to; - tx_env.value = tx.value; - tx_env.data = tx.input.clone(); - tx_env.chain_id = tx.chain_id; - tx_env.nonce = Some(tx.nonce); - tx_env.access_list.clear(); - tx_env.blob_hashes.clear(); - tx_env.max_fee_per_blob_gas.take(); - tx_env.authorization_list = None; - } - Transaction::Eip2930(tx) => { - tx_env.gas_limit = tx.gas_limit; - tx_env.gas_price = U256::from(tx.gas_price); - tx_env.gas_priority_fee = None; - tx_env.transact_to = tx.to; - tx_env.value = tx.value; - tx_env.data = tx.input.clone(); - tx_env.chain_id = Some(tx.chain_id); - tx_env.nonce = Some(tx.nonce); - tx_env.access_list.clone_from(&tx.access_list.0); - tx_env.blob_hashes.clear(); - tx_env.max_fee_per_blob_gas.take(); - tx_env.authorization_list = None; - } - Transaction::Eip1559(tx) => { - tx_env.gas_limit = tx.gas_limit; - tx_env.gas_price = U256::from(tx.max_fee_per_gas); - tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); - tx_env.transact_to = tx.to; - tx_env.value = tx.value; - tx_env.data = tx.input.clone(); - tx_env.chain_id = Some(tx.chain_id); - tx_env.nonce = Some(tx.nonce); - tx_env.access_list.clone_from(&tx.access_list.0); - tx_env.blob_hashes.clear(); - tx_env.max_fee_per_blob_gas.take(); - tx_env.authorization_list = None; - } - Transaction::Eip4844(tx) => { - tx_env.gas_limit = tx.gas_limit; - tx_env.gas_price = U256::from(tx.max_fee_per_gas); - tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); - tx_env.transact_to = TxKind::Call(tx.to); - tx_env.value = tx.value; - tx_env.data = tx.input.clone(); - tx_env.chain_id = Some(tx.chain_id); - tx_env.nonce = Some(tx.nonce); - tx_env.access_list.clone_from(&tx.access_list.0); - tx_env.blob_hashes.clone_from(&tx.blob_versioned_hashes); - tx_env.max_fee_per_blob_gas = Some(U256::from(tx.max_fee_per_blob_gas)); - tx_env.authorization_list = None; - } - Transaction::Eip7702(tx) => { - tx_env.gas_limit = tx.gas_limit; - tx_env.gas_price = U256::from(tx.max_fee_per_gas); - tx_env.gas_priority_fee = Some(U256::from(tx.max_priority_fee_per_gas)); - tx_env.transact_to = tx.to.into(); - tx_env.value = tx.value; - tx_env.data = tx.input.clone(); - tx_env.chain_id = Some(tx.chain_id); - tx_env.nonce = Some(tx.nonce); - tx_env.access_list.clone_from(&tx.access_list.0); - tx_env.blob_hashes.clear(); - tx_env.max_fee_per_blob_gas.take(); - tx_env.authorization_list = - Some(AuthorizationList::Signed(tx.authorization_list.clone())); - } - #[cfg(feature = "optimism")] - Transaction::Deposit(tx) => { - tx_env.access_list.clear(); - tx_env.gas_limit = tx.gas_limit; - tx_env.gas_price = U256::ZERO; - tx_env.gas_priority_fee = None; - tx_env.transact_to = tx.to; - tx_env.value = tx.value; - tx_env.data = 
tx.input.clone(); - tx_env.chain_id = None; - tx_env.nonce = None; - tx_env.authorization_list = None; - - tx_env.optimism = revm_primitives::OptimismFields { - source_hash: Some(tx.source_hash), - mint: tx.mint, - is_system_transaction: Some(tx.is_system_transaction), - enveloped_tx: Some(envelope.into()), - }; - return; - } - } - - #[cfg(feature = "optimism")] - if !self.is_deposit() { - tx_env.optimism = revm_primitives::OptimismFields { - source_hash: None, - mint: None, - is_system_transaction: Some(false), - enveloped_tx: Some(envelope.into()), - } - } - } -} - -impl InMemorySize for TransactionSigned { - /// Calculate a heuristic for the in-memory size of the [`TransactionSigned`]. - #[inline] - fn size(&self) -> usize { - self.hash().size() + self.transaction.size() + self.signature().size() - } -} - -impl alloy_consensus::Transaction for TransactionSigned { - fn chain_id(&self) -> Option { - self.deref().chain_id() - } - - fn nonce(&self) -> u64 { - self.deref().nonce() - } - - fn gas_limit(&self) -> u64 { - self.deref().gas_limit() - } - - fn gas_price(&self) -> Option { - self.deref().gas_price() - } - - fn max_fee_per_gas(&self) -> u128 { - self.deref().max_fee_per_gas() - } - - fn max_priority_fee_per_gas(&self) -> Option { - self.deref().max_priority_fee_per_gas() - } - - fn max_fee_per_blob_gas(&self) -> Option { - self.deref().max_fee_per_blob_gas() - } - - fn priority_fee_or_price(&self) -> u128 { - self.deref().priority_fee_or_price() - } - - fn effective_gas_price(&self, base_fee: Option) -> u128 { - self.deref().effective_gas_price(base_fee) - } - - fn is_dynamic_fee(&self) -> bool { - self.deref().is_dynamic_fee() - } - - fn kind(&self) -> TxKind { - self.deref().kind() - } - - fn is_create(&self) -> bool { - self.deref().is_create() - } - - fn value(&self) -> U256 { - self.deref().value() - } - - fn input(&self) -> &Bytes { - self.deref().input() - } - - fn access_list(&self) -> Option<&AccessList> { - self.deref().access_list() - } - - fn blob_versioned_hashes(&self) -> Option<&[B256]> { - alloy_consensus::Transaction::blob_versioned_hashes(self.deref()) - } - - fn authorization_list(&self) -> Option<&[SignedAuthorization]> { - self.deref().authorization_list() - } -} - -impl From> for TransactionSigned { - fn from(recovered: RecoveredTx) -> Self { - recovered.into_tx() - } -} - -impl From> for TransactionSigned { - fn from(recovered: RecoveredTx) -> Self { - recovered.into_tx().into() - } -} - -impl TryFrom for PooledTransaction { - type Error = TransactionConversionError; - - fn try_from(tx: TransactionSigned) -> Result { - tx.try_into_pooled().map_err(|_| TransactionConversionError::UnsupportedForP2P) - } -} - -impl From for TransactionSigned { - fn from(tx: PooledTransaction) -> Self { - match tx { - PooledTransaction::Legacy(signed) => signed.into(), - PooledTransaction::Eip2930(signed) => signed.into(), - PooledTransaction::Eip1559(signed) => signed.into(), - PooledTransaction::Eip4844(signed) => signed.into(), - PooledTransaction::Eip7702(signed) => signed.into(), - } - } -} - -impl Encodable for TransactionSigned { - /// This encodes the transaction _with_ the signature, and an rlp header. 
- /// - /// For legacy transactions, it encodes the transaction data: - /// `rlp(tx-data)` - /// - /// For EIP-2718 typed transactions, it encodes the transaction type followed by the rlp of the - /// transaction: - /// `rlp(tx-type || rlp(tx-data))` - fn encode(&self, out: &mut dyn bytes::BufMut) { - self.network_encode(out); - } - - fn length(&self) -> usize { - let mut payload_length = self.encode_2718_len(); - if !Encodable2718::is_legacy(self) { - payload_length += Header { list: false, payload_length }.length(); - } - - payload_length - } -} - -impl Decodable for TransactionSigned { - /// This `Decodable` implementation only supports decoding rlp encoded transactions as it's used - /// by p2p. - /// - /// The p2p encoding format always includes an RLP header, although the type RLP header depends - /// on whether or not the transaction is a legacy transaction. - /// - /// If the transaction is a legacy transaction, it is just encoded as a RLP list: - /// `rlp(tx-data)`. - /// - /// If the transaction is a typed transaction, it is encoded as a RLP string: - /// `rlp(tx-type || rlp(tx-data))` - /// - /// This can be used for decoding all signed transactions in p2p `BlockBodies` responses. - /// - /// This cannot be used for decoding EIP-4844 transactions in p2p `PooledTransactions`, since - /// the EIP-4844 variant of [`TransactionSigned`] does not include the blob sidecar. - /// - /// For a method suitable for decoding pooled transactions, see [`PooledTransaction`]. - /// - /// CAUTION: Due to a quirk in [`Header::decode`], this method will succeed even if a typed - /// transaction is encoded in this format, and does not start with a RLP header: - /// `tx-type || rlp(tx-data)`. - /// - /// This is because [`Header::decode`] does not advance the buffer, and returns a length-1 - /// string header if the first byte is less than `0xf7`. - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - Self::network_decode(buf).map_err(Into::into) - } -} - -impl Encodable2718 for TransactionSigned { - fn type_flag(&self) -> Option { - match self.transaction.tx_type() { - TxType::Legacy => None, - tx_type => Some(tx_type as u8), - } - } - - fn encode_2718_len(&self) -> usize { - match &self.transaction { - Transaction::Legacy(legacy_tx) => legacy_tx.eip2718_encoded_length(&self.signature), - Transaction::Eip2930(access_list_tx) => { - access_list_tx.eip2718_encoded_length(&self.signature) - } - Transaction::Eip1559(dynamic_fee_tx) => { - dynamic_fee_tx.eip2718_encoded_length(&self.signature) - } - Transaction::Eip4844(blob_tx) => blob_tx.eip2718_encoded_length(&self.signature), - Transaction::Eip7702(set_code_tx) => { - set_code_tx.eip2718_encoded_length(&self.signature) - } - #[cfg(feature = "optimism")] - Transaction::Deposit(deposit_tx) => deposit_tx.eip2718_encoded_length(), - } - } - - fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { - self.transaction.eip2718_encode(&self.signature, out) - } - - fn trie_hash(&self) -> B256 { - self.hash() - } -} - -impl Decodable2718 for TransactionSigned { - fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { - match ty.try_into().map_err(|_| Eip2718Error::UnexpectedType(ty))? 
{ - TxType::Legacy => Err(Eip2718Error::UnexpectedType(0)), - TxType::Eip2930 => { - let (tx, signature) = TxEip2930::rlp_decode_with_signature(buf)?; - Ok(Self { - transaction: Transaction::Eip2930(tx), - signature, - hash: Default::default(), - }) - } - TxType::Eip1559 => { - let (tx, signature) = TxEip1559::rlp_decode_with_signature(buf)?; - Ok(Self { - transaction: Transaction::Eip1559(tx), - signature, - hash: Default::default(), - }) - } - TxType::Eip7702 => { - let (tx, signature) = TxEip7702::rlp_decode_with_signature(buf)?; - Ok(Self { - transaction: Transaction::Eip7702(tx), - signature, - hash: Default::default(), - }) - } - TxType::Eip4844 => { - let (tx, signature) = TxEip4844::rlp_decode_with_signature(buf)?; - Ok(Self { - transaction: Transaction::Eip4844(tx), - signature, - hash: Default::default(), - }) - } - #[cfg(feature = "optimism")] - TxType::Deposit => Ok(Self::new_unhashed( - Transaction::Deposit(TxDeposit::rlp_decode(buf)?), - TxDeposit::signature(), - )), - } - } - - fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { - let (tx, signature) = TxLegacy::rlp_decode_with_signature(buf)?; - Ok(Self { transaction: Transaction::Legacy(tx), signature, hash: Default::default() }) - } -} - -#[cfg(any(test, feature = "reth-codec"))] -impl reth_codecs::Compact for TransactionSigned { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - let start = buf.as_mut().len(); - - // Placeholder for bitflags. - // The first byte uses 4 bits as flags: IsCompressed[1bit], TxType[2bits], Signature[1bit] - buf.put_u8(0); - - let sig_bit = self.signature.to_compact(buf) as u8; - let zstd_bit = self.transaction.input().len() >= 32; - - let tx_bits = if zstd_bit { - let mut tmp = Vec::with_capacity(256); - if cfg!(feature = "std") { - reth_zstd_compressors::TRANSACTION_COMPRESSOR.with(|compressor| { - let mut compressor = compressor.borrow_mut(); - let tx_bits = self.transaction.to_compact(&mut tmp); - buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); - tx_bits as u8 - }) - } else { - let mut compressor = reth_zstd_compressors::create_tx_compressor(); - let tx_bits = self.transaction.to_compact(&mut tmp); - buf.put_slice(&compressor.compress(&tmp).expect("Failed to compress")); - tx_bits as u8 - } - } else { - self.transaction.to_compact(buf) as u8 - }; - - // Replace bitflags with the actual values - buf.as_mut()[start] = sig_bit | (tx_bits << 1) | ((zstd_bit as u8) << 3); - - buf.as_mut().len() - start - } - - fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { - use bytes::Buf; - - // The first byte uses 4 bits as flags: IsCompressed[1], TxType[2], Signature[1] - let bitflags = buf.get_u8() as usize; - - let sig_bit = bitflags & 1; - let (signature, buf) = Signature::from_compact(buf, sig_bit); - - let zstd_bit = bitflags >> 3; - let (transaction, buf) = if zstd_bit != 0 { - if cfg!(feature = "std") { - reth_zstd_compressors::TRANSACTION_DECOMPRESSOR.with(|decompressor| { - let mut decompressor = decompressor.borrow_mut(); - - // TODO: enforce that zstd is only present at a "top" level type - - let transaction_type = (bitflags & 0b110) >> 1; - let (transaction, _) = - Transaction::from_compact(decompressor.decompress(buf), transaction_type); - - (transaction, buf) - }) - } else { - let mut decompressor = reth_zstd_compressors::create_tx_decompressor(); - let transaction_type = (bitflags & 0b110) >> 1; - let (transaction, _) = - Transaction::from_compact(decompressor.decompress(buf), transaction_type); - - 
(transaction, buf) - } - } else { - let transaction_type = bitflags >> 1; - Transaction::from_compact(buf, transaction_type) - }; - - (Self { signature, transaction, hash: Default::default() }, buf) - } -} - -macro_rules! impl_from_signed { - ($($tx:ident),*) => { - $( - impl From> for TransactionSigned { - fn from(value: Signed<$tx>) -> Self { - let(tx,sig,hash) = value.into_parts(); - Self::new(tx.into(), sig, hash) - } - } - )* - }; -} - -impl_from_signed!(TxLegacy, TxEip2930, TxEip1559, TxEip7702, TxEip4844, TypedTransaction); - -impl From> for TransactionSigned { - fn from(value: Signed) -> Self { - let (tx, sig, hash) = value.into_parts(); - Self::new(tx, sig, hash) - } -} - -impl From> for TransactionSigned { - fn from(value: Signed) -> Self { - let (tx, sig, hash) = value.into_parts(); - Self::new(tx.tx.into(), sig, hash) - } -} - -impl From> for TransactionSigned { - fn from(value: Signed) -> Self { - let (tx, sig, hash) = value.into_parts(); - Self::new(tx.into(), sig, hash) - } -} - -impl From for TransactionSigned { - fn from(value: TxEnvelope) -> Self { - match value { - TxEnvelope::Legacy(tx) => tx.into(), - TxEnvelope::Eip2930(tx) => tx.into(), - TxEnvelope::Eip1559(tx) => tx.into(), - TxEnvelope::Eip4844(tx) => tx.into(), - TxEnvelope::Eip7702(tx) => tx.into(), - } - } -} - -impl From for Signed { - fn from(value: TransactionSigned) -> Self { - let (tx, sig, hash) = value.into_parts(); - Self::new_unchecked(tx, sig, hash) - } -} - -#[cfg(any(test, feature = "arbitrary"))] -impl<'a> arbitrary::Arbitrary<'a> for TransactionSigned { - fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { - #[allow(unused_mut)] - let mut transaction = Transaction::arbitrary(u)?; - - let secp = secp256k1::Secp256k1::new(); - let key_pair = secp256k1::Keypair::new(&secp, &mut rand::thread_rng()); - let signature = reth_primitives_traits::crypto::secp256k1::sign_message( - B256::from_slice(&key_pair.secret_bytes()[..]), - transaction.signature_hash(), - ) - .unwrap(); - - #[cfg(feature = "optimism")] - // Both `Some(0)` and `None` values are encoded as empty string byte. This introduces - // ambiguity in roundtrip tests. Patch the mint value of deposit transaction here, so that - // it's `None` if zero. - if let Transaction::Deposit(ref mut tx_deposit) = transaction { - if tx_deposit.mint == Some(0) { - tx_deposit.mint = None; - } - } - - #[cfg(feature = "optimism")] - let signature = if transaction.is_deposit() { TxDeposit::signature() } else { signature }; - Ok(Self::new_unhashed(transaction, signature)) - } -} +pub use reth_ethereum_primitives::{Transaction, TransactionSigned}; /// Type alias kept for backward compatibility. pub type TransactionSignedEcRecovered = RecoveredTx; -/// Bincode-compatible transaction type serde implementations. -#[cfg(feature = "serde-bincode-compat")] -pub mod serde_bincode_compat { - use alloc::borrow::Cow; - use alloy_consensus::{ - transaction::serde_bincode_compat::{TxEip1559, TxEip2930, TxEip7702, TxLegacy}, - TxEip4844, - }; - use alloy_primitives::{PrimitiveSignature as Signature, TxHash}; - use reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat; - use serde::{Deserialize, Deserializer, Serialize, Serializer}; - use serde_with::{DeserializeAs, SerializeAs}; - - /// Bincode-compatible [`super::Transaction`] serde implementation. 
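The one-byte flag layout used by the Compact impl above (the signature's compact bit in bit 0, the 2-bit tx type in bits 1..=2, the zstd-compression flag in bit 3) can be packed and unpacked as follows. A standalone sketch, not reth's code:

// Hedged sketch of the flag byte: bit 0 = signature's compact bit,
// bits 1..=2 = 2-bit tx type identifier, bit 3 = zstd-compressed payload.
fn pack_flags(sig_bit: u8, tx_bits: u8, zstd: bool) -> u8 {
    sig_bit | (tx_bits << 1) | ((zstd as u8) << 3)
}

fn unpack_flags(flags: u8) -> (u8, u8, bool) {
    (flags & 1, (flags >> 1) & 0b11, (flags >> 3) & 1 == 1)
}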
- /// - /// Intended to use with the [`serde_with::serde_as`] macro in the following way: - /// ```rust - /// use reth_primitives::{serde_bincode_compat, Transaction}; - /// use serde::{Deserialize, Serialize}; - /// use serde_with::serde_as; - /// - /// #[serde_as] - /// #[derive(Serialize, Deserialize)] - /// struct Data { - /// #[serde_as(as = "serde_bincode_compat::transaction::Transaction")] - /// transaction: Transaction, - /// } - /// ``` - #[derive(Debug, Serialize, Deserialize)] - #[allow(missing_docs)] - pub enum Transaction<'a> { - Legacy(TxLegacy<'a>), - Eip2930(TxEip2930<'a>), - Eip1559(TxEip1559<'a>), - Eip4844(Cow<'a, TxEip4844>), - Eip7702(TxEip7702<'a>), - #[cfg(feature = "optimism")] - Deposit(op_alloy_consensus::serde_bincode_compat::TxDeposit<'a>), - } - - impl<'a> From<&'a super::Transaction> for Transaction<'a> { - fn from(value: &'a super::Transaction) -> Self { - match value { - super::Transaction::Legacy(tx) => Self::Legacy(TxLegacy::from(tx)), - super::Transaction::Eip2930(tx) => Self::Eip2930(TxEip2930::from(tx)), - super::Transaction::Eip1559(tx) => Self::Eip1559(TxEip1559::from(tx)), - super::Transaction::Eip4844(tx) => Self::Eip4844(Cow::Borrowed(tx)), - super::Transaction::Eip7702(tx) => Self::Eip7702(TxEip7702::from(tx)), - #[cfg(feature = "optimism")] - super::Transaction::Deposit(tx) => { - Self::Deposit(op_alloy_consensus::serde_bincode_compat::TxDeposit::from(tx)) - } - } - } - } - - impl<'a> From> for super::Transaction { - fn from(value: Transaction<'a>) -> Self { - match value { - Transaction::Legacy(tx) => Self::Legacy(tx.into()), - Transaction::Eip2930(tx) => Self::Eip2930(tx.into()), - Transaction::Eip1559(tx) => Self::Eip1559(tx.into()), - Transaction::Eip4844(tx) => Self::Eip4844(tx.into_owned()), - Transaction::Eip7702(tx) => Self::Eip7702(tx.into()), - #[cfg(feature = "optimism")] - Transaction::Deposit(tx) => Self::Deposit(tx.into()), - } - } - } - - impl SerializeAs for Transaction<'_> { - fn serialize_as(source: &super::Transaction, serializer: S) -> Result - where - S: Serializer, - { - Transaction::from(source).serialize(serializer) - } - } - - impl<'de> DeserializeAs<'de, super::Transaction> for Transaction<'de> { - fn deserialize_as(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - Transaction::deserialize(deserializer).map(Into::into) - } - } - - /// Bincode-compatible [`super::TransactionSigned`] serde implementation. 
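The bincode-compatible modules above follow a borrowed-repr pattern: the serde-friendly type borrows from the runtime type, so building it for serialization allocates nothing. A minimal sketch of that pattern under illustrative names (`Runtime` and `BincodeRepr` are not reth types):

#[derive(serde::Serialize)]
struct Runtime {
    data: Vec<u8>,
}

// Borrows from `Runtime`, mirroring how `Transaction<'a>` borrows above.
#[derive(serde::Serialize)]
struct BincodeRepr<'a> {
    data: &'a [u8],
}

impl<'a> From<&'a Runtime> for BincodeRepr<'a> {
    fn from(value: &'a Runtime) -> Self {
        Self { data: &value.data }
    }
}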
- /// - /// Intended to use with the [`serde_with::serde_as`] macro in the following way: - /// ```rust - /// use reth_primitives::{serde_bincode_compat, TransactionSigned}; - /// use serde::{Deserialize, Serialize}; - /// use serde_with::serde_as; - /// - /// #[serde_as] - /// #[derive(Serialize, Deserialize)] - /// struct Data { - /// #[serde_as(as = "serde_bincode_compat::transaction::TransactionSigned")] - /// transaction: TransactionSigned, - /// } - /// ``` - #[derive(Debug, Serialize, Deserialize)] - pub struct TransactionSigned<'a> { - hash: TxHash, - signature: Signature, - transaction: Transaction<'a>, - } - - impl<'a> From<&'a super::TransactionSigned> for TransactionSigned<'a> { - fn from(value: &'a super::TransactionSigned) -> Self { - Self { - hash: value.hash(), - signature: value.signature, - transaction: Transaction::from(&value.transaction), - } - } - } - - impl<'a> From> for super::TransactionSigned { - fn from(value: TransactionSigned<'a>) -> Self { - Self { - hash: value.hash.into(), - signature: value.signature, - transaction: value.transaction.into(), - } - } - } - - impl SerializeAs for TransactionSigned<'_> { - fn serialize_as( - source: &super::TransactionSigned, - serializer: S, - ) -> Result - where - S: Serializer, - { - TransactionSigned::from(source).serialize(serializer) - } - } - - impl<'de> DeserializeAs<'de, super::TransactionSigned> for TransactionSigned<'de> { - fn deserialize_as(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - TransactionSigned::deserialize(deserializer).map(Into::into) - } - } - - impl SerdeBincodeCompat for super::TransactionSigned { - type BincodeRepr<'a> = TransactionSigned<'a>; - } - - #[cfg(test)] - mod tests { - use super::super::{serde_bincode_compat, Transaction, TransactionSigned}; - use arbitrary::Arbitrary; - use rand::Rng; - use reth_testing_utils::generators; - use serde::{Deserialize, Serialize}; - use serde_with::serde_as; - - #[test] - fn test_transaction_bincode_roundtrip() { - #[serde_as] - #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] - struct Data { - #[serde_as(as = "serde_bincode_compat::Transaction")] - transaction: Transaction, - } - - let mut bytes = [0u8; 1024]; - generators::rng().fill(bytes.as_mut_slice()); - let data = Data { - transaction: Transaction::arbitrary(&mut arbitrary::Unstructured::new(&bytes)) - .unwrap(), - }; - - let encoded = bincode::serialize(&data).unwrap(); - let decoded: Data = bincode::deserialize(&encoded).unwrap(); - assert_eq!(decoded, data); - } - - #[test] - fn test_transaction_signed_bincode_roundtrip() { - #[serde_as] - #[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] - struct Data { - #[serde_as(as = "serde_bincode_compat::TransactionSigned")] - transaction: TransactionSigned, - } - - let mut bytes = [0u8; 1024]; - generators::rng().fill(bytes.as_mut_slice()); - let data = Data { - transaction: TransactionSigned::arbitrary(&mut arbitrary::Unstructured::new( - &bytes, - )) - .unwrap(), - }; - - let encoded = bincode::serialize(&data).unwrap(); - let decoded: Data = bincode::deserialize(&encoded).unwrap(); - assert_eq!(decoded, data); - } - } -} - #[cfg(test)] mod tests { - use crate::{ - transaction::{TxEip1559, TxKind, TxLegacy}, - Transaction, TransactionSigned, - }; - use alloy_consensus::Transaction as _; + use crate::{Transaction, TransactionSigned}; + use alloy_consensus::{Transaction as _, TxEip1559, TxLegacy}; use alloy_eips::eip2718::{Decodable2718, Encodable2718}; use alloy_primitives::{ - address, b256, bytes, hex, Address, Bytes, 
PrimitiveSignature as Signature, B256, U256, + address, b256, bytes, hex, Address, Bytes, PrimitiveSignature as Signature, TxKind, B256, + U256, }; use alloy_rlp::{Decodable, Encodable, Error as RlpError}; use reth_chainspec::MIN_TRANSACTION_GAS; @@ -1782,7 +97,7 @@ mod tests { // https://sepolia.etherscan.io/getRawTx?tx=0x9a22ccb0029bc8b0ddd073be1a1d923b7ae2b2ea52100bae0db4424f9107e9c0 let raw_tx = alloy_primitives::hex::decode("0x03f9011d83aa36a7820fa28477359400852e90edd0008252089411e9ca82a3a762b4b5bd264d4173a242e7a770648080c08504a817c800f8a5a0012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921aa00152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4a0013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7a001148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1a0011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e654901a0c8de4cced43169f9aa3d36506363b2d2c44f6c49fc1fd91ea114c86f3757077ea01e11fdd0d1934eda0492606ee0bb80a7bf8f35cc5f86ec60fe5031ba48bfd544").unwrap(); let decoded = TransactionSigned::decode_2718(&mut raw_tx.as_slice()).unwrap(); - assert!(decoded.is_eip4844()); + assert!(alloy_consensus::Typed2718::is_eip4844(&decoded)); let from = decoded.recover_signer(); assert_eq!(from, Some(address!("A83C816D4f9b2783761a22BA6FADB0eB0606D7B2"))); @@ -1926,7 +241,7 @@ mod tests { ) { let expected = TransactionSigned::new_unhashed(transaction, signature); if let Some(hash) = hash { - assert_eq!(hash, expected.hash()); + assert_eq!(hash, *expected.tx_hash()); } assert_eq!(bytes.len(), expected.length()); @@ -1947,7 +262,7 @@ mod tests { let mut pointer = raw.as_ref(); let tx = TransactionSigned::decode(&mut pointer).unwrap(); - assert_eq!(tx.hash(), hash, "Expected same hash"); + assert_eq!(*tx.tx_hash(), hash, "Expected same hash"); assert_eq!(tx.recover_signer(), Some(signer), "Recovering signer should pass."); } diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index 183d0af07ee3e..4e483f1d0c146 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -12,10 +12,9 @@ mod tests { use super::*; use alloy_consensus::{transaction::RlpEcdsaTx, Transaction as _, TxLegacy}; use alloy_eips::eip2718::Decodable2718; - use alloy_primitives::{address, hex}; + use alloy_primitives::{address, hex, Bytes}; use alloy_rlp::Decodable; use assert_matches::assert_matches; - use bytes::Bytes; #[test] fn invalid_legacy_pooled_decoding_input_too_short() { diff --git a/crates/primitives/src/transaction/tx_type.rs b/crates/primitives/src/transaction/tx_type.rs index cbd7f1c1e3170..d8e23f9792000 100644 --- a/crates/primitives/src/transaction/tx_type.rs +++ b/crates/primitives/src/transaction/tx_type.rs @@ -1,16 +1,3 @@ -use alloy_consensus::{ - constants::{ - EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, - LEGACY_TX_TYPE_ID, - }, - Typed2718, -}; -use alloy_primitives::{U64, U8}; -use alloy_rlp::{Decodable, Encodable}; -use derive_more::Display; -use reth_primitives_traits::InMemorySize; -use serde::{Deserialize, Serialize}; - /// Transaction Type /// /// Currently being used as 2-bit type when encoding it to `reth_codecs::Compact` on @@ -18,292 +5,4 @@ use serde::{Deserialize, Serialize}; /// database format. /// /// Other required changes when adding a new type can be seen on [PR#3953](https://github.com/paradigmxyz/reth/pull/3953/files). 
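A hedged usage sketch of the conversions this enum provides (assuming `TxType` is in scope, e.g. via `use reth_primitives::TxType;`; `classify` is illustrative, not a reth API):

// TryFrom<u8> rejects unknown type bytes instead of panicking.
fn classify(raw: u8) -> &'static str {
    match TxType::try_from(raw) {
        Ok(TxType::Legacy) => "legacy",
        Ok(TxType::Eip2930) => "eip2930 (access list)",
        Ok(TxType::Eip1559) => "eip1559 (dynamic fee)",
        Ok(_) => "other typed transaction",
        Err(_) => "unknown tx type",
    }
}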
-#[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Default, - Serialize, - Deserialize, - Hash, - Display, -)] -#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] -#[cfg_attr(any(test, feature = "reth-codec"), reth_codecs::add_arbitrary_tests(compact))] -#[display("tx type: {_variant}")] -pub enum TxType { - /// Legacy transaction pre EIP-2929 - #[default] - #[display("legacy (0)")] - Legacy = 0_isize, - /// AccessList transaction - #[display("eip2930 (1)")] - Eip2930 = 1_isize, - /// Transaction with Priority fee - #[display("eip1559 (2)")] - Eip1559 = 2_isize, - /// Shard Blob Transactions - EIP-4844 - #[display("eip4844 (3)")] - Eip4844 = 3_isize, - /// EOA Contract Code Transactions - EIP-7702 - #[display("eip7702 (4)")] - Eip7702 = 4_isize, - /// Optimism Deposit transaction. - #[cfg(feature = "optimism")] - #[display("deposit (126)")] - Deposit = 126_isize, -} - -impl TxType { - /// The max type reserved by an EIP. - pub const MAX_RESERVED_EIP: Self = Self::Eip7702; - - /// Check if the transaction type has an access list. - pub const fn has_access_list(&self) -> bool { - match self { - Self::Legacy => false, - Self::Eip2930 | Self::Eip1559 | Self::Eip4844 | Self::Eip7702 => true, - #[cfg(feature = "optimism")] - Self::Deposit => false, - } - } -} - -impl Typed2718 for TxType { - fn ty(&self) -> u8 { - (*self).into() - } -} - -impl InMemorySize for TxType { - /// Calculates a heuristic for the in-memory size of the [`TxType`]. - #[inline] - fn size(&self) -> usize { - core::mem::size_of::() - } -} - -impl From for u8 { - fn from(value: TxType) -> Self { - match value { - TxType::Legacy => LEGACY_TX_TYPE_ID, - TxType::Eip2930 => EIP2930_TX_TYPE_ID, - TxType::Eip1559 => EIP1559_TX_TYPE_ID, - TxType::Eip4844 => EIP4844_TX_TYPE_ID, - TxType::Eip7702 => EIP7702_TX_TYPE_ID, - #[cfg(feature = "optimism")] - TxType::Deposit => op_alloy_consensus::DEPOSIT_TX_TYPE_ID, - } - } -} - -impl From for U8 { - fn from(value: TxType) -> Self { - Self::from(u8::from(value)) - } -} - -impl TryFrom for TxType { - type Error = &'static str; - - fn try_from(value: u8) -> Result { - #[cfg(feature = "optimism")] - if value == Self::Deposit { - return Ok(Self::Deposit) - } - - if value == Self::Legacy { - return Ok(Self::Legacy) - } else if value == Self::Eip2930 { - return Ok(Self::Eip2930) - } else if value == Self::Eip1559 { - return Ok(Self::Eip1559) - } else if value == Self::Eip4844 { - return Ok(Self::Eip4844) - } else if value == Self::Eip7702 { - return Ok(Self::Eip7702) - } - - Err("invalid tx type") - } -} - -impl TryFrom for TxType { - type Error = &'static str; - - fn try_from(value: u64) -> Result { - let value: u8 = value.try_into().map_err(|_| "invalid tx type")?; - Self::try_from(value) - } -} - -impl TryFrom for TxType { - type Error = &'static str; - - fn try_from(value: U64) -> Result { - value.to::().try_into() - } -} - -#[cfg(any(test, feature = "reth-codec"))] -impl reth_codecs::Compact for TxType { - fn to_compact(&self, buf: &mut B) -> usize - where - B: bytes::BufMut + AsMut<[u8]>, - { - use reth_codecs::txtype::*; - - match self { - Self::Legacy => COMPACT_IDENTIFIER_LEGACY, - Self::Eip2930 => COMPACT_IDENTIFIER_EIP2930, - Self::Eip1559 => COMPACT_IDENTIFIER_EIP1559, - Self::Eip4844 => { - buf.put_u8(EIP4844_TX_TYPE_ID); - COMPACT_EXTENDED_IDENTIFIER_FLAG - } - Self::Eip7702 => { - buf.put_u8(EIP7702_TX_TYPE_ID); - COMPACT_EXTENDED_IDENTIFIER_FLAG - } - #[cfg(feature = "optimism")] - Self::Deposit => { - 
buf.put_u8(op_alloy_consensus::DEPOSIT_TX_TYPE_ID); - COMPACT_EXTENDED_IDENTIFIER_FLAG - } - } - } - - // For backwards compatibility purposes only 2 bits of the type are encoded in the identifier - // parameter. In the case of a [`COMPACT_EXTENDED_IDENTIFIER_FLAG`], the full transaction type - // is read from the buffer as a single byte. - fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { - use bytes::Buf; - ( - match identifier { - reth_codecs::txtype::COMPACT_IDENTIFIER_LEGACY => Self::Legacy, - reth_codecs::txtype::COMPACT_IDENTIFIER_EIP2930 => Self::Eip2930, - reth_codecs::txtype::COMPACT_IDENTIFIER_EIP1559 => Self::Eip1559, - reth_codecs::txtype::COMPACT_EXTENDED_IDENTIFIER_FLAG => { - let extended_identifier = buf.get_u8(); - match extended_identifier { - EIP4844_TX_TYPE_ID => Self::Eip4844, - EIP7702_TX_TYPE_ID => Self::Eip7702, - #[cfg(feature = "optimism")] - op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self::Deposit, - _ => panic!("Unsupported TxType identifier: {extended_identifier}"), - } - } - _ => panic!("Unknown identifier for TxType: {identifier}"), - }, - buf, - ) - } -} - -impl PartialEq for TxType { - fn eq(&self, other: &u8) -> bool { - *self as u8 == *other - } -} - -impl PartialEq for u8 { - fn eq(&self, other: &TxType) -> bool { - *self == *other as Self - } -} - -impl Encodable for TxType { - fn encode(&self, out: &mut dyn bytes::BufMut) { - (*self as u8).encode(out); - } - - fn length(&self) -> usize { - 1 - } -} - -impl Decodable for TxType { - fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { - let ty = u8::decode(buf)?; - - Self::try_from(ty).map_err(alloy_rlp::Error::Custom) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::hex; - use reth_codecs::{txtype::*, Compact}; - use rstest::rstest; - - #[rstest] - #[case(U64::from(LEGACY_TX_TYPE_ID), Ok(TxType::Legacy))] - #[case(U64::from(EIP2930_TX_TYPE_ID), Ok(TxType::Eip2930))] - #[case(U64::from(EIP1559_TX_TYPE_ID), Ok(TxType::Eip1559))] - #[case(U64::from(EIP4844_TX_TYPE_ID), Ok(TxType::Eip4844))] - #[case(U64::from(EIP7702_TX_TYPE_ID), Ok(TxType::Eip7702))] - #[cfg_attr( - feature = "optimism", - case(U64::from(op_alloy_consensus::DEPOSIT_TX_TYPE_ID), Ok(TxType::Deposit)) - )] - #[case(U64::MAX, Err("invalid tx type"))] - fn test_u64_to_tx_type(#[case] input: U64, #[case] expected: Result) { - let tx_type_result = TxType::try_from(input); - assert_eq!(tx_type_result, expected); - } - - #[rstest] - #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] - #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] - #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] - #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])] - #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])] - #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![op_alloy_consensus::DEPOSIT_TX_TYPE_ID]))] - fn test_txtype_to_compact( - #[case] tx_type: TxType, - #[case] expected_identifier: usize, - #[case] expected_buf: Vec, - ) { - let mut buf = vec![]; - let identifier = tx_type.to_compact(&mut buf); - - assert_eq!(identifier, expected_identifier, "Unexpected identifier for TxType {tx_type:?}",); - assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}",); - } - - #[rstest] - #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])] - #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])] - #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])] - 
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use alloy_primitives::hex;
-    use reth_codecs::{txtype::*, Compact};
-    use rstest::rstest;
-
-    #[rstest]
-    #[case(U64::from(LEGACY_TX_TYPE_ID), Ok(TxType::Legacy))]
-    #[case(U64::from(EIP2930_TX_TYPE_ID), Ok(TxType::Eip2930))]
-    #[case(U64::from(EIP1559_TX_TYPE_ID), Ok(TxType::Eip1559))]
-    #[case(U64::from(EIP4844_TX_TYPE_ID), Ok(TxType::Eip4844))]
-    #[case(U64::from(EIP7702_TX_TYPE_ID), Ok(TxType::Eip7702))]
-    #[cfg_attr(
-        feature = "optimism",
-        case(U64::from(op_alloy_consensus::DEPOSIT_TX_TYPE_ID), Ok(TxType::Deposit))
-    )]
-    #[case(U64::MAX, Err("invalid tx type"))]
-    fn test_u64_to_tx_type(#[case] input: U64, #[case] expected: Result<TxType, &'static str>) {
-        let tx_type_result = TxType::try_from(input);
-        assert_eq!(tx_type_result, expected);
-    }
-
-    #[rstest]
-    #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])]
-    #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])]
-    #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])]
-    #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])]
-    #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])]
-    #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![op_alloy_consensus::DEPOSIT_TX_TYPE_ID]))]
-    fn test_txtype_to_compact(
-        #[case] tx_type: TxType,
-        #[case] expected_identifier: usize,
-        #[case] expected_buf: Vec<u8>,
-    ) {
-        let mut buf = vec![];
-        let identifier = tx_type.to_compact(&mut buf);
-
-        assert_eq!(identifier, expected_identifier, "Unexpected identifier for TxType {tx_type:?}",);
-        assert_eq!(buf, expected_buf, "Unexpected buffer for TxType {tx_type:?}",);
-    }
-
-    #[rstest]
-    #[case(TxType::Legacy, COMPACT_IDENTIFIER_LEGACY, vec![])]
-    #[case(TxType::Eip2930, COMPACT_IDENTIFIER_EIP2930, vec![])]
-    #[case(TxType::Eip1559, COMPACT_IDENTIFIER_EIP1559, vec![])]
-    #[case(TxType::Eip4844, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP4844_TX_TYPE_ID])]
-    #[case(TxType::Eip7702, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![EIP7702_TX_TYPE_ID])]
-    #[cfg_attr(feature = "optimism", case(TxType::Deposit, COMPACT_EXTENDED_IDENTIFIER_FLAG, vec![op_alloy_consensus::DEPOSIT_TX_TYPE_ID]))]
-    fn test_txtype_from_compact(
-        #[case] expected_type: TxType,
-        #[case] identifier: usize,
-        #[case] buf: Vec<u8>,
-    ) {
-        let (actual_type, remaining_buf) = TxType::from_compact(&buf, identifier);
-
-        assert_eq!(actual_type, expected_type, "Unexpected TxType for identifier {identifier}");
-        assert!(remaining_buf.is_empty(), "Buffer not fully consumed for identifier {identifier}");
-    }
-
-    #[rstest]
-    #[case(&hex!("80"), Ok(TxType::Legacy))]
-    #[case(&[EIP2930_TX_TYPE_ID], Ok(TxType::Eip2930))]
-    #[case(&[EIP1559_TX_TYPE_ID], Ok(TxType::Eip1559))]
-    #[case(&[EIP4844_TX_TYPE_ID], Ok(TxType::Eip4844))]
-    #[case(&[EIP7702_TX_TYPE_ID], Ok(TxType::Eip7702))]
-    #[case(&[u8::MAX], Err(alloy_rlp::Error::InputTooShort))]
-    #[cfg_attr(feature = "optimism", case(&[op_alloy_consensus::DEPOSIT_TX_TYPE_ID], Ok(TxType::Deposit)))]
-    fn decode_tx_type(#[case] input: &[u8], #[case] expected: Result<TxType, alloy_rlp::Error>) {
-        let tx_type_result = TxType::decode(&mut &input[..]);
-        assert_eq!(tx_type_result, expected)
-    }
-}
+pub use alloy_consensus::TxType;
diff --git a/crates/prune/prune/src/segments/user/transaction_lookup.rs b/crates/prune/prune/src/segments/user/transaction_lookup.rs
index 2629c217f0d0d..73ba14236500d 100644
--- a/crates/prune/prune/src/segments/user/transaction_lookup.rs
+++ b/crates/prune/prune/src/segments/user/transaction_lookup.rs
@@ -117,6 +117,7 @@ mod tests {
         Itertools,
     };
     use reth_db::tables;
+    use reth_primitives_traits::SignedTransaction;
    use reth_provider::{DatabaseProviderFactory, PruneCheckpointReader};
     use reth_prune_types::{
         PruneCheckpoint, PruneInterruptReason, PruneMode, PruneProgress, PruneSegment,
@@ -141,7 +142,7 @@ mod tests {
         for block in &blocks {
             tx_hash_numbers.reserve_exact(block.transaction_count());
             for transaction in &block.body().transactions {
-                tx_hash_numbers.push((transaction.hash(), tx_hash_numbers.len() as u64));
+                tx_hash_numbers.push((*transaction.tx_hash(), tx_hash_numbers.len() as u64));
             }
         }
         let tx_hash_numbers_len = tx_hash_numbers.len();
diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs
index efcb040fb3d7f..59637ce81b0e0 100644
--- a/crates/rpc/rpc/src/debug.rs
+++ b/crates/rpc/rpc/src/debug.rs
@@ -37,7 +37,7 @@ use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult};
 use reth_tasks::pool::BlockingTaskGuard;
 use revm::{
     db::{CacheDB, State},
-    primitives::{db::DatabaseCommit, Env},
+    primitives::db::DatabaseCommit,
 };
 use revm_inspectors::tracing::{
     FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, TransactionContext,
@@ -450,7 +450,7 @@ where
                         tx_env.clone(),
                         &mut inspector,
                     )?;
-                    let env = Env::boxed(
+                    let env = revm_primitives::Env::boxed(
                         evm_env.cfg_env_with_handler_cfg.cfg_env,
                         evm_env.block_env,
                         tx_env,
@@ -785,7 +785,7 @@ where
                 self.eth_api().inspect(&mut *db, evm_env, tx_env, &mut inspector)?;
             let state = res.state.clone();
-            let env = Env::boxed(
+            let env = revm_primitives::Env::boxed(
                 evm_env.cfg_env_with_handler_cfg.cfg_env,
                 evm_env.block_env,
                 tx_env,
diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs
index e44c5634d10bd..be431f603ab2c 100644
--- a/crates/rpc/rpc/src/eth/bundle.rs
+++ b/crates/rpc/rpc/src/eth/bundle.rs
@@ -1,6 +1,6 @@
 //!
`Eth` bundle implementation and helpers. -use alloy_consensus::{BlockHeader, Transaction as _}; +use alloy_consensus::{BlockHeader, EnvKzgSettings, Transaction as _}; use alloy_eips::eip4844::MAX_DATA_GAS_PER_BLOCK; use alloy_primitives::{Keccak256, U256}; use alloy_rpc_types_mev::{EthCallBundle, EthCallBundleResponse, EthCallBundleTransactionResult}; @@ -23,7 +23,7 @@ use revm::{ db::{CacheDB, DatabaseCommit, DatabaseRef}, primitives::ResultAndState, }; -use revm_primitives::{EnvKzgSettings, SpecId}; +use revm_primitives::SpecId; use std::sync::Arc; /// `Eth` bundle implementation. diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index c7d77d4a92279..b40fff97f3438 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -1,6 +1,6 @@ //! Support for building a pending block with transactions from local view of mempool. -use alloy_consensus::{constants::EMPTY_WITHDRAWALS, Header, EMPTY_OMMER_ROOT_HASH}; +use alloy_consensus::{constants::EMPTY_WITHDRAWALS, Header, Transaction, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::{eip7685::EMPTY_REQUESTS_HASH, merge::BEACON_NONCE}; use alloy_primitives::U256; use reth_chainspec::{EthChainSpec, EthereumHardforks}; diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 48a6d0d8ab6c1..4678a3bdcf406 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -6,6 +6,7 @@ use alloy_primitives::PrimitiveSignature as Signature; use alloy_rpc_types::TransactionRequest; use alloy_rpc_types_eth::{Transaction, TransactionInfo}; use reth_primitives::{RecoveredTx, TransactionSigned}; +use reth_primitives_traits::SignedTransaction; use reth_rpc_eth_api::EthApiTypes; use reth_rpc_eth_types::EthApiError; use reth_rpc_types_compat::TransactionCompat; @@ -43,7 +44,7 @@ where tx_info: TransactionInfo, ) -> Result { let from = tx.signer(); - let hash = tx.hash(); + let hash = *tx.tx_hash(); let TransactionSigned { transaction, signature, .. 
} = tx.into_tx();
 
         let inner: TxEnvelope = match transaction {
diff --git a/crates/stages/stages/src/stages/hashing_storage.rs b/crates/stages/stages/src/stages/hashing_storage.rs
index c9b959e2595d3..7967aa8542c6f 100644
--- a/crates/stages/stages/src/stages/hashing_storage.rs
+++ b/crates/stages/stages/src/stages/hashing_storage.rs
@@ -220,6 +220,7 @@ mod tests {
         models::StoredBlockBodyIndices,
     };
     use reth_primitives::SealedBlock;
+    use reth_primitives_traits::SignedTransaction;
     use reth_provider::providers::StaticFileWriter;
     use reth_testing_utils::generators::{
         self, random_block_range, random_contract_account_range, BlockRangeParams,
@@ -356,7 +357,7 @@ mod tests {
             progress.body().transactions.iter().try_for_each(
                 |transaction| -> Result<(), reth_db::DatabaseError> {
                     tx.put::<tables::TransactionHashNumbers>(
-                        transaction.hash(),
+                        *transaction.tx_hash(),
                         next_tx_num,
                     )?;
                     tx.put::<tables::Transactions>(next_tx_num, transaction.clone())?;
diff --git a/crates/stages/stages/src/stages/tx_lookup.rs b/crates/stages/stages/src/stages/tx_lookup.rs
index 4e3f4a8776ed7..872af3baf9500 100644
--- a/crates/stages/stages/src/stages/tx_lookup.rs
+++ b/crates/stages/stages/src/stages/tx_lookup.rs
@@ -387,7 +387,7 @@ mod tests {
         for block in &blocks[..=max_processed_block] {
             for transaction in &block.body().transactions {
                 if block.number > max_pruned_block {
-                    tx_hash_numbers.push((transaction.hash(), tx_hash_number));
+                    tx_hash_numbers.push((*transaction.tx_hash(), tx_hash_number));
                 }
                 tx_hash_number += 1;
             }
@@ -552,7 +552,10 @@ mod tests {
             for tx_id in body.tx_num_range() {
                 let transaction =
                     provider.transaction_by_id(tx_id)?.expect("no transaction entry");
-                assert_eq!(Some(tx_id), provider.transaction_id(transaction.hash())?);
+                assert_eq!(
+                    Some(tx_id),
+                    provider.transaction_id(*transaction.tx_hash())?
+                );
             }
         }
     }
diff --git a/crates/storage/codecs/src/alloy/transaction/optimism.rs b/crates/storage/codecs/src/alloy/transaction/optimism.rs
index 1bef8c565ffc8..7b4cf570551c6 100644
--- a/crates/storage/codecs/src/alloy/transaction/optimism.rs
+++ b/crates/storage/codecs/src/alloy/transaction/optimism.rs
@@ -107,7 +107,7 @@ impl crate::Compact for OpTxType {
                 match extended_identifier {
                     EIP7702_TX_TYPE_ID => Self::Eip7702,
                     op_alloy_consensus::DEPOSIT_TX_TYPE_ID => Self::Deposit,
-                    _ => panic!("Unsupported TxType identifier: {extended_identifier}"),
+                    _ => panic!("Unsupported OpTxType identifier: {extended_identifier}"),
                 }
             }
             _ => panic!("Unknown identifier for TxType: {identifier}"),
diff --git a/crates/storage/db-api/Cargo.toml b/crates/storage/db-api/Cargo.toml
index 671b67d6e5cb6..b15b9473fd510 100644
--- a/crates/storage/db-api/Cargo.toml
+++ b/crates/storage/db-api/Cargo.toml
@@ -87,7 +87,6 @@ arbitrary = [
     "reth-optimism-primitives?/arbitrary"
 ]
 optimism = [
-    "reth-primitives/optimism",
     "reth-codecs/op",
     "reth-optimism-primitives?/optimism",
     "op",
diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml
index b3d4610f2f935..3dc1bb138d89b 100644
--- a/crates/storage/db/Cargo.toml
+++ b/crates/storage/db/Cargo.toml
@@ -112,7 +112,7 @@ arbitrary = [
     "reth-stages-types/arbitrary",
     "alloy-consensus/arbitrary",
 ]
-optimism = ["reth-primitives/optimism", "reth-db-api/optimism"]
+optimism = ["reth-db-api/optimism"]
 op = ["reth-db-api/op"]
 disable-lock = []
 
diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml
index 0955821b42372..9405ffadac442 100644
--- a/crates/storage/provider/Cargo.toml
+++ b/crates/storage/provider/Cargo.toml
@@ -88,7 +88,6 @@ alloy-consensus.workspace = true
 
 [features]
 optimism = [
-
"reth-primitives/optimism", "reth-execution-types/optimism", "reth-optimism-primitives", "reth-codecs/op", diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 8b8e486e1de54..75ad402b059cc 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -2402,7 +2402,7 @@ mod tests { let mut in_memory_blocks: std::collections::VecDeque<_> = in_memory_blocks.into(); $( - let tx_hash = |block: &SealedBlock| block.body().transactions[0].hash(); + let tx_hash = |block: &SealedBlock| *block.body().transactions[0].tx_hash(); let tx_num = |block: &SealedBlock| { database_blocks .iter() @@ -2726,7 +2726,7 @@ mod tests { // above, we do not see it. assert!(matches!( old_transaction_hash_fn( - to_be_persisted_tx.hash(), + *to_be_persisted_tx.tx_hash(), provider.canonical_in_memory_state(), provider.database.clone() ), @@ -2743,7 +2743,7 @@ mod tests { assert!(matches!( correct_transaction_hash_fn( - to_be_persisted_tx.hash(), + *to_be_persisted_tx.tx_hash(), provider.canonical_in_memory_state(), provider.database ), diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 4c781b304b16e..ab44da3ae10d3 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -708,7 +708,7 @@ mod tests { if sender == block.body().transactions[0].recover_signer().unwrap() ); assert_matches!( - provider.transaction_id(block.body().transactions[0].hash()), + provider.transaction_id(*block.body().transactions[0].tx_hash()), Ok(Some(0)) ); } @@ -726,7 +726,10 @@ mod tests { Ok(_) ); assert_matches!(provider.transaction_sender(0), Ok(None)); - assert_matches!(provider.transaction_id(block.body().transactions[0].hash()), Ok(None)); + assert_matches!( + provider.transaction_id(*block.body().transactions[0].tx_hash()), + Ok(None) + ); } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 8c3b6422e9a47..113c2c509057f 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -253,7 +253,7 @@ impl TransactionsProvider for MockEthProvider { let tx_number = lock .values() .flat_map(|block| &block.body.transactions) - .position(|tx| tx.hash() == tx_hash) + .position(|tx| *tx.tx_hash() == tx_hash) .map(|pos| pos as TxNumber); Ok(tx_number) @@ -280,7 +280,7 @@ impl TransactionsProvider for MockEthProvider { fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { Ok(self.blocks.lock().iter().find_map(|(_, block)| { - block.body.transactions.iter().find(|tx| tx.hash() == hash).cloned() + block.body.transactions.iter().find(|tx| *tx.tx_hash() == hash).cloned() })) } @@ -291,7 +291,7 @@ impl TransactionsProvider for MockEthProvider { let lock = self.blocks.lock(); for (block_hash, block) in lock.iter() { for (index, tx) in block.body.transactions.iter().enumerate() { - if tx.hash() == hash { + if *tx.tx_hash() == hash { let meta = TransactionMeta { tx_hash: hash, index: index as u64, diff --git a/crates/transaction-pool/src/test_utils/gen.rs b/crates/transaction-pool/src/test_utils/gen.rs index 3110997d692d6..feb50050c10d5 100644 --- a/crates/transaction-pool/src/test_utils/gen.rs +++ b/crates/transaction-pool/src/test_utils/gen.rs @@ -1,10 +1,12 @@ use crate::EthPooledTransaction; -use 
alloy_consensus::{TxEip1559, TxEip4844, TxLegacy}; +use alloy_consensus::{SignableTransaction, TxEip1559, TxEip4844, TxLegacy}; use alloy_eips::{eip1559::MIN_PROTOCOL_BASE_FEE, eip2718::Encodable2718, eip2930::AccessList}; use alloy_primitives::{Address, Bytes, TxKind, B256, U256}; use rand::Rng; use reth_chainspec::MAINNET; -use reth_primitives::{Transaction, TransactionSigned}; +use reth_primitives::{ + transaction::SignedTransactionIntoRecoveredExt, Transaction, TransactionSigned, +}; use reth_primitives_traits::crypto::secp256k1::sign_message; /// A generator for transactions for testing purposes. @@ -99,12 +101,12 @@ impl TransactionGenerator { /// Generates and returns a pooled EIP-1559 transaction with a random signer. pub fn gen_eip1559_pooled(&mut self) -> EthPooledTransaction { - self.gen_eip1559().into_ecrecovered().unwrap().try_into().unwrap() + self.gen_eip1559().try_into_ecrecovered().unwrap().try_into().unwrap() } /// Generates and returns a pooled EIP-4844 transaction with a random signer. pub fn gen_eip4844_pooled(&mut self) -> EthPooledTransaction { - let tx = self.gen_eip4844().into_ecrecovered().unwrap(); + let tx = self.gen_eip4844().try_into_ecrecovered().unwrap(); let encoded_length = tx.encode_2718_len(); EthPooledTransaction::new(tx, encoded_length) } diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs index 98fa167f014c3..1cd4445e8d1c9 100644 --- a/crates/transaction-pool/src/test_utils/mock.rs +++ b/crates/transaction-pool/src/test_utils/mock.rs @@ -32,7 +32,7 @@ use reth_primitives::{ transaction::{SignedTransactionIntoRecoveredExt, TryFromRecoveredTransactionError}, PooledTransaction, RecoveredTx, Transaction, TransactionSigned, TxType, }; -use reth_primitives_traits::InMemorySize; +use reth_primitives_traits::{InMemorySize, SignedTransaction}; use std::{ops::Range, sync::Arc, time::Instant, vec::IntoIter}; /// A transaction pool implementation using [`MockOrdering`] for transaction ordering. 
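The recurring `tx.hash()` to `*tx.tx_hash()` rewrites across this series come from switching call sites to the `SignedTransaction` trait, whose hash accessor hands out a reference, so an owned hash now needs a dereference. An illustrative reduction of that API shape (hypothetical types, not reth's actual trait definition):

type TxHash = [u8; 32];

trait SignedTransaction {
    // Returning a reference lets implementations cache the hash without copying.
    fn tx_hash(&self) -> &TxHash;
}

struct Tx {
    hash: TxHash,
}

impl SignedTransaction for Tx {
    fn tx_hash(&self) -> &TxHash {
        &self.hash
    }
}

fn main() {
    let tx = Tx { hash: [0u8; 32] };
    // Owned copy at the call site, mirroring the `*tx.tx_hash()` pattern in the diffs.
    let owned: TxHash = *tx.tx_hash();
    assert_eq!(owned, [0u8; 32]);
}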
@@ -909,7 +909,7 @@ impl TryFrom> for MockTransaction { fn try_from(tx: RecoveredTx) -> Result { let sender = tx.signer(); let transaction = tx.into_tx(); - let hash = transaction.hash(); + let hash = *transaction.tx_hash(); let size = transaction.size(); #[allow(unreachable_patterns)] diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index f7e2f310e8bf5..38f3c7564b252 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -7,7 +7,7 @@ use crate::{ }; use alloy_consensus::{ constants::{EIP1559_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID}, - BlockHeader, Transaction as _, Typed2718, + BlockHeader, Signed, Transaction as _, Typed2718, }; use alloy_eips::{ eip2718::Encodable2718, @@ -1249,7 +1249,8 @@ impl From> for EthPooledTransaction { // include the blob sidecar let (tx, sig, hash) = tx.into_parts(); let (tx, blob) = tx.into_parts(); - let tx = TransactionSigned::new(tx.into(), sig, hash); + let tx = Signed::new_unchecked(tx, sig, hash); + let tx = TransactionSigned::from(tx); let tx = RecoveredTx::new_unchecked(tx, signer); let mut pooled = Self::new(tx, encoded_length); pooled.blob_sidecar = EthBlobTransactionSidecar::Present(blob); @@ -1279,9 +1280,8 @@ impl PoolTransaction for EthPooledTransaction { tx: RecoveredTx, ) -> Result, Self::TryFromConsensusError> { let (tx, signer) = tx.into_parts(); - let pooled = tx - .try_into_pooled() - .map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing)?; + let pooled = + tx.try_into().map_err(|_| TryFromRecoveredTransactionError::BlobSidecarMissing)?; Ok(RecoveredTx::new_unchecked(pooled, signer)) } diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs index 32a4e910ae9dc..2f5f682180aea 100644 --- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs +++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs @@ -1,6 +1,7 @@ use crate::BeaconSidecarConfig; use alloy_consensus::{ transaction::PooledTransaction, BlockHeader, Signed, Transaction as _, TxEip4844WithSidecar, + Typed2718, }; use alloy_primitives::B256; use alloy_rpc_types_beacon::sidecar::{BeaconBlobBundle, SidecarIterator}; @@ -8,6 +9,7 @@ use eyre::Result; use futures_util::{stream::FuturesUnordered, Future, Stream, StreamExt}; use reqwest::{Error, StatusCode}; use reth::{ + core::primitives::SignedTransaction, primitives::RecoveredBlock, providers::CanonStateNotification, transaction_pool::{BlobStoreError, TransactionPoolExt}, @@ -112,7 +114,7 @@ where return } - match self.pool.get_all_blobs_exact(txs.iter().map(|(tx, _)| tx.hash()).collect()) { + match self.pool.get_all_blobs_exact(txs.iter().map(|(tx, _)| *tx.tx_hash()).collect()) { Ok(blobs) => { actions_to_queue.reserve_exact(txs.len()); for ((tx, _), sidecar) in txs.iter().zip(blobs.into_iter()) { @@ -199,7 +201,7 @@ where .transactions() .filter(|tx| tx.is_eip4844()) .map(|tx| { - let transaction_hash = tx.hash(); + let transaction_hash = *tx.tx_hash(); let block_metadata = BlockMetadata { block_hash: new.tip().hash(), block_number: new.tip().number(), diff --git a/examples/custom-beacon-withdrawals/Cargo.toml b/examples/custom-beacon-withdrawals/Cargo.toml index c396ca11df8bd..9a8152b49980c 100644 --- a/examples/custom-beacon-withdrawals/Cargo.toml +++ b/examples/custom-beacon-withdrawals/Cargo.toml @@ -21,6 +21,4 @@ alloy-consensus.workspace = true eyre.workspace = true [features] -optimism = [ - "reth-primitives/optimism" -] \ No newline at 
end of file +optimism = [] \ No newline at end of file diff --git a/examples/custom-dev-node/src/main.rs b/examples/custom-dev-node/src/main.rs index 7688c1ce2be67..a1eaeefe59a2f 100644 --- a/examples/custom-dev-node/src/main.rs +++ b/examples/custom-dev-node/src/main.rs @@ -15,7 +15,7 @@ use reth::{ tasks::TaskManager, }; use reth_chainspec::ChainSpec; -use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig}; +use reth_node_core::{args::RpcServerArgs, node_config::NodeConfig, primitives::SignedTransaction}; use reth_node_ethereum::EthereumNode; #[tokio::main] @@ -51,7 +51,7 @@ async fn main() -> eyre::Result<()> { let head = notifications.next().await.unwrap(); let tx = &head.tip().body().transactions().next().unwrap(); - assert_eq!(tx.hash(), hash); + assert_eq!(*tx.tx_hash(), hash); println!("mined transaction: {hash}"); Ok(()) } diff --git a/examples/db-access/Cargo.toml b/examples/db-access/Cargo.toml index ec278ac1cc14d..ff48c75b38ee0 100644 --- a/examples/db-access/Cargo.toml +++ b/examples/db-access/Cargo.toml @@ -10,6 +10,7 @@ license.workspace = true reth-chainspec.workspace = true reth-db.workspace = true reth-primitives.workspace = true +reth-primitives-traits.workspace = true reth-provider.workspace = true reth-node-ethereum.workspace = true reth-node-types.workspace = true diff --git a/examples/db-access/src/main.rs b/examples/db-access/src/main.rs index bda1ea26cdb8a..0b5ca570a5bd6 100644 --- a/examples/db-access/src/main.rs +++ b/examples/db-access/src/main.rs @@ -6,6 +6,7 @@ use reth_db::{open_db_read_only, DatabaseEnv}; use reth_node_ethereum::EthereumNode; use reth_node_types::NodeTypesWithDBAdapter; use reth_primitives::{SealedBlock, SealedHeader, TransactionSigned}; +use reth_primitives_traits::transaction::signed::SignedTransaction; use reth_provider::{ providers::StaticFileProvider, AccountReader, BlockReader, BlockSource, HeaderProvider, ProviderFactory, ReceiptProvider, StateProvider, TransactionsProvider, @@ -95,17 +96,17 @@ fn txs_provider_example // Can query the tx by hash let tx_by_hash = - provider.transaction_by_hash(tx.hash())?.ok_or(eyre::eyre!("txhash not found"))?; + provider.transaction_by_hash(*tx.tx_hash())?.ok_or(eyre::eyre!("txhash not found"))?; assert_eq!(tx, tx_by_hash); // Can query the tx by hash with info about the block it was included in let (tx, meta) = provider - .transaction_by_hash_with_meta(tx.hash())? + .transaction_by_hash_with_meta(*tx.tx_hash())? .ok_or(eyre::eyre!("txhash not found"))?; - assert_eq!(tx.hash(), meta.tx_hash); + assert_eq!(*tx.tx_hash(), meta.tx_hash); // Can reverse lookup the key too - let id = provider.transaction_id(tx.hash())?.ok_or(eyre::eyre!("txhash not found"))?; + let id = provider.transaction_id(*tx.tx_hash())?.ok_or(eyre::eyre!("txhash not found"))?; assert_eq!(id, txid); // Can find the block of a transaction given its key @@ -181,8 +182,9 @@ fn receipts_provider_example< // Can query receipt by txhash too let tx = provider.transaction_by_id(txid)?.unwrap(); - let receipt_by_hash = - provider.receipt_by_hash(tx.hash())?.ok_or(eyre::eyre!("tx receipt by hash not found"))?; + let receipt_by_hash = provider + .receipt_by_hash(*tx.tx_hash())? 
+ .ok_or(eyre::eyre!("tx receipt by hash not found"))?; assert_eq!(receipt, receipt_by_hash); // Can query all the receipts in a block diff --git a/testing/testing-utils/src/generators.rs b/testing/testing-utils/src/generators.rs index 307ef1cda32b9..895155cc7f04d 100644 --- a/testing/testing-utils/src/generators.rs +++ b/testing/testing-utils/src/generators.rs @@ -1,6 +1,6 @@ //! Generators for different data structures like block headers, block bodies and ranges of those. -use alloy_consensus::{Block, Header, Transaction as _, TxLegacy}; +use alloy_consensus::{Block, Header, SignableTransaction, Transaction as _, TxLegacy}; use alloy_eips::{ eip1898::BlockWithParent, eip4895::{Withdrawal, Withdrawals}, From 64197c0064af293d878148938f33e30c393a323c Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 17 Jan 2025 00:26:57 +0000 Subject: [PATCH 092/113] chore(trie): branch node has only one child log (#13836) --- crates/trie/sparse/src/trie.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 80b0e31985ca7..3441daa4b4ee5 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1094,7 +1094,7 @@ impl RevealedSparseTrie
{ let mut child_path = removed_path.clone(); child_path.push_unchecked(child_nibble); - trace!(target: "trie::sparse", ?removed_path, ?child_path, ?child, "Branch node has only one child"); + trace!(target: "trie::sparse", ?removed_path, ?child_path, "Branch node has only one child"); if self.nodes.get(&child_path).unwrap().is_hash() { trace!(target: "trie::sparse", ?child_path, "Retrieving remaining blinded branch child"); From 83b5619889f8a969d5c44dab1b388e8c0f724afe Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 17 Jan 2025 01:12:23 -0500 Subject: [PATCH 093/113] fix: pin clippy to nightly-2025-01-16 (#13839) --- .github/workflows/lint.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index e0ae216dd38b7..d968fd4292a63 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -30,6 +30,7 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@clippy with: + toolchain: nightly-2025-01-16 components: clippy - uses: Swatinem/rust-cache@v2 with: @@ -51,6 +52,7 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@nightly with: + toolchain: nightly-2025-01-16 components: clippy - uses: Swatinem/rust-cache@v2 with: From 4147bd0dc9b497c7c984364166e1a546cfb56ccf Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 17 Jan 2025 11:48:46 +0100 Subject: [PATCH 094/113] chore(trie): derive `Clone` on noop cursor factories (#13840) --- crates/trie/trie/src/hashed_cursor/noop.rs | 2 +- crates/trie/trie/src/trie_cursor/noop.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/trie/trie/src/hashed_cursor/noop.rs b/crates/trie/trie/src/hashed_cursor/noop.rs index a21e1026b3807..07b2cba96d39c 100644 --- a/crates/trie/trie/src/hashed_cursor/noop.rs +++ b/crates/trie/trie/src/hashed_cursor/noop.rs @@ -4,7 +4,7 @@ use reth_primitives::Account; use reth_storage_errors::db::DatabaseError; /// Noop hashed cursor factory. -#[derive(Default, Debug)] +#[derive(Clone, Default, Debug)] #[non_exhaustive] pub struct NoopHashedCursorFactory; diff --git a/crates/trie/trie/src/trie_cursor/noop.rs b/crates/trie/trie/src/trie_cursor/noop.rs index f3239b581090d..de409c59fe163 100644 --- a/crates/trie/trie/src/trie_cursor/noop.rs +++ b/crates/trie/trie/src/trie_cursor/noop.rs @@ -4,7 +4,7 @@ use alloy_primitives::B256; use reth_storage_errors::db::DatabaseError; /// Noop trie cursor factory. 
-#[derive(Default, Debug)]
+#[derive(Clone, Default, Debug)]
 #[non_exhaustive]
 pub struct NoopTrieCursorFactory;
 

From 43bd94ac4e9638f8b6fc455739f26a4d696e83dd Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com>
Date: Fri, 17 Jan 2025 11:00:57 +0000
Subject: [PATCH 095/113] fix(trie): reveal blinded node along with masks in
 sparse trie (#13827)

---
 crates/trie/sparse/src/blinded.rs     | 17 +++++++--
 crates/trie/sparse/src/trie.rs        | 50 +++++++++++++++++++--------
 crates/trie/trie/src/proof/blinded.rs | 28 +++++++++------
 crates/trie/trie/src/witness.rs       |  6 ++--
 4 files changed, 71 insertions(+), 30 deletions(-)

diff --git a/crates/trie/sparse/src/blinded.rs b/crates/trie/sparse/src/blinded.rs
index 28a41ba11fa64..46f7e1655a0af 100644
--- a/crates/trie/sparse/src/blinded.rs
+++ b/crates/trie/sparse/src/blinded.rs
@@ -2,7 +2,7 @@
 
 use alloy_primitives::{Bytes, B256};
 use reth_execution_errors::SparseTrieError;
-use reth_trie_common::Nibbles;
+use reth_trie_common::{Nibbles, TrieMask};
 
 /// Factory for instantiating blinded node providers.
 pub trait BlindedProviderFactory {
@@ -18,10 +18,21 @@ pub trait BlindedProviderFactory {
     fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider;
 }
 
+/// Revealed blinded trie node.
+#[derive(Debug)]
+pub struct RevealedNode {
+    /// Raw trie node.
+    pub node: Bytes,
+    /// Branch node tree mask, if any.
+    pub tree_mask: Option<TrieMask>,
+    /// Branch node hash mask, if any.
+    pub hash_mask: Option<TrieMask>,
+}
+
 /// Trie node provider for retrieving blinded nodes.
 pub trait BlindedProvider {
     /// Retrieve blinded node by path.
-    fn blinded_node(&mut self, path: &Nibbles) -> Result<Option<Bytes>, SparseTrieError>;
+    fn blinded_node(&mut self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError>;
 }
 
 /// Default blinded node provider factory that creates [`DefaultBlindedProvider`].
@@ -46,7 +57,7 @@ impl BlindedProviderFactory for DefaultBlindedProviderFactory {
 pub struct DefaultBlindedProvider;
 
 impl BlindedProvider for DefaultBlindedProvider {
-    fn blinded_node(&mut self, _path: &Nibbles) -> Result<Option<Bytes>, SparseTrieError> {
+    fn blinded_node(&mut self, _path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
         Ok(None)
     }
 }
diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs
index 3441daa4b4ee5..9c35f7dee3338 100644
--- a/crates/trie/sparse/src/trie.rs
+++ b/crates/trie/sparse/src/trie.rs
@@ -1,4 +1,4 @@
-use crate::blinded::{BlindedProvider, DefaultBlindedProvider};
+use crate::blinded::{BlindedProvider, DefaultBlindedProvider, RevealedNode};
 use alloy_primitives::{
     hex, keccak256,
     map::{Entry, HashMap, HashSet},
@@ -945,14 +945,24 @@ impl<P> RevealedSparseTrie<P>
{ if self.updates.is_some() { // Check if the extension node child is a hash that needs to be revealed if self.nodes.get(¤t).unwrap().is_hash() { - if let Some(node) = self.provider.blinded_node(¤t)? { + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + self.provider.blinded_node(¤t)? + { let decoded = TrieNode::decode(&mut &node[..])?; - trace!(target: "trie::sparse", ?current, ?decoded, "Revealing extension node child"); - // We'll never have to update the revealed child node, only - // remove or do nothing, so - // we can safely ignore the hash mask here and - // pass `None`. - self.reveal_node(current.clone(), decoded, None, None)?; + trace!( + target: "trie::sparse", + ?current, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing extension node child", + ); + self.reveal_node( + current.clone(), + decoded, + tree_mask, + hash_mask, + )?; } } } @@ -1000,6 +1010,7 @@ impl RevealedSparseTrie
 {
                 return Err(SparseTrieErrorKind::BlindedNode { path: path.clone(), hash }.into())
             }
 
+            trace!(target: "trie::sparse", ?path, "Leaf node is not present in the trie");
             // Leaf is not present in the trie.
             return Ok(())
         }
@@ -1098,13 +1109,24 @@ impl<P> RevealedSparseTrie<P>
 {
                 if self.nodes.get(&child_path).unwrap().is_hash() {
                     trace!(target: "trie::sparse", ?child_path, "Retrieving remaining blinded branch child");
-                    if let Some(node) = self.provider.blinded_node(&child_path)? {
+                    if let Some(RevealedNode { node, tree_mask, hash_mask }) =
+                        self.provider.blinded_node(&child_path)?
+                    {
                         let decoded = TrieNode::decode(&mut &node[..])?;
-                        trace!(target: "trie::sparse", ?child_path, ?decoded, "Revealing remaining blinded branch child");
-                        // We'll never have to update the revealed branch node, only remove
-                        // or do nothing, so we can safely ignore the hash mask here and
-                        // pass `None`.
-                        self.reveal_node(child_path.clone(), decoded, None, None)?;
+                        trace!(
+                            target: "trie::sparse",
+                            ?child_path,
+                            ?decoded,
+                            ?tree_mask,
+                            ?hash_mask,
+                            "Revealing remaining blinded branch child"
+                        );
+                        self.reveal_node(
+                            child_path.clone(),
+                            decoded,
+                            tree_mask,
+                            hash_mask,
+                        )?;
                     }
                 }
 
diff --git a/crates/trie/trie/src/proof/blinded.rs b/crates/trie/trie/src/proof/blinded.rs
index 57d0de97fbe7d..9b838c2e9dc69 100644
--- a/crates/trie/trie/src/proof/blinded.rs
+++ b/crates/trie/trie/src/proof/blinded.rs
@@ -2,11 +2,13 @@ use super::{Proof, StorageProof};
 use crate::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory};
 use alloy_primitives::{
     map::{HashMap, HashSet},
-    Bytes, B256,
+    B256,
 };
 use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind};
 use reth_trie_common::{prefix_set::TriePrefixSetsMut, Nibbles};
-use reth_trie_sparse::blinded::{pad_path_to_key, BlindedProvider, BlindedProviderFactory};
+use reth_trie_sparse::blinded::{
+    pad_path_to_key, BlindedProvider, BlindedProviderFactory, RevealedNode,
+};
 use std::sync::Arc;
 use tracing::trace;
 
@@ -85,17 +87,20 @@ where
     T: TrieCursorFactory + Clone + Send + Sync,
     H: HashedCursorFactory + Clone + Send + Sync,
 {
-    fn blinded_node(&mut self, path: &Nibbles) -> Result<Option<Bytes>, SparseTrieError> {
+    fn blinded_node(&mut self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
         let targets = HashMap::from_iter([(pad_path_to_key(path), HashSet::default())]);
-        let proof =
+        let mut proof =
             Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone())
                 .with_prefix_sets_mut(self.prefix_sets.as_ref().clone())
+                .with_branch_node_masks(true)
                 .multiproof(targets)
                 .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?;
         let node = proof.account_subtree.into_inner().remove(path);
-
+        let tree_mask = proof.branch_node_tree_masks.remove(path);
+        let hash_mask = proof.branch_node_hash_masks.remove(path);
         trace!(target: "trie::proof::blinded", ?path, ?node, "Blinded node for account trie");
-        Ok(node)
+
+        Ok(node.map(|node| RevealedNode { node, tree_mask, hash_mask }))
     }
 }
 
@@ -129,21 +134,24 @@ where
     T: TrieCursorFactory + Clone + Send + Sync,
     H: HashedCursorFactory + Clone + Send + Sync,
 {
-    fn blinded_node(&mut self, path: &Nibbles) -> Result<Option<Bytes>, SparseTrieError> {
+    fn blinded_node(&mut self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
         let targets = HashSet::from_iter([pad_path_to_key(path)]);
         let storage_prefix_set =
             self.prefix_sets.storage_prefix_sets.get(&self.account).cloned().unwrap_or_default();
-        let proof = StorageProof::new_hashed(
+        let mut proof = StorageProof::new_hashed(
             self.trie_cursor_factory.clone(),
             self.hashed_cursor_factory.clone(),
             self.account,
         )
         .with_prefix_set_mut(storage_prefix_set)
+        .with_branch_node_masks(true)
         .storage_multiproof(targets)
         .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?;
         let node = proof.subtree.into_inner().remove(path);
-
+        let tree_mask = proof.branch_node_tree_masks.remove(path);
+        let hash_mask = proof.branch_node_hash_masks.remove(path);
         trace!(target: "trie::proof::blinded", account = ?self.account, ?path, ?node, "Blinded node for storage trie");
-        Ok(node)
+
+        Ok(node.map(|node| RevealedNode { node, tree_mask, hash_mask }))
     }
 }
diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs
index 5d43c0ea145f1..b044b476cd2d6 100644
--- a/crates/trie/trie/src/witness.rs
+++ b/crates/trie/trie/src/witness.rs
@@ -17,7 +17,7 @@ use reth_execution_errors::{
 };
 use reth_trie_common::{MultiProofTargets, Nibbles};
 use reth_trie_sparse::{
-    blinded::{BlindedProvider, BlindedProviderFactory},
+    blinded::{BlindedProvider, BlindedProviderFactory, RevealedNode},
     SparseStateTrie,
 };
 use std::sync::{mpsc, Arc};
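The idea behind `RevealedNode` is that a branch node's tree and hash masks travel together with its raw bytes, so a sparse trie revealing a blinded node can restore the masks instead of passing `None`. A minimal consumer of the new provider shape, with a stub provider and stand-in types (all hypothetical, for illustration only):

// hypothetical stand-ins for the real reth types, for illustration only
type TrieMask = u16;

struct RevealedNode {
    node: Vec<u8>,
    tree_mask: Option<TrieMask>,
    hash_mask: Option<TrieMask>,
}

trait BlindedProvider {
    fn blinded_node(&mut self, path: &[u8]) -> Option<RevealedNode>;
}

struct StubProvider;

impl BlindedProvider for StubProvider {
    fn blinded_node(&mut self, _path: &[u8]) -> Option<RevealedNode> {
        // A real provider would walk a multiproof; here we fabricate one node.
        Some(RevealedNode { node: vec![0x80], tree_mask: Some(0b0011), hash_mask: None })
    }
}

fn main() {
    let mut provider = StubProvider;
    if let Some(revealed) = provider.blinded_node(&[0x1]) {
        // Masks are now revealed together with the node bytes.
        assert_eq!(revealed.tree_mask, Some(0b0011));
        assert!(revealed.hash_mask.is_none());
        assert_eq!(revealed.node, vec![0x80]);
    }
}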
@@ -244,11 +244,11 @@ impl<P> WitnessBlindedProvider<P> {
 }
 
 impl<P: BlindedProvider> BlindedProvider for WitnessBlindedProvider<P> {
-    fn blinded_node(&mut self, path: &Nibbles) -> Result<Option<Bytes>, SparseTrieError> {
+    fn blinded_node(&mut self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
         let maybe_node = self.provider.blinded_node(path)?;
         if let Some(node) = &maybe_node {
             self.tx
-                .send(node.clone())
+                .send(node.node.clone())
                 .map_err(|error| SparseTrieErrorKind::Other(Box::new(error)))?;
         }
         Ok(maybe_node)

From a8c883c6b61329c6fe822d79d25cf4be4a740223 Mon Sep 17 00:00:00 2001
From: joshieDo <93316087+joshieDo@users.noreply.github.com>
Date: Fri, 17 Jan 2025 11:21:05 +0000
Subject: [PATCH 096/113] feat: extend `BlockBodyIndicesProvider` with
 `block_body_indices_range` (#13829)

---
 crates/stages/stages/src/stages/bodies.rs     |   3 +-
 crates/stages/stages/src/stages/execution.rs  |  13 +-
 crates/stages/stages/src/stages/tx_lookup.rs  |  15 +-
 .../src/providers/blockchain_provider.rs      |  14 +-
 .../provider/src/providers/consistent.rs      |   7 +
 .../provider/src/providers/database/mod.rs    |   7 +
 .../src/providers/database/provider.rs        | 132 ++++++++++--------
 .../provider/src/providers/static_file/jar.rs |  17 ++-
 .../src/providers/static_file/manager.rs      |   8 ++
 .../storage/provider/src/test_utils/mock.rs   |   6 +
 .../storage/storage-api/src/block_indices.rs  |   8 ++
 crates/storage/storage-api/src/noop.rs        |   7 +
 12 files changed, 159 insertions(+), 78 deletions(-)

diff --git a/crates/stages/stages/src/stages/bodies.rs b/crates/stages/stages/src/stages/bodies.rs
index 7c796ec6ad108..fd4e373a5e84c 100644
--- a/crates/stages/stages/src/stages/bodies.rs
+++ b/crates/stages/stages/src/stages/bodies.rs
@@ -112,8 +112,7 @@ impl BodyStage {
         // fix the inconsistency right away.
         if let Some(unwind_to) = unwind_block {
             let next_tx_num_after_unwind = provider
-                .tx_ref()
-                .get::<tables::BlockBodyIndices>(unwind_to)?
+                .block_body_indices(unwind_to)?
                 .map(|b| b.next_tx_num())
                 .ok_or(ProviderError::BlockBodyIndicesNotFound(unwind_to))?;
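These stage changes replace direct `tables::BlockBodyIndices` reads with the provider-level `block_body_indices` accessor, so the stages no longer reach into the cursor API. A sketch of the lookup they now perform, with simplified stand-in types rather than reth's real provider traits:

// simplified stand-ins for illustration
#[derive(Clone, Copy)]
struct StoredBlockBodyIndices {
    first_tx_num: u64,
    tx_count: u64,
}

impl StoredBlockBodyIndices {
    fn next_tx_num(&self) -> u64 {
        self.first_tx_num + self.tx_count
    }
}

fn block_body_indices(block: u64) -> Option<StoredBlockBodyIndices> {
    // A real provider reads this from storage; fabricate two txs per block.
    Some(StoredBlockBodyIndices { first_tx_num: block * 2, tx_count: 2 })
}

fn main() {
    // Mirrors the stage logic: the first tx number after an unwind target is
    // the `next_tx_num` of that block's body indices.
    let unwind_to = 5;
    let next_tx_num_after_unwind =
        block_body_indices(unwind_to).map(|b| b.next_tx_num()).expect("indices exist");
    assert_eq!(next_tx_num_after_unwind, 12);
}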
diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs
index 5557beda519aa..afa493b4d999f 100644
--- a/crates/stages/stages/src/stages/execution.rs
+++ b/crates/stages/stages/src/stages/execution.rs
@@ -5,7 +5,6 @@ use alloy_primitives::BlockNumber;
 use num_traits::Zero;
 use reth_config::config::ExecutionConfig;
 use reth_db::{static_file::HeaderMask, tables};
-use reth_db_api::{cursor::DbCursorRO, transaction::DbTx};
 use reth_evm::{
     execute::{BatchExecutor, BlockExecutorProvider},
     metrics::ExecutorMetrics,
@@ -203,12 +202,8 @@ where
         }
 
         // Get next expected receipt number
-        let tx = provider.tx_ref();
-        let next_receipt_num = tx
-            .cursor_read::<tables::BlockBodyIndices>()?
-            .seek_exact(checkpoint)?
-            .map(|(_, value)| value.next_tx_num())
-            .unwrap_or(0);
+        let next_receipt_num =
+            provider.block_body_indices(checkpoint)?.map(|b| b.next_tx_num()).unwrap_or(0);
 
         let static_file_provider = provider.static_file_provider();
 
@@ -237,8 +232,7 @@ where
         // fix the inconsistency right away.
         if let Some(unwind_to) = unwind_to {
             let next_receipt_num_after_unwind = provider
-                .tx_ref()
-                .get::<tables::BlockBodyIndices>(unwind_to)?
+                .block_body_indices(unwind_to)?
+ .map(|indices| indices.next_tx_num()) .unwrap_or_default(); // Insert blocks into the database diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index d84152f88e9d4..3082faf45b518 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -1204,6 +1204,13 @@ impl BlockBodyIndicesProvider for ConsistentProvider { }, ) } + + fn block_body_indices_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + range.map_while(|b| self.block_body_indices(b).transpose()).collect() + } } impl StageCheckpointReader for ConsistentProvider { diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index ab44da3ae10d3..12e33146c913e 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -564,6 +564,13 @@ impl BlockBodyIndicesProvider for ProviderFactory { ) -> ProviderResult> { self.provider()?.block_body_indices(number) } + + fn block_body_indices_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + self.provider()?.block_body_indices_range(range) + } } impl StageCheckpointReader for ProviderFactory { diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 4a9e5affa29d8..6ac93454c865f 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -618,27 +618,23 @@ impl DatabaseProvider { let len = range.end().saturating_sub(*range.start()) as usize; let mut blocks = Vec::with_capacity(len); - let headers = headers_range(range)?; + let headers = headers_range(range.clone())?; let mut tx_cursor = self.tx.cursor_read::>>()?; - let mut block_body_cursor = self.tx.cursor_read::()?; - - let mut present_headers = Vec::new(); - for header in headers { - // If the body indices are not found, this means that the transactions either do - // not exist in the database yet, or they do exit but are - // not indexed. If they exist but are not indexed, we don't - // have enough information to return the block anyways, so - // we skip the block. - if let Some((_, block_body_indices)) = - block_body_cursor.seek_exact(header.as_ref().number())? - { - let tx_range = block_body_indices.tx_num_range(); - present_headers.push((header, tx_range)); - } - } + + // If the body indices are not found, this means that the transactions either do + // not exist in the database yet, or they do exit but are + // not indexed. If they exist but are not indexed, we don't + // have enough information to return the block anyways, so + // we skip the block. + let present_headers = self + .block_body_indices_range(range)? 
+ .into_iter() + .map(|b| b.tx_num_range()) + .zip(headers) + .collect::>(); let mut inputs = Vec::new(); - for (header, tx_range) in &present_headers { + for (tx_range, header) in &present_headers { let transactions = if tx_range.is_empty() { Vec::new() } else { @@ -650,7 +646,7 @@ impl DatabaseProvider { let bodies = self.storage.reader().read_block_bodies(self, inputs)?; - for ((header, tx_range), body) in present_headers.into_iter().zip(bodies) { + for ((tx_range, header), body) in present_headers.into_iter().zip(bodies) { blocks.push(assemble_block(header, body, tx_range)?); } @@ -1476,23 +1472,23 @@ impl TransactionsProvider for Datab &self, range: impl RangeBounds, ) -> ProviderResult>> { + let range = to_range(range); let mut tx_cursor = self.tx.cursor_read::>()?; - let mut results = Vec::new(); - let mut body_cursor = self.tx.cursor_read::()?; - for entry in body_cursor.walk_range(range)? { - let (_, body) = entry?; - let tx_num_range = body.tx_num_range(); - if tx_num_range.is_empty() { - results.push(Vec::new()); - } else { - results.push( - self.transactions_by_tx_range_with_cursor(tx_num_range, &mut tx_cursor)? + + self.block_body_indices_range(range.start..=range.end.saturating_sub(1))? + .into_iter() + .map(|body| { + let tx_num_range = body.tx_num_range(); + if tx_num_range.is_empty() { + Ok(Vec::new()) + } else { + Ok(self + .transactions_by_tx_range_with_cursor(tx_num_range, &mut tx_cursor)? .into_iter() - .collect(), - ); - } - } - Ok(results) + .collect()) + } + }) + .collect() } fn transactions_by_tx_range( @@ -1620,6 +1616,18 @@ impl BlockBodyIndicesProvider fn block_body_indices(&self, num: u64) -> ProviderResult> { Ok(self.tx.get::(num)?) } + + fn block_body_indices_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + Ok(self + .tx_ref() + .cursor_read::()? + .walk_range(range)? + .map(|r| r.map(|(_, b)| b)) + .collect::>()?) + } } impl StageCheckpointReader for DatabaseProvider { @@ -1756,13 +1764,31 @@ impl StateWriter is_value_known: OriginalValuesKnown, write_receipts_to: StorageLocation, ) -> ProviderResult<()> { + let first_block = execution_outcome.first_block(); + let block_count = execution_outcome.receipts.len() as u64; + let block_range = first_block..=first_block.saturating_add(block_count).saturating_sub(1); + let last_block = *block_range.end(); + let (plain_state, reverts) = execution_outcome.bundle.to_plain_state_and_reverts(is_value_known); - self.write_state_reverts(reverts, execution_outcome.first_block)?; + self.write_state_reverts(reverts, first_block)?; self.write_state_changes(plain_state)?; - let mut bodies_cursor = self.tx.cursor_read::()?; + // Fetch the first transaction number for each block in the range + let block_indices: Vec<_> = self + .block_body_indices_range(block_range)? + .into_iter() + .map(|b| b.first_tx_num) + .collect(); + + // Ensure all expected blocks are present. 
+ if block_indices.len() < block_count as usize { + let missing_blocks = block_count - block_indices.len() as u64; + return Err(ProviderError::BlockBodyIndicesNotFound( + last_block.saturating_sub(missing_blocks - 1), + )); + } let has_receipts_pruning = self.prune_modes.has_receipts_pruning() || execution_outcome.receipts.iter().flatten().any(|receipt| receipt.is_none()); @@ -1780,25 +1806,19 @@ impl StateWriter // We are writing to static files if requested and if there's no receipt pruning configured let mut receipts_static_writer = (write_receipts_to.static_files() && !has_receipts_pruning) - .then(|| { - self.static_file_provider - .get_writer(execution_outcome.first_block, StaticFileSegment::Receipts) - }) + .then(|| self.static_file_provider.get_writer(first_block, StaticFileSegment::Receipts)) .transpose()?; - for (idx, receipts) in execution_outcome.receipts.iter().enumerate() { - let block_number = execution_outcome.first_block + idx as u64; + for (idx, (receipts, first_tx_index)) in + execution_outcome.receipts.iter().zip(block_indices).enumerate() + { + let block_number = first_block + idx as u64; // Increment block number for receipts static file writer if let Some(writer) = receipts_static_writer.as_mut() { writer.increment_block(block_number)?; } - let first_tx_index = bodies_cursor - .seek_exact(block_number)? - .map(|(_, indices)| indices.first_tx_num()) - .ok_or(ProviderError::BlockBodyIndicesNotFound(block_number))?; - for (idx, receipt) in receipts.iter().enumerate() { let receipt_idx = first_tx_index + idx as u64; if let Some(receipt) = receipt { @@ -2017,11 +2037,11 @@ impl StateWriter } // We are not removing block meta as it is used to get block changesets. - let block_bodies = self.get::(range.clone())?; + let block_bodies = self.block_body_indices_range(range.clone())?; // get transaction receipts let from_transaction_num = - block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); + block_bodies.first().expect("already checked if there are blocks").first_tx_num(); let storage_range = BlockNumberAddress::range(range.clone()); @@ -2113,13 +2133,13 @@ impl StateWriter let start_block_number = *range.start(); // We are not removing block meta as it is used to get block changesets. - let block_bodies = self.get::(range.clone())?; + let block_bodies = self.block_body_indices_range(range.clone())?; // get transaction receipts let from_transaction_num = - block_bodies.first().expect("already checked if there are blocks").1.first_tx_num(); + block_bodies.first().expect("already checked if there are blocks").first_tx_num(); let to_transaction_num = - block_bodies.last().expect("already checked if there are blocks").1.last_tx_num(); + block_bodies.last().expect("already checked if there are blocks").last_tx_num(); let storage_range = BlockNumberAddress::range(range.clone()); @@ -2199,7 +2219,7 @@ impl StateWriter let mut receipts = Vec::with_capacity(block_bodies.len()); // loop break if we are at the end of the blocks. - for (_, block_body) in block_bodies { + for block_body in block_bodies { let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); for num in block_body.tx_num_range() { if receipts_iter.peek().is_some_and(|(n, _)| *n == num) { @@ -2920,8 +2940,7 @@ impl BlockWrite // First transaction to be removed let unwind_tx_from = self - .tx - .get::(block)? + .block_body_indices(block)? 
.map(|b| b.next_tx_num()) .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; @@ -2957,8 +2976,7 @@ impl BlockWrite // First transaction to be removed let unwind_tx_from = self - .tx - .get::(block)? + .block_body_indices(block)? .map(|b| b.next_tx_num()) .ok_or(ProviderError::BlockBodyIndicesNotFound(block))?; diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 0ff9ed20ac173..4b6525c1d1dc7 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -25,7 +25,7 @@ use reth_storage_api::{BlockBodyIndicesProvider, OmmersProvider, WithdrawalsProv use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ fmt::Debug, - ops::{Deref, RangeBounds}, + ops::{Deref, RangeBounds, RangeInclusive}, sync::Arc, }; @@ -386,4 +386,19 @@ impl BlockBodyIndicesProvider for StaticFileJarProvider<'_, N fn block_body_indices(&self, num: u64) -> ProviderResult> { self.cursor()?.get_one::(num.into()) } + + fn block_body_indices_range( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + let mut cursor = self.cursor()?; + let mut indices = Vec::with_capacity((range.end() - range.start() + 1) as usize); + + for num in range { + if let Some(block) = cursor.get_one::(num.into())? { + indices.push(block) + } + } + Ok(indices) + } } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index d601098490a46..11f768a076428 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -1729,6 +1729,14 @@ impl BlockBodyIndicesProvider for StaticFileProvider { } }) } + + fn block_body_indices_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + // Required data not present in static_files + Err(ProviderError::UnsupportedProvider) + } } impl StatsReader for StaticFileProvider { diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 113c2c509057f..e6efe8012492d 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -777,6 +777,12 @@ impl BlockBodyIndicesProvider for MockEthProvider { fn block_body_indices(&self, _num: u64) -> ProviderResult> { Ok(None) } + fn block_body_indices_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Ok(vec![]) + } } impl ChangeSetReader for MockEthProvider { diff --git a/crates/storage/storage-api/src/block_indices.rs b/crates/storage/storage-api/src/block_indices.rs index 25c37bca8e2ca..3c6860fb2717b 100644 --- a/crates/storage/storage-api/src/block_indices.rs +++ b/crates/storage/storage-api/src/block_indices.rs @@ -1,5 +1,7 @@ +use alloy_primitives::BlockNumber; use reth_db_models::StoredBlockBodyIndices; use reth_storage_errors::provider::ProviderResult; +use std::ops::RangeInclusive; /// Client trait for fetching block body indices related data. #[auto_impl::auto_impl(&, Arc)] @@ -8,4 +10,10 @@ pub trait BlockBodyIndicesProvider: Send + Sync { /// /// Returns `None` if block is not found. fn block_body_indices(&self, num: u64) -> ProviderResult>; + + /// Returns the block body indices within the requested range matching number from storage. 
+ fn block_body_indices_range( + &self, + range: RangeInclusive, + ) -> ProviderResult>; } diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index a88fbbdd9ef06..20d975852f4df 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -567,4 +567,11 @@ impl BlockBodyIndicesProvider for NoopProvider ProviderResult> { Ok(None) } + + fn block_body_indices_range( + &self, + _range: RangeInclusive, + ) -> ProviderResult> { + Ok(vec![]) + } } From d9ef7f6df6cc63dee0211fcf38411a67c9ee2d73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=CF=80a?= <76558220+rkdud007@users.noreply.github.com> Date: Fri, 17 Jan 2025 12:42:03 +0000 Subject: [PATCH 097/113] chore: not panic on `RpcBlockProvider` (#13841) --- crates/consensus/debug-client/src/providers/rpc.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crates/consensus/debug-client/src/providers/rpc.rs b/crates/consensus/debug-client/src/providers/rpc.rs index 787515f1a6004..758371636ac79 100644 --- a/crates/consensus/debug-client/src/providers/rpc.rs +++ b/crates/consensus/debug-client/src/providers/rpc.rs @@ -44,10 +44,7 @@ impl BlockProvider for RpcBlockProvider { } async fn get_block(&self, block_number: u64) -> eyre::Result { - let ws_provider = ProviderBuilder::new() - .on_builtin(&self.ws_rpc_url) - .await - .expect("failed to create WS provider"); + let ws_provider = ProviderBuilder::new().on_builtin(&self.ws_rpc_url).await?; let block: Block = ws_provider .get_block_by_number(BlockNumberOrTag::Number(block_number), true.into()) .await? From a505f4914780c9ec6d1ca4ae1182244b851b89a7 Mon Sep 17 00:00:00 2001 From: DaniPopes <57450786+DaniPopes@users.noreply.github.com> Date: Fri, 17 Jan 2025 13:42:44 +0100 Subject: [PATCH 098/113] ci: install mold as the linker (#13842) --- .github/workflows/book.yml | 5 ++--- .github/workflows/compact.yml | 4 ++-- .github/workflows/docker-git.yml | 1 + .github/workflows/docker.yml | 1 + .github/workflows/integration.yml | 1 + .github/workflows/lint.yml | 15 ++++++++++++--- .github/workflows/prepare-reth.yml | 1 + .github/workflows/release.yml | 1 + .github/workflows/stage.yml | 3 ++- .github/workflows/sync.yml | 1 + .github/workflows/unit.yml | 3 +++ .github/workflows/windows.yml | 2 ++ 12 files changed, 29 insertions(+), 9 deletions(-) diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 56d5c427466e0..837d47e9f84c5 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -57,6 +57,7 @@ jobs: timeout-minutes: 60 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@nightly - name: Install mdbook run: | @@ -81,9 +82,7 @@ jobs: run: cargo docs --exclude "example-*" env: # Keep in sync with ./ci.yml:jobs.docs - RUSTDOCFLAGS: - --cfg docsrs --show-type-layout --generate-link-to-definition --enable-index-page - -Zunstable-options + RUSTDOCFLAGS: --cfg docsrs --show-type-layout --generate-link-to-definition --enable-index-page -Zunstable-options - name: Move docs to book folder run: | diff --git a/.github/workflows/compact.yml b/.github/workflows/compact.yml index 484b27c820d00..37a1588c1dcf7 100644 --- a/.github/workflows/compact.yml +++ b/.github/workflows/compact.yml @@ -1,12 +1,11 @@ # Ensures that `Compact` codec changes are backwards compatible. -# +# # 1) checkout `main` # 2) randomly generate and serialize to disk many different type vectors with `Compact` (eg. 
Header, Transaction, etc) # 3) checkout `pr` # 4) deserialize previously generated test vectors on: - pull_request: merge_group: push: @@ -26,6 +25,7 @@ jobs: - cargo run --bin reth --features "dev" - cargo run --bin op-reth --features "optimism dev" --manifest-path crates/optimism/bin/Cargo.toml steps: + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 with: diff --git a/.github/workflows/docker-git.yml b/.github/workflows/docker-git.yml index 2e3ad59aea925..716b393841beb 100644 --- a/.github/workflows/docker-git.yml +++ b/.github/workflows/docker-git.yml @@ -26,6 +26,7 @@ jobs: contents: read steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 with: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 7b6f0d51e9c9c..d7b42a54edfe5 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -25,6 +25,7 @@ jobs: contents: read steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 with: diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 82bd5705a3200..2156710fb57a4 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -29,6 +29,7 @@ jobs: timeout-minutes: 60 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - name: Install Geth run: .github/assets/install_geth.sh diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index d968fd4292a63..7a156638fe5cc 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -28,6 +28,7 @@ jobs: features: "" steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@clippy with: toolchain: nightly-2025-01-16 @@ -50,6 +51,7 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@nightly with: toolchain: nightly-2025-01-16 @@ -66,6 +68,7 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable with: target: wasm32-wasip1 @@ -84,6 +87,7 @@ jobs: timeout-minutes: 60 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable with: target: riscv32imac-unknown-none-elf @@ -100,6 +104,7 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@cargo-hack - uses: Swatinem/rust-cache@v2 @@ -121,6 +126,7 @@ jobs: network: optimism steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@master with: toolchain: "1.82" # MSRV @@ -137,6 +143,7 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@nightly - uses: Swatinem/rust-cache@v2 with: @@ -153,6 +160,7 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@nightly with: components: rustfmt @@ -167,6 +175,7 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@nightly - uses: Swatinem/rust-cache@v2 with: @@ -180,10 +189,8 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: 
dtolnay/rust-toolchain@nightly - - uses: dtolnay/rust-toolchain@master - with: - toolchain: "1.82" # MSRV - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true @@ -218,6 +225,7 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - name: Ensure no arbitrary or proptest dependency on default build run: cargo tree --package reth -e=features,no-dev | grep -Eq "arbitrary|proptest" && exit 1 || exit 0 @@ -229,6 +237,7 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@clippy - uses: Swatinem/rust-cache@v2 with: diff --git a/.github/workflows/prepare-reth.yml b/.github/workflows/prepare-reth.yml index 1b5f0e70cc966..4fadef1265ad5 100644 --- a/.github/workflows/prepare-reth.yml +++ b/.github/workflows/prepare-reth.yml @@ -31,6 +31,7 @@ jobs: steps: - uses: actions/checkout@v4 - run: mkdir artifacts + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5735ae6ef5287..ac2be796b8228 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -54,6 +54,7 @@ jobs: binary: op-reth steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable with: target: ${{ matrix.configs.target }} diff --git a/.github/workflows/stage.yml b/.github/workflows/stage.yml index 60ffa8f73d728..5c3262827626c 100644 --- a/.github/workflows/stage.yml +++ b/.github/workflows/stage.yml @@ -6,7 +6,7 @@ on: pull_request: merge_group: push: - branches: [ main ] + branches: [main] env: CARGO_TERM_COLOR: always @@ -30,6 +30,7 @@ jobs: timeout-minutes: 60 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 with: diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml index 531d04b2e4891..a19ca08d7162d 100644 --- a/.github/workflows/sync.yml +++ b/.github/workflows/sync.yml @@ -38,6 +38,7 @@ jobs: unwind-target: "0x118a6e922a8c6cab221fc5adfe5056d2b72d58c6580e9c5629de55299e2cf8de" steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 with: diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 4c927df8be004..5fff6721b85b5 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -49,6 +49,7 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 with: @@ -84,6 +85,7 @@ jobs: path: testing/ef-tests/ethereum-tests submodules: recursive fetch-depth: 1 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@nextest - uses: Swatinem/rust-cache@v2 @@ -103,6 +105,7 @@ jobs: network: ["ethereum", "optimism"] steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 with: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 03c491b368a65..d79c493799239 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -16,6 +16,7 @@ jobs: steps: - uses: actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable with: target: x86_64-pc-windows-gnu @@ -34,6 +35,7 @@ jobs: steps: - uses: 
actions/checkout@v4 + - uses: rui314/setup-mold@v1 - uses: dtolnay/rust-toolchain@stable with: target: x86_64-pc-windows-gnu From f66c7cc80d135452aac8eea97c5cfac26005b9d9 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Fri, 17 Jan 2025 13:46:16 +0100 Subject: [PATCH 099/113] chore(trie): make rlp_node fns pub (#13818) --- crates/trie/sparse/src/trie.rs | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 9c35f7dee3338..1c5453cb63302 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -645,12 +645,26 @@ impl
<P> RevealedSparseTrie<P>
{ targets } - fn rlp_node_allocate(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { + /// Look up or calculate the RLP of the node at the given path. + /// + /// # Panics + /// + /// If the node at provided path does not exist. + pub fn rlp_node_allocate(&mut self, path: Nibbles, prefix_set: &mut PrefixSet) -> RlpNode { let mut buffers = RlpNodeBuffers::new_with_path(path); self.rlp_node(prefix_set, &mut buffers) } - fn rlp_node(&mut self, prefix_set: &mut PrefixSet, buffers: &mut RlpNodeBuffers) -> RlpNode { + /// Look up or calculate the RLP of the node at the given path specified in [`RlpNodeBuffers`]. + /// + /// # Panics + /// + /// If the node at provided path does not exist. + pub fn rlp_node( + &mut self, + prefix_set: &mut PrefixSet, + buffers: &mut RlpNodeBuffers, + ) -> RlpNode { 'main: while let Some((path, mut is_in_prefix_set)) = buffers.path_stack.pop() { // Check if the path is in the prefix set. // First, check the cached value. If it's `None`, then check the prefix set, and update @@ -1338,7 +1352,7 @@ struct RemovedSparseNode { /// Collection of reusable buffers for [`RevealedSparseTrie::rlp_node`]. #[derive(Debug, Default)] -struct RlpNodeBuffers { +pub struct RlpNodeBuffers { /// Stack of paths we need rlp nodes for and whether the path is in the prefix set. path_stack: Vec<(Nibbles, Option)>, /// Stack of rlp nodes From 12d3fbefffa54e3b157e8f60b658caa6959a5e35 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 17 Jan 2025 12:49:07 +0000 Subject: [PATCH 100/113] fix(trie): check branch node masks if `store_in_db_trie` is `None` (#13828) --- crates/trie/sparse/src/trie.rs | 42 ++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 17 deletions(-) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 1c5453cb63302..4a4f0f185ceb6 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -694,7 +694,10 @@ impl
<P> RevealedSparseTrie<P>
{ if let Some((hash, store_in_db_trie)) = hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) { - (RlpNode::word_rlp(&hash), SparseNodeType::Extension { store_in_db_trie }) + ( + RlpNode::word_rlp(&hash), + SparseNodeType::Extension { store_in_db_trie: Some(store_in_db_trie) }, + ) } else if buffers.rlp_node_stack.last().is_some_and(|e| e.0 == child_path) { let (_, child, child_node_type) = buffers.rlp_node_stack.pop().unwrap(); self.rlp_buf.clear(); @@ -711,7 +714,7 @@ impl
<P> RevealedSparseTrie<P>
{ "Extension node" ); - *store_in_db_trie = Some(store_in_db_trie_value); + *store_in_db_trie = store_in_db_trie_value; ( rlp_node, @@ -734,7 +737,7 @@ impl
<P> RevealedSparseTrie<P>
{ buffers.rlp_node_stack.push(( path, RlpNode::word_rlp(&hash), - SparseNodeType::Branch { store_in_db_trie }, + SparseNodeType::Branch { store_in_db_trie: Some(store_in_db_trie) }, )); continue } @@ -769,17 +772,19 @@ impl
<P> RevealedSparseTrie<P>
{ let last_child_nibble = child_path.last().unwrap(); // Determine whether we need to set trie mask bit. - let should_set_tree_mask_bit = - // A blinded node has the tree mask bit set - ( - child_node_type.is_hash() && - self.branch_node_tree_masks - .get(&path) - .is_some_and(|mask| mask.is_bit_set(last_child_nibble)) - ) || + let should_set_tree_mask_bit = if let Some(store_in_db_trie) = + child_node_type.store_in_db_trie() + { // A branch or an extension node explicitly set the // `store_in_db_trie` flag - child_node_type.store_in_db_trie(); + store_in_db_trie + } else { + // A blinded node has the tree mask bit set + child_node_type.is_hash() && + self.branch_node_tree_masks.get(&path).is_some_and( + |mask| mask.is_bit_set(last_child_nibble), + ) + }; if should_set_tree_mask_bit { tree_mask.set_bit(last_child_nibble); } @@ -882,7 +887,10 @@ impl
<P> RevealedSparseTrie<P>
{ }; *store_in_db_trie = Some(store_in_db_trie_value); - (rlp_node, SparseNodeType::Branch { store_in_db_trie: store_in_db_trie_value }) + ( + rlp_node, + SparseNodeType::Branch { store_in_db_trie: Some(store_in_db_trie_value) }, + ) } }; buffers.rlp_node_stack.push((path, rlp_node, node_type)); @@ -1227,12 +1235,12 @@ enum SparseNodeType { /// Sparse extension node. Extension { /// A flag indicating whether the extension node should be stored in the database. - store_in_db_trie: bool, + store_in_db_trie: Option, }, /// Sparse branch node. Branch { /// A flag indicating whether the branch node should be stored in the database. - store_in_db_trie: bool, + store_in_db_trie: Option, }, } @@ -1245,12 +1253,12 @@ impl SparseNodeType { matches!(self, Self::Branch { .. }) } - const fn store_in_db_trie(&self) -> bool { + const fn store_in_db_trie(&self) -> Option { match *self { Self::Extension { store_in_db_trie } | Self::Branch { store_in_db_trie } => { store_in_db_trie } - _ => false, + _ => None, } } } From 0cc1ff0a0a099d8558d6e44803702367d8fa25e5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 17 Jan 2025 13:49:11 +0100 Subject: [PATCH 101/113] feat: expose additional eth functions on engine api (#13837) --- crates/rpc/rpc-api/src/engine.rs | 12 +++++++++++- crates/rpc/rpc/src/engine.rs | 16 +++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index f78b8349be86a..22dbc822ddfb2 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -221,10 +221,12 @@ pub trait EngineApi { /// A subset of the ETH rpc interface: /// +/// This also includes additional eth functions required by optimism. +/// /// Specifically for the engine auth server: #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] -pub trait EngineEthApi { +pub trait EngineEthApi { /// Returns an object with data about the sync status or false. #[method(name = "syncing")] fn syncing(&self) -> RpcResult; @@ -259,10 +261,18 @@ pub trait EngineEthApi { #[method(name = "getBlockByNumber")] async fn block_by_number(&self, number: BlockNumberOrTag, full: bool) -> RpcResult>; + /// Returns all transaction receipts for a given block. + #[method(name = "getBlockReceipts")] + async fn block_receipts(&self, block_id: BlockId) -> RpcResult>>; + /// Sends signed transaction, returning its hash. #[method(name = "sendRawTransaction")] async fn send_raw_transaction(&self, bytes: Bytes) -> RpcResult; + /// Returns the receipt of a transaction by transaction hash. + #[method(name = "getTransactionReceipt")] + async fn transaction_receipt(&self, hash: B256) -> RpcResult>; + /// Returns logs matching given filter object. 
#[method(name = "getLogs")] async fn logs(&self, filter: Filter) -> RpcResult>; diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index a9c316571acd9..a398aad05f02b 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -34,7 +34,7 @@ impl EngineEthApi { } #[async_trait::async_trait] -impl EngineEthApiServer> +impl EngineEthApiServer, RpcReceipt> for EngineEthApi where Eth: EthApiServer< @@ -103,11 +103,25 @@ where self.eth.block_by_number(number, full).instrument(engine_span!()).await } + async fn block_receipts( + &self, + block_id: BlockId, + ) -> Result>>> { + self.eth.block_receipts(block_id).instrument(engine_span!()).await + } + /// Handler for: `eth_sendRawTransaction` async fn send_raw_transaction(&self, bytes: Bytes) -> Result { self.eth.send_raw_transaction(bytes).instrument(engine_span!()).await } + async fn transaction_receipt( + &self, + hash: B256, + ) -> Result>> { + self.eth.transaction_receipt(hash).instrument(engine_span!()).await + } + /// Handler for `eth_getLogs` async fn logs(&self, filter: Filter) -> Result> { self.eth_filter.logs(filter).instrument(engine_span!()).await From 5db01290f877ea4b8762c25a343bbcb6ea053ebb Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 17 Jan 2025 08:03:11 -0500 Subject: [PATCH 102/113] chore: remove redundant strategy fn impls (#13838) Co-authored-by: DaniPopes <57450786+DaniPopes@users.noreply.github.com> --- crates/evm/src/execute.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 2b221f14564a7..8ebba4c71236e 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -688,20 +688,9 @@ mod tests { &mut self.state } - fn with_state_hook(&mut self, _hook: Option>) {} - fn finish(&mut self) -> BundleState { self.finish_result.clone() } - - fn validate_block_post_execution( - &self, - _block: &RecoveredBlock, - _receipts: &[Receipt], - _requests: &Requests, - ) -> Result<(), ConsensusError> { - Ok(()) - } } #[derive(Clone)] From 1e0d4bcb72ff2c25d4aadbc1175ad5ae2ec5c32f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 3 Jan 2025 16:47:09 +0100 Subject: [PATCH 103/113] chore(ci): strip CI to op-reth testing essentials (#17) Merges down and cleans up changes to ci from isthmus-devnet-0 branch. Ref https://github.com/ethereum-optimism/op-reth/issues/8. 
- Removes jobs: for L1, for deploying, some maintenance jobs - Removes cron jobs --- .github/assets/hive/Dockerfile | 9 - .github/assets/hive/build_simulators.sh | 38 --- .github/assets/hive/expected_failures.yaml | 53 ---- .github/assets/hive/load_images.sh | 25 -- .github/assets/hive/no_sim_build.diff | 52 ---- .github/assets/hive/run_simulator.sh | 38 --- .github/assets/kurtosis_network_params.yaml | 13 - .github/assets/label_pr.js | 57 ---- .github/workflows/bench.yml | 6 +- .github/workflows/compact.yml | 5 +- .github/workflows/hive.yml | 232 ----------------- .github/workflows/integration.yml | 8 +- .github/workflows/kurtosis-op.yml | 9 +- .github/workflows/kurtosis.yml | 67 ----- .github/workflows/label-pr.yml | 23 -- .github/workflows/lint.yml | 10 +- .github/workflows/prepare-reth.yml | 5 +- .github/workflows/release-dist.yml | 20 -- .github/workflows/sync.yml | 17 +- .github/workflows/unit.yml | 37 +-- .github/workflows/windows.yml | 35 ++- Cargo.lock | 274 ++++++++++---------- 22 files changed, 206 insertions(+), 827 deletions(-) delete mode 100644 .github/assets/hive/Dockerfile delete mode 100755 .github/assets/hive/build_simulators.sh delete mode 100644 .github/assets/hive/expected_failures.yaml delete mode 100755 .github/assets/hive/load_images.sh delete mode 100644 .github/assets/hive/no_sim_build.diff delete mode 100755 .github/assets/hive/run_simulator.sh delete mode 100644 .github/assets/kurtosis_network_params.yaml delete mode 100644 .github/assets/label_pr.js delete mode 100644 .github/workflows/hive.yml delete mode 100644 .github/workflows/kurtosis.yml delete mode 100644 .github/workflows/label-pr.yml delete mode 100644 .github/workflows/release-dist.yml diff --git a/.github/assets/hive/Dockerfile b/.github/assets/hive/Dockerfile deleted file mode 100644 index 25b71bf218724..0000000000000 --- a/.github/assets/hive/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM ubuntu - -COPY dist/reth /usr/local/bin - -COPY LICENSE-* ./ - -EXPOSE 30303 30303/udp 9001 8545 8546 -ENV RUST_LOG=debug -ENTRYPOINT ["/usr/local/bin/reth"] diff --git a/.github/assets/hive/build_simulators.sh b/.github/assets/hive/build_simulators.sh deleted file mode 100755 index b33f4db4ee79b..0000000000000 --- a/.github/assets/hive/build_simulators.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -# Create the hive_assets directory -mkdir hive_assets/ - -cd hivetests -go build . 
- -./hive -client reth # first builds and caches the client - -# Run each hive command in the background for each simulator and wait -echo "Building images" -./hive -client reth --sim "pyspec" -sim.timelimit 1s || true & -./hive -client reth --sim "ethereum/engine" -sim.timelimit 1s || true & -./hive -client reth --sim "devp2p" -sim.timelimit 1s || true & -./hive -client reth --sim "ethereum/rpc-compat" -sim.timelimit 1s || true & -./hive -client reth --sim "smoke/genesis" -sim.timelimit 1s || true & -./hive -client reth --sim "smoke/network" -sim.timelimit 1s || true & -./hive -client reth --sim "ethereum/sync" -sim.timelimit 1s || true & -wait - -# Run docker save in parallel and wait -echo "Saving images" -docker save hive/hiveproxy:latest -o ../hive_assets/hiveproxy.tar & -docker save hive/simulators/devp2p:latest -o ../hive_assets/devp2p.tar & -docker save hive/simulators/ethereum/engine:latest -o ../hive_assets/engine.tar & -docker save hive/simulators/ethereum/rpc-compat:latest -o ../hive_assets/rpc_compat.tar & -docker save hive/simulators/ethereum/pyspec:latest -o ../hive_assets/pyspec.tar & -docker save hive/simulators/smoke/genesis:latest -o ../hive_assets/smoke_genesis.tar & -docker save hive/simulators/smoke/network:latest -o ../hive_assets/smoke_network.tar & -docker save hive/simulators/ethereum/sync:latest -o ../hive_assets/ethereum_sync.tar & -wait - -# Make sure we don't rebuild images on the CI jobs -git apply ../.github/assets/hive/no_sim_build.diff -go build . -mv ./hive ../hive_assets/ diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml deleted file mode 100644 index c1c7ff71cf8e9..0000000000000 --- a/.github/assets/hive/expected_failures.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# https://github.com/paradigmxyz/reth/issues/7015 -# https://github.com/paradigmxyz/reth/issues/6332 -rpc-compat: - - debug_getRawBlock/get-invalid-number (reth) - - debug_getRawHeader/get-invalid-number (reth) - - debug_getRawReceipts/get-invalid-number (reth) - - debug_getRawTransaction/get-invalid-hash (reth) - - - eth_call/call-callenv (reth) - - eth_feeHistory/fee-history (reth) - - eth_getStorageAt/get-storage-invalid-key-too-large (reth) - - eth_getStorageAt/get-storage-invalid-key (reth) - - eth_getTransactionReceipt/get-access-list (reth) - - eth_getTransactionReceipt/get-blob-tx (reth) - - eth_getTransactionReceipt/get-dynamic-fee (reth) - -# https://github.com/paradigmxyz/reth/issues/8732 -engine-withdrawals: - - Withdrawals Fork On Genesis (Paris) (reth) - - Withdrawals Fork on Block 1 (Paris) (reth) - - Withdrawals Fork on Block 2 (Paris) (reth) - - Withdrawals Fork on Block 3 (Paris) (reth) - - Withdraw to a single account (Paris) (reth) - - Withdraw to two accounts (Paris) (reth) - - Withdraw many accounts (Paris) (reth) - - Withdraw zero amount (Paris) (reth) - - Empty Withdrawals (Paris) (reth) - - Corrupted Block Hash Payload (INVALID) (Paris) (reth) - - Withdrawals Fork on Block 1 - 8 Block Re-Org NewPayload (Paris) (reth) - - Withdrawals Fork on Block 1 - 8 Block Re-Org, Sync (Paris) (reth) - - Withdrawals Fork on Block 8 - 10 Block Re-Org NewPayload (Paris) (reth) - - Withdrawals Fork on Block 8 - 10 Block Re-Org Sync (Paris) (reth) - - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org (Paris) (reth) - - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org Sync (Paris) (reth) - - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org (Paris) (reth) - - Withdrawals Fork on 
Canonical Block 8 / Side Block 9 - 10 Block Re-Org Sync (Paris) (reth) - -engine-api: [] - -# https://github.com/paradigmxyz/reth/issues/8305 -# https://github.com/paradigmxyz/reth/issues/6217 -# https://github.com/paradigmxyz/reth/issues/8306 -# https://github.com/paradigmxyz/reth/issues/7144 -engine-cancun: - - Blob Transaction Ordering, Multiple Clients (Cancun) (reth) - - Invalid PayloadAttributes, Missing BeaconRoot, Syncing=True (Cancun) (reth) - - Invalid NewPayload, ExcessBlobGas, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - - Invalid NewPayload, VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - - Invalid NewPayload, VersionedHashes Version, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - - Invalid NewPayload, Incomplete VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - - Invalid NewPayload, Extra VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) - -sync: [] diff --git a/.github/assets/hive/load_images.sh b/.github/assets/hive/load_images.sh deleted file mode 100755 index 05e1cb9905fae..0000000000000 --- a/.github/assets/hive/load_images.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -# List of tar files to load -IMAGES=( - "/tmp/hiveproxy.tar" - "/tmp/devp2p.tar" - "/tmp/engine.tar" - "/tmp/rpc_compat.tar" - "/tmp/pyspec.tar" - "/tmp/smoke_genesis.tar" - "/tmp/smoke_network.tar" - "/tmp/ethereum_sync.tar" - "/tmp/reth_image.tar" -) - -# Loop through the images and load them -for IMAGE_TAR in "${IMAGES[@]}"; do - echo "Loading image $IMAGE_TAR..." - docker load -i "$IMAGE_TAR" & -done - -wait - -docker image ls -a \ No newline at end of file diff --git a/.github/assets/hive/no_sim_build.diff b/.github/assets/hive/no_sim_build.diff deleted file mode 100644 index 6127a4ecb7325..0000000000000 --- a/.github/assets/hive/no_sim_build.diff +++ /dev/null @@ -1,52 +0,0 @@ -diff --git a/internal/libdocker/builder.go b/internal/libdocker/builder.go -index e4bf99b6..2023f7e2 100644 ---- a/internal/libdocker/builder.go -+++ b/internal/libdocker/builder.go -@@ -8,7 +8,6 @@ import ( - "io" - "io/fs" - "log/slog" -- "os" - "path/filepath" - "slices" - "strings" -@@ -49,25 +48,8 @@ func (b *Builder) BuildClientImage(ctx context.Context, client libhive.ClientDes - - // BuildSimulatorImage builds a docker image of a simulator. 
- func (b *Builder) BuildSimulatorImage(ctx context.Context, name string, buildArgs map[string]string) (string, error) { -- dir := b.config.Inventory.SimulatorDirectory(name) -- buildContextPath := dir -- buildDockerfile := "Dockerfile" -- -- // build context dir of simulator can be overridden with "hive_context.txt" file containing the desired build path -- if contextPathBytes, err := os.ReadFile(filepath.Join(filepath.FromSlash(dir), "hive_context.txt")); err == nil { -- buildContextPath = filepath.Join(dir, strings.TrimSpace(string(contextPathBytes))) -- if strings.HasPrefix(buildContextPath, "../") { -- return "", fmt.Errorf("cannot access build directory outside of Hive root: %q", buildContextPath) -- } -- if p, err := filepath.Rel(buildContextPath, filepath.Join(filepath.FromSlash(dir), "Dockerfile")); err != nil { -- return "", fmt.Errorf("failed to derive relative simulator Dockerfile path: %v", err) -- } else { -- buildDockerfile = p -- } -- } - tag := fmt.Sprintf("hive/simulators/%s:latest", name) -- err := b.buildImage(ctx, buildContextPath, buildDockerfile, tag, buildArgs) -- return tag, err -+ return tag, nil - } - - // BuildImage creates a container by archiving the given file system, -diff --git a/internal/libdocker/proxy.go b/internal/libdocker/proxy.go -index d3a14ae6..8779671e 100644 ---- a/internal/libdocker/proxy.go -+++ b/internal/libdocker/proxy.go -@@ -16,7 +16,7 @@ const hiveproxyTag = "hive/hiveproxy" - - // Build builds the hiveproxy image. - func (cb *ContainerBackend) Build(ctx context.Context, b libhive.Builder) error { -- return b.BuildImage(ctx, hiveproxyTag, hiveproxy.Source) -+ return nil - } - - // ServeAPI starts the API server. diff --git a/.github/assets/hive/run_simulator.sh b/.github/assets/hive/run_simulator.sh deleted file mode 100755 index 731c94c3f69b3..0000000000000 --- a/.github/assets/hive/run_simulator.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -# set -x - -cd hivetests/ - -sim="${1}" -limit="${2}" - -run_hive() { - hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 4 --client reth 2>&1 | tee /tmp/log || true -} - -check_log() { - tail -n 1 /tmp/log | sed -r 's/\x1B\[[0-9;]*[mK]//g' -} - -attempt=0 -max_attempts=5 - -while [ $attempt -lt $max_attempts ]; do - run_hive - - # Check if no tests were run. sed removes ansi colors - if check_log | grep -q "suites=0"; then - echo "no tests were run, retrying in 10 seconds" - sleep 10 - attempt=$((attempt + 1)) - continue - fi - - # Check the last line of the log for "finished", "tests failed", or "test failed" - if check_log | grep -Eq "(finished|tests? failed)"; then - exit 0 - else - exit 1 - fi -done -exit 1 diff --git a/.github/assets/kurtosis_network_params.yaml b/.github/assets/kurtosis_network_params.yaml deleted file mode 100644 index e8cc1b51dc81c..0000000000000 --- a/.github/assets/kurtosis_network_params.yaml +++ /dev/null @@ -1,13 +0,0 @@ -participants: - - el_type: geth - cl_type: lighthouse - - el_type: reth - el_image: "ghcr.io/paradigmxyz/reth:kurtosis-ci" - cl_type: teku -additional_services: - - assertoor -assertoor_params: - run_block_proposal_check: true - run_transaction_test: true - run_blob_transaction_test: true - run_opcodes_transaction_test: true diff --git a/.github/assets/label_pr.js b/.github/assets/label_pr.js deleted file mode 100644 index 16ace2db03270..0000000000000 --- a/.github/assets/label_pr.js +++ /dev/null @@ -1,57 +0,0 @@ -// Filter function for labels we do not want on PRs automatically. 
-function shouldIncludeLabel (label) { - const isStatus = label.startsWith('S-'); - const isTrackingIssue = label === 'C-tracking-issue'; - const isPreventStale = label === 'M-prevent-stale'; - const isDifficulty = label.startsWith('D-'); - - return !isStatus && !isTrackingIssue && !isPreventStale && !isDifficulty; -} - -// Get the issue number from an issue link in the forms ` ` or ` #`. -function getIssueLink (repoUrl, body) { - const urlPattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) ${repoUrl}/issues/(?\\d+)`, 'i') - const issuePattern = new RegExp(`(close|closes|closed|fix|fixes|fixed|resolve|resolves|resolved) \#(?\\d+)`, 'i') - - const urlRe = body.match(urlPattern); - const issueRe = body.match(issuePattern); - if (urlRe?.groups?.issue_number) { - return urlRe.groups.issue_number - } else { - return issueRe?.groups?.issue_number - } -} - -module.exports = async ({ github, context }) => { - try { - const prNumber = context.payload.pull_request.number; - const prBody = context.payload.pull_request.body; - const repo = context.repo; - - const repoUrl = context.payload.repository.html_url; - const issueNumber = getIssueLink(repoUrl, prBody); - if (!issueNumber) { - console.log('No issue reference found in PR description.'); - return; - } - - const issue = await github.rest.issues.get({ - ...repo, - issue_number: issueNumber, - }); - - const issueLabels = issue.data.labels - .map(label => label.name) - .filter(shouldIncludeLabel); - if (issueLabels.length > 0) { - await github.rest.issues.addLabels({ - ...repo, - issue_number: prNumber, - labels: issueLabels, - }); - } - } catch (err) { - console.error('Failed to label PR'); - console.error(err); - } -} diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 0215bf304c11f..86e12eccfe962 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -1,11 +1,7 @@ # Runs benchmarks. on: - pull_request: - # TODO: Disabled temporarily for https://github.com/CodSpeedHQ/runner/issues/55 - # merge_group: - push: - branches: [main] + workflow_dispatch: env: CARGO_TERM_COLOR: always diff --git a/.github/workflows/compact.yml b/.github/workflows/compact.yml index 37a1588c1dcf7..a94d9f7f0a08b 100644 --- a/.github/workflows/compact.yml +++ b/.github/workflows/compact.yml @@ -17,12 +17,11 @@ env: name: compact-codec jobs: compact-codec: - runs-on: - group: Reth + runs-on: ubuntu-latest strategy: matrix: bin: - - cargo run --bin reth --features "dev" + #- cargo run --bin reth --features "dev" - cargo run --bin op-reth --features "optimism dev" --manifest-path crates/optimism/bin/Cargo.toml steps: - uses: rui314/setup-mold@v1 diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml deleted file mode 100644 index 334df715a2635..0000000000000 --- a/.github/workflows/hive.yml +++ /dev/null @@ -1,232 +0,0 @@ -# Runs `ethereum/hive` tests. 
- -name: hive - -on: - workflow_dispatch: - schedule: - # run every 12 hours - - cron: "0 */12 * * *" - -env: - CARGO_TERM_COLOR: always - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - prepare-reth: - uses: ./.github/workflows/prepare-reth.yml - with: - image_tag: ghcr.io/paradigmxyz/reth:latest - binary_name: reth - - prepare-hive: - if: github.repository == 'paradigmxyz/reth' - timeout-minutes: 45 - runs-on: - group: Reth - steps: - - uses: actions/checkout@v4 - - name: Checkout hive tests - uses: actions/checkout@v4 - with: - repository: ethereum/hive - ref: master - path: hivetests - - - uses: actions/setup-go@v5 - with: - go-version: "^1.13.1" - - run: go version - - - name: Build hive assets - run: .github/assets/hive/build_simulators.sh - - - name: Upload hive assets - uses: actions/upload-artifact@v4 - with: - name: hive_assets - path: ./hive_assets - test: - timeout-minutes: 60 - strategy: - fail-fast: false - matrix: - # ethereum/rpc to be deprecated: - # https://github.com/ethereum/hive/pull/1117 - scenario: - - sim: smoke/genesis - - sim: smoke/network - - sim: ethereum/sync - - sim: devp2p - limit: discv4 - - sim: devp2p - limit: eth - include: - # status - - TestStatus - # get block headers - - TestGetBlockHeaders - - TestSimultaneousRequests - - TestSameRequestID - - TestZeroRequestID - # get block bodies - - TestGetBlockBodies - # malicious handshakes + status - - TestMaliciousHandshake - - TestMaliciousStatus - # test transactions - - TestLargeTxRequest - - TestTransaction - - TestInvalidTxs - - TestNewPooledTxs - - TestBlobViolations - - sim: ethereum/engine - limit: engine-exchange-capabilities - - sim: ethereum/engine - limit: engine-withdrawals - - sim: ethereum/engine - limit: engine-auth - - sim: ethereum/engine - limit: engine-api - - sim: ethereum/engine - limit: cancun - # eth_ rpc methods - - sim: ethereum/rpc-compat - include: - - eth_blockNumber - - eth_call - - eth_chainId - - eth_createAccessList - - eth_estimateGas - - eth_feeHistory - - eth_getBalance - - eth_getBlockBy - - eth_getBlockTransactionCountBy - - eth_getCode - - eth_getProof - - eth_getStorage - - eth_getTransactionBy - - eth_getTransactionCount - - eth_getTransactionReceipt - - eth_sendRawTransaction - - eth_syncing - # debug_ rpc methods - - debug_ - # Pyspec cancun jobs - # TODO: uncomment when https://github.com/ethereum/hive/issues/1147 is fixed - #- sim: pyspec - # include: [cancun/eip4844] - #- sim: pyspec - # include: [cancun/eip4788] - #- sim: pyspec - # include: [cancun/eip6780] - #- sim: pyspec - # include: [cancun/eip5656] - #- sim: pyspec - # include: [cancun/eip1153] - #- sim: pyspec - # include: [cancun/eip7516] - # Pyspec shanghai jobs - #- sim: pyspec - # include: [shanghai/eip3651] - #- sim: pyspec - # include: [shanghai/eip3855] - #- sim: pyspec - # include: [shanghai/eip3860] - #- sim: pyspec - # include: [shanghai/eip4895] - # Pyspec merge and earlier jobs - #- sim: pyspec - # include: [merge/] - #- sim: pyspec - # include: [berlin/] - #- sim: pyspec - # include: [istanbul/] - #- sim: pyspec - # include: [homestead/] - #- sim: pyspec - # include: [frontier/] - needs: - - prepare-reth - - prepare-hive - name: run ${{ matrix.scenario.sim }}${{ matrix.scenario.limit && format(' - {0}', matrix.scenario.limit) }} - runs-on: - group: Reth - permissions: - issues: write - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Download hive assets - uses: actions/download-artifact@v4 - 
with: - name: hive_assets - path: /tmp - - - name: Download reth image - uses: actions/download-artifact@v4 - with: - name: artifacts - path: /tmp - - - name: Load Docker images - run: .github/assets/hive/load_images.sh - - - name: Move hive binary - run: | - mv /tmp/hive /usr/local/bin - chmod +x /usr/local/bin/hive - - - name: Checkout hive tests - uses: actions/checkout@v4 - with: - repository: ethereum/hive - ref: master - path: hivetests - - - name: Run simulator - run: | - LIMIT="${{ matrix.scenario.limit }}" - TESTS="${{ join(matrix.scenario.include, '|') }}" - if [ -n "$LIMIT" ] && [ -n "$TESTS" ]; then - FILTER="$LIMIT/$TESTS" - elif [ -n "$LIMIT" ]; then - FILTER="$LIMIT" - elif [ -n "$TESTS" ]; then - FILTER="/$TESTS" - else - FILTER="/" - fi - echo "filter: $FILTER" - .github/assets/hive/run_simulator.sh "${{ matrix.scenario.sim }}" "$FILTER" - - - name: Parse hive output - run: | - find hivetests/workspace/logs -type f -name "*.json" ! -name "hive.json" | xargs -I {} python .github/assets/hive/parse.py {} --exclusion .github/assets/hive/expected_failures.yaml - - - name: Print simulator output - if: ${{ failure() }} - run: | - cat hivetests/workspace/logs/*simulator*.log - - - name: Print reth client logs - if: ${{ failure() }} - run: | - cat hivetests/workspace/logs/reth/client-*.log - notify-on-error: - needs: test - if: failure() - runs-on: - group: Reth - steps: - - name: Slack Webhook Action - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_COLOR: ${{ job.status }} - SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}" - SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 2156710fb57a4..0958048dd0352 100644 --- a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -19,13 +19,15 @@ concurrency: jobs: test: name: test / ${{ matrix.network }} - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_BACKTRACE: 1 strategy: matrix: - network: ["ethereum", "optimism"] + network: [ + #"ethereum", + "optimism" + ] timeout-minutes: 60 steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op.yml index 85a8e706c6ebf..5a6d65941ee83 100644 --- a/.github/workflows/kurtosis-op.yml +++ b/.github/workflows/kurtosis-op.yml @@ -4,9 +4,6 @@ name: kurtosis-op on: workflow_dispatch: - schedule: - # run every 12 hours - - cron: "0 */12 * * *" env: CARGO_TERM_COLOR: always @@ -29,8 +26,7 @@ jobs: strategy: fail-fast: false name: run kurtosis - runs-on: - group: Reth + runs-on: ubuntu-latest needs: - prepare-reth steps: @@ -84,8 +80,7 @@ jobs: notify-on-error: needs: test if: failure() - runs-on: - group: Reth + runs-on: ubuntu-latest steps: - name: Slack Webhook Action uses: rtCamp/action-slack-notify@v2 diff --git a/.github/workflows/kurtosis.yml b/.github/workflows/kurtosis.yml deleted file mode 100644 index ab0c95939c8e1..0000000000000 --- a/.github/workflows/kurtosis.yml +++ /dev/null @@ -1,67 +0,0 @@ -# Runs `assertoor` tests on a `kurtosis` testnet. 
- -name: kurtosis - -on: - workflow_dispatch: - schedule: - # run every 12 hours - - cron: "0 */12 * * *" - -env: - CARGO_TERM_COLOR: always - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - prepare-reth: - uses: ./.github/workflows/prepare-reth.yml - with: - image_tag: ghcr.io/paradigmxyz/reth:kurtosis-ci - binary_name: reth - - test: - timeout-minutes: 60 - strategy: - fail-fast: false - name: run kurtosis - runs-on: - group: Reth - needs: - - prepare-reth - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Download reth image - uses: actions/download-artifact@v4 - with: - name: artifacts - path: /tmp - - - name: Load Docker image - run: | - docker load -i /tmp/reth_image.tar & - wait - docker image ls -a - - - name: Run kurtosis - uses: ethpandaops/kurtosis-assertoor-github-action@v1 - with: - ethereum_package_args: '.github/assets/kurtosis_network_params.yaml' - - notify-on-error: - needs: test - if: failure() - runs-on: - group: Reth - steps: - - name: Slack Webhook Action - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_COLOR: ${{ job.status }} - SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}" - SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/label-pr.yml b/.github/workflows/label-pr.yml deleted file mode 100644 index 07727173531be..0000000000000 --- a/.github/workflows/label-pr.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Label PRs - -on: - pull_request: - types: [opened] - -jobs: - label_prs: - runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Label PRs - uses: actions/github-script@v7 - with: - script: | - const label_pr = require('./.github/assets/label_pr.js') - await label_pr({github, context}) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 7a156638fe5cc..96f06225b7c78 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -17,9 +17,9 @@ jobs: strategy: matrix: include: - - type: ethereum - args: --bin reth --workspace --lib --examples --tests --benches --locked - features: "ethereum asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" + #- type: ethereum + # args: --bin reth --workspace --lib --examples --tests --benches --locked + # features: "ethereum asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" - type: optimism args: --bin op-reth --workspace --lib --examples --tests --benches --locked features: "optimism asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" @@ -120,8 +120,8 @@ jobs: strategy: matrix: include: - - binary: reth - network: ethereum + #- binary: reth + # network: ethereum - binary: op-reth network: optimism steps: diff --git a/.github/workflows/prepare-reth.yml b/.github/workflows/prepare-reth.yml index 4fadef1265ad5..b71ecac620853 100644 --- a/.github/workflows/prepare-reth.yml +++ b/.github/workflows/prepare-reth.yml @@ -24,10 +24,9 @@ on: jobs: prepare-reth: - if: github.repository == 'paradigmxyz/reth' + if: github.repository == 'ethereum-optimism/op-reth' timeout-minutes: 45 - runs-on: - group: Reth + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - run: mkdir artifacts diff --git a/.github/workflows/release-dist.yml 
b/.github/workflows/release-dist.yml deleted file mode 100644 index f7df80e81f9f6..0000000000000 --- a/.github/workflows/release-dist.yml +++ /dev/null @@ -1,20 +0,0 @@ -# This workflow auto-publishes Reth to external package managers such as -# Homebrew when a release is published. - -name: release externally - -on: - release: - types: [published] - -jobs: - release-homebrew: - runs-on: ubuntu-latest - steps: - - name: Update Homebrew formula - uses: dawidd6/action-homebrew-bump-formula@v4 - with: - token: ${{ secrets.HOMEBREW }} - no_fork: true - tap: paradigmxyz/brew - formula: reth diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml index a19ca08d7162d..383664d3cd173 100644 --- a/.github/workflows/sync.yml +++ b/.github/workflows/sync.yml @@ -3,7 +3,7 @@ name: sync test on: - merge_group: + workflow_dispatch: env: CARGO_TERM_COLOR: always @@ -15,8 +15,7 @@ concurrency: jobs: sync: name: sync (${{ matrix.chain.bin }}) - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1 @@ -24,12 +23,12 @@ jobs: strategy: matrix: chain: - - build: install - bin: reth - chain: mainnet - tip: "0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4" - block: 100000 - unwind-target: "0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a" + #- build: install + # bin: reth + # chain: mainnet + # tip: "0x91c90676cab257a59cd956d7cb0bceb9b1a71d79755c23c7277a0697ccfaf8c4" + # block: 100000 + # unwind-target: "0x52e0509d33a988ef807058e2980099ee3070187f7333aae12b64d4d675f34c5a" - build: install-op bin: op-reth chain: base diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 5fff6721b85b5..b1bdbafe00729 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -19,21 +19,20 @@ concurrency: jobs: test: name: test / ${{ matrix.type }} (${{ matrix.partition }}/${{ matrix.total_partitions }}) - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_BACKTRACE: 1 strategy: matrix: include: - - type: ethereum - args: --features "asm-keccak ethereum" --locked - partition: 1 - total_partitions: 2 - - type: ethereum - args: --features "asm-keccak ethereum" --locked - partition: 2 - total_partitions: 2 + #- type: ethereum + # args: --features "asm-keccak ethereum" --locked + # partition: 1 + # total_partitions: 2 + #- type: ethereum + # args: --features "asm-keccak ethereum" --locked + # partition: 2 + # total_partitions: 2 - type: optimism args: --features "asm-keccak optimism" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum" partition: 1 @@ -69,8 +68,8 @@ jobs: state: name: Ethereum state tests - runs-on: - group: Reth + if: false + runs-on: ubuntu-latest env: RUST_LOG: info,sync=error RUST_BACKTRACE: 1 @@ -95,14 +94,16 @@ jobs: doc: name: doc tests (${{ matrix.network }}) - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_BACKTRACE: 1 timeout-minutes: 30 strategy: matrix: - network: ["ethereum", "optimism"] + network: [ + #"ethereum", + "optimism" + ] steps: - uses: actions/checkout@v4 - uses: rui314/setup-mold@v1 @@ -117,7 +118,11 @@ jobs: name: unit success runs-on: ubuntu-latest if: always() - needs: [test, state, doc] + needs: [ + test, + #state, + doc + ] timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index d79c493799239..6782eb520c7ac 100644 --- a/.github/workflows/windows.yml +++ 
b/.github/workflows/windows.yml @@ -6,28 +6,27 @@ on: push: branches: [main] pull_request: - branches: [main] + branches: [optimism] merge_group: jobs: - check-reth: - runs-on: ubuntu-20.04 - timeout-minutes: 60 + #check-reth: + # runs-on: ubuntu-20.04 + # timeout-minutes: 60 - steps: - - uses: actions/checkout@v4 - - uses: rui314/setup-mold@v1 - - uses: dtolnay/rust-toolchain@stable - with: - target: x86_64-pc-windows-gnu - - uses: taiki-e/install-action@cross - - uses: Swatinem/rust-cache@v2 - with: - cache-on-failure: true - - name: mingw-w64 - run: sudo apt-get install -y mingw-w64 - - name: Check Reth - run: cargo check --target x86_64-pc-windows-gnu + # steps: + # - uses: actions/checkout@v4 + # - uses: dtolnay/rust-toolchain@stable + # with: + # target: x86_64-pc-windows-gnu + # - uses: taiki-e/install-action@cross + # - uses: Swatinem/rust-cache@v2 + # with: + # cache-on-failure: true + # - name: mingw-w64 + # run: sudo apt-get install -y mingw-w64 + # - name: Check Reth + # run: cargo check --target x86_64-pc-windows-gnu check-op-reth: runs-on: ubuntu-20.04 diff --git a/Cargo.lock b/Cargo.lock index 1aa711563b322..256f538b176d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -106,9 +106,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.1.53" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da226340862e036ab26336dc99ca85311c6b662267c1440e1733890fd688802c" +checksum = "56f15afc5993458b42739ab3b69bdb6b4c8112acd3997dbea9bc092c9517137c" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -460,7 +460,7 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -700,7 +700,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -716,7 +716,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", "syn-solidity", "tiny-keccak", ] @@ -732,7 +732,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", "syn-solidity", ] @@ -938,7 +938,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -1136,7 +1136,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -1172,18 +1172,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] name = "async-trait" -version = "0.1.85" +version = "0.1.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" +checksum = "1b1244b10dcd56c92219da4e14caa97e312079e185f04ba3eea25061561dc0a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -1221,7 +1221,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -1327,7 +1327,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -1510,7 +1510,7 @@ checksum = "9fd3f870829131332587f607a7ff909f1af5fc523fd1b192db55fbbdf52e8d3c" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.95", + "syn 2.0.94", "synstructure", ] @@ -1632,7 +1632,7 @@ checksum = "3fa76293b4f7bb636ab88fd78228235b5248b4d05cc589aed610f954af5d7c7a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -1821,9 +1821,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.24" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9560b07a799281c7e0958b9296854d6fafd4c5f31444a7e5bb1ad6dde5ccf1bd" +checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", "clap_derive", @@ -1831,9 +1831,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.24" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "874e0dd3eb68bf99058751ac9712f622e61e6f393a94f7128fa26e3f02f5c7cd" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ "anstream", "anstyle", @@ -1843,14 +1843,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.24" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -2326,7 +2326,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -2350,7 +2350,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -2361,7 +2361,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -2470,7 +2470,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -2491,7 +2491,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", "unicode-xid", ] @@ -2605,7 +2605,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -2751,7 +2751,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -2762,7 +2762,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -2782,7 +2782,7 @@ checksum = "3bf679796c0322556351f287a51b49e48f7c4986e727b5dd78c972d30e2e16cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -2816,9 +2816,9 @@ dependencies = [ [[package]] name = "ethereum_ssz" -version = "0.8.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862e41ea8eea7508f70cfd8cd560f0c34bb0af37c719a8e06c2672f0f031d8e5" +checksum = "036c84bd29bff35e29bbee3c8fc0e2fb95db12b6f2f3cae82a827fbc97256f3a" dependencies = [ "alloy-primitives", "ethereum_serde_utils", @@ -2831,14 +2831,14 @@ dependencies = [ [[package]] name = "ethereum_ssz_derive" -version = "0.8.2" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d31ecf6640112f61dc34b4d8359c081102969af0edd18381fed2052f6db6a410" +checksum = "9dc8e67e1f770f5aa4c2c2069aaaf9daee7ac21bed357a71b911b37a58966cfb" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -3411,7 +3411,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -4093,7 +4093,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -4150,7 +4150,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -4271,7 +4271,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -4520,7 +4520,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -4915,19 +4915,19 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] name = "metrics-exporter-prometheus" -version = "0.16.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12779523996a67c13c84906a876ac6fe4d07a6e1adb54978378e13f199251a62" +checksum = "85b6f8152da6d7892ff1b7a1c0fa3f435e92b5918ad67035c3bb432111d9a29b" dependencies = [ "base64 0.22.1", "indexmap 2.7.0", "metrics", - "metrics-util", + "metrics-util 0.18.0", "quanta", "thiserror 1.0.69", ] @@ -4948,6 +4948,20 @@ dependencies = [ "windows 0.58.0", ] +[[package]] +name = "metrics-util" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15b482df36c13dd1869d73d14d28cd4855fbd6cfc32294bee109908a9f4a4ed7" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", + "hashbrown 0.15.2", + "metrics", + "quanta", + "sketches-ddsketch", +] + [[package]] name = "metrics-util" version = "0.19.0" @@ -4960,7 +4974,6 @@ dependencies = [ "indexmap 2.7.0", "metrics", "ordered-float", - "quanta", "rand 0.8.5", "rand_xoshiro", "sketches-ddsketch", @@ -5064,16 +5077,16 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.10" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +checksum = "23db87a7f248211f6a7c8644a1b750541f8a4c68ae7de0f908860e44c0c201f6" dependencies = [ "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", "loom", "parking_lot", - "portable-atomic", + "quanta", "rustc_version 0.4.1", "smallvec", "tagptr", @@ -5305,7 +5318,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -5319,9 +5332,9 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.3.4" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +checksum = "a3409fc85ac27b27d971ea7cd1aabafd2eefa6de7e481c8d4f707225c117e81a" dependencies = [ "alloy-rlp", "arbitrary", @@ -5358,9 +5371,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "op-alloy-consensus" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "250244eadaf1a25e0e2ad263110ad2d1b43c2e57ddf4c025e71552d98196a8d3" +checksum = 
"0adb232ec805af3aa35606c19329aa7dc44c4457ae318ed0b8fc7f799dd7dbfe" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5376,9 +5389,9 @@ dependencies = [ [[package]] name = "op-alloy-genesis" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98334a9cdccc5878e9d5c48afc9cc1b84da58dbc68d41f9488d8f71688b495d3" +checksum = "84c272cfd65317538f5815c2b7059445230b050d48ebe2d0bab3e861d419a785" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5391,9 +5404,9 @@ dependencies = [ [[package]] name = "op-alloy-network" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd588157ac14db601d6497b81ae738b2581c60886fc592976fab6c282619604" +checksum = "19872a58b7acceeffb8e88ea048bee1690e7cde53068bd652976435d61fcd1de" dependencies = [ "alloy-consensus", "alloy-network", @@ -5406,9 +5419,9 @@ dependencies = [ [[package]] name = "op-alloy-protocol" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "753762429c31f838b59c886b31456c9bf02fd38fb890621665523a9087ae06ae" +checksum = "ad65d040648e0963ed378e88489f5805e24fb56b7e6611362299cd4c24debeb2" dependencies = [ "alloc-no-stdlib", "alloy-consensus", @@ -5416,10 +5429,9 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "alloy-serde", - "alloy-sol-types", "async-trait", "brotli", - "derive_more", + "cfg-if", "miniz_oxide", "op-alloy-consensus", "op-alloy-genesis", @@ -5431,9 +5443,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f483fb052ef807682ae5b5729c3a61a092ee4f7334e6e6055de67e9f28ef880" +checksum = "36b1f2547067c5b60f3144ae1033a54ce1d11341d8327fa8f203b048d51465e9" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5444,9 +5456,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1d3872021aa28b10fc6cf8252e792e802d89e8b2cdaa57dcb9243c461b286" +checksum = "e68d1a51fe3ee143f102b82f54fa237f21d12635da363276901e6d3ef6c65b7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -5462,9 +5474,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.9.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43f00d4060a6a38f5bf0a8182b4cc4c7071e2bc96942f414619251b522169eb" +checksum = "9f8833ef149ceb74f8f25a79801d110d88ec2db32e700fa10db6c5f5b5cbb71a" dependencies = [ "alloy-eips", "alloy-primitives", @@ -5664,9 +5676,9 @@ dependencies = [ [[package]] name = "phf" -version = "0.11.3" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" dependencies = [ "phf_macros", "phf_shared", @@ -5674,9 +5686,9 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.3" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" dependencies = [ "phf_shared", "rand 0.8.5", @@ -5684,51 +5696,51 @@ dependencies = [ [[package]] name = "phf_macros" -version = "0.11.3" +version = 
"0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" +checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b" dependencies = [ "phf_generator", "phf_shared", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] name = "phf_shared" -version = "0.11.3" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" dependencies = [ "siphasher", ] [[package]] name = "pin-project" -version = "1.1.8" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.8" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] name = "pin-project-lite" -version = "0.2.16" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -5863,12 +5875,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.27" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "483f8c21f64f3ea09fe0f30f5d48c3e8eefe5dac9129f0075f76593b4c1da705" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -5919,7 +5931,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -6017,7 +6029,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -6728,7 +6740,7 @@ dependencies = [ "proc-macro2", "quote", "similar-asserts", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -7487,7 +7499,7 @@ dependencies = [ "auto_impl", "futures-util", "metrics", - "metrics-util", + "metrics-util 0.19.0", "parking_lot", "reth-chainspec", "reth-consensus", @@ -8143,7 +8155,7 @@ dependencies = [ "metrics", "metrics-exporter-prometheus", "metrics-process", - "metrics-util", + "metrics-util 0.19.0", "procfs 0.16.0", "reqwest", "reth-metrics", @@ -9710,7 +9722,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.95", + "syn 2.0.94", "unicode-ident", ] @@ -9794,9 +9806,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.43" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ "bitflags 2.6.0", "errno", @@ -9842,7 +9854,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - 
"security-framework 3.2.0", + "security-framework 3.1.0", ] [[package]] @@ -10038,9 +10050,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.2.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +checksum = "81d3f8c9bfcc3cbb6b0179eb57042d75b1582bdc65c3cb95f3fa999509c03cbc" dependencies = [ "bitflags 2.6.0", "core-foundation 0.10.0", @@ -10051,9 +10063,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" dependencies = [ "core-foundation-sys", "libc", @@ -10115,14 +10127,14 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] name = "serde_json" -version = "1.0.135" +version = "1.0.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" +checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" dependencies = [ "indexmap 2.7.0", "itoa", @@ -10150,7 +10162,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -10201,7 +10213,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -10234,7 +10246,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -10391,9 +10403,9 @@ dependencies = [ [[package]] name = "siphasher" -version = "1.0.1" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" [[package]] name = "sketches-ddsketch" @@ -10536,7 +10548,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -10594,9 +10606,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.95" +version = "2.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" +checksum = "987bc0be1cdea8b10216bd06e2ca407d40b9543468fafd3ddfb02f36e77f71f3" dependencies = [ "proc-macro2", "quote", @@ -10612,7 +10624,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -10632,7 +10644,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -10710,7 +10722,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -10758,7 +10770,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -10769,7 +10781,7 @@ checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 
2.0.94", ] [[package]] @@ -10926,7 +10938,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -11127,7 +11139,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -11463,7 +11475,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -11533,7 +11545,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", "wasm-bindgen-shared", ] @@ -11568,7 +11580,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -11734,7 +11746,7 @@ checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -11745,7 +11757,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -11756,7 +11768,7 @@ checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -11767,7 +11779,7 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -12042,7 +12054,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", "synstructure", ] @@ -12064,7 +12076,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -12084,7 +12096,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", "synstructure", ] @@ -12105,7 +12117,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] @@ -12127,7 +12139,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.94", ] [[package]] From eee75099ef70d4f4284d8fb3043d2088b8a590f8 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 7 Jan 2025 01:02:36 +0100 Subject: [PATCH 104/113] fix(ci): `prepare-reth` worfklow (#21) **Description** Adds back hive assets reused in workflow `kurtosis-op`. 
**Tests** **Additional context** https://github.com/ethereum-optimism/op-reth/actions/runs/12640451509/job/35220890751 **Metadata** --- .github/assets/hive/Dockerfile | 9 ++++ .github/assets/hive/build_simulators.sh | 38 ++++++++++++++++ .github/assets/hive/expected_failures.yaml | 53 ++++++++++++++++++++++ .github/assets/hive/load_images.sh | 25 ++++++++++ .github/assets/hive/no_sim_build.diff | 52 +++++++++++++++++++++ .github/assets/hive/run_simulator.sh | 38 ++++++++++++++++ 6 files changed, 215 insertions(+) create mode 100644 .github/assets/hive/Dockerfile create mode 100755 .github/assets/hive/build_simulators.sh create mode 100644 .github/assets/hive/expected_failures.yaml create mode 100755 .github/assets/hive/load_images.sh create mode 100644 .github/assets/hive/no_sim_build.diff create mode 100755 .github/assets/hive/run_simulator.sh diff --git a/.github/assets/hive/Dockerfile b/.github/assets/hive/Dockerfile new file mode 100644 index 0000000000000..25b71bf218724 --- /dev/null +++ b/.github/assets/hive/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu + +COPY dist/reth /usr/local/bin + +COPY LICENSE-* ./ + +EXPOSE 30303 30303/udp 9001 8545 8546 +ENV RUST_LOG=debug +ENTRYPOINT ["/usr/local/bin/reth"] diff --git a/.github/assets/hive/build_simulators.sh b/.github/assets/hive/build_simulators.sh new file mode 100755 index 0000000000000..b33f4db4ee79b --- /dev/null +++ b/.github/assets/hive/build_simulators.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +set -eo pipefail + +# Create the hive_assets directory +mkdir hive_assets/ + +cd hivetests +go build . + +./hive -client reth # first builds and caches the client + +# Run each hive command in the background for each simulator and wait +echo "Building images" +./hive -client reth --sim "pyspec" -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/engine" -sim.timelimit 1s || true & +./hive -client reth --sim "devp2p" -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/rpc-compat" -sim.timelimit 1s || true & +./hive -client reth --sim "smoke/genesis" -sim.timelimit 1s || true & +./hive -client reth --sim "smoke/network" -sim.timelimit 1s || true & +./hive -client reth --sim "ethereum/sync" -sim.timelimit 1s || true & +wait + +# Run docker save in parallel and wait +echo "Saving images" +docker save hive/hiveproxy:latest -o ../hive_assets/hiveproxy.tar & +docker save hive/simulators/devp2p:latest -o ../hive_assets/devp2p.tar & +docker save hive/simulators/ethereum/engine:latest -o ../hive_assets/engine.tar & +docker save hive/simulators/ethereum/rpc-compat:latest -o ../hive_assets/rpc_compat.tar & +docker save hive/simulators/ethereum/pyspec:latest -o ../hive_assets/pyspec.tar & +docker save hive/simulators/smoke/genesis:latest -o ../hive_assets/smoke_genesis.tar & +docker save hive/simulators/smoke/network:latest -o ../hive_assets/smoke_network.tar & +docker save hive/simulators/ethereum/sync:latest -o ../hive_assets/ethereum_sync.tar & +wait + +# Make sure we don't rebuild images on the CI jobs +git apply ../.github/assets/hive/no_sim_build.diff +go build . 
+mv ./hive ../hive_assets/ diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml new file mode 100644 index 0000000000000..c1c7ff71cf8e9 --- /dev/null +++ b/.github/assets/hive/expected_failures.yaml @@ -0,0 +1,53 @@ +# https://github.com/paradigmxyz/reth/issues/7015 +# https://github.com/paradigmxyz/reth/issues/6332 +rpc-compat: + - debug_getRawBlock/get-invalid-number (reth) + - debug_getRawHeader/get-invalid-number (reth) + - debug_getRawReceipts/get-invalid-number (reth) + - debug_getRawTransaction/get-invalid-hash (reth) + + - eth_call/call-callenv (reth) + - eth_feeHistory/fee-history (reth) + - eth_getStorageAt/get-storage-invalid-key-too-large (reth) + - eth_getStorageAt/get-storage-invalid-key (reth) + - eth_getTransactionReceipt/get-access-list (reth) + - eth_getTransactionReceipt/get-blob-tx (reth) + - eth_getTransactionReceipt/get-dynamic-fee (reth) + +# https://github.com/paradigmxyz/reth/issues/8732 +engine-withdrawals: + - Withdrawals Fork On Genesis (Paris) (reth) + - Withdrawals Fork on Block 1 (Paris) (reth) + - Withdrawals Fork on Block 2 (Paris) (reth) + - Withdrawals Fork on Block 3 (Paris) (reth) + - Withdraw to a single account (Paris) (reth) + - Withdraw to two accounts (Paris) (reth) + - Withdraw many accounts (Paris) (reth) + - Withdraw zero amount (Paris) (reth) + - Empty Withdrawals (Paris) (reth) + - Corrupted Block Hash Payload (INVALID) (Paris) (reth) + - Withdrawals Fork on Block 1 - 8 Block Re-Org NewPayload (Paris) (reth) + - Withdrawals Fork on Block 1 - 8 Block Re-Org, Sync (Paris) (reth) + - Withdrawals Fork on Block 8 - 10 Block Re-Org NewPayload (Paris) (reth) + - Withdrawals Fork on Block 8 - 10 Block Re-Org Sync (Paris) (reth) + - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org (Paris) (reth) + - Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org Sync (Paris) (reth) + - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org (Paris) (reth) + - Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org Sync (Paris) (reth) + +engine-api: [] + +# https://github.com/paradigmxyz/reth/issues/8305 +# https://github.com/paradigmxyz/reth/issues/6217 +# https://github.com/paradigmxyz/reth/issues/8306 +# https://github.com/paradigmxyz/reth/issues/7144 +engine-cancun: + - Blob Transaction Ordering, Multiple Clients (Cancun) (reth) + - Invalid PayloadAttributes, Missing BeaconRoot, Syncing=True (Cancun) (reth) + - Invalid NewPayload, ExcessBlobGas, Syncing=True, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, VersionedHashes Version, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, Incomplete VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + - Invalid NewPayload, Extra VersionedHashes, Syncing=False, EmptyTxs=False, DynFeeTxs=False (Cancun) (reth) + +sync: [] diff --git a/.github/assets/hive/load_images.sh b/.github/assets/hive/load_images.sh new file mode 100755 index 0000000000000..05e1cb9905fae --- /dev/null +++ b/.github/assets/hive/load_images.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -eo pipefail + +# List of tar files to load +IMAGES=( + "/tmp/hiveproxy.tar" + "/tmp/devp2p.tar" + "/tmp/engine.tar" + "/tmp/rpc_compat.tar" + "/tmp/pyspec.tar" + "/tmp/smoke_genesis.tar" + "/tmp/smoke_network.tar" + "/tmp/ethereum_sync.tar" + 
"/tmp/reth_image.tar" +) + +# Loop through the images and load them +for IMAGE_TAR in "${IMAGES[@]}"; do + echo "Loading image $IMAGE_TAR..." + docker load -i "$IMAGE_TAR" & +done + +wait + +docker image ls -a \ No newline at end of file diff --git a/.github/assets/hive/no_sim_build.diff b/.github/assets/hive/no_sim_build.diff new file mode 100644 index 0000000000000..6127a4ecb7325 --- /dev/null +++ b/.github/assets/hive/no_sim_build.diff @@ -0,0 +1,52 @@ +diff --git a/internal/libdocker/builder.go b/internal/libdocker/builder.go +index e4bf99b6..2023f7e2 100644 +--- a/internal/libdocker/builder.go ++++ b/internal/libdocker/builder.go +@@ -8,7 +8,6 @@ import ( + "io" + "io/fs" + "log/slog" +- "os" + "path/filepath" + "slices" + "strings" +@@ -49,25 +48,8 @@ func (b *Builder) BuildClientImage(ctx context.Context, client libhive.ClientDes + + // BuildSimulatorImage builds a docker image of a simulator. + func (b *Builder) BuildSimulatorImage(ctx context.Context, name string, buildArgs map[string]string) (string, error) { +- dir := b.config.Inventory.SimulatorDirectory(name) +- buildContextPath := dir +- buildDockerfile := "Dockerfile" +- +- // build context dir of simulator can be overridden with "hive_context.txt" file containing the desired build path +- if contextPathBytes, err := os.ReadFile(filepath.Join(filepath.FromSlash(dir), "hive_context.txt")); err == nil { +- buildContextPath = filepath.Join(dir, strings.TrimSpace(string(contextPathBytes))) +- if strings.HasPrefix(buildContextPath, "../") { +- return "", fmt.Errorf("cannot access build directory outside of Hive root: %q", buildContextPath) +- } +- if p, err := filepath.Rel(buildContextPath, filepath.Join(filepath.FromSlash(dir), "Dockerfile")); err != nil { +- return "", fmt.Errorf("failed to derive relative simulator Dockerfile path: %v", err) +- } else { +- buildDockerfile = p +- } +- } + tag := fmt.Sprintf("hive/simulators/%s:latest", name) +- err := b.buildImage(ctx, buildContextPath, buildDockerfile, tag, buildArgs) +- return tag, err ++ return tag, nil + } + + // BuildImage creates a container by archiving the given file system, +diff --git a/internal/libdocker/proxy.go b/internal/libdocker/proxy.go +index d3a14ae6..8779671e 100644 +--- a/internal/libdocker/proxy.go ++++ b/internal/libdocker/proxy.go +@@ -16,7 +16,7 @@ const hiveproxyTag = "hive/hiveproxy" + + // Build builds the hiveproxy image. + func (cb *ContainerBackend) Build(ctx context.Context, b libhive.Builder) error { +- return b.BuildImage(ctx, hiveproxyTag, hiveproxy.Source) ++ return nil + } + + // ServeAPI starts the API server. diff --git a/.github/assets/hive/run_simulator.sh b/.github/assets/hive/run_simulator.sh new file mode 100755 index 0000000000000..731c94c3f69b3 --- /dev/null +++ b/.github/assets/hive/run_simulator.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash +# set -x + +cd hivetests/ + +sim="${1}" +limit="${2}" + +run_hive() { + hive --sim "${sim}" --sim.limit "${limit}" --sim.parallelism 4 --client reth 2>&1 | tee /tmp/log || true +} + +check_log() { + tail -n 1 /tmp/log | sed -r 's/\x1B\[[0-9;]*[mK]//g' +} + +attempt=0 +max_attempts=5 + +while [ $attempt -lt $max_attempts ]; do + run_hive + + # Check if no tests were run. sed removes ansi colors + if check_log | grep -q "suites=0"; then + echo "no tests were run, retrying in 10 seconds" + sleep 10 + attempt=$((attempt + 1)) + continue + fi + + # Check the last line of the log for "finished", "tests failed", or "test failed" + if check_log | grep -Eq "(finished|tests? 
failed)"; then + exit 0 + else + exit 1 + fi +done +exit 1 From c2525e62b1fb3a93899c1eb0d51360badda0252f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 7 Jan 2025 03:09:22 +0100 Subject: [PATCH 105/113] chore(ci): disable stale issue check (#22) **Description** Closes https://github.com/ethereum-optimism/op-reth/issues/20 **Tests** **Additional context** **Metadata** --- .github/workflows/stale.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 38cca2fb1a9b8..212246a4edaf7 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -3,9 +3,7 @@ name: stale issues on: - workflow_dispatch: {} - schedule: - - cron: "30 1 * * *" + workflow_dispatch: jobs: close-issues: From 65fd0b83ab1a53c3c7af45e9feec6db452775d1f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Tue, 21 Jan 2025 20:24:34 +0100 Subject: [PATCH 106/113] fix(docs): revert overwrite pull #24 (#26) --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 54d9f98e7a351..9e99625df4cd9 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ -# reth +# op-reth development lab + +## For production op-reth code visit: https://github.com/paradigmxyz/reth [![CI status](https://github.com/paradigmxyz/reth/workflows/unit/badge.svg)][gh-ci] [![cargo-deny status](https://github.com/paradigmxyz/reth/workflows/deny/badge.svg)][gh-deny] From b61f096d45d8d973c8b23f4099522e988b4e2c46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1n=20Jakub=20Nani=C5=A1ta?= Date: Wed, 22 Jan 2025 08:59:41 -0800 Subject: [PATCH 107/113] chore: Add the ability to run kurtosis against local images in a workflow (#28) --- .github/assets/hive/load_images.sh | 2 +- ... 
=> kurtosis_op_network_params_local.yaml} | 3 + .../kurtosis_op_network_params_remote.yaml | 18 +++ .github/workflows/kurtosis-op-local.yml | 142 ++++++++++++++++++ ...kurtosis-op.yml => kurtosis-op-remote.yml} | 24 +-- .github/workflows/prepare-op-image.yml | 81 ++++++++++ .../{prepare-reth.yml => prepare-op-reth.yml} | 8 +- 7 files changed, 263 insertions(+), 15 deletions(-) rename .github/assets/{kurtosis_op_network_params.yaml => kurtosis_op_network_params_local.yaml} (65%) create mode 100644 .github/assets/kurtosis_op_network_params_remote.yaml create mode 100644 .github/workflows/kurtosis-op-local.yml rename .github/workflows/{kurtosis-op.yml => kurtosis-op-remote.yml} (85%) create mode 100644 .github/workflows/prepare-op-image.yml rename .github/workflows/{prepare-reth.yml => prepare-op-reth.yml} (91%) diff --git a/.github/assets/hive/load_images.sh b/.github/assets/hive/load_images.sh index 05e1cb9905fae..60486c53cb786 100755 --- a/.github/assets/hive/load_images.sh +++ b/.github/assets/hive/load_images.sh @@ -11,7 +11,7 @@ IMAGES=( "/tmp/smoke_genesis.tar" "/tmp/smoke_network.tar" "/tmp/ethereum_sync.tar" - "/tmp/reth_image.tar" + "/tmp/op_reth_image.tar" ) # Loop through the images and load them diff --git a/.github/assets/kurtosis_op_network_params.yaml b/.github/assets/kurtosis_op_network_params_local.yaml similarity index 65% rename from .github/assets/kurtosis_op_network_params.yaml rename to .github/assets/kurtosis_op_network_params_local.yaml index 0e1516cc88903..b8f56973d71b9 100644 --- a/.github/assets/kurtosis_op_network_params.yaml +++ b/.github/assets/kurtosis_op_network_params_local.yaml @@ -6,10 +6,13 @@ optimism_package: chains: - participants: - el_type: op-geth + el_image: "ghcr.io/ethereum-optimism/op-geth:kurtosis-ci" cl_type: op-node + cl_image: "ghcr.io/ethereum-optimism/op-node:kurtosis-ci" - el_type: op-reth el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci" cl_type: op-node + cl_image: "ghcr.io/ethereum-optimism/op-node:kurtosis-ci" batcher_params: extra_params: - "--throttle-interval=0" diff --git a/.github/assets/kurtosis_op_network_params_remote.yaml b/.github/assets/kurtosis_op_network_params_remote.yaml new file mode 100644 index 0000000000000..285f1255d15da --- /dev/null +++ b/.github/assets/kurtosis_op_network_params_remote.yaml @@ -0,0 +1,18 @@ +ethereum_package: + participants: + - el_type: reth + cl_type: lighthouse +optimism_package: + chains: + - participants: + - el_type: op-geth + el_image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:latest + cl_type: op-node + cl_image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:develop + - el_type: op-reth + el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci" + cl_type: op-node + cl_image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:develop + batcher_params: + extra_params: + - "--throttle-interval=0" diff --git a/.github/workflows/kurtosis-op-local.yml b/.github/workflows/kurtosis-op-local.yml new file mode 100644 index 0000000000000..70320721bf589 --- /dev/null +++ b/.github/workflows/kurtosis-op-local.yml @@ -0,0 +1,142 @@ +# Runs simple OP stack setup in Kurtosis + +name: Run kurtosis (local images) + +on: + workflow_dispatch: + inputs: + op-node-repo: + type: string + required: true + description: "An optimizm repo (clone) to use for op-node" + default: https://github.com/ethereum-optimism/optimism.git + + op-node-ref: + type: string + required: true + description: "op-node git ref (branch/commit) to use" + default: develop + + op-geth-repo: + type: string + 
required: true + description: "An op-geth repo (clone) to use for op-geth" + default: https://github.com/ethereum-optimism/op-geth.git + + op-geth-ref: + type: string + required: true + description: "op-geth git ref (branch/commit) to use" + default: optimism + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + prepare-op-geth: + name: Prepare op-geth docker image + uses: ./.github/workflows/prepare-op-image.yml + with: + repo: ${{ inputs.op-geth-repo }} + ref: ${{ inputs.op-geth-ref }} + image_tag: ghcr.io/ethereum-optimism/op-geth:kurtosis-ci + artifact_name: op_geth_image + + prepare-op-node: + name: Prepare op-node docker image + uses: ./.github/workflows/prepare-op-image.yml + with: + repo: ${{ inputs.op-node-repo }} + ref: ${{ inputs.op-node-ref }} + dockerfile: ops/docker/op-stack-go/Dockerfile + target: op-node-target + image_tag: ghcr.io/ethereum-optimism/op-node:kurtosis-ci + artifact_name: op_node_image + + prepare-op-reth: + name: Prepare op-reth docker image + uses: ./.github/workflows/prepare-op-reth.yml + with: + image_tag: ghcr.io/paradigmxyz/op-reth:kurtosis-ci + binary_name: op-reth + cargo_features: optimism,asm-keccak + cargo_package: crates/optimism/bin/Cargo.toml + + test: + timeout-minutes: 60 + strategy: + fail-fast: false + name: run kurtosis + runs-on: ubuntu-latest + needs: + - prepare-op-reth + - prepare-op-geth + - prepare-op-node + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download docker image artifacts + uses: actions/download-artifact@v4 + with: + pattern: artifacts--* + merge-multiple: true + path: /tmp + + - name: Load Docker images + run: | + # Load all images from artifacts + docker load -i /tmp/op_reth_image.tar + docker load -i /tmp/op_geth_image.tar + docker load -i /tmp/op_node_image.tar + + # List available images + docker image ls -a + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Run kurtosis + run: | + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install kurtosis-cli + kurtosis engine start + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_local.yaml + ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') + GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') + RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') + echo "GETH_RPC=http://127.0.0.1:$GETH_PORT" >> $GITHUB_ENV + echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV + + - name: Assert that clients advance + run: | + for i in {1..100}; do + sleep 5 + BLOCK_GETH=$(cast bn --rpc-url $GETH_RPC) + BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC) + + if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi + echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH" + done + kurtosis service logs -a op-devnet op-el-2-op-reth-op-node-op-kurtosis + kurtosis service logs -a op-devnet op-cl-2-op-node-op-reth-op-kurtosis + exit 1 + + + notify-on-error: + needs: test + if: failure() + runs-on: ubuntu-latest + steps: + - name: Slack Webhook Action + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_COLOR: ${{ 
job.status }} + SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}" + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/kurtosis-op.yml b/.github/workflows/kurtosis-op-remote.yml similarity index 85% rename from .github/workflows/kurtosis-op.yml rename to .github/workflows/kurtosis-op-remote.yml index 5a6d65941ee83..1c2ef4425e80b 100644 --- a/.github/workflows/kurtosis-op.yml +++ b/.github/workflows/kurtosis-op-remote.yml @@ -1,6 +1,6 @@ # Runs simple OP stack setup in Kurtosis -name: kurtosis-op +name: Run kurtosis (remote images) on: workflow_dispatch: @@ -13,8 +13,9 @@ concurrency: cancel-in-progress: true jobs: - prepare-reth: - uses: ./.github/workflows/prepare-reth.yml + prepare-op-reth: + name: Prepare op-reth docker image + uses: ./.github/workflows/prepare-op-reth.yml with: image_tag: ghcr.io/paradigmxyz/op-reth:kurtosis-ci binary_name: op-reth @@ -28,22 +29,25 @@ jobs: name: run kurtosis runs-on: ubuntu-latest needs: - - prepare-reth + - prepare-op-reth steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - - name: Download reth image + - name: Download docker image artifacts uses: actions/download-artifact@v4 with: - name: artifacts + pattern: artifacts--* + merge-multiple: true path: /tmp - - name: Load Docker image + - name: Load Docker images run: | - docker load -i /tmp/reth_image.tar & - wait + # Load all images from artifacts + docker load -i /tmp/op_reth_image.tar + + # List available images docker image ls -a - name: Install Foundry @@ -55,7 +59,7 @@ jobs: sudo apt update sudo apt install kurtosis-cli kurtosis engine start - kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params.yaml + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_remote.yaml ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') diff --git a/.github/workflows/prepare-op-image.yml b/.github/workflows/prepare-op-image.yml new file mode 100644 index 0000000000000..d517c2bf41d97 --- /dev/null +++ b/.github/workflows/prepare-op-image.yml @@ -0,0 +1,81 @@ +name: Prepare an OP image + +on: + workflow_call: + inputs: + repo: + type: string + description: "Source repository" + required: true + ref: + type: string + description: "Source repository git ref" + required: true + context: + type: string + description: "Docker context" + required: false + default: "." 
+ dockerfile: + type: string + description: "Dockerfile" + required: false + default: "Dockerfile" + target: + type: string + description: "Docker target stage" + required: false + image_tag: + type: string + description: "Docker image tag" + required: true + artifact_name: + type: string + description: "Docker image artifact filename (without the .tar extension)" + required: true + +jobs: + prepare-op-image: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + + - name: Prepare artifacts directory + shell: bash + run: mkdir ./artifacts + + - name: Clone ${{ inputs.repo }} + shell: bash + run: git clone --recurse-submodules ${{ inputs.repo }} ./tmp/submodule + + - name: Checkout ${{ inputs.ref }} + shell: bash + run: git checkout ${{ inputs.ref }} + working-directory: ./tmp/submodule + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build image + uses: docker/build-push-action@v6 + with: + context: ./tmp/submodule/${{ inputs.context }} + file: ./tmp/submodule/${{ inputs.dockerfile }} + target: ${{ inputs.target }} + tags: ${{ inputs.image_tag }} + outputs: type=docker,dest=./artifacts/${{ inputs.artifact_name }}.tar + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Upload artifacts + uses: actions/upload-artifact@v4 + with: + name: artifacts--${{ inputs.artifact_name }} + path: ./artifacts/${{ inputs.artifact_name }}.tar + + - name: Clean up + shell: bash + run: rm -rf ./tmp/submodule ./artifacts diff --git a/.github/workflows/prepare-reth.yml b/.github/workflows/prepare-op-reth.yml similarity index 91% rename from .github/workflows/prepare-reth.yml rename to .github/workflows/prepare-op-reth.yml index b71ecac620853..6a9709b1061a9 100644 --- a/.github/workflows/prepare-reth.yml +++ b/.github/workflows/prepare-op-reth.yml @@ -23,7 +23,7 @@ on: description: "Optional cargo package path" jobs: - prepare-reth: + prepare-op-reth: if: github.repository == 'ethereum-optimism/op-reth' timeout-minutes: 45 runs-on: ubuntu-latest @@ -54,7 +54,7 @@ jobs: context: . 
file: .github/assets/hive/Dockerfile tags: ${{ inputs.image_tag }} - outputs: type=docker,dest=./artifacts/reth_image.tar + outputs: type=docker,dest=./artifacts/op_reth_image.tar cache-from: type=gha cache-to: type=gha,mode=max @@ -62,5 +62,5 @@ jobs: id: upload uses: actions/upload-artifact@v4 with: - name: artifacts - path: ./artifacts + name: artifacts--op-reth + path: ./artifacts/op_reth_image.tar From 9384a65a9c1e9e20d1877e5e4dde530c432a9eb9 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jan 2025 20:38:23 +0100 Subject: [PATCH 108/113] feat(ci): Add kurtosis config without op-geth (#29) --- ...is_op_network_params_local_no_op_geth.yaml | 14 ++ ...s_op_network_params_remote_no_op_geth.yaml | 14 ++ .../kurtosis-op-local-no-op-geth.yml | 142 ++++++++++++++++++ .../kurtosis-op-remote-no-op-geth.yml | 94 ++++++++++++ 4 files changed, 264 insertions(+) create mode 100644 .github/assets/kurtosis_op_network_params_local_no_op_geth.yaml create mode 100644 .github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml create mode 100644 .github/workflows/kurtosis-op-local-no-op-geth.yml create mode 100644 .github/workflows/kurtosis-op-remote-no-op-geth.yml diff --git a/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml b/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml new file mode 100644 index 0000000000000..b684b9bd84723 --- /dev/null +++ b/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml @@ -0,0 +1,14 @@ +ethereum_package: + participants: + - el_type: reth + cl_type: lighthouse +optimism_package: + chains: + - participants: + - el_type: op-reth + el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci" + cl_type: op-node + cl_image: "ghcr.io/ethereum-optimism/op-node:kurtosis-ci" + batcher_params: + extra_params: + - "--throttle-interval=0" diff --git a/.github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml b/.github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml new file mode 100644 index 0000000000000..8876b510331a4 --- /dev/null +++ b/.github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml @@ -0,0 +1,14 @@ +ethereum_package: + participants: + - el_type: reth + cl_type: lighthouse +optimism_package: + chains: + - participants: + - el_type: op-reth + el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci" + cl_type: op-node + cl_image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:develop + batcher_params: + extra_params: + - "--throttle-interval=0" diff --git a/.github/workflows/kurtosis-op-local-no-op-geth.yml b/.github/workflows/kurtosis-op-local-no-op-geth.yml new file mode 100644 index 0000000000000..67b52b919e908 --- /dev/null +++ b/.github/workflows/kurtosis-op-local-no-op-geth.yml @@ -0,0 +1,142 @@ +# Runs simple OP stack setup in Kurtosis + +name: Run kurtosis (local images) + +on: + workflow_dispatch: + inputs: + op-node-repo: + type: string + required: true + description: "An optimizm repo (clone) to use for op-node" + default: https://github.com/ethereum-optimism/optimism.git + + op-node-ref: + type: string + required: true + description: "op-node git ref (branch/commit) to use" + default: develop + + op-geth-repo: + type: string + required: true + description: "An op-geth repo (clone) to use for op-geth" + default: https://github.com/ethereum-optimism/op-geth.git + + op-geth-ref: + type: string + required: true + description: "op-geth git ref (branch/commit) to use" + default: optimism + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || 
github.run_id }} + cancel-in-progress: true + +jobs: + prepare-op-geth: + name: Prepare op-geth docker image + uses: ./.github/workflows/prepare-op-image.yml + with: + repo: ${{ inputs.op-geth-repo }} + ref: ${{ inputs.op-geth-ref }} + image_tag: ghcr.io/ethereum-optimism/op-geth:kurtosis-ci + artifact_name: op_geth_image + + prepare-op-node: + name: Prepare op-node docker image + uses: ./.github/workflows/prepare-op-image.yml + with: + repo: ${{ inputs.op-node-repo }} + ref: ${{ inputs.op-node-ref }} + dockerfile: ops/docker/op-stack-go/Dockerfile + target: op-node-target + image_tag: ghcr.io/ethereum-optimism/op-node:kurtosis-ci + artifact_name: op_node_image + + prepare-op-reth: + name: Prepare op-reth docker image + uses: ./.github/workflows/prepare-op-reth.yml + with: + image_tag: ghcr.io/paradigmxyz/op-reth:kurtosis-ci + binary_name: op-reth + cargo_features: optimism,asm-keccak + cargo_package: crates/optimism/bin/Cargo.toml + + test: + timeout-minutes: 60 + strategy: + fail-fast: false + name: run kurtosis + runs-on: ubuntu-latest + needs: + - prepare-op-reth + - prepare-op-geth + - prepare-op-node + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download docker image artifacts + uses: actions/download-artifact@v4 + with: + pattern: artifacts--* + merge-multiple: true + path: /tmp + + - name: Load Docker images + run: | + # Load all images from artifacts + docker load -i /tmp/op_reth_image.tar + docker load -i /tmp/op_geth_image.tar + docker load -i /tmp/op_node_image.tar + + # List available images + docker image ls -a + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Run kurtosis + run: | + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install kurtosis-cli + kurtosis engine start + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_local_no_op_geth.yaml + ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') + GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') + RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') + echo "GETH_RPC=http://127.0.0.1:$GETH_PORT" >> $GITHUB_ENV + echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV + + - name: Assert that clients advance + run: | + for i in {1..100}; do + sleep 5 + BLOCK_GETH=$(cast bn --rpc-url $GETH_RPC) + BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC) + + if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi + echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH" + done + kurtosis service logs -a op-devnet op-el-2-op-reth-op-node-op-kurtosis + kurtosis service logs -a op-devnet op-cl-2-op-node-op-reth-op-kurtosis + exit 1 + + + notify-on-error: + needs: test + if: failure() + runs-on: ubuntu-latest + steps: + - name: Slack Webhook Action + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_COLOR: ${{ job.status }} + SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}" + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/kurtosis-op-remote-no-op-geth.yml b/.github/workflows/kurtosis-op-remote-no-op-geth.yml new file mode 100644 index 0000000000000..813ca097dff4e --- /dev/null +++ 
b/.github/workflows/kurtosis-op-remote-no-op-geth.yml @@ -0,0 +1,94 @@ +# Runs simple OP stack setup in Kurtosis + +name: Run kurtosis (remote images) + +on: + workflow_dispatch: + +env: + CARGO_TERM_COLOR: always + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + prepare-op-reth: + name: Prepare op-reth docker image + uses: ./.github/workflows/prepare-op-reth.yml + with: + image_tag: ghcr.io/paradigmxyz/op-reth:kurtosis-ci + binary_name: op-reth + cargo_features: optimism,asm-keccak + cargo_package: crates/optimism/bin/Cargo.toml + + test: + timeout-minutes: 60 + strategy: + fail-fast: false + name: run kurtosis + runs-on: ubuntu-latest + needs: + - prepare-op-reth + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download docker image artifacts + uses: actions/download-artifact@v4 + with: + pattern: artifacts--* + merge-multiple: true + path: /tmp + + - name: Load Docker images + run: | + # Load all images from artifacts + docker load -i /tmp/op_reth_image.tar + + # List available images + docker image ls -a + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Run kurtosis + run: | + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install kurtosis-cli + kurtosis engine start + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml + ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') + GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') + RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') + echo "GETH_RPC=http://127.0.0.1:$GETH_PORT" >> $GITHUB_ENV + echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV + + - name: Assert that clients advance + run: | + for i in {1..100}; do + sleep 5 + BLOCK_GETH=$(cast bn --rpc-url $GETH_RPC) + BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC) + + if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi + echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH" + done + kurtosis service logs -a op-devnet op-el-2-op-reth-op-node-op-kurtosis + kurtosis service logs -a op-devnet op-cl-2-op-node-op-reth-op-kurtosis + exit 1 + + + notify-on-error: + needs: test + if: failure() + runs-on: ubuntu-latest + steps: + - name: Slack Webhook Action + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_COLOR: ${{ job.status }} + SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}" + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} From cad241b40589cb6fd432675b66cf304d94b9a17f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Wed, 22 Jan 2025 20:43:42 +0100 Subject: [PATCH 109/113] fixup! 
feat(ci): Add kurtosis config without op-geth (#29) --- .github/workflows/kurtosis-op-local-no-op-geth.yml | 2 +- .github/workflows/kurtosis-op-remote-no-op-geth.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/kurtosis-op-local-no-op-geth.yml b/.github/workflows/kurtosis-op-local-no-op-geth.yml index 67b52b919e908..a19455350a637 100644 --- a/.github/workflows/kurtosis-op-local-no-op-geth.yml +++ b/.github/workflows/kurtosis-op-local-no-op-geth.yml @@ -1,6 +1,6 @@ # Runs simple OP stack setup in Kurtosis -name: Run kurtosis (local images) +name: Run kurtosis (local images) no op-geth on: workflow_dispatch: diff --git a/.github/workflows/kurtosis-op-remote-no-op-geth.yml b/.github/workflows/kurtosis-op-remote-no-op-geth.yml index 813ca097dff4e..5a14269870027 100644 --- a/.github/workflows/kurtosis-op-remote-no-op-geth.yml +++ b/.github/workflows/kurtosis-op-remote-no-op-geth.yml @@ -1,6 +1,6 @@ # Runs simple OP stack setup in Kurtosis -name: Run kurtosis (remote images) +name: Run kurtosis (remote images) no op-geth on: workflow_dispatch: From 45508fe17cfb4d5831629cbe00c15ef6f67f623b Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Thu, 23 Jan 2025 15:48:39 +0100 Subject: [PATCH 110/113] feat(ci): Speed up building op-reth in kurtosis workflow (#32) Decrease the optimisation level for building op-reth in the prepare-op-reth workflow --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 4be2f7fb73c36..5989823c19376 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -263,7 +263,7 @@ unarray.opt-level = 3 [profile.hivetests] inherits = "test" lto = "thin" -opt-level = 3 +opt-level = 1 [profile.release] codegen-units = 16 From a5f2de3d06af6d6edc98a808f1eb8385cc699356 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1n=20Jakub=20Nani=C5=A1ta?= Date: Thu, 23 Jan 2025 06:49:55 -0800 Subject: [PATCH 111/113] chore: Add the ability to toggle between op-geth/no op-geth configs when running kurtosis tests (#30) Both the local & remote image kurtosis tests now allow the user to select which configuration to run against (with or without op-geth) --- .../kurtosis-op-local-no-op-geth.yml | 142 ------------------ .github/workflows/kurtosis-op-local.yml | 11 +- .../kurtosis-op-remote-no-op-geth.yml | 94 ------------ .github/workflows/kurtosis-op-remote.yml | 11 +- 4 files changed, 20 insertions(+), 238 deletions(-) delete mode 100644 .github/workflows/kurtosis-op-local-no-op-geth.yml delete mode 100644 .github/workflows/kurtosis-op-remote-no-op-geth.yml diff --git a/.github/workflows/kurtosis-op-local-no-op-geth.yml b/.github/workflows/kurtosis-op-local-no-op-geth.yml deleted file mode 100644 index a19455350a637..0000000000000 --- a/.github/workflows/kurtosis-op-local-no-op-geth.yml +++ /dev/null @@ -1,142 +0,0 @@ -# Runs simple OP stack setup in Kurtosis - -name: Run kurtosis (local images) no op-geth - -on: - workflow_dispatch: - inputs: - op-node-repo: - type: string - required: true - description: "An optimizm repo (clone) to use for op-node" - default: https://github.com/ethereum-optimism/optimism.git - - op-node-ref: - type: string - required: true - description: "op-node git ref (branch/commit) to use" - default: develop - - op-geth-repo: - type: string - required: true - description: "An op-geth repo (clone) to use for op-geth" - default: https://github.com/ethereum-optimism/op-geth.git - - op-geth-ref: - type: string - required: true - description: "op-geth git ref (branch/commit) to use" - default: optimism - -env: - 
CARGO_TERM_COLOR: always - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - prepare-op-geth: - name: Prepare op-geth docker image - uses: ./.github/workflows/prepare-op-image.yml - with: - repo: ${{ inputs.op-geth-repo }} - ref: ${{ inputs.op-geth-ref }} - image_tag: ghcr.io/ethereum-optimism/op-geth:kurtosis-ci - artifact_name: op_geth_image - - prepare-op-node: - name: Prepare op-node docker image - uses: ./.github/workflows/prepare-op-image.yml - with: - repo: ${{ inputs.op-node-repo }} - ref: ${{ inputs.op-node-ref }} - dockerfile: ops/docker/op-stack-go/Dockerfile - target: op-node-target - image_tag: ghcr.io/ethereum-optimism/op-node:kurtosis-ci - artifact_name: op_node_image - - prepare-op-reth: - name: Prepare op-reth docker image - uses: ./.github/workflows/prepare-op-reth.yml - with: - image_tag: ghcr.io/paradigmxyz/op-reth:kurtosis-ci - binary_name: op-reth - cargo_features: optimism,asm-keccak - cargo_package: crates/optimism/bin/Cargo.toml - - test: - timeout-minutes: 60 - strategy: - fail-fast: false - name: run kurtosis - runs-on: ubuntu-latest - needs: - - prepare-op-reth - - prepare-op-geth - - prepare-op-node - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Download docker image artifacts - uses: actions/download-artifact@v4 - with: - pattern: artifacts--* - merge-multiple: true - path: /tmp - - - name: Load Docker images - run: | - # Load all images from artifacts - docker load -i /tmp/op_reth_image.tar - docker load -i /tmp/op_geth_image.tar - docker load -i /tmp/op_node_image.tar - - # List available images - docker image ls -a - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@v1 - - - name: Run kurtosis - run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list - sudo apt update - sudo apt install kurtosis-cli - kurtosis engine start - kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_local_no_op_geth.yaml - ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') - GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') - RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') - echo "GETH_RPC=http://127.0.0.1:$GETH_PORT" >> $GITHUB_ENV - echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV - - - name: Assert that clients advance - run: | - for i in {1..100}; do - sleep 5 - BLOCK_GETH=$(cast bn --rpc-url $GETH_RPC) - BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC) - - if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi - echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH" - done - kurtosis service logs -a op-devnet op-el-2-op-reth-op-node-op-kurtosis - kurtosis service logs -a op-devnet op-cl-2-op-node-op-reth-op-kurtosis - exit 1 - - - notify-on-error: - needs: test - if: failure() - runs-on: ubuntu-latest - steps: - - name: Slack Webhook Action - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_COLOR: ${{ job.status }} - SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}" - SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/kurtosis-op-local.yml b/.github/workflows/kurtosis-op-local.yml 
index 70320721bf589..bdc5a1fff160f 100644 --- a/.github/workflows/kurtosis-op-local.yml +++ b/.github/workflows/kurtosis-op-local.yml @@ -5,6 +5,15 @@ name: Run kurtosis (local images) on: workflow_dispatch: inputs: + args_file: + description: "Kurtosis arguments file" + type: choice + required: true + default: kurtosis_op_network_params_local + options: + - kurtosis_op_network_params_local + - kurtosis_op_network_params_local_no_op_geth + op-node-repo: type: string required: true @@ -107,7 +116,7 @@ jobs: sudo apt update sudo apt install kurtosis-cli kurtosis engine start - kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_local.yaml + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/${{ inputs.args_file }}.yaml ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') diff --git a/.github/workflows/kurtosis-op-remote-no-op-geth.yml b/.github/workflows/kurtosis-op-remote-no-op-geth.yml deleted file mode 100644 index 5a14269870027..0000000000000 --- a/.github/workflows/kurtosis-op-remote-no-op-geth.yml +++ /dev/null @@ -1,94 +0,0 @@ -# Runs simple OP stack setup in Kurtosis - -name: Run kurtosis (remote images) no op-geth - -on: - workflow_dispatch: - -env: - CARGO_TERM_COLOR: always - -concurrency: - group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} - cancel-in-progress: true - -jobs: - prepare-op-reth: - name: Prepare op-reth docker image - uses: ./.github/workflows/prepare-op-reth.yml - with: - image_tag: ghcr.io/paradigmxyz/op-reth:kurtosis-ci - binary_name: op-reth - cargo_features: optimism,asm-keccak - cargo_package: crates/optimism/bin/Cargo.toml - - test: - timeout-minutes: 60 - strategy: - fail-fast: false - name: run kurtosis - runs-on: ubuntu-latest - needs: - - prepare-op-reth - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Download docker image artifacts - uses: actions/download-artifact@v4 - with: - pattern: artifacts--* - merge-multiple: true - path: /tmp - - - name: Load Docker images - run: | - # Load all images from artifacts - docker load -i /tmp/op_reth_image.tar - - # List available images - docker image ls -a - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@v1 - - - name: Run kurtosis - run: | - echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list - sudo apt update - sudo apt install kurtosis-cli - kurtosis engine start - kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml - ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') - GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') - RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') - echo "GETH_RPC=http://127.0.0.1:$GETH_PORT" >> $GITHUB_ENV - echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV - - - name: Assert that clients advance - run: | - 
for i in {1..100}; do - sleep 5 - BLOCK_GETH=$(cast bn --rpc-url $GETH_RPC) - BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC) - - if [ $BLOCK_GETH -ge 100 ] && [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi - echo "Waiting for clients to advance..., Reth: $BLOCK_RETH Geth: $BLOCK_GETH" - done - kurtosis service logs -a op-devnet op-el-2-op-reth-op-node-op-kurtosis - kurtosis service logs -a op-devnet op-cl-2-op-node-op-reth-op-kurtosis - exit 1 - - - notify-on-error: - needs: test - if: failure() - runs-on: ubuntu-latest - steps: - - name: Slack Webhook Action - uses: rtCamp/action-slack-notify@v2 - env: - SLACK_COLOR: ${{ job.status }} - SLACK_MESSAGE: "Failed run: https://github.com/paradigmxyz/reth/actions/runs/${{ github.run_id }}" - SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_URL }} diff --git a/.github/workflows/kurtosis-op-remote.yml b/.github/workflows/kurtosis-op-remote.yml index 1c2ef4425e80b..f9ec9fe881ecf 100644 --- a/.github/workflows/kurtosis-op-remote.yml +++ b/.github/workflows/kurtosis-op-remote.yml @@ -4,6 +4,15 @@ name: Run kurtosis (remote images) on: workflow_dispatch: + inputs: + args_file: + description: "Kurtosis arguments file" + type: choice + required: true + default: kurtosis_op_network_params_remote + options: + - kurtosis_op_network_params_remote + - kurtosis_op_network_params_remote_no_op_geth env: CARGO_TERM_COLOR: always @@ -59,7 +68,7 @@ jobs: sudo apt update sudo apt install kurtosis-cli kurtosis engine start - kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_remote.yaml + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/${{ inputs.args_file }}.yaml ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') From 3568007ec28c9c7bb91913e6e73cf7f3568475c0 Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 24 Jan 2025 20:30:43 +0100 Subject: [PATCH 112/113] Checkout ci changes from optimism --- .../kurtosis_op_network_params_local.yaml | 6 ++ ...is_op_network_params_local_no_op_geth.yaml | 4 + .../kurtosis_op_network_params_remote.yaml | 4 + ...s_op_network_params_remote_no_op_geth.yaml | 2 + .github/workflows/kurtosis-op-local.yml | 75 +++++++++++++++---- .github/workflows/kurtosis-op-remote.yml | 73 ++++++++++++++---- 6 files changed, 138 insertions(+), 26 deletions(-) diff --git a/.github/assets/kurtosis_op_network_params_local.yaml b/.github/assets/kurtosis_op_network_params_local.yaml index b8f56973d71b9..bb2e87bd8841e 100644 --- a/.github/assets/kurtosis_op_network_params_local.yaml +++ b/.github/assets/kurtosis_op_network_params_local.yaml @@ -9,10 +9,16 @@ optimism_package: el_image: "ghcr.io/ethereum-optimism/op-geth:kurtosis-ci" cl_type: op-node cl_image: "ghcr.io/ethereum-optimism/op-node:kurtosis-ci" + cl_extra_params: + - "--l1.trustrpc=true" - el_type: op-reth el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci" cl_type: op-node cl_image: "ghcr.io/ethereum-optimism/op-node:kurtosis-ci" + cl_extra_params: + - "--l1.trustrpc=true" batcher_params: extra_params: - "--throttle-interval=0" + network_params: + isthmus_time_offset: 0 \ No newline at end of file diff --git 
a/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml b/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml index b684b9bd84723..0acf800fab4bd 100644 --- a/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml +++ b/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml @@ -9,6 +9,10 @@ optimism_package: el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci" cl_type: op-node cl_image: "ghcr.io/ethereum-optimism/op-node:kurtosis-ci" + cl_extra_params: + - "--l1.trustrpc=true" batcher_params: extra_params: - "--throttle-interval=0" + network_params: + isthmus_time_offset: 0 \ No newline at end of file diff --git a/.github/assets/kurtosis_op_network_params_remote.yaml b/.github/assets/kurtosis_op_network_params_remote.yaml index 285f1255d15da..486936e058804 100644 --- a/.github/assets/kurtosis_op_network_params_remote.yaml +++ b/.github/assets/kurtosis_op_network_params_remote.yaml @@ -9,10 +9,14 @@ optimism_package: el_image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:latest cl_type: op-node cl_image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:develop + cl_extra_params: + - "--l1.trustrpc=true" - el_type: op-reth el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci" cl_type: op-node cl_image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:develop + cl_extra_params: + - "--l1.trustrpc=true" batcher_params: extra_params: - "--throttle-interval=0" diff --git a/.github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml b/.github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml index 8876b510331a4..9684f2929e9d5 100644 --- a/.github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml +++ b/.github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml @@ -9,6 +9,8 @@ optimism_package: el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci" cl_type: op-node cl_image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:develop + cl_extra_params: + - "--l1.trustrpc=true" batcher_params: extra_params: - "--throttle-interval=0" diff --git a/.github/workflows/kurtosis-op-local.yml b/.github/workflows/kurtosis-op-local.yml index bdc5a1fff160f..0e2c3337705f7 100644 --- a/.github/workflows/kurtosis-op-local.yml +++ b/.github/workflows/kurtosis-op-local.yml @@ -5,15 +5,6 @@ name: Run kurtosis (local images) on: workflow_dispatch: inputs: - args_file: - description: "Kurtosis arguments file" - type: choice - required: true - default: kurtosis_op_network_params_local - options: - - kurtosis_op_network_params_local - - kurtosis_op_network_params_local_no_op_geth - op-node-repo: type: string required: true @@ -75,11 +66,11 @@ jobs: cargo_features: optimism,asm-keccak cargo_package: crates/optimism/bin/Cargo.toml - test: + test-op-geth: timeout-minutes: 60 strategy: fail-fast: false - name: run kurtosis + name: Kurtosis with op-geth runs-on: ubuntu-latest needs: - prepare-op-reth @@ -116,7 +107,7 @@ jobs: sudo apt update sudo apt install kurtosis-cli kurtosis engine start - kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/${{ inputs.args_file }}.yaml + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_local.yaml ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') RETH_PORT=$(curl 
"http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') @@ -137,9 +128,67 @@ jobs: kurtosis service logs -a op-devnet op-cl-2-op-node-op-reth-op-kurtosis exit 1 + test-no-op-geth: + timeout-minutes: 60 + strategy: + fail-fast: false + name: Kurtosis without op-geth + runs-on: ubuntu-latest + needs: + - prepare-op-reth + - prepare-op-node + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download docker image artifacts + uses: actions/download-artifact@v4 + with: + pattern: artifacts--* + merge-multiple: true + path: /tmp + + - name: Load Docker images + run: | + # Load all images from artifacts + docker load -i /tmp/op_reth_image.tar + docker load -i /tmp/op_node_image.tar + + # List available images + docker image ls -a + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Run kurtosis + run: | + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install kurtosis-cli + kurtosis engine start + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_local_no_op_geth.yaml + ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') + RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-reth-op-node-op-kurtosis".public_ports.rpc.number') + echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV + + - name: Assert that clients advance + run: | + for i in {1..100}; do + sleep 5 + BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC) + + if [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi + echo "Waiting for clients to advance..., Reth: $BLOCK_RETH" + done + kurtosis service logs -a op-devnet op-el-1-op-reth-op-node-op-kurtosis + kurtosis service logs -a op-devnet op-cl-1-op-node-op-reth-op-kurtosis + exit 1 notify-on-error: - needs: test + needs: + - test-op-geth + - test-no-op-geth if: failure() runs-on: ubuntu-latest steps: diff --git a/.github/workflows/kurtosis-op-remote.yml b/.github/workflows/kurtosis-op-remote.yml index f9ec9fe881ecf..ae2889fd3dfde 100644 --- a/.github/workflows/kurtosis-op-remote.yml +++ b/.github/workflows/kurtosis-op-remote.yml @@ -4,15 +4,6 @@ name: Run kurtosis (remote images) on: workflow_dispatch: - inputs: - args_file: - description: "Kurtosis arguments file" - type: choice - required: true - default: kurtosis_op_network_params_remote - options: - - kurtosis_op_network_params_remote - - kurtosis_op_network_params_remote_no_op_geth env: CARGO_TERM_COLOR: always @@ -31,11 +22,11 @@ jobs: cargo_features: optimism,asm-keccak cargo_package: crates/optimism/bin/Cargo.toml - test: + test-op-geth: timeout-minutes: 60 strategy: fail-fast: false - name: run kurtosis + name: Kurtosis with op-geth runs-on: ubuntu-latest needs: - prepare-op-reth @@ -68,7 +59,7 @@ jobs: sudo apt update sudo apt install kurtosis-cli kurtosis engine start - kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/${{ inputs.args_file }}.yaml + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_remote.yaml ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') GETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-geth-op-node-op-kurtosis".public_ports.rpc.number') RETH_PORT=$(curl 
"http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-2-op-reth-op-node-op-kurtosis".public_ports.rpc.number') @@ -89,9 +80,65 @@ jobs: kurtosis service logs -a op-devnet op-cl-2-op-node-op-reth-op-kurtosis exit 1 + test-no-op-geth: + timeout-minutes: 60 + strategy: + fail-fast: false + name: Kurtosis without op-geth + runs-on: ubuntu-latest + needs: + - prepare-op-reth + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download docker image artifacts + uses: actions/download-artifact@v4 + with: + pattern: artifacts--* + merge-multiple: true + path: /tmp + + - name: Load Docker images + run: | + # Load all images from artifacts + docker load -i /tmp/op_reth_image.tar + + # List available images + docker image ls -a + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Run kurtosis + run: | + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install kurtosis-cli + kurtosis engine start + kurtosis run --enclave op-devnet github.com/ethpandaops/optimism-package --args-file .github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml + ENCLAVE_ID=$(curl http://127.0.0.1:9779/api/enclaves | jq --raw-output 'keys[0]') + RETH_PORT=$(curl "http://127.0.0.1:9779/api/enclaves/$ENCLAVE_ID/services" | jq '."op-el-1-op-reth-op-node-op-kurtosis".public_ports.rpc.number') + echo "RETH_RPC=http://127.0.0.1:$RETH_PORT" >> $GITHUB_ENV + + - name: Assert that clients advance + run: | + for i in {1..100}; do + sleep 5 + BLOCK_RETH=$(cast bn --rpc-url $RETH_RPC) + + if [ $BLOCK_RETH -ge 100 ] ; then exit 0; fi + echo "Waiting for clients to advance..., Reth: $BLOCK_RETH" + done + kurtosis service logs -a op-devnet op-el-1-op-reth-op-node-op-kurtosis + kurtosis service logs -a op-devnet op-cl-1-op-node-op-reth-op-kurtosis + exit 1 notify-on-error: - needs: test + needs: + - test-op-geth + - test-no-op-geth if: failure() runs-on: ubuntu-latest steps: From dd44bb7a220eae0cebaf88afeaf78b0ca2728471 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A1n=20Jakub=20Nani=C5=A1ta?= Date: Mon, 27 Jan 2025 07:26:22 -0800 Subject: [PATCH 113/113] fix: Fix op-deployer version in kurtosis (#40) --- .github/assets/kurtosis_op_network_params_local.yaml | 2 ++ .../assets/kurtosis_op_network_params_local_no_op_geth.yaml | 2 ++ .github/assets/kurtosis_op_network_params_remote.yaml | 4 +++- .../assets/kurtosis_op_network_params_remote_no_op_geth.yaml | 2 ++ 4 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/assets/kurtosis_op_network_params_local.yaml b/.github/assets/kurtosis_op_network_params_local.yaml index bb2e87bd8841e..612d6823e9446 100644 --- a/.github/assets/kurtosis_op_network_params_local.yaml +++ b/.github/assets/kurtosis_op_network_params_local.yaml @@ -3,6 +3,8 @@ ethereum_package: - el_type: reth cl_type: lighthouse optimism_package: + op_contract_deployer_params: + image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-deployer:v0.0.11 chains: - participants: - el_type: op-geth diff --git a/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml b/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml index 0acf800fab4bd..41fa18852bcff 100644 --- a/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml +++ b/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml @@ -3,6 +3,8 @@ ethereum_package: - el_type: reth cl_type: lighthouse optimism_package: + op_contract_deployer_params: + image: 
From dd44bb7a220eae0cebaf88afeaf78b0ca2728471 Mon Sep 17 00:00:00 2001
From: Ján Jakub Naništa
Date: Mon, 27 Jan 2025 07:26:22 -0800
Subject: [PATCH 113/113] fix: Fix op-deployer version in kurtosis (#40)

---
 .github/assets/kurtosis_op_network_params_local.yaml          | 2 ++
 .../assets/kurtosis_op_network_params_local_no_op_geth.yaml   | 2 ++
 .github/assets/kurtosis_op_network_params_remote.yaml         | 4 +++-
 .../assets/kurtosis_op_network_params_remote_no_op_geth.yaml  | 2 ++
 4 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/.github/assets/kurtosis_op_network_params_local.yaml b/.github/assets/kurtosis_op_network_params_local.yaml
index bb2e87bd8841e..612d6823e9446 100644
--- a/.github/assets/kurtosis_op_network_params_local.yaml
+++ b/.github/assets/kurtosis_op_network_params_local.yaml
@@ -3,6 +3,8 @@ ethereum_package:
     - el_type: reth
       cl_type: lighthouse
 optimism_package:
+  op_contract_deployer_params:
+    image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-deployer:v0.0.11
   chains:
     - participants:
         - el_type: op-geth
diff --git a/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml b/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml
index 0acf800fab4bd..41fa18852bcff 100644
--- a/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml
+++ b/.github/assets/kurtosis_op_network_params_local_no_op_geth.yaml
@@ -3,6 +3,8 @@ ethereum_package:
     - el_type: reth
       cl_type: lighthouse
 optimism_package:
+  op_contract_deployer_params:
+    image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-deployer:v0.0.11
   chains:
     - participants:
         - el_type: op-reth
diff --git a/.github/assets/kurtosis_op_network_params_remote.yaml b/.github/assets/kurtosis_op_network_params_remote.yaml
index 486936e058804..19f78156510fe 100644
--- a/.github/assets/kurtosis_op_network_params_remote.yaml
+++ b/.github/assets/kurtosis_op_network_params_remote.yaml
@@ -3,6 +3,8 @@ ethereum_package:
     - el_type: reth
       cl_type: lighthouse
 optimism_package:
+  op_contract_deployer_params:
+    image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-deployer:v0.0.11
   chains:
     - participants:
         - el_type: op-geth
@@ -12,7 +14,7 @@ optimism_package:
           cl_extra_params:
             - "--l1.trustrpc=true"
         - el_type: op-reth
-          el_image: "ghcr.io/paradigmxyz/op-reth:kurtosis-ci"
+          el_image: "op-reth:latest"
           cl_type: op-node
           cl_image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:develop
           cl_extra_params:
diff --git a/.github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml b/.github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml
index 9684f2929e9d5..b709de89e62bf 100644
--- a/.github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml
+++ b/.github/assets/kurtosis_op_network_params_remote_no_op_geth.yaml
@@ -3,6 +3,8 @@ ethereum_package:
     - el_type: reth
       cl_type: lighthouse
 optimism_package:
+  op_contract_deployer_params:
+    image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-deployer:v0.0.11
   chains:
     - participants:
         - el_type: op-reth
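After this patch all four args files are expected to carry the same pinned op-deployer image. A quick sanity check, as a sketch assuming it runs from the repository root; the key comes straight from the diffs above:

    # Each file should report the pinned tag v0.0.11.
    grep -H 'op-deployer:' .github/assets/kurtosis_op_network_params_*.yaml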