diff --git a/crates/rbuilder/src/backtest/backtest_build_block.rs b/crates/rbuilder/src/backtest/backtest_build_block.rs index 5cee82a61..78edac8c4 100644 --- a/crates/rbuilder/src/backtest/backtest_build_block.rs +++ b/crates/rbuilder/src/backtest/backtest_build_block.rs @@ -1,5 +1,5 @@ //! Backtest app to build a single block in a similar way as we do in live. -//! It gets the orders from a HistoricalDataStorage, simulates the orders and the run the building algorithms. +//! It gets the orders from a HistoricalDataStorage, simulates the orders and then runs the building algorithms. //! It outputs the best algorithm (most profit) so we can check for improvements in our [crate::building::builders::BlockBuildingAlgorithm]s //! BlockBuildingAlgorithm are defined on the config file but selected on the command line via "--builders" //! Sample call: diff --git a/crates/rbuilder/src/backtest/backtest_build_range.rs b/crates/rbuilder/src/backtest/backtest_build_range.rs index a48434395..df44c20eb 100644 --- a/crates/rbuilder/src/backtest/backtest_build_range.rs +++ b/crates/rbuilder/src/backtest/backtest_build_range.rs @@ -1,13 +1,13 @@ //! Backtest app to build a multiple blocks in a similar way as we do in live. //! It gets the orders from a HistoricalDataStorage, simulates the orders and the run the building algorithms. -//! For each simulated block we count how for many block we generated more profit ("won" blocks) than the landed block and we report: +//! We count the number of blocks that generated more profit than the landed block ("won" blocks) and we report: //! - Win %: % of blocks "won" -//! - Total profits: for the blocks we "won" we consider profit = our_true_block_value - landed_bid and we add all this profit. +//! - Total profits: the sum of the profit (= our_true_block_value - landed_bid) for the blocks we won. //! This represents how much extra profit we did compared to the landed blocks. //! 
Optionally (via --store-backtest) it can store the simulated results on a SQLite db (config.backtest_results_store_path) //! Optionally (via --compare-backtest) it can compare the simulations against previously stored simulations (via --store-backtest) //! -//! Sample call (numbers are from_block , to_block (included)): +//! Sample call (numbers are from_block , to_block (inclusive)): //! - simple backtest: backtest-build-range --config /home/happy_programmer/config.toml 19380913 193809100 //! - backtest storing simulations : backtest-build-range --config /home/happy_programmer/config.toml --store-backtest 19380913 193809100 //! - backtest comparing simulations : backtest-build-range --config /home/happy_programmer/config.toml --compare-backtest 19380913 193809100 @@ -395,7 +395,7 @@ impl CSVResultWriter { /// Spawns a task that reads BlockData from the HistoricalDataStorage in blocks of current_num_threads. /// The results can the be polled from the returned mpsc::Receiver -/// This allows us to process a batch while the next if being fetched. +/// This allows us to process a batch while the next is being fetched. fn spawn_block_fetcher( mut historical_data_storage: HistoricalDataStorage, blocks: Vec, diff --git a/crates/rbuilder/src/backtest/fetch/mempool.rs b/crates/rbuilder/src/backtest/fetch/mempool.rs index 7ed392148..e90cf4681 100644 --- a/crates/rbuilder/src/backtest/fetch/mempool.rs +++ b/crates/rbuilder/src/backtest/fetch/mempool.rs @@ -1,4 +1,4 @@ -//! Implementation of [`DataSource`] to bring mempool txs from flashbots' mempool dumpster +//! Implementation of [`DataSource`] to bring mempool txs from flashbots' mempool dumpster. //! It downloads all the needed parquet files and keeps them cached for future use. use crate::{ backtest::{ @@ -23,7 +23,7 @@ use tracing::{error, trace}; /// Gets all the OrdersWithTimestamp in the given interval. /// Simulation info is set to None. 
-/// It checks for pre-downloaded parquet files on data_dir and downloads only the missing onces. +/// It checks for pre-downloaded parquet files on data_dir and downloads only the missing ones. pub fn get_mempool_transactions( data_dir: &Path, from: OffsetDateTime, @@ -63,7 +63,7 @@ fn path_transactions(data_dir: &Path, day: &str) -> PathBuf { data_dir.join(format!("transactions/{}.parquet", day)) } -/// Downloads to data_dir missing files for the given interval +/// Downloads missing files to data_dir for the given interval /// Since parquet files are 1 day long it checks all needed days. fn check_and_download_transaction_files( from_millis: i64, diff --git a/crates/rbuilder/src/backtest/fetch/mev_boost.rs b/crates/rbuilder/src/backtest/fetch/mev_boost.rs index 8d4c4e252..efbe36f59 100644 --- a/crates/rbuilder/src/backtest/fetch/mev_boost.rs +++ b/crates/rbuilder/src/backtest/fetch/mev_boost.rs @@ -28,7 +28,7 @@ pub struct PayloadDeliveredFetcher { relays: HashMap, } -/// Uses well-known relays ([RELAYS]) +/// Uses some predefined relays ([RELAYS]) impl Default for PayloadDeliveredFetcher { fn default() -> Self { let relays = RELAYS diff --git a/crates/rbuilder/src/backtest/fetch/mod.rs b/crates/rbuilder/src/backtest/fetch/mod.rs index c72ddd0ff..1c5e8a55f 100644 --- a/crates/rbuilder/src/backtest/fetch/mod.rs +++ b/crates/rbuilder/src/backtest/fetch/mod.rs @@ -117,8 +117,9 @@ impl HistoricalDataFetcher { }) } - /// Filters out orders with non-optional txs already landed (onchain nonce > tx nonce) - /// Also filters out empty orders (eg: all optional already landed txs) + /// Filters out orders with non-optional sub txs (we can't skip them) already landed (onchain nonce > tx nonce, can't be re-executed!) + /// since they will fail. + /// Also filters orders that will not fail but will execute nothing (eg: all optional already landed txs -> all txs will be skipped). 
async fn filter_order_by_nonces( &self, orders: Vec, diff --git a/crates/rbuilder/src/backtest/mod.rs b/crates/rbuilder/src/backtest/mod.rs index 10953e262..ef98ceef0 100644 --- a/crates/rbuilder/src/backtest/mod.rs +++ b/crates/rbuilder/src/backtest/mod.rs @@ -64,12 +64,13 @@ pub struct OrdersWithTimestamp { #[derive(Debug, Clone, PartialEq, Eq)] pub struct BlockData { pub block_number: u64, - /// Info for landed block + /// Extra info for landed block (not contained on onchain_block). + /// We get this from the relays (API /relay/v1/data/bidtraces/builder_blocks_received). pub winning_bid_trace: BuilderBlockReceived, - /// landed block + /// Landed block. pub onchain_block: alloy_rpc_types::Block, - /// Orders we had at the moment of the block building. - /// This might be an approximation depending on DataSources used + /// Orders we had at the moment of building the block. + /// This might be an approximation depending on DataSources used. pub available_orders: Vec, } diff --git a/crates/rbuilder/src/backtest/store.rs b/crates/rbuilder/src/backtest/store.rs index 54d003455..97a0ec656 100644 --- a/crates/rbuilder/src/backtest/store.rs +++ b/crates/rbuilder/src/backtest/store.rs @@ -26,7 +26,7 @@ use std::{ const VERSION: i64 = 9; /// Storage of BlockData. -/// It allows us have cached locally (using a SQLite DB) all the info we need for backtesting so we don't have to +/// It allows us to locally cache (using a SQLite DB) all the info we need for backtesting so we don't have to /// go to the mempool dumpster (or any other source) every time we simulate a block. pub struct HistoricalDataStorage { conn: SqliteConnection, diff --git a/crates/rbuilder/src/bin/backtest-fetch.rs b/crates/rbuilder/src/bin/backtest-fetch.rs index e7077e142..4520fe585 100644 --- a/crates/rbuilder/src/bin/backtest-fetch.rs +++ b/crates/rbuilder/src/bin/backtest-fetch.rs @@ -1,5 +1,5 @@ //! 
Application to fetch orders from different sources (eg: mempool dumpster, external bundles db) and store them on a SQLite DB -//! to be used later (eg: backtest-build-block,backtest-build-range) +//! to be used later (eg: backtest-build-block, backtest-build-range) use alloy_primitives::utils::format_ether; use clap::Parser; diff --git a/crates/rbuilder/src/bin/debug-order-input.rs b/crates/rbuilder/src/bin/debug-order-input.rs index 11b078fdb..bb033588f 100644 --- a/crates/rbuilder/src/bin/debug-order-input.rs +++ b/crates/rbuilder/src/bin/debug-order-input.rs @@ -1,5 +1,5 @@ -//! Application test the orders input. -//! For each blocks it subscribes a ReplaceableOrderPrinter to the OrderPool +//! Application to test the orders input. +//! For each block it subscribes a ReplaceableOrderPrinter to the OrderPool. use clap::Parser; use ethers::{ diff --git a/crates/rbuilder/src/bin/debug-order-sim.rs b/crates/rbuilder/src/bin/debug-order-sim.rs index 7d6ecdde3..878383721 100644 --- a/crates/rbuilder/src/bin/debug-order-sim.rs +++ b/crates/rbuilder/src/bin/debug-order-sim.rs @@ -1,5 +1,5 @@ -//! Application test the orders input + simulation. -//! For each blocks it subscribes an [`OrderReplacementManager`]. +//! Application to test the orders input + simulation. +//! For each block it subscribes an [`OrderReplacementManager`]. //! Since simulation needs to pull orders, the [`OrderReplacementManager`] is adapted with an [`OrderSender2OrderSink`] generating an [`OrdersForBlock`] for //! the simulation stage to pull. @@ -99,9 +99,9 @@ pub async fn main() -> eyre::Result<()> { .data .parent_block_number + 1; - // orders sent to the sink will can be polled on orders_for_block + // Orders sent to the sink will be polled on orders_for_block. let (orders_for_block, sink) = OrdersForBlock::new_with_sink(); - // add OrderReplacementManager to manage replacements and cancellations + // Add OrderReplacementManager to manage replacements and cancellations. 
let order_replacement_manager = OrderReplacementManager::new(Box::new(sink)); let _block_sub = order_pool_subscriber .add_sink_auto_remove(block_number, Box::new(order_replacement_manager)); diff --git a/crates/rbuilder/src/bin/dummy-builder.rs b/crates/rbuilder/src/bin/dummy-builder.rs index 463654d1f..f9cf9a4d5 100644 --- a/crates/rbuilder/src/bin/dummy-builder.rs +++ b/crates/rbuilder/src/bin/dummy-builder.rs @@ -1,7 +1,7 @@ //! This simple app shows how to run a custom block builder. -//! It uses no bidding strategy, just bids all available profit. -//! It does not sends blocks to any relay, just logs the generated blocks. -//! The algorithm is really dummy it just adds some txs it receives and generates a single block. +//! It uses no bidding strategy, it just bids all available profit. +//! It does not send blocks to any relay, it just logs the generated blocks. +//! The algorithm is really dummy, it just adds some txs it receives and generates a single block. //! This is NOT intended to be run in production so it has no nice configuration, poor error checking and some hardcoded values. use std::{path::PathBuf, sync::Arc, thread::sleep, time::Duration}; diff --git a/crates/rbuilder/src/bin/misc-relays-slot.rs b/crates/rbuilder/src/bin/misc-relays-slot.rs index 539959f3b..d6435d9e0 100644 --- a/crates/rbuilder/src/bin/misc-relays-slot.rs +++ b/crates/rbuilder/src/bin/misc-relays-slot.rs @@ -1,5 +1,5 @@ //! Helper app to get information from a landed block from the relays. -//! Take no configuration since it uses a hardcoded list of well known relays ([`rbuilder::mev_boost::RELAYS`]) +//! Takes no configuration since it uses a hardcoded list of relays ([`rbuilder::mev_boost::RELAYS`]). 
use alloy_primitives::utils::format_ether; use clap::Parser; use rbuilder::backtest::fetch::mev_boost::PayloadDeliveredFetcher; diff --git a/crates/rbuilder/src/bin/relay-sender.rs b/crates/rbuilder/src/bin/relay-sender.rs index aa5a18cc2..7525b4f96 100644 --- a/crates/rbuilder/src/bin/relay-sender.rs +++ b/crates/rbuilder/src/bin/relay-sender.rs @@ -1,3 +1,4 @@ +//! This app is intended to be used for testing against a fake-relay (https://github.com/ZanCorDX/fake-relay) without needing to have the builder or relay running. use rbuilder::mev_boost::{rpc::TestDataGenerator, RelayClient, SubmitBlockRequest}; use std::str::FromStr; use url::Url; diff --git a/crates/rbuilder/src/primitives/mev_boost.rs b/crates/rbuilder/src/primitives/mev_boost.rs index 497442283..428b70954 100644 --- a/crates/rbuilder/src/primitives/mev_boost.rs +++ b/crates/rbuilder/src/primitives/mev_boost.rs @@ -6,14 +6,14 @@ use url::Url; pub type MevBoostRelayID = String; /// Wrapper over RelayClient that allows to submit blocks and -/// hides the particular configuration (eg:ssz,gip,optimistic) +/// hides the particular configuration (eg: ssz, gip, optimistic). #[derive(Debug, Clone)] pub struct MevBoostRelay { pub id: MevBoostRelayID, pub client: RelayClient, - /// The lower priority -> more important + /// Lower priority -> more important. pub priority: usize, - /// true->ssz false->json + /// true -> ssz; false -> json. pub use_ssz_for_submit: bool, pub use_gzip_for_submit: bool, pub optimistic: bool, diff --git a/crates/rbuilder/src/primitives/mod.rs b/crates/rbuilder/src/primitives/mod.rs index eae410500..721d56875 100644 --- a/crates/rbuilder/src/primitives/mod.rs +++ b/crates/rbuilder/src/primitives/mod.rs @@ -1,4 +1,4 @@ -//! Order types used as units of block building. +//! Order types used as elements for block building. 
pub mod fmt; pub mod mev_boost; @@ -25,7 +25,7 @@ pub use test_data_generator::TestDataGenerator; use thiserror::Error; use uuid::Uuid; -/// Extra metadata for ShareBundle/Bundle +/// Extra metadata for ShareBundle/Bundle. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Metadata { pub received_at_timestamp: time::OffsetDateTime, @@ -59,15 +59,15 @@ impl AccountNonce { } } -/// BundledTxInfo should replace Nonce in the future +/// BundledTxInfo should replace Nonce in the future. #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct BundledTxInfo { pub nonce: AccountNonce, - /// optional -> can revert and the bundle continues + /// optional -> can revert and the bundle continues. pub optional: bool, } -/// @Pending: Delete and replace all uses by BundledTxInfo +/// @Pending: Delete and replace all uses by BundledTxInfo. #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Nonce { pub nonce: u64, @@ -75,7 +75,7 @@ pub struct Nonce { pub optional: bool, } -/// Information regarding a new/update replaceable Bundle/ShareBundle +/// Information regarding a new/update replaceable Bundle/ShareBundle. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct ReplacementData { pub key: KeyType, @@ -85,7 +85,7 @@ pub struct ReplacementData { } impl ReplacementData { - /// Next sequence_number, useful for testing + /// Next sequence_number, useful for testing. pub fn next(&self) -> Self { Self { key: self.key.clone(), @@ -96,7 +96,7 @@ impl ReplacementData { pub type BundleReplacementData = ReplacementData; -/// Bundle sent to us usually by a searcher via eth_sendBundle (https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#eth_sendbundle) +/// Bundle sent to us usually by a searcher via eth_sendBundle (https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#eth_sendbundle). 
#[derive(Derivative)] #[derivative(Debug, Clone, PartialEq, Eq, Hash)] pub struct Bundle { @@ -105,14 +105,14 @@ pub struct Bundle { pub max_timestamp: Option, pub txs: Vec, pub reverting_tx_hashes: Vec, - /// Virtual hash generated by concatenating all txs hashes (+some more info) and hashing it. - /// see [Bundle::hash_slow] for more details. + /// Virtual hash generated by concatenating all txs hashes (+some more info) and hashing them. + /// See [Bundle::hash_slow] for more details. pub hash: B256, - /// unique id we generate + /// Unique id we generate. pub uuid: Uuid, - /// unique id, bundle signer - /// The unique id was generated by the sender and is used for updates/cancellations - /// bundle signer is redundant with self.signer + /// Unique id, bundle signer. + /// The unique id was generated by the sender and is used for updates/cancellations. + /// Bundle signer is redundant with self.signer. pub replacement_data: Option, pub signer: Option
, @@ -125,7 +125,7 @@ impl Bundle { can_execute_with_block_base_fee(self.list_txs(), block_base_fee) } - /// BundledTxInfo for all the child txs + /// BundledTxInfo for all the child txs. pub fn nonces(&self) -> Vec { let txs = self .txs @@ -141,8 +141,8 @@ impl Bundle { .collect() } - /// Recalculate bundle hash and uuid - /// Hash is computed from child tx hashes + reverting_tx_hashes + /// Recalculate bundle hash and uuid. + /// Hash is computed from child tx hashes + reverting_tx_hashes. pub fn hash_slow(&mut self) { let hash = self .txs @@ -152,7 +152,7 @@ impl Bundle { self.hash = keccak256(hash); let uuid = { - // block, hash, reverting hashes + // Block, hash, reverting hashes. let mut buff = Vec::with_capacity(8 + 32 + 32 * self.reverting_tx_hashes.len()); { let block = self.block as i64; @@ -166,7 +166,7 @@ impl Bundle { let hash = { let mut res = [0u8; 16]; let mut hasher = Sha256::new(); - // we write 16 zeroes to replicate golang hashing behavior + // We write 16 zeroes to replicate golang hashing behavior. hasher.update(res); hasher.update(&buff); let output = hasher.finalize(); @@ -181,16 +181,16 @@ impl Bundle { #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum TxRevertBehavior { - /// Tx in a bundle can't revert + /// Tx in a bundle can't revert. NotAllowed, - /// If the tx reverts it will be included. This is the old "can_revert" boolean + /// If the tx reverts it will be included. This is the old "can_revert" boolean. AllowedIncluded, - /// If the tx reverts we will ignore it + /// If the tx reverts we will ignore it. AllowedExcluded, } impl TxRevertBehavior { - /// Backwards compatibility + /// Backwards compatibility. pub fn from_old_bool(can_revert: bool) -> Self { if can_revert { TxRevertBehavior::AllowedIncluded @@ -206,7 +206,7 @@ impl TxRevertBehavior { } } -/// Tx as part of a mev share body +/// Tx as part of a mev share body. 
#[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct ShareBundleTx { pub tx: TransactionSignedEcRecoveredWithBlobs, @@ -220,7 +220,7 @@ impl ShareBundleTx { } } /// Body element of a mev share bundle. -/// ShareBundleInner body is formed by several of this. +/// [`ShareBundleInner::body`] is formed by several of these. #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum ShareBundleBody { Tx(ShareBundleTx), @@ -240,21 +240,21 @@ impl ShareBundleBody { } /// Mev share contains 2 types of txs: -/// - User txs: simple txs sent to us to be protected and to give kickbacks to the user +/// - User txs: simple txs sent to us to be protected and to give kickbacks to the user. /// - Searcher txs: Txs added by a searcher to extract MEV from the user txs. /// Refund points to the user txs on the body and has the kickback percentage for it. #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Refund { - /// index of the ShareBundleInner::body for which this applies + /// Index of the ShareBundleInner::body for which this applies. pub body_idx: usize, - /// percent of the profit going back to the user as kickback. + /// Percent of the profit going back to the user as kickback. pub percent: usize, } /// Users can specify how to get kickbacks and this is propagated by the MEV-Share Node to us. -/// We get this configuration as a multiple RefundConfig, then the refunds are payed to the specified addresses in the indicated percentages. +/// We get this configuration as multiple RefundConfigs, then the refunds are paid to the specified addresses in the indicated percentages. -/// The sum of all RefundConfig::percent on a mev share bundle should be 100% +/// The sum of all RefundConfig::percent on a mev share bundle should be 100%. /// See [ShareBundleInner::refund_config] for more details. 
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -270,10 +270,10 @@ pub struct ShareBundleInner { pub refund: Vec, /// Optional RefundConfig for this ShareBundleInner. see [ShareBundleInner::refund_config] for more details. pub refund_config: Vec, - /// We are allowed to skip this sub bundle (either because of inner reverts or any other reason) + /// We are allowed to skip this sub bundle (either because of inner reverts or any other reason). /// Added specifically to allow same user sbundle merging since we stick together many sbundles and allow some of them to fail. pub can_skip: bool, - /// Patch to track the original orders when performing order merging (see [`ShareBundleMerger`]) + /// Patch to track the original orders when performing order merging (see [`ShareBundleMerger`]). pub original_order_id: Option, } @@ -344,8 +344,8 @@ pub type ShareBundleReplacementData = ReplacementData #[derive(Derivative)] #[derivative(Debug, Clone, PartialEq, Eq, Hash)] pub struct ShareBundle { - /// Hash for the ShareBundle (also used in the OrderId::ShareBundle) - /// see [ShareBundle::hash_slow] for more details. + /// Hash for the ShareBundle (also used in OrderId::ShareBundle). + /// See [ShareBundle::hash_slow] for more details. pub hash: B256, pub block: u64, pub max_block: u64, @@ -766,7 +766,7 @@ impl SimulatedOrder { } /// Unique OrderId used along the whole builder. -/// Sadly it's not perfect since we still might have some collisions (eg:ShareBundle is the tx tree hash which does not include all the other cfg). +/// Sadly it's not perfect since we still might have some collisions (eg: ShareBundle is the tx tree hash which does not include all the other cfg). 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] pub enum OrderId { Tx(B256), diff --git a/crates/rbuilder/src/utils/noncer.rs b/crates/rbuilder/src/utils/noncer.rs index cc125c187..3e2cd81a1 100644 --- a/crates/rbuilder/src/utils/noncer.rs +++ b/crates/rbuilder/src/utils/noncer.rs @@ -5,7 +5,7 @@ use reth_db::database::Database; use reth_interfaces::provider::ProviderResult; use std::sync::{Arc, Mutex}; -/// Struct to get nonces for Addresses caching the results. +/// Struct to get nonces for Addresses, caching the results. /// NonceCache contains the data (but doesn't allow you to query it) and NonceCacheRef is a reference that allows you to query it. /// Usage: /// - Create a NonceCache