From cab47ffd2e33ff749ee4ad74e9c50932884fe0f2 Mon Sep 17 00:00:00 2001 From: avalonche Date: Fri, 13 Sep 2024 00:59:25 +1000 Subject: [PATCH] Add bundle support --- builder/builder.go | 124 ++-- builder/builder_test.go | 15 +- builder/eth_service.go | 36 +- builder/eth_service_test.go | 4 +- builder/service.go | 13 - builder/types/bundle.go | 34 ++ builder/utils.go | 16 - cmd/geth/main.go | 15 +- cmd/utils/flags.go | 41 +- core/txpool/txpool.go | 66 ++ eth/api_backend.go | 4 + internal/ethapi/api_test.go | 3 + internal/ethapi/backend.go | 6 + internal/ethapi/bundle_api.go | 68 +++ internal/ethapi/transaction_args_test.go | 4 + miner/algo_greedy.go | 325 ++++++++++ miner/algo_greedy_test.go | 59 ++ miner/builder_ordering_test.go | 223 +++++++ miner/bundle_ordering.go | 266 +++++++++ miner/env_diff.go | 259 ++++++++ miner/env_diff_test.go | 396 ++++++++++++ miner/metrics.go | 10 + miner/miner.go | 2 + miner/payload_building.go | 39 +- miner/verify_bundles.go | 404 +++++++++++++ miner/verify_bundles_test.go | 727 +++++++++++++++++++++++ 26 files changed, 2977 insertions(+), 182 deletions(-) create mode 100644 builder/types/bundle.go create mode 100644 internal/ethapi/bundle_api.go create mode 100644 miner/algo_greedy.go create mode 100644 miner/algo_greedy_test.go create mode 100644 miner/builder_ordering_test.go create mode 100644 miner/bundle_ordering.go create mode 100644 miner/env_diff.go create mode 100644 miner/env_diff_test.go create mode 100644 miner/verify_bundles.go create mode 100644 miner/verify_bundles_test.go diff --git a/builder/builder.go b/builder/builder.go index da75f242c6..64c9626e9b 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "math/big" "net/http" _ "os" "strings" @@ -41,6 +40,10 @@ type IBuilder interface { handleGetPayload(w http.ResponseWriter, req *http.Request) } +type IPayload interface { + ResolveFull() *engine.ExecutionPayloadEnvelope +} + type Builder struct { eth IEthereumService ignoreLatePayloadAttributes bool @@ -48,8 +51,7 @@ type Builder struct { builderPrivateKey *ecdsa.PrivateKey builderAddress common.Address - builderRetryInterval time.Duration - builderBlockTime time.Duration + builderBlockTime time.Duration proposerAddress common.Address @@ -58,8 +60,8 @@ type Builder struct { slotCtx context.Context slotCtxCancel context.CancelFunc - bestBlockMu sync.Mutex - bestBlock *builderTypes.VersionedBuilderPayloadResponse + payloadMu sync.Mutex + payload IPayload stop chan struct{} } @@ -69,23 +71,12 @@ type BuilderArgs struct { builderPrivateKey *ecdsa.PrivateKey builderAddress common.Address proposerAddress common.Address - builderRetryInterval time.Duration blockTime time.Duration eth IEthereumService ignoreLatePayloadAttributes bool beaconClient IBeaconClient } -// SubmitBlockOpts is a struct that contains all the arguments needed to submit a block to the relay -type SubmitBlockOpts struct { - // ExecutablePayloadEnvelope is the payload envelope that was executed - ExecutionPayloadEnvelope *engine.ExecutionPayloadEnvelope - // SealedAt is the time at which the block was sealed - SealedAt time.Time - // PayloadAttributes are the payload attributes used for block building - PayloadAttributes *builderTypes.PayloadAttributes -} - func NewBuilder(args BuilderArgs) (*Builder, error) { slotCtx, slotCtxCancel := context.WithCancel(context.Background()) return &Builder{ @@ -95,7 +86,6 @@ func NewBuilder(args BuilderArgs) (*Builder, error) { builderPrivateKey: args.builderPrivateKey, builderAddress: 
args.builderAddress, proposerAddress: args.proposerAddress, - builderRetryInterval: args.builderRetryInterval, builderBlockTime: args.blockTime, slotCtx: slotCtx, @@ -174,13 +164,10 @@ func (b *Builder) GetPayload(request *builderTypes.BuilderPayloadRequest) (*buil } } - b.bestBlockMu.Lock() - bestBlock := b.bestBlock - b.bestBlockMu.Unlock() - - if bestBlock == nil { - log.Warn("no builder submissions") - return nil, ErrNoPayloads + bestBlock, err := b.getVersionedBlockSubmission(request.Message.Slot, request.Message.ParentHash) + if err != nil { + log.Warn("error getting versioned block submission", "err", err) + return nil, fmt.Errorf("error getting builder block: %w", err) } if bestBlock.Message.Slot != request.Message.Slot { @@ -269,12 +256,24 @@ func (b *Builder) handleGetPayload(w http.ResponseWriter, req *http.Request) { updateServeTimeHistogram("getPayload", true, time.Since(start)) } -func (b *Builder) saveBlockSubmission(opts SubmitBlockOpts) error { - executionPayload := opts.ExecutionPayloadEnvelope.ExecutionPayload +func (b *Builder) getVersionedBlockSubmission(slot uint64, parentHash common.Hash) (*builderTypes.VersionedBuilderPayloadResponse, error) { + var executionPayloadEnvelope *engine.ExecutionPayloadEnvelope + b.payloadMu.Lock() + if b.payload != nil { + executionPayloadEnvelope = b.payload.ResolveFull() + } + b.payloadMu.Unlock() + + if executionPayloadEnvelope == nil { + return nil, fmt.Errorf("no builder block found") + } + + executionPayload := executionPayloadEnvelope.ExecutionPayload + log.Info( "saveBlockSubmission", - "slot", opts.PayloadAttributes.Slot, - "parent", opts.PayloadAttributes.HeadHash.String(), + "slot", slot, + "parent", parentHash.String(), "hash", executionPayload.BlockHash.String(), ) @@ -287,18 +286,18 @@ func (b *Builder) saveBlockSubmission(opts SubmitBlockOpts) error { dataVersion = builderTypes.SpecVersionEcotone } - value, overflow := uint256.FromBig(opts.ExecutionPayloadEnvelope.BlockValue) + value, overflow := uint256.FromBig(executionPayloadEnvelope.BlockValue) if overflow { - return fmt.Errorf("could not set block value due to value overflow") + return nil, fmt.Errorf("could not set block value due to value overflow") } blockBidMsg := builderTypes.BidTrace{ - Slot: opts.PayloadAttributes.Slot, + Slot: slot, ParentHash: executionPayload.ParentHash, BlockHash: executionPayload.BlockHash, BuilderAddress: b.builderAddress, ProposerAddress: b.proposerAddress, - ProposerFeeRecipient: opts.PayloadAttributes.SuggestedFeeRecipient, + ProposerFeeRecipient: executionPayload.FeeRecipient, GasLimit: executionPayload.GasLimit, GasUsed: executionPayload.GasUsed, Value: value.ToBig(), @@ -306,7 +305,7 @@ func (b *Builder) saveBlockSubmission(opts SubmitBlockOpts) error { signature, err := b.signBuilderBid(&blockBidMsg) if err != nil { - return fmt.Errorf("could not sign block bid message, %w", err) + return nil, fmt.Errorf("could not sign block bid message, %w", err) } versionedBlockRequest := &builderTypes.VersionedBuilderPayloadResponse{ @@ -316,14 +315,10 @@ func (b *Builder) saveBlockSubmission(opts SubmitBlockOpts) error { Signature: signature, } - b.bestBlockMu.Lock() - b.bestBlock = versionedBlockRequest - b.bestBlockMu.Unlock() - - log.Info("saved block", "version", dataVersion.String(), "slot", opts.PayloadAttributes.Slot, "value", opts.ExecutionPayloadEnvelope.BlockValue.String(), + log.Info("resolved block", "version", dataVersion.String(), "slot", slot, "value", executionPayloadEnvelope.BlockValue, "parent", 
executionPayload.ParentHash.String(), "hash", executionPayload.BlockHash) - return nil + return versionedBlockRequest, nil } func (b *Builder) signBuilderBid(bid *builderTypes.BidTrace) ([]byte, error) { @@ -379,49 +374,14 @@ func (b *Builder) handlePayloadAttributes(attrs *builderTypes.PayloadAttributes) } func (b *Builder) runBuildingJob(slotCtx context.Context, attrs *builderTypes.PayloadAttributes) { - ctx, cancel := context.WithTimeout(slotCtx, b.builderBlockTime) - defer cancel() - - // Submission queue for the given payload attributes - // multiple jobs can run for different attributes fot the given slot - // 1. When new block is ready we check if its profit is higher than profit of last best block - // if it is we set queueBest* to values of the new block and notify queueSignal channel. - var ( - queueMu sync.Mutex - queueLastSubmittedHash common.Hash - queueBestBlockValue *big.Int = big.NewInt(0) - ) - log.Info("runBuildingJob", "slot", attrs.Slot, "parent", attrs.HeadHash, "payloadTimestamp", uint64(attrs.Timestamp), "txs", attrs.Transactions) - // retry build block every builderBlockRetryInterval - runRetryLoop(ctx, b.builderRetryInterval, func() { - log.Info("retrying BuildBlock", - "slot", attrs.Slot, - "parent", attrs.HeadHash, - "retryInterval", b.builderRetryInterval.String()) - payload, err := b.eth.BuildBlock(attrs) - if err != nil { - log.Warn("Failed to build block", "err", err) - return - } - - sealedAt := time.Now() - queueMu.Lock() - defer queueMu.Unlock() - if payload.ExecutionPayload.BlockHash != queueLastSubmittedHash && payload.BlockValue.Cmp(queueBestBlockValue) >= 0 { - queueLastSubmittedHash = payload.ExecutionPayload.BlockHash - queueBestBlockValue = payload.BlockValue - - submitBlockOpts := SubmitBlockOpts{ - ExecutionPayloadEnvelope: payload, - SealedAt: sealedAt, - PayloadAttributes: attrs, - } - err := b.saveBlockSubmission(submitBlockOpts) - if err != nil { - log.Error("could not save block submission", "err", err) - } - } - }) + payload, err := b.eth.BuildBlock(attrs) + if err != nil { + log.Warn("Failed to build block", "err", err) + return + } + b.payloadMu.Lock() + b.payload = payload + b.payloadMu.Unlock() } diff --git a/builder/builder_test.go b/builder/builder_test.go index b1c651f73d..9dd7070879 100644 --- a/builder/builder_test.go +++ b/builder/builder_test.go @@ -17,14 +17,24 @@ import ( "github.com/stretchr/testify/require" ) +type testPayloadService struct { + testExecutableData *engine.ExecutionPayloadEnvelope +} + +func (t *testPayloadService) ResolveFull() *engine.ExecutionPayloadEnvelope { + return t.testExecutableData +} + type testEthereumService struct { synced bool testExecutableData *engine.ExecutionPayloadEnvelope testBlock *types.Block } -func (t *testEthereumService) BuildBlock(attrs *builderTypes.PayloadAttributes) (*engine.ExecutionPayloadEnvelope, error) { - return t.testExecutableData, nil +func (t *testEthereumService) BuildBlock(attrs *builderTypes.PayloadAttributes) (IPayload, error) { + return &testPayloadService{ + testExecutableData: t.testExecutableData, + }, nil } func (t *testEthereumService) GetBlockByHash(hash common.Hash) *types.Block { return t.testBlock } @@ -104,7 +114,6 @@ func TestGetPayloadV1(t *testing.T) { builderPrivateKey: testPrivateKey, builderAddress: crypto.PubkeyToAddress(testPrivateKey.PublicKey), proposerAddress: crypto.PubkeyToAddress(testPrivateKey.PublicKey), - builderRetryInterval: 200 * time.Millisecond, blockTime: 2 * time.Second, eth: testEthService, ignoreLatePayloadAttributes: false, diff 
--git a/builder/eth_service.go b/builder/eth_service.go index 4c6b440566..953ccf367e 100644 --- a/builder/eth_service.go +++ b/builder/eth_service.go @@ -1,21 +1,16 @@ package builder import ( - "errors" - "time" - - "github.com/ethereum/go-ethereum/beacon/engine" builderTypes "github.com/ethereum/go-ethereum/builder/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/params" ) type IEthereumService interface { - BuildBlock(attrs *builderTypes.PayloadAttributes) (*engine.ExecutionPayloadEnvelope, error) + BuildBlock(attrs *builderTypes.PayloadAttributes) (IPayload, error) GetBlockByHash(hash common.Hash) *types.Block Config() *params.ChainConfig Synced() bool @@ -33,7 +28,7 @@ func NewEthereumService(eth *eth.Ethereum, config *Config) *EthereumService { } } -func (s *EthereumService) BuildBlock(attrs *builderTypes.PayloadAttributes) (*engine.ExecutionPayloadEnvelope, error) { +func (s *EthereumService) BuildBlock(attrs *builderTypes.PayloadAttributes) (IPayload, error) { // Send a request to generate a full block in the background. // The result can be obtained via the returned channel. args := &miner.BuildPayloadArgs{ @@ -47,32 +42,7 @@ func (s *EthereumService) BuildBlock(attrs *builderTypes.PayloadAttributes) (*en Transactions: attrs.Transactions, NoTxPool: attrs.NoTxPool, } - - payload, err := s.eth.Miner().BuildPayload(args) - if err != nil { - log.Error("Failed to build payload", "err", err) - return nil, err - } - - resCh := make(chan *engine.ExecutionPayloadEnvelope, 1) - go func() { - resCh <- payload.ResolveFull() - }() - - timer := time.NewTimer(s.cfg.BlockTime) - defer timer.Stop() - - select { - case payload := <-resCh: - if payload == nil { - return nil, errors.New("received nil payload from sealing work") - } - return payload, nil - case <-timer.C: - payload.Cancel() - log.Error("timeout waiting for block", "parent hash", attrs.HeadHash, "slot", attrs.Slot) - return nil, errors.New("timeout waiting for block result") - } + return s.eth.Miner().BuildPayload(args) } func (s *EthereumService) GetBlockByHash(hash common.Hash) *types.Block { diff --git a/builder/eth_service_test.go b/builder/eth_service_test.go index 67bf8dcace..52b67492b0 100644 --- a/builder/eth_service_test.go +++ b/builder/eth_service_test.go @@ -93,7 +93,9 @@ func TestBuildBlock(t *testing.T) { service := NewEthereumService(ethservice, &DefaultConfig) - executableData, err := service.BuildBlock(testPayloadAttributes) + payload, err := service.BuildBlock(testPayloadAttributes) + + executableData := payload.ResolveFull() require.Equal(t, common.Address{0x04, 0x10}, executableData.ExecutionPayload.FeeRecipient) require.Equal(t, common.Hash{0x05, 0x10}, executableData.ExecutionPayload.Random) diff --git a/builder/service.go b/builder/service.go index 0b0e4e93eb..527e611f40 100644 --- a/builder/service.go +++ b/builder/service.go @@ -4,7 +4,6 @@ import ( "crypto/ecdsa" "fmt" "net/http" - "time" builderTypes "github.com/ethereum/go-ethereum/builder/types" "github.com/ethereum/go-ethereum/common" @@ -75,17 +74,6 @@ func Register(stack *node.Node, backend *eth.Ethereum, cfg *Config) error { ethereumService := NewEthereumService(backend, cfg) - var builderRetryInterval time.Duration - if cfg.RetryInterval != "" { - d, err := time.ParseDuration(cfg.RetryInterval) - if err != nil { - return fmt.Errorf("error parsing builder retry 
interval - %v", err) - } - builderRetryInterval = d - } else { - builderRetryInterval = RetryIntervalDefault - } - builderPrivateKey, err := crypto.HexToECDSA(cfg.BuilderSigningKey) if err != nil { return fmt.Errorf("invalid builder private key: %w", err) @@ -109,7 +97,6 @@ func Register(stack *node.Node, backend *eth.Ethereum, cfg *Config) error { builderAddress: builderAddress, proposerAddress: proposerAddress, eth: ethereumService, - builderRetryInterval: builderRetryInterval, ignoreLatePayloadAttributes: cfg.IgnoreLatePayloadAttributes, beaconClient: beaconClient, blockTime: cfg.BlockTime, diff --git a/builder/types/bundle.go b/builder/types/bundle.go new file mode 100644 index 0000000000..95c136af48 --- /dev/null +++ b/builder/types/bundle.go @@ -0,0 +1,34 @@ +package types + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" +) + +type MevBundle struct { + Txs types.Transactions + BlockNumber *big.Int + MinTimestamp uint64 + MaxTimestamp uint64 + RevertingTxHashes []common.Hash + Hash common.Hash +} + +func (b *MevBundle) RevertingHash(hash common.Hash) bool { + for _, revHash := range b.RevertingTxHashes { + if revHash == hash { + return true + } + } + return false +} + +type SimulatedBundle struct { + MevGasPrice *uint256.Int + TotalEth *uint256.Int // total profit of the bundle + TotalGasUsed uint64 + OriginalBundle MevBundle +} diff --git a/builder/utils.go b/builder/utils.go index c380df9fcb..8b708a225f 100644 --- a/builder/utils.go +++ b/builder/utils.go @@ -1,12 +1,10 @@ package builder import ( - "context" "encoding/json" "errors" "math/big" "net/http" - "time" builderTypes "github.com/ethereum/go-ethereum/builder/types" "github.com/ethereum/go-ethereum/common" @@ -57,20 +55,6 @@ func respondError(w http.ResponseWriter, code int, message string) { } } -// runRetryLoop calls retry periodically with the provided interval respecting context cancellation -func runRetryLoop(ctx context.Context, interval time.Duration, retry func()) { - t := time.NewTicker(interval) - defer t.Stop() - for { - select { - case <-ctx.Done(): - return - case <-t.C: - retry() - } - } -} - func SigningHash(domain [32]byte, chainID *big.Int, payloadBytes []byte) (common.Hash, error) { var msgInput [32 + 32 + 32]byte // domain: first 32 bytes diff --git a/cmd/geth/main.go b/cmd/geth/main.go index a634a56ea9..ce33b42b0a 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -177,14 +177,13 @@ var ( }, utils.NetworkFlags, utils.DatabaseFlags) builderApiFlags = []cli.Flag{ - utils.BuilderEnabled, - utils.BuilderIgnoreLatePayloadAttributes, - utils.BuilderSigningKey, - utils.BuilderListenAddr, - utils.BuilderBeaconEndpoints, - utils.BuilderBlockRetryInterval, - utils.BuilderBlockTime, - utils.BuilderProposerSigningAddress, + utils.BuilderEnabledFlag, + utils.BuilderIgnoreLatePayloadAttributesFlag, + utils.BuilderSigningKeyFlag, + utils.BuilderListenAddrFlag, + utils.BuilderBeaconEndpointsFlag, + utils.BuilderBlockTimeFlag, + utils.BuilderProposerSigningAddressFlag, } rpcFlags = []cli.Flag{ diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 3ed6b0fd38..d972b32c8f 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -671,54 +671,47 @@ var ( } // Builder API settings - BuilderEnabled = &cli.BoolFlag{ + BuilderEnabledFlag = &cli.BoolFlag{ Name: "builder", Usage: "Enable the builder", EnvVars: []string{"BUILDER_ENABLED"}, Category: flags.BuilderCategory, } - BuilderIgnoreLatePayloadAttributes = 
&cli.BoolFlag{ + BuilderIgnoreLatePayloadAttributesFlag = &cli.BoolFlag{ Name: "builder.ignore_late_payload_attributes", Usage: "Builder will ignore all but the first payload attributes. Use if your CL sends non-canonical head updates.", EnvVars: []string{"BUILDER_IGNORE_LATE_PAYLOAD_ATTRIBUTES"}, Category: flags.BuilderCategory, } - BuilderSigningKey = &cli.StringFlag{ + BuilderSigningKeyFlag = &cli.StringFlag{ Name: "builder.signing_key", Usage: "Builder key used for signing blocks", EnvVars: []string{"BUILDER_SIGNING_KEY"}, Value: "0x2fc12ae741f29701f8e30f5de6350766c020cb80768a0ff01e6838ffd2431e11", Category: flags.BuilderCategory, } - BuilderListenAddr = &cli.StringFlag{ + BuilderListenAddrFlag = &cli.StringFlag{ Name: "builder.listen_addr", Usage: "Listening address for builder endpoint", EnvVars: []string{"BUILDER_LISTEN_ADDR"}, Value: ":28545", Category: flags.BuilderCategory, } - BuilderBeaconEndpoints = &cli.StringFlag{ + BuilderBeaconEndpointsFlag = &cli.StringFlag{ Name: "builder.beacon_endpoints", Usage: "Comma separated list of beacon endpoints to connect to for beacon chain data", EnvVars: []string{"BUILDER_BEACON_ENDPOINTS"}, Value: "http://127.0.0.1:5052", Category: flags.BuilderCategory, } - BuilderBlockTime = &cli.DurationFlag{ + BuilderBlockTimeFlag = &cli.DurationFlag{ Name: "builder.block_time", Usage: "Determines the block time of the network.", EnvVars: []string{"BUILDER_BLOCK_TIME"}, Value: builder.BlockTimeDefault, Category: flags.BuilderCategory, } - BuilderBlockRetryInterval = &cli.StringFlag{ - Name: "builder.block_retry_interval", - Usage: "Determines the interval at which builder will retry building a block", - EnvVars: []string{"BUILDER_RATE_LIMIT_RETRY_INTERVAL"}, - Value: builder.RetryIntervalDefault.String(), - Category: flags.BuilderCategory, - } - BuilderProposerSigningAddress = &cli.StringFlag{ + BuilderProposerSigningAddressFlag = &cli.StringFlag{ Name: "builder.proposer_signing_address", Usage: "Proposer address used for authenticating proposer messages", EnvVars: []string{"BUILDER_PROPOSER_SIGNING_ADDRESS"}, @@ -1578,17 +1571,16 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { } func SetBuilderConfig(ctx *cli.Context, cfg *builder.Config) { - if ctx.IsSet(BuilderEnabled.Name) { - cfg.Enabled = ctx.Bool(BuilderEnabled.Name) + if ctx.IsSet(BuilderEnabledFlag.Name) { + cfg.Enabled = ctx.Bool(BuilderEnabledFlag.Name) } - cfg.IgnoreLatePayloadAttributes = ctx.IsSet(BuilderIgnoreLatePayloadAttributes.Name) - cfg.BuilderSigningKey = ctx.String(BuilderSigningKey.Name) - cfg.ListenAddr = ctx.String(BuilderListenAddr.Name) - cfg.BeaconEndpoints = strings.Split(ctx.String(BuilderBeaconEndpoints.Name), ",") + cfg.IgnoreLatePayloadAttributes = ctx.IsSet(BuilderIgnoreLatePayloadAttributesFlag.Name) + cfg.BuilderSigningKey = ctx.String(BuilderSigningKeyFlag.Name) + cfg.ListenAddr = ctx.String(BuilderListenAddrFlag.Name) + cfg.BeaconEndpoints = strings.Split(ctx.String(BuilderBeaconEndpointsFlag.Name), ",") - cfg.RetryInterval = ctx.String(BuilderBlockRetryInterval.Name) - cfg.BlockTime = ctx.Duration(BuilderBlockTime.Name) - cfg.ProposerAddress = ctx.String(BuilderProposerSigningAddress.Name) + cfg.BlockTime = ctx.Duration(BuilderBlockTimeFlag.Name) + cfg.ProposerAddress = ctx.String(BuilderProposerSigningAddressFlag.Name) } // SetNodeConfig applies node-related command line flags to the config. 
@@ -1800,6 +1792,9 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { if ctx.IsSet(RollupComputePendingBlock.Name) { cfg.RollupComputePendingBlock = ctx.Bool(RollupComputePendingBlock.Name) } + if ctx.IsSet(BuilderEnabledFlag.Name) { + cfg.BuilderEnabled = ctx.Bool(BuilderEnabledFlag.Name) + } } func setRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) { diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index ff31ffc200..f21d57a6eb 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -28,6 +28,9 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "golang.org/x/crypto/sha3" + + builderTypes "github.com/ethereum/go-ethereum/builder/types" ) type L1CostFunc func(dataGas types.RollupCostData) *big.Int @@ -77,6 +80,9 @@ type TxPool struct { term chan struct{} // Termination channel to detect a closed pool sync chan chan error // Testing / simulator channel to block until internal reset is done + + bundleLock sync.RWMutex // Mutex protecting the pool when adding bundles + mevBundles []builderTypes.MevBundle } // New creates a new transaction pool to gather, sort and filter inbound @@ -482,3 +488,63 @@ func (p *TxPool) Sync() error { return errors.New("pool already terminated") } } + +func (p *TxPool) AddMevBundle(txs types.Transactions, blockNumber *big.Int, minTimestamp, maxTimestamp uint64, revertingTxHashes []common.Hash) (common.Hash, error) { + bundleHasher := sha3.NewLegacyKeccak256() + for _, tx := range txs { + _, err := bundleHasher.Write(tx.Hash().Bytes()) + if err != nil { + return common.Hash{}, err + } + } + bundleHash := common.BytesToHash(bundleHasher.Sum(nil)) + + p.bundleLock.Lock() + defer p.bundleLock.Unlock() + + p.mevBundles = append(p.mevBundles, builderTypes.MevBundle{ + Txs: txs, + BlockNumber: blockNumber, + MinTimestamp: minTimestamp, + MaxTimestamp: maxTimestamp, + RevertingTxHashes: revertingTxHashes, + Hash: bundleHash, + }) + return bundleHash, nil +} + +// MevBundles returns a list of bundles valid for the given blockNumber/blockTimestamp +// also prunes bundles that are outdated +// Returns regular bundles and a function resolving to current cancellable bundles +func (p *TxPool) MevBundles(blockNumber *big.Int, blockTimestamp uint64) []builderTypes.MevBundle { + p.bundleLock.Lock() + defer p.bundleLock.Unlock() + + // returned values + var ret []builderTypes.MevBundle + // rolled over values + var bundles []builderTypes.MevBundle + + for _, bundle := range p.mevBundles { + // Prune outdated bundles + if (bundle.MaxTimestamp != 0 && blockTimestamp > bundle.MaxTimestamp) || blockNumber.Cmp(bundle.BlockNumber) > 0 { + continue + } + + // Roll over future bundles + if (bundle.MinTimestamp != 0 && blockTimestamp < bundle.MinTimestamp) || blockNumber.Cmp(bundle.BlockNumber) < 0 { + bundles = append(bundles, bundle) + continue + } + + // keep the bundles around internally until they need to be pruned + bundles = append(bundles, bundle) + + // return the ones which are in time + ret = append(ret, bundle) + } + + p.mevBundles = bundles + + return ret +} diff --git a/eth/api_backend.go b/eth/api_backend.go index bd1b623c36..6507ae9f70 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -315,6 +315,10 @@ func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) return b.eth.txPool.Add([]*types.Transaction{signedTx}, true, false)[0] } +func (b *EthAPIBackend) SendBundle(ctx context.Context, txs types.Transactions, blockNumber 
rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) (common.Hash, error) { + return b.eth.txPool.AddMevBundle(txs, big.NewInt(blockNumber.Int64()), minTimestamp, maxTimestamp, revertingTxHashes) +} + func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) { pending := b.eth.txPool.Pending(txpool.PendingFilter{}) var txs types.Transactions diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 20a0e0233a..c54aeffb76 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -785,6 +785,9 @@ func (b testBackend) HistoricalRPCService() *rpc.Client { func (b testBackend) Genesis() *types.Block { panic("implement me") } +func (b testBackend) SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) (common.Hash, error) { + panic("implement me") +} func TestEstimateGas(t *testing.T) { t.Parallel() diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 7265bed8a0..54251de542 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -99,6 +99,9 @@ type Backend interface { SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription BloomStatus() (uint64, uint64) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) + + // Bundle API + SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) (common.Hash, error) } func GetAPIs(apiBackend Backend) []rpc.API { @@ -125,6 +128,9 @@ func GetAPIs(apiBackend Backend) []rpc.API { }, { Namespace: "personal", Service: NewPersonalAccountAPI(apiBackend, nonceLock), + }, { + Namespace: "eth", + Service: NewPrivateTxBundleAPI(apiBackend), }, } } diff --git a/internal/ethapi/bundle_api.go b/internal/ethapi/bundle_api.go new file mode 100644 index 0000000000..2eaadc30b9 --- /dev/null +++ b/internal/ethapi/bundle_api.go @@ -0,0 +1,68 @@ +package ethapi + +import ( + "context" + "errors" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" +) + +// PrivateTxBundleAPI offers an API for accepting bundled transactions +type PrivateTxBundleAPI struct { + b Backend +} + +// NewPrivateTxBundleAPI creates a new Tx Bundle API instance. +func NewPrivateTxBundleAPI(b Backend) *PrivateTxBundleAPI { + return &PrivateTxBundleAPI{b} +} + +// SendBundleArgs represents the arguments for a SendBundle call. +type SendBundleArgs struct { + Txs []hexutil.Bytes `json:"txs"` + BlockNumber rpc.BlockNumber `json:"blockNumber"` + MinTimestamp *uint64 `json:"minTimestamp"` + MaxTimestamp *uint64 `json:"maxTimestamp"` + RevertingTxHashes []common.Hash `json:"revertingTxHashes"` +} + +type SendBundleResult struct { + BundleHash common.Hash `json:"bundleHash"` +} + +// SendBundle will add the signed transaction to the transaction pool. 
+// The sender is responsible for signing the transaction and using the correct nonce and ensuring validity +func (s *PrivateTxBundleAPI) SendBundle(ctx context.Context, args SendBundleArgs) (*SendBundleResult, error) { + var txs types.Transactions + if len(args.Txs) == 0 { + return nil, errors.New("bundle missing txs") + } + if args.BlockNumber == 0 { + return nil, errors.New("bundle missing blockNumber") + } + + for _, encodedTx := range args.Txs { + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(encodedTx); err != nil { + return nil, err + } + txs = append(txs, tx) + } + + var minTimestamp, maxTimestamp uint64 + if args.MinTimestamp != nil { + minTimestamp = *args.MinTimestamp + } + if args.MaxTimestamp != nil { + maxTimestamp = *args.MaxTimestamp + } + + bundleHash, err := s.b.SendBundle(ctx, txs, args.BlockNumber, minTimestamp, maxTimestamp, args.RevertingTxHashes) + if err != nil { + return nil, err + } + return &SendBundleResult{BundleHash: bundleHash}, nil +} diff --git a/internal/ethapi/transaction_args_test.go b/internal/ethapi/transaction_args_test.go index 79fe9a4257..20e7956556 100644 --- a/internal/ethapi/transaction_args_test.go +++ b/internal/ethapi/transaction_args_test.go @@ -405,3 +405,7 @@ func (b *backendMock) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) func (b *backendMock) Engine() consensus.Engine { return nil } func (b *backendMock) HistoricalRPCService() *rpc.Client { return nil } func (b *backendMock) Genesis() *types.Block { return nil } + +func (b *backendMock) SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64, revertingTxHashes []common.Hash) (common.Hash, error) { + return common.Hash{}, nil +} diff --git a/miner/algo_greedy.go b/miner/algo_greedy.go new file mode 100644 index 0000000000..2cca268f6a --- /dev/null +++ b/miner/algo_greedy.go @@ -0,0 +1,325 @@ +package miner + +import ( + "errors" + "fmt" + "math/big" + "sync" + "sync/atomic" + "time" + + builderTypes "github.com/ethereum/go-ethereum/builder/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" +) + +// generateBuilderWork generates a sealing block based on the given parameters. 
+func (miner *Miner) generateBuilderWork(params *generateParams) *newPayloadResult { + start := time.Now() + profit := new(uint256.Int) + bundles := []builderTypes.SimulatedBundle{} + + work, err := miner.prepareWork(params) + if err != nil { + return &newPayloadResult{err: err} + } + if work.gasPool == nil { + gasLimit := miner.config.EffectiveGasCeil + if gasLimit == 0 || gasLimit > work.header.GasLimit { + gasLimit = work.header.GasLimit + } + work.gasPool = new(core.GasPool).AddGas(gasLimit) + } + + misc.EnsureCreate2Deployer(miner.chainConfig, work.header.Time, work.state) + + for _, tx := range params.txs { + from, _ := types.Sender(work.signer, tx) + work.state.SetTxContext(tx.Hash(), work.tcount) + err = miner.commitTransaction(work, tx) + if err != nil { + return &newPayloadResult{err: fmt.Errorf("failed to force-include tx: %s type: %d sender: %s nonce: %d, err: %w", tx.Hash(), tx.Type(), from, tx.Nonce(), err)} + } + work.tcount++ + } + if !params.noTxs { + // use shared interrupt if present + interrupt := params.interrupt + if interrupt == nil { + interrupt = new(atomic.Int32) + } + timer := time.AfterFunc(max(minRecommitInterruptInterval, miner.config.Recommit), func() { + interrupt.Store(commitInterruptTimeout) + }) + + bundles, profit, err = miner.fillTransactionsAndBundles(interrupt, work, params.txs) + timer.Stop() // don't need timeout interruption any more + if errors.Is(err, errBlockInterruptedByTimeout) { + log.Warn("Block building is interrupted", "allowance", common.PrettyDuration(miner.config.Recommit)) + } else if errors.Is(err, errBlockInterruptedByResolve) { + log.Info("Block building got interrupted by payload resolution") + } else if err != nil { + return &newPayloadResult{err: err} + } + } + if intr := params.interrupt; intr != nil && params.isUpdate && intr.Load() != commitInterruptNone { + return &newPayloadResult{err: errInterruptedUpdate} + } + + body := types.Body{Transactions: work.txs, Withdrawals: params.withdrawals} + block, err := miner.engine.FinalizeAndAssemble(miner.chain, work.header, work.state, &body, work.receipts) + if err != nil { + return &newPayloadResult{err: err} + } + + log.Info("Block finalized and assembled", "num", block.Number().String(), "profit", ethIntToFloat(profit), + "txs", len(work.txs), "bundles", len(bundles), "gasUsed", block.GasUsed(), "time", time.Since(start)) + + return &newPayloadResult{ + block: block, + fees: profit.ToBig(), + sidecars: work.sidecars, + stateDB: work.state, + receipts: work.receipts, + } +} + +func (miner *Miner) mergeOrdersIntoEnvDiff(envDiff *environmentDiff, orders *ordersByPriceAndNonce, interrupt *atomic.Int32) []builderTypes.SimulatedBundle { + var ( + usedBundles []builderTypes.SimulatedBundle + ) + for { + order := orders.Peek() + if order == nil { + break + } + + if lazyTx := order.Tx(); lazyTx != nil { + tx := lazyTx.Resolve() + if tx == nil { + log.Trace("Ignoring evicted transaction", "hash", lazyTx.Hash) + orders.Pop() + continue + } + receipt, skip, err := envDiff.commitTx(tx, miner.chain) + switch skip { + case shiftTx: + orders.Shift() + case popTx: + orders.Pop() + } + + if err != nil { + log.Trace("could not apply tx", "hash", tx.Hash(), "err", err) + continue + } + effGapPrice, err := tx.EffectiveGasTip(envDiff.baseEnvironment.header.BaseFee) + if err == nil { + log.Trace("Included tx", "EGP", effGapPrice.String(), "gasUsed", receipt.GasUsed) + } + } else if bundle := order.Bundle(); bundle != nil { + err := envDiff.commitBundle(bundle, miner.chain, interrupt) + orders.Pop() + if
err != nil { + log.Trace("Could not apply bundle", "bundle", bundle.OriginalBundle.Hash, "err", err) + continue + } + + log.Trace("Included bundle", "bundleEGP", bundle.MevGasPrice.String(), "gasUsed", bundle.TotalGasUsed, "totalEth", ethIntToFloat(bundle.TotalEth)) + usedBundles = append(usedBundles, *bundle) + } + } + return usedBundles +} + +func (miner *Miner) fillTransactionsAndBundles(interrupt *atomic.Int32, env *environment, forcedTxs types.Transactions) ([]builderTypes.SimulatedBundle, *uint256.Int, error) { + miner.confMu.RLock() + tip := miner.config.GasPrice + miner.confMu.RUnlock() + + // Retrieve the pending transactions pre-filtered by the 1559/4844 dynamic fees + filter := txpool.PendingFilter{ + MinTip: uint256.MustFromBig(tip), + } + if env.header.BaseFee != nil { + filter.BaseFee = uint256.MustFromBig(env.header.BaseFee) + } + if env.header.ExcessBlobGas != nil { + filter.BlobFee = uint256.MustFromBig(eip4844.CalcBlobFee(*env.header.ExcessBlobGas)) + } + + pending := miner.txpool.Pending(filter) + mempoolTxHashes := make(map[common.Hash]struct{}) + for _, txs := range pending { + for _, tx := range txs { + mempoolTxHashes[tx.Hash] = struct{}{} + } + } + + bundlesToConsider, err := miner.getSimulatedBundles(env) + if err != nil { + return nil, nil, err + } + + start := time.Now() + + orders := newOrdersByPriceAndNonce(env.signer, pending, bundlesToConsider, env.header.BaseFee) + envDiff := newEnvironmentDiff(env) + usedBundles := miner.mergeOrdersIntoEnvDiff(envDiff, orders, interrupt) + envDiff.applyToBaseEnv() + + mergeAlgoTimer.Update(time.Since(start)) + + err = VerifyBundlesAtomicity(env, usedBundles, bundlesToConsider, mempoolTxHashes, forcedTxs) + if err != nil { + return nil, nil, err + } + return usedBundles, envDiff.profit, nil +} + +func (miner *Miner) getSimulatedBundles(env *environment) ([]builderTypes.SimulatedBundle, error) { + bundles := miner.txpool.MevBundles(env.header.Number, env.header.Time) + + simBundles, err := miner.simulateBundles(env, bundles) + if err != nil { + log.Error("Failed to simulate bundles", "err", err) + return nil, err + } + + return simBundles, nil +} + +func (miner *Miner) simulateBundles(env *environment, bundles []builderTypes.MevBundle) ([]builderTypes.SimulatedBundle, error) { + start := time.Now() + + simResult := make([]*builderTypes.SimulatedBundle, len(bundles)) + + var wg sync.WaitGroup + for i, bundle := range bundles { + wg.Add(1) + go func(idx int, bundle builderTypes.MevBundle, state *state.StateDB) { + defer wg.Done() + + start := time.Now() + + if len(bundle.Txs) == 0 { + return + } + gasPool := new(core.GasPool).AddGas(env.header.GasLimit) + simmed, err := miner.computeBundleGas(env, bundle, state, gasPool) + + simulationMeter.Mark(1) + + if err != nil { + simulationRevertedMeter.Mark(1) + failedBundleSimulationTimer.UpdateSince(start) + + log.Trace("Error computing gas for a bundle", "error", err) + return + } + simResult[idx] = &simmed + + simulationCommittedMeter.Mark(1) + successfulBundleSimulationTimer.UpdateSince(start) + }(i, bundle, env.state.Copy()) + } + + wg.Wait() + + simBundleCount := 0 + for _, bundle := range simResult { + if bundle != nil { + simBundleCount += 1 + } + } + + simulatedBundles := make([]builderTypes.SimulatedBundle, 0, simBundleCount) + for _, bundle := range simResult { + if bundle != nil { + simulatedBundles = append(simulatedBundles, *bundle) + } + } + + log.Debug("Simulated bundles", "block", env.header.Number, "allBundles", len(bundles), "okBundles", len(simulatedBundles), 
"time", time.Since(start)) + + blockBundleSimulationTimer.Update(time.Since(start)) + blockBundleNumHistogram.Update(int64(len(bundles))) + + return simulatedBundles, nil +} + +// Compute the adjusted gas price for a whole bundle +// Done by calculating all gas spent, adding transfers to the coinbase, and then dividing by gas used +func (miner *Miner) computeBundleGas(env *environment, bundle builderTypes.MevBundle, state *state.StateDB, gasPool *core.GasPool) (builderTypes.SimulatedBundle, error) { + var totalGasUsed uint64 = 0 + var tempGasUsed uint64 + + totalEth := new(uint256.Int) + + for i, tx := range bundle.Txs { + if env.header.BaseFee != nil && tx.Type() == 2 { + // Sanity check for extremely large numbers + if tx.GasFeeCap().BitLen() > 256 { + return builderTypes.SimulatedBundle{}, core.ErrFeeCapVeryHigh + } + if tx.GasTipCap().BitLen() > 256 { + return builderTypes.SimulatedBundle{}, core.ErrTipVeryHigh + } + // Ensure gasFeeCap is greater than or equal to gasTipCap. + if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + return builderTypes.SimulatedBundle{}, core.ErrTipAboveFeeCap + } + } + + state.SetTxContext(tx.Hash(), i) + coinbaseBalanceBefore := state.GetBalance(env.coinbase) + + config := *miner.chain.GetVMConfig() + receipt, err := core.ApplyTransaction(miner.chainConfig, miner.chain, &env.coinbase, gasPool, state, env.header, tx, &tempGasUsed, config) + if err != nil { + return builderTypes.SimulatedBundle{}, err + } + + if receipt.Status == types.ReceiptStatusFailed && !containsHash(bundle.RevertingTxHashes, receipt.TxHash) { + return builderTypes.SimulatedBundle{}, errors.New("failed tx") + } + + totalGasUsed += receipt.GasUsed + + coinbaseBalanceAfter := state.GetBalance(env.coinbase) + coinbaseDelta := uint256.NewInt(0).Sub(coinbaseBalanceAfter, coinbaseBalanceBefore) + totalEth.Add(totalEth, coinbaseDelta) + } + + return builderTypes.SimulatedBundle{ + MevGasPrice: new(uint256.Int).Div(totalEth, new(uint256.Int).SetUint64(totalGasUsed)), + TotalEth: totalEth, + TotalGasUsed: totalGasUsed, + OriginalBundle: bundle, + }, nil +} + +// ethIntToFloat is for formatting a uint256.Int in wei to eth +func ethIntToFloat(eth *uint256.Int) *big.Float { + if eth == nil { + return big.NewFloat(0) + } + return new(big.Float).Quo(new(big.Float).SetInt(eth.ToBig()), new(big.Float).SetInt(big.NewInt(params.Ether))) +} + +func containsHash(arr []common.Hash, match common.Hash) bool { + for _, elem := range arr { + if elem == match { + return true + } + } + return false +} diff --git a/miner/algo_greedy_test.go b/miner/algo_greedy_test.go new file mode 100644 index 0000000000..f9d70ea810 --- /dev/null +++ b/miner/algo_greedy_test.go @@ -0,0 +1,59 @@ +package miner + +import ( + "math" + "math/big" + "testing" + + builderTypes "github.com/ethereum/go-ethereum/builder/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/holiman/uint256" +) + +func TestBuildBlockGasLimit(t *testing.T) { + statedb, chData, signers := genTestSetup(GasLimit) + env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1)) + txs := make(map[common.Address][]*txpool.LazyTransaction) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + txs[signers.addresses[1]] = []*txpool.LazyTransaction{{ + Hash: tx1.Hash(), + Tx: tx1, + Time: tx1.Time(), + GasFeeCap: uint256.MustFromBig(tx1.GasFeeCap()), + GasTipCap: uint256.MustFromBig(tx1.GasTipCap()), + }} + tx2 := signers.signTx(2, 
21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + txs[signers.addresses[2]] = []*txpool.LazyTransaction{{ + Hash: tx2.Hash(), + Tx: tx2, + Time: tx2.Time(), + GasFeeCap: uint256.MustFromBig(tx2.GasFeeCap()), + GasTipCap: uint256.MustFromBig(tx2.GasTipCap()), + }} + tx3 := signers.signTx(3, 21000, big.NewInt(math.MaxInt), big.NewInt(math.MaxInt), signers.addresses[2], big.NewInt(math.MaxInt), []byte{}) + txs[signers.addresses[3]] = []*txpool.LazyTransaction{{ + Hash: tx3.Hash(), + Tx: tx3, + Time: tx3.Time(), + GasFeeCap: uint256.MustFromBig(tx3.GasFeeCap()), + GasTipCap: uint256.MustFromBig(tx3.GasTipCap()), + }} + + envDiff := newEnvironmentDiff(env) + orders := newOrdersByPriceAndNonce(env.signer, txs, []builderTypes.SimulatedBundle{}, env.header.BaseFee) + miner := Miner{ + chain: chData, + chainConfig: chData.Config(), + } + miner.mergeOrdersIntoEnvDiff(envDiff, orders, nil) + envDiff.applyToBaseEnv() + + if env.tcount != 1 { + t.Fatalf("Incorrect tx count [found: %d]", env.tcount) + } + +} diff --git a/miner/builder_ordering_test.go b/miner/builder_ordering_test.go new file mode 100644 index 0000000000..3d830a6f3a --- /dev/null +++ b/miner/builder_ordering_test.go @@ -0,0 +1,223 @@ +package miner + +import ( + "crypto/ecdsa" + "math/big" + "math/rand" + "testing" + "time" + + builderTypes "github.com/ethereum/go-ethereum/builder/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/holiman/uint256" +) + +func TestOrderPriceNonceSort1559(t *testing.T) { + t.Parallel() + testOrderPriceNonceSort(t, big.NewInt(0)) + testOrderPriceNonceSort(t, big.NewInt(5)) + testOrderPriceNonceSort(t, big.NewInt(50)) +} + +// Tests that transactions can be correctly sorted according to their price in +// decreasing order, but at the same time with increasing nonces when issued by +// the same account.
+func testOrderPriceNonceSort(t *testing.T, baseFee *big.Int) { + // Generate a batch of accounts to start with + keys := make([]*ecdsa.PrivateKey, 25) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + } + signer := types.LatestSignerForChainID(common.Big1) + + // Generate a batch of transactions with overlapping values, but shifted nonces + groups := map[common.Address][]*txpool.LazyTransaction{} + expectedCount := 0 + for start, key := range keys { + addr := crypto.PubkeyToAddress(key.PublicKey) + count := 25 + for i := 0; i < 25; i++ { + var tx *types.Transaction + gasFeeCap := rand.Intn(50) + if baseFee == nil { + tx = types.NewTx(&types.LegacyTx{ + Nonce: uint64(start + i), + To: &common.Address{}, + Value: big.NewInt(100), + Gas: 100, + GasPrice: big.NewInt(int64(gasFeeCap)), + Data: nil, + }) + } else { + tx = types.NewTx(&types.DynamicFeeTx{ + Nonce: uint64(start + i), + To: &common.Address{}, + Value: big.NewInt(100), + Gas: 100, + GasFeeCap: big.NewInt(int64(gasFeeCap)), + GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))), + Data: nil, + }) + if count == 25 && int64(gasFeeCap) < baseFee.Int64() { + count = i + } + } + tx, err := types.SignTx(tx, signer, key) + if err != nil { + t.Fatalf("failed to sign tx: %s", err) + } + groups[addr] = append(groups[addr], &txpool.LazyTransaction{ + Hash: tx.Hash(), + Tx: tx, + Time: tx.Time(), + GasFeeCap: uint256.MustFromBig(tx.GasFeeCap()), + GasTipCap: uint256.MustFromBig(tx.GasTipCap()), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), + }) + } + expectedCount += count + } + // Sort the transactions and cross check the nonce ordering + txset := newOrdersByPriceAndNonce(signer, groups, nil, baseFee) + + txs := types.Transactions{} + for tx := txset.Peek(); tx != nil; tx = txset.Peek() { + txs = append(txs, tx.Tx().Tx) + txset.Shift() + } + if len(txs) != expectedCount { + t.Errorf("expected %d transactions, found %d", expectedCount, len(txs)) + } + for i, txi := range txs { + fromi, _ := types.Sender(signer, txi) + + // Make sure the nonce order is valid + for j, txj := range txs[i+1:] { + fromj, _ := types.Sender(signer, txj) + if fromi == fromj && txi.Nonce() > txj.Nonce() { + t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce()) + } + } + // If the next tx has different from account, the price must be lower than the current one + if i+1 < len(txs) { + next := txs[i+1] + fromNext, _ := types.Sender(signer, next) + tip, err := txi.EffectiveGasTip(baseFee) + nextTip, nextErr := next.EffectiveGasTip(baseFee) + if err != nil || nextErr != nil { + t.Errorf("error calculating effective tip: %v, %v", err, nextErr) + } + if fromi != fromNext && tip.Cmp(nextTip) < 0 { + t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice()) + } + } + } +} + +// Tests that if multiple transactions have the same price, the ones seen earlier +// are prioritized to avoid network spam attacks aiming for a specific ordering. 
+func TestOrderTimeSort(t *testing.T) { + t.Parallel() + // Generate a batch of accounts to start with + keys := make([]*ecdsa.PrivateKey, 5) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + } + signer := types.HomesteadSigner{} + + // Generate a batch of transactions with overlapping prices, but different creation times + groups := map[common.Address][]*txpool.LazyTransaction{} + for start, key := range keys { + addr := crypto.PubkeyToAddress(key.PublicKey) + + tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 100, big.NewInt(1), nil), signer, key) + tx.SetTime(time.Unix(0, int64(len(keys)-start))) + + groups[addr] = append(groups[addr], &txpool.LazyTransaction{ + Hash: tx.Hash(), + Tx: tx, + Time: tx.Time(), + GasFeeCap: uint256.MustFromBig(tx.GasFeeCap()), + GasTipCap: uint256.MustFromBig(tx.GasTipCap()), + Gas: tx.Gas(), + BlobGas: tx.BlobGas(), + }) + } + // Sort the transactions and cross check the nonce ordering + txset := newOrdersByPriceAndNonce(signer, groups, nil, nil) + + txs := types.Transactions{} + for tx := txset.Peek(); tx != nil; tx = txset.Peek() { + txs = append(txs, tx.Tx().Tx) + txset.Shift() + } + if len(txs) != len(keys) { + t.Errorf("expected %d transactions, found %d", len(keys), len(txs)) + } + for i, txi := range txs { + fromi, _ := types.Sender(signer, txi) + if i+1 < len(txs) { + next := txs[i+1] + fromNext, _ := types.Sender(signer, next) + + if txi.GasPrice().Cmp(next.GasPrice()) < 0 { + t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice()) + } + // Make sure time order is ascending if the txs have the same gas price + if txi.GasPrice().Cmp(next.GasPrice()) == 0 && txi.Time().After(next.Time()) { + t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.Time(), i+1, fromNext[:4], next.Time()) + } + } + } +} + +func TestOrdersWithMinerFeeHeap(t *testing.T) { + statedb, chData, signers := genTestSetup(GasLimit) + + env := newEnvironment(chData, statedb, signers.addresses[0], 21000, big.NewInt(1)) + + txs := make(map[common.Address][]*txpool.LazyTransaction) + + tx1 := signers.signTx(1, 21000, big.NewInt(1), big.NewInt(5), signers.addresses[2], big.NewInt(0), []byte{}) + txs[signers.addresses[1]] = []*txpool.LazyTransaction{ + { + Hash: tx1.Hash(), + Tx: tx1, + Time: tx1.Time(), + GasFeeCap: uint256.MustFromBig(tx1.GasFeeCap()), + GasTipCap: uint256.MustFromBig(tx1.GasTipCap()), + }, + } + tx2 := signers.signTx(2, 21000, big.NewInt(4), big.NewInt(5), signers.addresses[2], big.NewInt(0), []byte{}) + txs[signers.addresses[2]] = []*txpool.LazyTransaction{ + { + Hash: tx2.Hash(), + Tx: tx2, + Time: tx2.Time(), + GasFeeCap: uint256.MustFromBig(tx2.GasFeeCap()), + GasTipCap: uint256.MustFromBig(tx2.GasTipCap()), + }, + } + + bundle1 := builderTypes.SimulatedBundle{MevGasPrice: uint256.NewInt(3), OriginalBundle: builderTypes.MevBundle{Hash: common.HexToHash("0xb1")}} + bundle2 := builderTypes.SimulatedBundle{MevGasPrice: uint256.NewInt(2), OriginalBundle: builderTypes.MevBundle{Hash: common.HexToHash("0xb2")}} + + orders := newOrdersByPriceAndNonce(env.signer, txs, []builderTypes.SimulatedBundle{bundle2, bundle1}, env.header.BaseFee) + + for { + order := orders.Peek() + if order == nil { + return + } + + if order.Tx() != nil { + orders.Shift() + } else if order.Bundle() != nil { + orders.Pop() + } + } +} diff --git a/miner/bundle_ordering.go b/miner/bundle_ordering.go new file 
mode 100644 index 0000000000..2855212a29 --- /dev/null +++ b/miner/bundle_ordering.go @@ -0,0 +1,266 @@ +package miner + +import ( + "container/heap" + "math/big" + + builderTypes "github.com/ethereum/go-ethereum/builder/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" +) + +type _Order interface { + AsTx() *txpool.LazyTransaction + AsBundle() *builderTypes.SimulatedBundle +} + +type _TxOrder struct { + tx *txpool.LazyTransaction +} + +func (o _TxOrder) AsTx() *txpool.LazyTransaction { return o.tx } +func (o _TxOrder) AsBundle() *builderTypes.SimulatedBundle { return nil } + +type _BundleOrder struct { + bundle *builderTypes.SimulatedBundle +} + +func (o _BundleOrder) AsTx() *txpool.LazyTransaction { return nil } +func (o _BundleOrder) AsBundle() *builderTypes.SimulatedBundle { return o.bundle } + +// orderWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap +type orderWithMinerFee struct { + order _Order + from common.Address + fees *uint256.Int +} + +func (t *orderWithMinerFee) Tx() *txpool.LazyTransaction { + return t.order.AsTx() +} + +func (t *orderWithMinerFee) Bundle() *builderTypes.SimulatedBundle { + return t.order.AsBundle() +} + +func (t *orderWithMinerFee) Price() *uint256.Int { + return new(uint256.Int).Set(t.fees) +} + +func (t *orderWithMinerFee) Profit(baseFee *uint256.Int, gasUsed uint64) *uint256.Int { + if tx := t.Tx(); tx != nil { + profit := new(uint256.Int).Set(t.fees) + if gasUsed != 0 { + profit.Mul(profit, new(uint256.Int).SetUint64(gasUsed)) + } else { + profit.Mul(profit, new(uint256.Int).SetUint64(tx.Gas)) + } + return profit + } else if bundle := t.Bundle(); bundle != nil { + return bundle.TotalEth + } else { + panic("profit called on unsupported order type") + } +} + +// SetPrice sets the miner fee of the wrapped transaction. +func (t *orderWithMinerFee) SetPrice(price *uint256.Int) { + t.fees.Set(price) +} + +// SetProfit sets the profit of the wrapped transaction. +func (t *orderWithMinerFee) SetProfit(profit *uint256.Int) { + if bundle := t.Bundle(); bundle != nil { + bundle.TotalEth.Set(profit) + } else { + panic("SetProfit called on unsupported order type") + } +} + +// NewBundleWithMinerFee creates a wrapped bundle. +func newBundleWithMinerFee(bundle *builderTypes.SimulatedBundle) (*orderWithMinerFee, error) { + minerFee := bundle.MevGasPrice + return &orderWithMinerFee{ + order: _BundleOrder{bundle}, + fees: minerFee, + }, nil +} + +// newTxWithMinerFee creates a wrapped transaction, calculating the effective +// miner gasTipCap if a base fee is provided. +// Returns error in case of a negative effective miner gasTipCap. +func newTxOrderWithMinerFee(tx *txpool.LazyTransaction, from common.Address, baseFee *uint256.Int) (*orderWithMinerFee, error) { + tip := new(uint256.Int).Set(tx.GasTipCap) + if baseFee != nil { + if tx.GasFeeCap.Cmp(baseFee) < 0 { + return nil, types.ErrGasFeeCapTooLow + } + tip = new(uint256.Int).Sub(tx.GasFeeCap, baseFee) + if tip.Gt(tx.GasTipCap) { + tip = tx.GasTipCap + } + } + return &orderWithMinerFee{ + order: _TxOrder{tx}, + from: from, + fees: tip, + }, nil +} + +// orderByPriceAndTime implements both the sort and the heap interface, making it useful +// for all at once sorting as well as individually adding and removing elements. 
+type orderByPriceAndTime []*orderWithMinerFee + +func (s orderByPriceAndTime) Len() int { return len(s) } +func (s orderByPriceAndTime) Less(i, j int) bool { + // If the prices are equal, use the time the transaction was first seen for + // deterministic sorting + cmp := s[i].fees.Cmp(s[j].fees) + if cmp == 0 { + if s[i].Tx() != nil && s[j].Tx() != nil { + return s[i].Tx().Time.Before(s[j].Tx().Time) + } else if s[i].Bundle() != nil && s[j].Bundle() != nil { + return s[i].Bundle().TotalGasUsed <= s[j].Bundle().TotalGasUsed + } else if s[i].Bundle() != nil { + return false + } + + return true + } + return cmp > 0 +} +func (s orderByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s *orderByPriceAndTime) Push(x interface{}) { + *s = append(*s, x.(*orderWithMinerFee)) +} + +func (s *orderByPriceAndTime) Pop() interface{} { + old := *s + n := len(old) + x := old[n-1] + old[n-1] = nil + *s = old[0 : n-1] + return x +} + +// ordersByPriceAndNonce represents a set of transactions that can return +// transactions in a profit-maximizing sorted order, while supporting removing +// entire batches of transactions for non-executable accounts. +type ordersByPriceAndNonce struct { + txs map[common.Address][]*txpool.LazyTransaction // Per account nonce-sorted list of transactions + heads orderByPriceAndTime // Next transaction for each unique account (price heap) + signer types.Signer // Signer for the set of transactions + baseFee *uint256.Int // Current base fee +} + +// newOrdersByPriceAndNonce creates a transaction set that can retrieve +// price sorted transactions in a nonce-honouring way. +// +// Note, the input map is reowned so the caller should not interact any more with +// if after providing it to the constructor. +func newOrdersByPriceAndNonce(signer types.Signer, txs map[common.Address][]*txpool.LazyTransaction, bundles []builderTypes.SimulatedBundle, baseFee *big.Int) *ordersByPriceAndNonce { + // Convert the basefee from header format to uint256 format + var baseFeeUint *uint256.Int + if baseFee != nil { + baseFeeUint = uint256.MustFromBig(baseFee) + } + // Initialize a price and received time based heap with the head transactions + heads := make(orderByPriceAndTime, 0, len(txs)) + + for i := range bundles { + wrapped, err := newBundleWithMinerFee(&bundles[i]) + if err != nil { + continue + } + heads = append(heads, wrapped) + } + + for from, accTxs := range txs { + wrapped, err := newTxOrderWithMinerFee(accTxs[0], from, baseFeeUint) + if err != nil { + delete(txs, from) + continue + } + heads = append(heads, wrapped) + txs[from] = accTxs[1:] + } + heap.Init(&heads) + + // Assemble and return the transaction set + return &ordersByPriceAndNonce{ + txs: txs, + heads: heads, + signer: signer, + baseFee: baseFeeUint, + } +} + +// Peek returns the next transaction by price. +func (t *ordersByPriceAndNonce) Peek() *orderWithMinerFee { + if len(t.heads) == 0 { + return nil + } + return t.heads[0] +} + +// Shift replaces the current best head with the next one from the same account. +func (t *ordersByPriceAndNonce) Shift() { + acc := t.heads[0].from + if txs, ok := t.txs[acc]; ok && len(txs) > 0 { + if wrapped, err := newTxOrderWithMinerFee(txs[0], acc, t.baseFee); err == nil { + t.heads[0], t.txs[acc] = wrapped, txs[1:] + heap.Fix(&t.heads, 0) + return + } + } + heap.Pop(&t.heads) +} + +// Pop removes the best transaction, *not* replacing it with the next one from +// the same account. 
This should be used when a transaction cannot be executed + and hence all subsequent ones should be discarded from the same account. +func (t *ordersByPriceAndNonce) Pop() { + heap.Pop(&t.heads) +} + +// ShiftAndPushByAccountForTx attempts to update the transaction list associated with a given account address +// based on the input transaction account. If the associated account exists and has additional transactions, +// the top of the transaction list is popped and pushed to the heap. +// Note that this operation should only be performed when the head transaction on the heap is different from the +// input transaction. This operation is useful in scenarios where the current best head transaction for an account +// was already popped from the heap and we want to process the next one from the same account. +func (t *ordersByPriceAndNonce) ShiftAndPushByAccountForTx(tx *types.Transaction) { + if tx == nil { + return + } + + acc, _ := types.Sender(t.signer, tx) + if txs, exists := t.txs[acc]; exists && len(txs) > 0 { + if wrapped, err := newTxOrderWithMinerFee(txs[0], acc, t.baseFee); err == nil { + t.txs[acc] = txs[1:] + heap.Push(&t.heads, wrapped) + } + } +} + +func (t *ordersByPriceAndNonce) Push(tx *orderWithMinerFee) { + if tx == nil { + return + } + + heap.Push(&t.heads, tx) +} + +// Empty returns if the price heap is empty. It can be used to check it simpler +// than calling peek and checking for nil return. +func (t *ordersByPriceAndNonce) Empty() bool { + return len(t.heads) == 0 +} + +// Clear removes the entire content of the heap. +func (t *ordersByPriceAndNonce) Clear() { + t.heads, t.txs = nil, nil +} diff --git a/miner/env_diff.go b/miner/env_diff.go new file mode 100644 index 0000000000..4b933794de --- /dev/null +++ b/miner/env_diff.go @@ -0,0 +1,259 @@ +package miner + +import ( + "errors" + "math/big" + "sync/atomic" + + builderTypes "github.com/ethereum/go-ethereum/builder/types" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" +) + +const ( + shiftTx = 1 + popTx = 2 +) + +var ( + errMevGasPriceNotSet = errors.New("mev gas price not set") + errInterrupt = errors.New("miner worker interrupted") + errNoPrivateKey = errors.New("no private key provided") +) + +// environmentDiff is a helper struct used to apply transactions to a block using a copy of the state at that block +type environmentDiff struct { + baseEnvironment *environment + header *types.Header + gasPool *core.GasPool // available gas used to pack transactions + state *state.StateDB // apply state changes here + profit *uint256.Int + newTxs []*types.Transaction + newReceipts []*types.Receipt + newSidecars []*types.BlobTxSidecar + newBlobs int +} + +func newEnvironmentDiff(env *environment) *environmentDiff { + gasPool := new(core.GasPool).AddGas(env.gasPool.Gas()) + return &environmentDiff{ + baseEnvironment: env, + header: types.CopyHeader(env.header), + gasPool: gasPool, + state: env.state.Copy(), + profit: new(uint256.Int), + } +} + +func (envDiff *environmentDiff) copy() *environmentDiff { + gasPool := new(core.GasPool).AddGas(envDiff.gasPool.Gas()) + + return &environmentDiff{ + baseEnvironment: envDiff.baseEnvironment, + header: types.CopyHeader(envDiff.header), + gasPool: gasPool, + state: envDiff.state.Copy(), + profit: new(uint256.Int).Set(envDiff.profit),
+		newTxs:          envDiff.newTxs[:],
+		newReceipts:     envDiff.newReceipts[:],
+		newSidecars:     envDiff.newSidecars[:],
+		newBlobs:        envDiff.newBlobs,
+	}
+}
+
+func (envDiff *environmentDiff) applyToBaseEnv() {
+	env := envDiff.baseEnvironment
+	env.gasPool = new(core.GasPool).AddGas(envDiff.gasPool.Gas())
+	env.header = envDiff.header
+	env.state = envDiff.state
+	env.tcount += len(envDiff.newTxs)
+	env.txs = append(env.txs, envDiff.newTxs...)
+	env.receipts = append(env.receipts, envDiff.newReceipts...)
+	env.sidecars = append(env.sidecars, envDiff.newSidecars...)
+	env.blobs += envDiff.newBlobs
+}
+
+func (envDiff *environmentDiff) commitBlobTx(tx *types.Transaction, chainConfig *params.ChainConfig, chain *core.BlockChain) (*types.Receipt, int, error) {
+	sc := tx.BlobTxSidecar()
+	if sc == nil {
+		return nil, popTx, errors.New("blob transaction without blobs in miner")
+	}
+	// Checking against blob gas limit: It's kind of ugly to perform this check here, but there
+	// isn't really a better place right now. The blob gas limit is checked at block validation time
+	// and not during execution. This means core.ApplyTransaction will not return an error if the
+	// tx has too many blobs. So we have to explicitly check it here.
+	if (envDiff.newBlobs+len(sc.Blobs))*params.BlobTxBlobGasPerBlob > params.MaxBlobGasPerBlock {
+		return nil, popTx, errors.New("max data blobs reached")
+	}
+	receipt, txType, err := envDiff.commitTxCommon(tx, chainConfig, chain)
+	if err != nil {
+		return nil, txType, err
+	}
+
+	envDiff.newTxs = append(envDiff.newTxs, tx.WithoutBlobTxSidecar())
+	envDiff.newSidecars = append(envDiff.newSidecars, sc)
+	envDiff.newBlobs += len(sc.Blobs)
+	*envDiff.header.BlobGasUsed += receipt.BlobGasUsed
+	return receipt, txType, nil
+}
+
+// commitTxCommon is the common logic for committing a transaction to the envDiff
+func (envDiff *environmentDiff) commitTxCommon(tx *types.Transaction, chainConfig *params.ChainConfig, chain *core.BlockChain) (*types.Receipt, int, error) {
+	header := envDiff.header
+	coinbase := &envDiff.baseEnvironment.coinbase
+	signer := envDiff.baseEnvironment.signer
+
+	gasPrice, err := tx.EffectiveGasTip(header.BaseFee)
+	if err != nil {
+		return nil, shiftTx, err
+	}
+
+	envDiff.state.SetTxContext(tx.Hash(), envDiff.baseEnvironment.tcount+len(envDiff.newTxs))
+
+	snap := envDiff.state.Snapshot()
+	receipt, err := core.ApplyTransaction(chainConfig, chain, coinbase, envDiff.gasPool, envDiff.state, header, tx, &header.GasUsed, *chain.GetVMConfig())
+
+	if err != nil {
+		envDiff.state.RevertToSnapshot(snap)
+		switch {
+		case errors.Is(err, core.ErrGasLimitReached):
+			// Pop the current out-of-gas transaction without shifting in the next from the account
+			from, _ := types.Sender(signer, tx)
+			log.Trace("Gas limit exceeded for current block", "sender", from)
+			return receipt, popTx, err
+
+		case errors.Is(err, core.ErrNonceTooLow):
+			// New head notification data race between the transaction pool and miner, shift
+			from, _ := types.Sender(signer, tx)
+			log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce())
+			return receipt, shiftTx, err
+
+		case errors.Is(err, core.ErrNonceTooHigh):
+			// Reorg notification data race between the transaction pool and miner, skip account
+			from, _ := types.Sender(signer, tx)
+			log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce())
+			return receipt, popTx, err
+
+		case errors.Is(err, core.ErrTxTypeNotSupported):
+			// Pop the unsupported transaction without shifting in the next from the account
+			
from, _ := types.Sender(signer, tx) + log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) + return receipt, popTx, err + + case errors.Is(err, core.ErrBlobFeeCapTooLow): + from, _ := types.Sender(signer, tx) + log.Trace("Skipping blob transaction with fee cap less than block blob gas fee", "sender", from, "err", err.Error()) + return receipt, popTx, err + + default: + // Strange error, discard the transaction and get the next in line (note, the + // nonce-too-high clause will prevent us from executing in vain). + log.Trace("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) + return receipt, shiftTx, err + } + } + + profit := gasPrice.Mul(gasPrice, big.NewInt(int64(receipt.GasUsed))) + envDiff.profit = envDiff.profit.Add(envDiff.profit, uint256.MustFromBig(profit)) + envDiff.newReceipts = append(envDiff.newReceipts, receipt) + + return receipt, shiftTx, nil +} + +// commit tx to envDiff +func (envDiff *environmentDiff) commitTx(tx *types.Transaction, chain *core.BlockChain) (*types.Receipt, int, error) { + if tx.Type() == types.BlobTxType { + return envDiff.commitBlobTx(tx, chain.Config(), chain) + } + receipt, skip, err := envDiff.commitTxCommon(tx, chain.Config(), chain) + if err != nil { + return nil, skip, err + } + envDiff.newTxs = append(envDiff.newTxs, tx) + return receipt, skip, nil +} + +// Commit Bundle to env diff +func (envDiff *environmentDiff) commitBundle(bundle *builderTypes.SimulatedBundle, chain *core.BlockChain, interrupt *atomic.Int32) error { + coinbase := envDiff.baseEnvironment.coinbase + tmpEnvDiff := envDiff.copy() + + coinbaseBalanceBefore := tmpEnvDiff.state.GetBalance(coinbase) + + profitBefore := new(uint256.Int).Set(tmpEnvDiff.profit) + var gasUsed uint64 + + for _, tx := range bundle.OriginalBundle.Txs { + txHash := tx.Hash() + if tmpEnvDiff.header.BaseFee != nil && tx.Type() == types.DynamicFeeTxType { + // Sanity check for extremely large numbers + if tx.GasFeeCap().BitLen() > 256 { + return core.ErrFeeCapVeryHigh + } + if tx.GasTipCap().BitLen() > 256 { + return core.ErrTipVeryHigh + } + // Ensure gasFeeCap is greater than or equal to gasTipCap. + if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + return core.ErrTipAboveFeeCap + } + } + + if tx.Value().Sign() == -1 { + return txpool.ErrNegativeValue + } + + _, err := tx.EffectiveGasTip(envDiff.header.BaseFee) + if err != nil { + return err + } + + _, err = types.Sender(envDiff.baseEnvironment.signer, tx) + if err != nil { + return err + } + + if interrupt != nil && interrupt.Load() != commitInterruptNone { + return errInterrupt + } + + receipt, _, err := tmpEnvDiff.commitTx(tx, chain) + if err != nil { + log.Trace("Bundle tx error", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) + return err + } + + if receipt != nil { + if receipt.Status == types.ReceiptStatusFailed && !bundle.OriginalBundle.RevertingHash(txHash) { + // if transaction reverted and isn't specified as reverting hash, return error + log.Trace("Bundle tx failed", "bundle", bundle.OriginalBundle.Hash, "tx", txHash, "err", err) + return errors.New("bundle tx revert") + } + } else { + // NOTE: The expectation is that a receipt is only nil if an error occurred. + // If there is no error but receipt is nil, there is likely a programming error. 
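+			// NOTE: returning an error anywhere in this loop leaves the base envDiff untouched;
+			// the tentative tmpEnvDiff is only promoted via *envDiff = *tmpEnvDiff once every
+			// transaction in the bundle has committed successfully.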
+ return errors.New("invalid receipt when no error occurred") + } + + gasUsed += receipt.GasUsed + } + coinbaseBalanceAfter := tmpEnvDiff.state.GetBalance(coinbase) + coinbaseBalanceDelta := new(uint256.Int).Sub(coinbaseBalanceAfter, coinbaseBalanceBefore) + tmpEnvDiff.profit.Add(profitBefore, coinbaseBalanceDelta) + + if bundle.MevGasPrice == nil { + return errMevGasPriceNotSet + } + + if gasUsed == 0 { + return errors.New("bundle gas used is 0") + } + + *envDiff = *tmpEnvDiff + return nil +} diff --git a/miner/env_diff_test.go b/miner/env_diff_test.go new file mode 100644 index 0000000000..e1f423ac12 --- /dev/null +++ b/miner/env_diff_test.go @@ -0,0 +1,396 @@ +package miner + +import ( + "crypto/ecdsa" + "errors" + "fmt" + "math/big" + "testing" + + builderTypes "github.com/ethereum/go-ethereum/builder/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/triedb" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" +) + +const GasLimit uint64 = 30000000 + +var ( + // Pay proxy is a contract that sends msg.value to address specified in calldata[0..32] + payProxyAddress = common.HexToAddress("0x1100000000000000000000000000000000000000") + payProxyCode = hexutil.MustDecode("0x6000600060006000346000356000f1") + // log contract logs value that it receives + logContractAddress = common.HexToAddress("0x2200000000000000000000000000000000000000") + logContractCode = hexutil.MustDecode("0x346000523460206000a1") +) + +type signerList struct { + config *params.ChainConfig + signers []*ecdsa.PrivateKey + addresses []common.Address + nonces []uint64 +} + +func (sig signerList) signTx(i int, gas uint64, gasTipCap, gasFeeCap *big.Int, to common.Address, value *big.Int, data []byte) *types.Transaction { + txData := &types.DynamicFeeTx{ + ChainID: sig.config.ChainID, + Nonce: sig.nonces[i], + GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Gas: gas, + To: &to, + Value: value, + Data: data, + } + sig.nonces[i] += 1 + + return types.MustSignNewTx(sig.signers[i], types.LatestSigner(sig.config), txData) +} + +func genSignerList(len int, config *params.ChainConfig) signerList { + res := signerList{ + config: config, + signers: make([]*ecdsa.PrivateKey, len), + addresses: make([]common.Address, len), + nonces: make([]uint64, len), + } + + for i := 0; i < len; i++ { + privKey, err := crypto.ToECDSA(crypto.Keccak256(big.NewInt(int64(i)).Bytes())) + if err != nil { + panic(fmt.Sprint("cant create priv key", err)) + } + res.signers[i] = privKey + res.addresses[i] = crypto.PubkeyToAddress(privKey.PublicKey) + } + return res +} + +func genGenesisAlloc(sign signerList, contractAddr []common.Address, contractCode [][]byte) types.GenesisAlloc { + genesisAlloc := make(types.GenesisAlloc) + for i := 0; i < len(sign.signers); i++ { + genesisAlloc[sign.addresses[i]] = types.Account{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: sign.nonces[i], + } + } + + for i, address := range contractAddr { + genesisAlloc[address] = types.Account{ + Balance: new(big.Int), + Code: contractCode[i], + } + } + + return genesisAlloc +} + +func genTestSetup(gasLimit 
uint64) (*state.StateDB, *core.BlockChain, signerList) { + config := params.AllEthashProtocolChanges + signerList := genSignerList(10, params.AllEthashProtocolChanges) + genesisAlloc := genGenesisAlloc(signerList, []common.Address{payProxyAddress, logContractAddress}, [][]byte{payProxyCode, logContractCode}) + + stateDB, chain := genTestSetupWithAlloc(config, genesisAlloc, gasLimit) + return stateDB, chain, signerList +} + +func genTestSetupWithAlloc(config *params.ChainConfig, alloc types.GenesisAlloc, gasLimit uint64) (*state.StateDB, *core.BlockChain) { + db := rawdb.NewMemoryDatabase() + + gspec := &core.Genesis{ + Config: config, + Alloc: alloc, + GasLimit: gasLimit, + } + _ = gspec.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults)) + + chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + + stateDB, _ := state.New(chain.CurrentHeader().Root, state.NewDatabase(db), nil) + + return stateDB, chain +} + +func newEnvironment(chain *core.BlockChain, state *state.StateDB, coinbase common.Address, gasLimit uint64, baseFee *big.Int) *environment { + currentBlock := chain.CurrentBlock() + // Note the passed coinbase may be different with header.Coinbase. + return &environment{ + signer: types.MakeSigner(chain.Config(), currentBlock.Number, currentBlock.Time), + state: state, + gasPool: new(core.GasPool).AddGas(gasLimit), + coinbase: coinbase, + header: &types.Header{ + Coinbase: coinbase, + ParentHash: currentBlock.Hash(), + Number: new(big.Int).Add(currentBlock.Number, big.NewInt(1)), + GasLimit: gasLimit, + GasUsed: 0, + BaseFee: baseFee, + Difficulty: big.NewInt(0), + }, + } +} + +func simulateBundle(env *environment, bundle builderTypes.MevBundle, chain *core.BlockChain) (builderTypes.SimulatedBundle, error) { + stateDB := env.state + gasPool := new(core.GasPool).AddGas(env.header.GasLimit) + + var totalGasUsed uint64 + totalEth := uint256.NewInt(0) + + for i, tx := range bundle.Txs { + if env.header.BaseFee != nil && tx.Type() == 2 { + // Sanity check for extremely large numbers + if tx.GasFeeCap().BitLen() > 256 { + return builderTypes.SimulatedBundle{}, core.ErrFeeCapVeryHigh + } + if tx.GasTipCap().BitLen() > 256 { + return builderTypes.SimulatedBundle{}, core.ErrTipVeryHigh + } + // Ensure gasFeeCap is greater than or equal to gasTipCap. 
+ if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + return builderTypes.SimulatedBundle{}, core.ErrTipAboveFeeCap + } + } + + stateDB.SetTxContext(tx.Hash(), i+env.tcount) + coinbaseBalanceBefore := stateDB.GetBalance(env.coinbase) + + var tempGasUsed uint64 + receipt, err := core.ApplyTransaction(chain.Config(), chain, &env.coinbase, gasPool, stateDB, env.header, tx, &tempGasUsed, *chain.GetVMConfig()) + if err != nil { + return builderTypes.SimulatedBundle{}, err + } + if receipt.Status == types.ReceiptStatusFailed && !containsHash(bundle.RevertingTxHashes, receipt.TxHash) { + return builderTypes.SimulatedBundle{}, errors.New("failed tx") + } + + totalGasUsed += receipt.GasUsed + + coinbaseBalanceAfter := stateDB.GetBalance(env.coinbase) + coinbaseDelta := uint256.NewInt(0).Sub(coinbaseBalanceAfter, coinbaseBalanceBefore) + totalEth.Add(totalEth, coinbaseDelta) + } + + return builderTypes.SimulatedBundle{ + MevGasPrice: new(uint256.Int).Div(totalEth, new(uint256.Int).SetUint64(totalGasUsed)), + TotalEth: totalEth, + TotalGasUsed: totalGasUsed, + OriginalBundle: bundle, + }, nil +} + +func TestTxCommit(t *testing.T) { + statedb, chain, signers := genTestSetup(GasLimit) + + env := newEnvironment(chain, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) + envDiff := newEnvironmentDiff(env) + + tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + receipt, i, err := envDiff.commitTx(tx, chain) + if err != nil { + t.Fatal("can't commit transaction:", err) + } + if receipt.Status != 1 { + t.Fatal("tx failed", receipt) + } + if i != shiftTx { + t.Fatal("incorrect shift value") + } + + if env.tcount != 0 { + t.Fatal("env tcount modified") + } + if len(env.receipts) != 0 { + t.Fatal("env receipts modified") + } + if len(env.txs) != 0 { + t.Fatal("env txs modified") + } + if env.gasPool.Gas() != GasLimit { + t.Fatal("env gas pool modified") + } + + if envDiff.gasPool.AddGas(receipt.GasUsed).Gas() != GasLimit { + t.Fatal("envDiff gas pool incorrect") + } + if envDiff.header.GasUsed != receipt.GasUsed { + t.Fatal("envDiff gas used is incorrect") + } + if len(envDiff.newReceipts) != 1 { + t.Fatal("envDiff receipts incorrect") + } + if len(envDiff.newTxs) != 1 { + t.Fatal("envDiff txs incorrect") + } +} + +func TestBundleCommit(t *testing.T) { + statedb, chain, signers := genTestSetup(GasLimit) + + env := newEnvironment(chain, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) + envDiff := newEnvironmentDiff(env) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + bundle := builderTypes.MevBundle{ + Txs: types.Transactions{tx1, tx2}, + BlockNumber: env.header.Number, + } + + simBundle, err := simulateBundle(env, bundle, chain) + if err != nil { + t.Fatal("Failed to simulate bundle", err) + } + + err = envDiff.commitBundle(&simBundle, chain, nil) + if err != nil { + t.Fatal("Failed to commit bundle", err) + } + + if len(envDiff.newTxs) != 2 { + t.Fatal("Incorrect new txs") + } + if len(envDiff.newReceipts) != 2 { + t.Fatal("Incorrect receipts txs") + } + if envDiff.gasPool.AddGas(21000*2).Gas() != GasLimit { + t.Fatal("Gas pool incorrect update") + } +} + +func TestErrorTxCommit(t *testing.T) { + statedb, chain, signers := genTestSetup(GasLimit) + + env := newEnvironment(chain, statedb, signers.addresses[0], GasLimit, big.NewInt(1)) + envDiff := 
newEnvironmentDiff(env) + + signers.nonces[1] = 10 + tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + _, i, err := envDiff.commitTx(tx, chain) + if err == nil { + t.Fatal("committed incorrect transaction:", err) + } + if i != popTx { + t.Fatal("incorrect shift value") + } + + if envDiff.gasPool.Gas() != GasLimit { + t.Fatal("envDiff gas pool incorrect") + } + if envDiff.header.GasUsed != 0 { + t.Fatal("envDiff gas used incorrect") + } + if envDiff.profit.Sign() != 0 { + t.Fatal("envDiff new profit incorrect") + } + if len(envDiff.newReceipts) != 0 { + t.Fatal("envDiff receipts incorrect") + } + if len(envDiff.newTxs) != 0 { + t.Fatal("envDiff txs incorrect") + } +} + +func TestCommitTxOverGasLimit(t *testing.T) { + statedb, chain, signers := genTestSetup(GasLimit) + + env := newEnvironment(chain, statedb, signers.addresses[0], 21000, big.NewInt(1)) + envDiff := newEnvironmentDiff(env) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + receipt, i, err := envDiff.commitTx(tx1, chain) + if err != nil { + t.Fatal("can't commit transaction:", err) + } + if receipt.Status != 1 { + t.Fatal("tx failed", receipt) + } + if i != shiftTx { + t.Fatal("incorrect shift value") + } + + if envDiff.gasPool.Gas() != 0 { + t.Fatal("Env diff gas pool is not drained") + } + + _, _, err = envDiff.commitTx(tx2, chain) + require.Error(t, err, "committed tx over gas limit") +} + +func TestErrorBundleCommit(t *testing.T) { + statedb, chain, signers := genTestSetup(GasLimit) + + env := newEnvironment(chain, statedb, signers.addresses[0], 21000*2, big.NewInt(1)) + envDiff := newEnvironmentDiff(env) + + // This tx will be included before bundle so bundle will fail because of gas limit + tx0 := signers.signTx(4, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + bundle := builderTypes.MevBundle{ + Txs: types.Transactions{tx1, tx2}, + BlockNumber: env.header.Number, + } + + simBundle, err := simulateBundle(env, bundle, chain) + if err != nil { + t.Fatal("Failed to simulate bundle", err) + } + + _, _, err = envDiff.commitTx(tx0, chain) + if err != nil { + t.Fatal("Failed to commit tx0", err) + } + + gasPoolBefore := *envDiff.gasPool + gasUsedBefore := envDiff.header.GasUsed + newProfitBefore := new(uint256.Int).Set(envDiff.profit) + balanceBefore := envDiff.state.GetBalance(signers.addresses[2]) + + err = envDiff.commitBundle(&simBundle, chain, nil) + if err == nil { + t.Fatal("Committed failed bundle", err) + } + + if *envDiff.gasPool != gasPoolBefore { + t.Fatal("gasPool changed") + } + + if envDiff.header.GasUsed != gasUsedBefore { + t.Fatal("gasUsed changed") + } + + balanceAfter := envDiff.state.GetBalance(signers.addresses[2]) + if balanceAfter.Cmp(balanceBefore) != 0 { + t.Fatal("balance changed") + } + + if envDiff.profit.Cmp(newProfitBefore) != 0 { + t.Fatal("newProfit changed") + } + + if len(envDiff.newTxs) != 1 { + t.Fatal("Incorrect new txs") + } + if len(envDiff.newReceipts) != 1 { + t.Fatal("Incorrect receipts txs") + } +} diff --git a/miner/metrics.go b/miner/metrics.go index f1b643a7bd..ab101c180d 100644 --- 
a/miner/metrics.go +++ b/miner/metrics.go @@ -9,8 +9,18 @@ var ( blockProfitGauge = metrics.NewRegisteredGauge("miner/block/profit/gauge", nil) culmulativeProfitGauge = metrics.NewRegisteredGauge("miner/block/profit/culmulative", nil) + mergeAlgoTimer = metrics.NewRegisteredTimer("miner/block/merge", nil) buildBlockTimer = metrics.NewRegisteredTimer("miner/block/build", nil) + blockBundleSimulationTimer = metrics.NewRegisteredTimer("miner/block/simulate", nil) + successfulBundleSimulationTimer = metrics.NewRegisteredTimer("miner/bundle/simulate/success", nil) + failedBundleSimulationTimer = metrics.NewRegisteredTimer("miner/bundle/simulate/failed", nil) + blockBundleNumHistogram = metrics.NewRegisteredHistogram("miner/block/bundle/num", nil, metrics.NewExpDecaySample(1028, 0.015)) + + simulationMeter = metrics.NewRegisteredMeter("miner/block/simulation", nil) + simulationCommittedMeter = metrics.NewRegisteredMeter("miner/block/simulation/committed", nil) + simulationRevertedMeter = metrics.NewRegisteredMeter("miner/block/simulation/reverted", nil) + gasUsedGauge = metrics.NewRegisteredGauge("miner/block/gasused", nil) transactionNumGauge = metrics.NewRegisteredGauge("miner/block/txnum", nil) ) diff --git a/miner/miner.go b/miner/miner.go index 8368b96e15..ab551881bb 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -57,6 +57,8 @@ type Config struct { RollupComputePendingBlock bool // Compute the pending block from tx-pool, instead of copying the latest-block EffectiveGasCeil uint64 // if non-zero, a gas ceiling to apply independent of the header's gaslimit value + + BuilderEnabled bool } // DefaultConfig contains default settings for miner. diff --git a/miner/payload_building.go b/miner/payload_building.go index 5ee6dd525e..b8bc0d5d4c 100644 --- a/miner/payload_building.go +++ b/miner/payload_building.go @@ -22,6 +22,7 @@ import ( "errors" "math/big" "sync" + "sync/atomic" "time" "github.com/ethereum/go-ethereum/beacon/engine" @@ -94,8 +95,9 @@ type Payload struct { lock sync.Mutex cond *sync.Cond - err error - stopOnce sync.Once + err error + stopOnce sync.Once + interrupt *atomic.Int32 // interrupt signal shared with worker } // newPayload initializes the payload object. @@ -104,6 +106,8 @@ func newPayload(empty *types.Block, id engine.PayloadID) *Payload { id: id, empty: empty, stop: make(chan struct{}), + + interrupt: new(atomic.Int32), } log.Info("Starting work on payload", "id", payload.id) payload.cond = sync.NewCond(&payload.lock) @@ -206,6 +210,10 @@ func (payload *Payload) resolve(onlyFull bool) *engine.ExecutionPayloadEnvelope payload.lock.Lock() defer payload.lock.Unlock() + // We interrupt any active building block to prevent it from adding more transactions, + // and if it is an update, don't attempt to seal the block. + payload.interruptBuilding() + if payload.full == nil && (onlyFull || payload.empty == nil) { select { case <-payload.stop: @@ -231,6 +239,24 @@ func (payload *Payload) resolve(onlyFull bool) *engine.ExecutionPayloadEnvelope return nil } +// interruptBuilding sets an interrupt for a potentially ongoing +// block building process. +// This will prevent it from adding new transactions to the block, and if it is +// building an update, the block will also not be sealed, as we would discard +// the update anyways. +// interruptBuilding is safe to be called concurrently. +func (payload *Payload) interruptBuilding() { + // Set the interrupt if not interrupted already. 
+ // It's ok if it has either already been interrupted by payload resolution earlier, + // or by the timeout timer set to commitInterruptTimeout. + if payload.interrupt.CompareAndSwap(commitInterruptNone, commitInterruptResolve) { + log.Debug("Interrupted payload building.", "id", payload.id) + } else { + log.Debug("Payload building already interrupted.", + "id", payload.id, "interrupt", payload.interrupt.Load()) + } +} + // stopBuilding signals to the block updating routine to stop. An ongoing payload // building job will still complete. It can be interrupted to stop filling new // transactions with interruptBuilding. @@ -295,6 +321,8 @@ func (miner *Miner) buildPayload(args *BuildPayloadArgs) (*Payload, error) { } payload := newPayload(nil, args.Id()) + // set shared interrupt + fullParams.interrupt = payload.interrupt // Spin up a routine for updating the payload in background. This strategy // can maximum the revenue for including transactions with highest fee. @@ -323,7 +351,12 @@ func (miner *Miner) buildPayload(args *BuildPayloadArgs) (*Payload, error) { updatePayload := func() time.Duration { start := time.Now() // getSealingBlock is interrupted by shared interrupt - r := miner.generateWork(fullParams) + var r *newPayloadResult + if miner.config.BuilderEnabled { + r = miner.generateBuilderWork(fullParams) + } else { + r = miner.generateWork(fullParams) + } dur := time.Since(start) // update handles error case payload.update(r, dur) diff --git a/miner/verify_bundles.go b/miner/verify_bundles.go new file mode 100644 index 0000000000..d679e583d0 --- /dev/null +++ b/miner/verify_bundles.go @@ -0,0 +1,404 @@ +package miner + +import ( + "fmt" + + builderTypes "github.com/ethereum/go-ethereum/builder/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// ErrBundleTxNotFound is returned when a tx is not found in the resulting block +type ErrBundleTxNotFound struct { + BundleHash common.Hash + TxHash common.Hash + // Index of the tx in the bundle + TxIndex int +} + +func NewErrBundleTxNotFound(bundleHash, txHash common.Hash, txIndex int) *ErrBundleTxNotFound { + return &ErrBundleTxNotFound{ + BundleHash: bundleHash, + TxHash: txHash, + TxIndex: txIndex, + } +} + +func (e *ErrBundleTxNotFound) Error() string { + return fmt.Sprintf("tx from included bundle not found tx_hash=%s, bundle_hash=%s, tx_bundle_index=%d", e.TxHash.Hex(), e.BundleHash.Hex(), e.TxIndex) +} + +// ErrBundleTxReverted is returned when a tx is reverted in the resulting block, but it was not allowed to be reverted +type ErrBundleTxReverted struct { + BundleHash common.Hash + TxHash common.Hash + // Index of the tx in the bundle + TxIndex int +} + +func NewErrBundleTxReverted(bundleHash, txHash common.Hash, txIndex int) *ErrBundleTxReverted { + return &ErrBundleTxReverted{ + BundleHash: bundleHash, + TxHash: txHash, + TxIndex: txIndex, + } +} + +func (e *ErrBundleTxReverted) Error() string { + return fmt.Sprintf("tx from included bundle reverted tx_hash=%s, bundle_hash=%s, tx_bundle_index=%d", e.TxHash.Hex(), e.BundleHash.Hex(), e.TxIndex) +} + +// ErrBundleTxWrongPlace is returned when a tx is found in the resulting block, but it is not in the right place +type ErrBundleTxWrongPlace struct { + BundleHash common.Hash + TxHash common.Hash + // Index of the tx in the bundle + TxIndex int + // Index of the tx in the block + BlockIndex int + ExpectedBlockIndex int +} + +func NewErrBundleTxWrongPlace(bundleHash, txHash common.Hash, txIndex, blockIndex, expectedBlockIndex int) 
*ErrBundleTxWrongPlace { + return &ErrBundleTxWrongPlace{ + BundleHash: bundleHash, + TxHash: txHash, + TxIndex: txIndex, + BlockIndex: blockIndex, + ExpectedBlockIndex: expectedBlockIndex, + } +} + +func (e *ErrBundleTxWrongPlace) Error() string { + return fmt.Sprintf("tx from included bundle is in wrong place tx_hash=%s, bundle_hash=%s, tx_bundle_index=%d, tx_block_index=%d, expected_block_index=%d", e.TxHash.Hex(), e.BundleHash.Hex(), e.TxIndex, e.BlockIndex, e.ExpectedBlockIndex) +} + +// ErrPrivateTxFromFailedBundle is returned when a private tx is included in the block, but the bundle it belongs to was not included +type ErrPrivateTxFromFailedBundle struct { + BundleHash common.Hash + TxHash common.Hash + // Index of the tx in the bundle + TxIndex int +} + +func NewErrPrivateTxFromFailedBundle(bundleHash, txHash common.Hash, txIndex int) *ErrPrivateTxFromFailedBundle { + return &ErrPrivateTxFromFailedBundle{ + BundleHash: bundleHash, + TxHash: txHash, + TxIndex: txIndex, + } +} + +func (e *ErrPrivateTxFromFailedBundle) Error() string { + return fmt.Sprintf("private tx from failed bundle included in the block tx_hash=%s, bundle_hash=%s, tx_bundle_index=%d", e.TxHash.Hex(), e.BundleHash.Hex(), e.TxIndex) +} + +// ErrForcedTxNotIncluded is returned when a forced tx is not included in the block +type ErrForcedTxNotIncluded struct { + TxHash common.Hash +} + +func NewErrForcedTxNotIncluded(txHash common.Hash) *ErrForcedTxNotIncluded { + return &ErrForcedTxNotIncluded{ + TxHash: txHash, + } +} + +func (e *ErrForcedTxNotIncluded) Error() string { + return fmt.Sprintf("forced tx not included in the block tx_hash=%s", e.TxHash.Hex()) +} + +// ErrForcedTxReverted is returned when a forced tx is reverted in the block, but it was not allowed to be reverted +type ErrForcedTxReverted struct { + TxHash common.Hash + TxIndex int +} + +func NewErrForcedTxReverted(txHash common.Hash, txIndex int) *ErrForcedTxReverted { + return &ErrForcedTxReverted{ + TxHash: txHash, + TxIndex: txIndex, + } +} + +func (e *ErrForcedTxReverted) Error() string { + return fmt.Sprintf("forced tx reverted tx_hash=%s, tx_index=%d", e.TxHash.Hex(), e.TxIndex) +} + +// ErrForcedTxWrongPlace is returned when a forced tx is found in the block, but it is not in the right place +type ErrForcedTxWrongPlace struct { + TxHash common.Hash + TxIndex int + ExpectedBlockIndex int +} + +func NewErrForcedTxWrongPlace(txHash common.Hash, txIndex, expectedBlockIndex int) *ErrForcedTxWrongPlace { + return &ErrForcedTxWrongPlace{ + TxHash: txHash, + TxIndex: txIndex, + ExpectedBlockIndex: expectedBlockIndex, + } +} + +func (e *ErrForcedTxWrongPlace) Error() string { + return fmt.Sprintf("forced tx is in wrong place tx_hash=%s, tx_index=%d, expected_block_index=%d", e.TxHash.Hex(), e.TxIndex, e.ExpectedBlockIndex) +} + +// ErrUnexpectedTx is returned when a tx is included in the block, but it is not from the mempool or from the included bundles +// ErrUnexpectedTx is returned when a tx is included in the block, but it is not from the mempool or from the included bundles +type ErrUnexpectedTx struct { + TxHash common.Hash +} + +func NewErrUnexpectedTx(txHash common.Hash) *ErrUnexpectedTx { + return &ErrUnexpectedTx{ + TxHash: txHash, + } +} + +func (e *ErrUnexpectedTx) Error() string { + return fmt.Sprintf("unexpected tx included in the block tx_hash=%s", e.TxHash.Hex()) +} + +// VerifyBundlesAtomicity checks that all txs from the included bundles are included in the block correctly +// 1. 
We check that all non-reverted txs from the bundle are included in the block and are not reverted +// 2. Reverted txs are allowed to be not included in the block +// 3. All txs from the bundle must be in the right order, gaps between txs are allowed +// 4. All txs in the block are either from mempool or from the included bundles +func VerifyBundlesAtomicity(env *environment, committedBundles, allBundles []builderTypes.SimulatedBundle, mempoolTxHashes map[common.Hash]struct{}, forcedTxs types.Transactions) error { + // bundleHash -> tx + includedBundles := make(bundleHashToTransactionDataMap).ExtractFromBundles(committedBundles) + + includedTxDataByHash := extractIncludedTxDataFromEnv(env) + + allUsedBundles := make(bundleHashToTransactionDataMap).ExtractFromBundles(allBundles) + + forcedTxHashes := make(forcedTxHashesMap).ExtractFromTxs(forcedTxs) + + privateTxDataFromFailedBundles := extractPrivateTxsFromFailedBundles(includedBundles, allUsedBundles, mempoolTxHashes) + + return checkBundlesAtomicity(includedBundles, includedTxDataByHash, privateTxDataFromFailedBundles, mempoolTxHashes, forcedTxHashes) +} + +type bundleTxData struct { + hash common.Hash + canRevert bool +} + +type includedTxData struct { + hash common.Hash + index int + reverted bool +} + +type privateTxData struct { + bundleHash common.Hash + index int +} + +type forcedTxData struct { + hash common.Hash + index int +} + +type bundleHashToTransactionDataMap map[common.Hash][]bundleTxData + +type forcedTxHashesMap map[common.Hash]forcedTxData + +func (btm bundleHashToTransactionDataMap) ExtractFromBundles(bundles []builderTypes.SimulatedBundle) bundleHashToTransactionDataMap { + for _, b := range bundles { + bundleData := make([]bundleTxData, len(b.OriginalBundle.Txs)) + for i, tx := range b.OriginalBundle.Txs { + bundleData[i] = bundleTxData{ + hash: tx.Hash(), + canRevert: b.OriginalBundle.RevertingHash(tx.Hash()), + } + } + + btm[b.OriginalBundle.Hash] = bundleData + } + return btm +} + +func (mth forcedTxHashesMap) ExtractFromTxs(txs types.Transactions) forcedTxHashesMap { + for i, tx := range txs { + mth[tx.Hash()] = forcedTxData{ + hash: tx.Hash(), + index: i, + } + } + return mth +} + +// checkBundlesAtomicity checks that all txs from the included bundles are included in the block correctly +func checkBundlesAtomicity( + includedBundles map[common.Hash][]bundleTxData, + includedTxDataByHash map[common.Hash]includedTxData, + privateTxsFromFailedBundles map[common.Hash]privateTxData, + mempoolTxHashes map[common.Hash]struct{}, + forcedTxHashes map[common.Hash]forcedTxData, +) error { + txsFromSuccessfulBundles := make(map[common.Hash]struct{}) + + for txHash, tx := range forcedTxHashes { + // must be at top of the block and not reverted + if txInclusion, ok := includedTxDataByHash[txHash]; !ok { + return NewErrForcedTxNotIncluded(txHash) + } else if txInclusion.reverted { + return NewErrForcedTxReverted(txHash, txInclusion.index) + } else if txInclusion.index != tx.index { + return NewErrForcedTxWrongPlace(txHash, txInclusion.index, tx.index) + } + } + + for bundleHash, b := range includedBundles { + var ( + firstTxBlockIdx int + firstTxBundleIdx int + firstTxFound = false + ) + // 1. 
locate the first included tx of the bundle + for bundleIdx, tx := range b { + txsFromSuccessfulBundles[tx.hash] = struct{}{} + + txInclusion, ok := includedTxDataByHash[tx.hash] + if !ok { + // tx not found, maybe it was reverting + if tx.canRevert { + continue + } else { + return NewErrBundleTxNotFound(bundleHash, tx.hash, bundleIdx) + } + } + + if txInclusion.reverted && !tx.canRevert { + return NewErrBundleTxReverted(bundleHash, tx.hash, bundleIdx) + } + + // optional txs can be outside the bundle, so we don't use them to determine ordering of the bundle + if tx.canRevert { + continue + } + + firstTxBlockIdx = txInclusion.index + firstTxBundleIdx = bundleIdx + firstTxFound = true + break + } + + // none of the txs from the bundle are included + if !firstTxFound { + continue + } + + currentBlockTx := firstTxBlockIdx + 1 + // locate other txs in the bundle + for idx, tx := range b[firstTxBundleIdx+1:] { + txsFromSuccessfulBundles[tx.hash] = struct{}{} + + bundleIdx := firstTxBundleIdx + 1 + idx + // see if tx is on its place + txInclusion, ok := includedTxDataByHash[tx.hash] + if !ok { + // tx was not found, maybe its reverting + if tx.canRevert { + continue + } else { + return NewErrBundleTxNotFound(bundleHash, tx.hash, bundleIdx) + } + } + + if txInclusion.reverted && !tx.canRevert { + return NewErrBundleTxReverted(bundleHash, tx.hash, bundleIdx) + } + + // we don't do position check for optional txs + if tx.canRevert { + continue + } + + // we allow gaps between txs in the bundle, + // but txs must be in the right order + if txInclusion.index < currentBlockTx { + return NewErrBundleTxWrongPlace(bundleHash, tx.hash, bundleIdx, txInclusion.index, currentBlockTx) + } + + currentBlockTx = txInclusion.index + 1 + } + } + + for hash, priv := range privateTxsFromFailedBundles { + if _, ok := txsFromSuccessfulBundles[hash]; ok { + continue + } + if _, ok := includedTxDataByHash[hash]; ok { + return NewErrPrivateTxFromFailedBundle(priv.bundleHash, hash, priv.index) + } + } + + for hash := range includedTxDataByHash { + if _, ok := txsFromSuccessfulBundles[hash]; ok { + continue + } + if _, ok := mempoolTxHashes[hash]; ok { + continue + } + if _, ok := forcedTxHashes[hash]; ok { + continue + } + return NewErrUnexpectedTx(hash) + } + + return nil +} + +func extractBundleTxDataFromBundles(bundles []builderTypes.SimulatedBundle, result map[common.Hash][]bundleTxData) { + for _, b := range bundles { + bundleData := make([]bundleTxData, len(b.OriginalBundle.Txs)) + for i, tx := range b.OriginalBundle.Txs { + bundleData[i] = bundleTxData{ + hash: tx.Hash(), + canRevert: b.OriginalBundle.RevertingHash(tx.Hash()), + } + } + result[b.OriginalBundle.Hash] = bundleData + } +} + +func extractIncludedTxDataFromEnv(env *environment) map[common.Hash]includedTxData { + res := make(map[common.Hash]includedTxData) + for i, tx := range env.txs { + if tx != nil { + res[tx.Hash()] = includedTxData{ + hash: tx.Hash(), + index: i, + reverted: env.receipts[i].Status == types.ReceiptStatusFailed, + } + } + } + return res +} + +func extractPrivateTxsFromFailedBundles( + includedBundles, allBundles map[common.Hash][]bundleTxData, mempoolTxHashes map[common.Hash]struct{}, +) map[common.Hash]privateTxData { + // we don't handle overlapping bundles here, they are handled in checkBundlesAtomicity + res := make(map[common.Hash]privateTxData) + + for bundleHash, b := range allBundles { + if _, bundleIncluded := includedBundles[bundleHash]; bundleIncluded { + continue + } + + for i, tx := range b { + if _, mempool := 
mempoolTxHashes[tx.hash]; mempool { + continue + } + res[tx.hash] = privateTxData{ + bundleHash: bundleHash, + index: i, + } + } + } + return res +} diff --git a/miner/verify_bundles_test.go b/miner/verify_bundles_test.go new file mode 100644 index 0000000000..abc6c7c15a --- /dev/null +++ b/miner/verify_bundles_test.go @@ -0,0 +1,727 @@ +package miner + +import ( + "fmt" + "math/big" + "testing" + + builderTypes "github.com/ethereum/go-ethereum/builder/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +func TestVerifyBundlesAtomicity(t *testing.T) { + tests := []struct { + name string + // includedBundles is a map of bundle hash to a slice of tx data that were included in the block + includedBundles map[common.Hash][]bundleTxData + // includedTxDataByHash is a map of tx hash to tx data that were included in the block + includedTxDataByHash map[common.Hash]includedTxData + // privateTxData is a map of tx hash to private tx data of private txs from failed bundles + privateTxData map[common.Hash]privateTxData + // mempoolTxHashes is a map of tx hashes from mempool + mempoolTxHashes map[common.Hash]struct{} + // forcedTxs is a map of tx hashes from mempool + forcedTxsHashes map[common.Hash]forcedTxData + // expectedErr is the expected error returned by verifyBundles + expectedErr error + }{ + // Success cases + { + name: "Simple bundle with 1 tx included", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 0, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: nil, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Simple bundle included", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 2, reverted: false}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 3, reverted: false}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 4, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + common.HexToHash("0xc3"): {}, + // This tx is not included in the block, but it is in the mempool + common.HexToHash("0xc4"): {}, + }, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Simple bundle included with gaps between txs", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + {hash: common.HexToHash("0xb13"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb11"): {hash: 
common.HexToHash("0xb11"), index: 2, reverted: false}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 3, reverted: false}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 4, reverted: false}, + common.HexToHash("0xc4"): {hash: common.HexToHash("0xc4"), index: 5, reverted: false}, + common.HexToHash("0xc5"): {hash: common.HexToHash("0xc5"), index: 6, reverted: false}, + common.HexToHash("0xb13"): {hash: common.HexToHash("0xb13"), index: 7, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + common.HexToHash("0xc3"): {}, + common.HexToHash("0xc4"): {}, + common.HexToHash("0xc5"): {}, + }, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Simple bundle included with revertible tx, tx included and reverted", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb12"), canRevert: true}, + {hash: common.HexToHash("0xb13"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 2, reverted: false}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 3, reverted: true}, + common.HexToHash("0xb13"): {hash: common.HexToHash("0xb13"), index: 4, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + }, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Simple bundle included with revertible tx, tx not included", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb12"), canRevert: true}, + {hash: common.HexToHash("0xb13"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 0, reverted: false}, + common.HexToHash("0xb13"): {hash: common.HexToHash("0xb13"), index: 1, reverted: false}, + }, + mempoolTxHashes: nil, + privateTxData: nil, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Bundle marked included but none of the txs are included (all optional)", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: true}, + {hash: common.HexToHash("0xb12"), canRevert: true}, + {hash: common.HexToHash("0xb13"), canRevert: true}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + }, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + }, + privateTxData: nil, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Simple bundle included with all revertible tx, last of them is included as success", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: true}, + {hash: common.HexToHash("0xb12"), canRevert: true}, + {hash: common.HexToHash("0xb13"), canRevert: true}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + 
common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: true}, + common.HexToHash("0xb13"): {hash: common.HexToHash("0xb13"), index: 1, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 2, reverted: true}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + }, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Simple bundle included with all revertible tx, none of the txs are included", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: true}, + {hash: common.HexToHash("0xb12"), canRevert: true}, + {hash: common.HexToHash("0xb13"), canRevert: true}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: true}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + }, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Two bundles included, both backrun one tx that is allowed to revert", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb00"), canRevert: true}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + common.HexToHash("0xb2"): { + {hash: common.HexToHash("0xb00"), canRevert: true}, + {hash: common.HexToHash("0xb22"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xb00"): {hash: common.HexToHash("0xb00"), index: 0, reverted: false}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 1, reverted: false}, + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 2, reverted: true}, + common.HexToHash("0xb22"): {hash: common.HexToHash("0xb22"), index: 3, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + }, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Two bundles included, one have optional tx in the middle that gets included as part of other bundle", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb00"), canRevert: true}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + common.HexToHash("0xb2"): { + {hash: common.HexToHash("0xb21"), canRevert: false}, + {hash: common.HexToHash("0xb00"), canRevert: true}, + {hash: common.HexToHash("0xb22"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xb00"): {hash: common.HexToHash("0xb00"), index: 0, reverted: false}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 1, reverted: false}, + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 2, reverted: true}, + common.HexToHash("0xb21"): {hash: common.HexToHash("0xb21"), index: 3, reverted: false}, + common.HexToHash("0xb22"): {hash: common.HexToHash("0xb22"), index: 4, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + }, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Optional tx in the middle of the bundle was included after the bundle as part of mempool", + includedBundles: 
map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb00"), canRevert: true}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 0, reverted: false}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 1, reverted: false}, + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 2, reverted: true}, + common.HexToHash("0xb00"): {hash: common.HexToHash("0xb00"), index: 3, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xb00"): {}, + }, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Optional tx in the middle of the bundle was included before the bundle as part of mempool", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb00"), canRevert: true}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xb00"): {hash: common.HexToHash("0xb00"), index: 0, reverted: false}, + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 1, reverted: false}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 2, reverted: false}, + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 3, reverted: true}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xb00"): {}, + }, + forcedTxsHashes: nil, + expectedErr: nil, + }, + { + name: "Private tx from overlapping included bundle included", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: true}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 2, reverted: false}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 3, reverted: false}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 4, reverted: false}, + }, + privateTxData: map[common.Hash]privateTxData{ + common.HexToHash("0xb11"): {bundleHash: common.HexToHash("0xb2"), index: 2}, + }, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + common.HexToHash("0xc3"): {}, + }, + forcedTxsHashes: nil, + expectedErr: nil, + }, + // Error cases + { + name: "Simple bundle included but with reverted txs (first tx reverted)", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 2, reverted: true}, + 
common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 3, reverted: false}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 4, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + common.HexToHash("0xc3"): {}, + }, + forcedTxsHashes: nil, + expectedErr: NewErrBundleTxReverted(common.HexToHash("0xb1"), common.HexToHash("0xb11"), 0), + }, + { + name: "Simple bundle included but with reverted txs (second tx reverted)", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 2, reverted: false}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 3, reverted: true}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 4, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + common.HexToHash("0xc3"): {}, + }, + forcedTxsHashes: nil, + expectedErr: NewErrBundleTxReverted(common.HexToHash("0xb1"), common.HexToHash("0xb12"), 1), + }, + { + name: "Simple bundle included with gaps between txs (second tx reverted)", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 2, reverted: false}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 3, reverted: false}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 4, reverted: true}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + common.HexToHash("0xc3"): {}, + }, + forcedTxsHashes: nil, + expectedErr: NewErrBundleTxReverted(common.HexToHash("0xb1"), common.HexToHash("0xb12"), 1), + }, + { + name: "Simple bundle included but with incorrect order", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 2, reverted: false}, + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 3, reverted: false}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 4, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + 
common.HexToHash("0xc3"): {}, + }, + forcedTxsHashes: nil, + expectedErr: NewErrBundleTxWrongPlace(common.HexToHash("0xb1"), common.HexToHash("0xb12"), 1, 2, 4), + }, + { + name: "Simple bundle included but first tx missing", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 2, reverted: false}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 3, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + common.HexToHash("0xc3"): {}, + }, + forcedTxsHashes: nil, + expectedErr: NewErrBundleTxNotFound(common.HexToHash("0xb1"), common.HexToHash("0xb11"), 0), + }, + { + name: "Simple bundle included but second tx missing", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 2, reverted: false}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 3, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + common.HexToHash("0xc3"): {}, + }, + forcedTxsHashes: nil, + expectedErr: NewErrBundleTxNotFound(common.HexToHash("0xb1"), common.HexToHash("0xb12"), 1), + }, + { + name: "Bundle with multiple reverting txs in the front has failing tx", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: true}, + {hash: common.HexToHash("0xb12"), canRevert: true}, + {hash: common.HexToHash("0xb13"), canRevert: true}, + {hash: common.HexToHash("0xb14"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb14"): {hash: common.HexToHash("0xb11"), index: 2, reverted: true}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 3, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + common.HexToHash("0xc3"): {}, + }, + forcedTxsHashes: nil, + expectedErr: NewErrBundleTxReverted(common.HexToHash("0xb1"), common.HexToHash("0xb14"), 3), + }, + { + name: "Private tx from failed bundles was included in a block", + includedBundles: nil, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + 
common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 2, reverted: false}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 2, reverted: false}, + }, + privateTxData: map[common.Hash]privateTxData{ + common.HexToHash("0xb11"): {bundleHash: common.HexToHash("0xb1"), index: 2}, + }, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + common.HexToHash("0xc3"): {}, + }, + forcedTxsHashes: nil, + expectedErr: NewErrPrivateTxFromFailedBundle(common.HexToHash("0xb1"), common.HexToHash("0xb11"), 2), + }, + { + name: "Unexpected tx was included in a block", + includedBundles: map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: false}, + {hash: common.HexToHash("0xb12"), canRevert: false}, + }, + }, + includedTxDataByHash: map[common.Hash]includedTxData{ + common.HexToHash("0xc1"): {hash: common.HexToHash("0xc1"), index: 0, reverted: false}, + common.HexToHash("0xc2"): {hash: common.HexToHash("0xc2"), index: 1, reverted: true}, + common.HexToHash("0xb11"): {hash: common.HexToHash("0xb11"), index: 2, reverted: false}, + common.HexToHash("0xb12"): {hash: common.HexToHash("0xb12"), index: 3, reverted: false}, + common.HexToHash("0xc3"): {hash: common.HexToHash("0xc3"), index: 4, reverted: false}, + common.HexToHash("0xd1"): {hash: common.HexToHash("0xd1"), index: 5, reverted: false}, + }, + privateTxData: nil, + mempoolTxHashes: map[common.Hash]struct{}{ + common.HexToHash("0xc1"): {}, + common.HexToHash("0xc2"): {}, + common.HexToHash("0xc3"): {}, + }, + forcedTxsHashes: nil, + expectedErr: NewErrUnexpectedTx(common.HexToHash("0xd1")), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := checkBundlesAtomicity(test.includedBundles, test.includedTxDataByHash, test.privateTxData, test.mempoolTxHashes, test.forcedTxsHashes) + if test.expectedErr == nil { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Equal(t, test.expectedErr, err) + } + }) + } +} + +func TestExtractBundleDataFromUsedBundles(t *testing.T) { + _, _, signers := genTestSetup(GasLimit) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + bundle := builderTypes.SimulatedBundle{ + OriginalBundle: builderTypes.MevBundle{ + Txs: types.Transactions{tx1, tx2}, + RevertingTxHashes: []common.Hash{tx1.Hash()}, + Hash: common.HexToHash("0xb1"), + }, + } + + expectedResult := map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: tx1.Hash(), canRevert: true}, + {hash: tx2.Hash(), canRevert: false}, + }, + } + + result := make(map[common.Hash][]bundleTxData) + extractBundleTxDataFromBundles([]builderTypes.SimulatedBundle{bundle}, result) + + require.Equal(t, expectedResult, result) +} + +func TestExtractIncludedTxDataFromEnv(t *testing.T) { + _, _, signers := genTestSetup(GasLimit) + + tx1 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + tx2 := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), []byte{}) + + env := &environment{ + txs: []*types.Transaction{tx1, tx2}, + receipts: []*types.Receipt{ + {TxHash: tx1.Hash(), Status: types.ReceiptStatusSuccessful}, + {TxHash: tx2.Hash(), Status: types.ReceiptStatusFailed}, + }, + } + + expectedResult := 
map[common.Hash]includedTxData{ + tx1.Hash(): {hash: tx1.Hash(), index: 0, reverted: false}, + tx2.Hash(): {hash: tx2.Hash(), index: 1, reverted: true}, + } + + result := extractIncludedTxDataFromEnv(env) + require.Equal(t, expectedResult, result) +} + +func TestExtractPrivateTxData(t *testing.T) { + includedBundles := map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: true}, + }, + common.HexToHash("0xb2"): { + {hash: common.HexToHash("0xb21"), canRevert: true}, + }, + } + allUsedBundles := map[common.Hash][]bundleTxData{ + common.HexToHash("0xb1"): { + {hash: common.HexToHash("0xb11"), canRevert: true}, + }, + common.HexToHash("0xb2"): { + {hash: common.HexToHash("0xb21"), canRevert: true}, + }, + common.HexToHash("0xb3"): { + {hash: common.HexToHash("0xb31"), canRevert: true}, + {hash: common.HexToHash("0xb32"), canRevert: true}, + }, + } + mempoolTxHashes := map[common.Hash]struct{}{ + common.HexToHash("0xb11"): {}, + common.HexToHash("0xb31"): {}, + } + + expectedResult := map[common.Hash]privateTxData{ + common.HexToHash("0xb32"): {bundleHash: common.HexToHash("0xb3"), index: 1}, + } + + result := extractPrivateTxsFromFailedBundles(includedBundles, allUsedBundles, mempoolTxHashes) + + require.Equal(t, expectedResult, result) +} + +func BenchmarkVerifyBundlesAtomicity(b *testing.B) { + _, _, signers := genTestSetup(GasLimit) + + var ( + env = &environment{} + committedBundles []builderTypes.SimulatedBundle + allBundles []builderTypes.SimulatedBundle + mempoolTxHashes = make(map[common.Hash]struct{}) + forcedTxs types.Transactions + ) + + // generate committed bundles + for i := 0; i < 1000; i++ { + data := crypto.Keccak256([]byte(fmt.Sprintf("ok-bundles-%x", i))) + tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), data) + _ = tx.Hash() + env.txs = append(env.txs, tx) + env.receipts = append(env.receipts, &types.Receipt{TxHash: tx.Hash(), Status: types.ReceiptStatusSuccessful}) + bundleHash := common.BytesToHash(data) + committedBundles = append(committedBundles, builderTypes.SimulatedBundle{ + OriginalBundle: builderTypes.MevBundle{ + Txs: types.Transactions{tx}, + RevertingTxHashes: []common.Hash{}, + Hash: bundleHash, + }, + }) + allBundles = append(allBundles, builderTypes.SimulatedBundle{ + OriginalBundle: builderTypes.MevBundle{ + Txs: types.Transactions{tx}, + RevertingTxHashes: []common.Hash{}, + Hash: bundleHash, + }, + }) + } + + // generate failed bundles + for i := 0; i < 1000; i++ { + data := crypto.Keccak256([]byte(fmt.Sprintf("failed-bundles-%x", i))) + tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), data) + _ = tx.Hash() + bundleHash := common.BytesToHash(data) + allBundles = append(allBundles, builderTypes.SimulatedBundle{ + OriginalBundle: builderTypes.MevBundle{ + Txs: types.Transactions{tx}, + RevertingTxHashes: []common.Hash{}, + Hash: bundleHash, + }, + }) + } + + // generate committed mempool txs + for i := 0; i < 1000; i++ { + data := crypto.Keccak256([]byte(fmt.Sprintf("ok-mempool-tx-%x", i))) + tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), data) + hash := tx.Hash() + env.txs = append(env.txs, tx) + env.receipts = append(env.receipts, &types.Receipt{TxHash: hash, Status: types.ReceiptStatusSuccessful}) + mempoolTxHashes[hash] = struct{}{} + } + + // generate failed mempool tx hashes + for i := 0; i < 1000; i++ { + data := 
crypto.Keccak256([]byte(fmt.Sprintf("failed-mempool-tx-%x", i))) + mempoolTxHashes[common.BytesToHash(data)] = struct{}{} + } + + // generate forced txs + for i := 0; i < 1000; i++ { + data := crypto.Keccak256([]byte(fmt.Sprintf("forced-tx-%x", i))) + tx := signers.signTx(1, 21000, big.NewInt(0), big.NewInt(1), signers.addresses[2], big.NewInt(0), data) + hash := tx.Hash() + env.txs = append(env.txs, tx) + env.receipts = append(env.receipts, &types.Receipt{TxHash: hash, Status: types.ReceiptStatusSuccessful}) + forcedTxs = append(forcedTxs, tx) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := VerifyBundlesAtomicity(env, committedBundles, allBundles, mempoolTxHashes, forcedTxs) + if err != nil { + b.Fatal(err) + } + } +}