diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 000000000000..3fc1f2ff8c68 --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,64 @@ +name: Go + +on: + push: + pull_request: + branches: [ master ] + +jobs: + + build: + name: Build + runs-on: ubuntu-latest + steps: + + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ^1.13 + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Test + run: go test ./core ./miner/... ./internal/ethapi/... ./les/... + + - name: Build + run: make geth + + e2e: + name: End to End + runs-on: ubuntu-latest + steps: + + - name: Set up Go 1.x + uses: actions/setup-go@v2 + with: + go-version: ^1.13 + id: go + + - name: Use Node.js 12.x + uses: actions/setup-node@v1 + with: + node-version: 12.x + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Build + run: make geth + + - name: Check out the e2e code repo + uses: actions/checkout@v2 + with: + repository: flashbots/mev-geth-demo + path: e2e + + - run: cd e2e && yarn install + - run: | + cd e2e + GETH=`pwd`/../build/bin/geth ./run.sh & + sleep 15 + yarn run demo-simple + yarn run demo-contract diff --git a/README.md b/README.md index 4a083d117a4a..0aa27b7e3959 100644 --- a/README.md +++ b/README.md @@ -1,359 +1,151 @@ -## Go Ethereum +# MEV-geth -Official Golang implementation of the Ethereum protocol. +This is a fork of go-ethereum, [the original README is here](README.original.md). 
-[![API Reference]( -https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 -)](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc) -[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum) -[![Travis](https://travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.com/ethereum/go-ethereum) -[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv) +Flashbots is a research and development organization formed to mitigate the negative externalities and existential risks posed by miner-extractable value (MEV) to smart-contract blockchains. We propose a permissionless, transparent, and fair ecosystem for MEV extraction that reinforce the Ethereum ideals. -Automated builds are available for stable releases and the unstable master branch. Binary -archives are published at https://geth.ethereum.org/downloads/. +## Quick start -## Building the source - -For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth). - -Building `geth` requires both a Go (version 1.13 or later) and a C compiler. You can install -them using your favourite package manager. Once the dependencies are installed, run - -```shell +``` +git clone https://github.com/flashbots/mev-geth +cd mev-geth make geth ``` -or, to build the full suite of utilities: +See [here](https://geth.ethereum.org/docs/install-and-build/installing-geth#build-go-ethereum-from-source-code) for further info on building MEV-geth from source. -```shell -make all -``` +## MEV-Geth: a proof of concept -## Executables +We have designed and implemented a proof of concept for permissionless MEV extraction called MEV-Geth. 
It is a sealed-bid block space auction mechanism for communicating transaction order preference. While our proof of concept has incomplete trust guarantees, we believe it's a significant improvement over the status quo. The adoption of MEV-Geth should relieve a lot of the network and chain congestion caused by frontrunning and backrunning bots. -The go-ethereum project comes with several wrappers/executables found in the `cmd` -directory. +| Guarantee | PGA | Dark-txPool | MEV-Geth | +| -------------------- | :-: | :---------: | :------: | +| Permissionless | ✅ | ❌ | ✅ | +| Efficient | ❌ | ❌ | ✅ | +| Pre-trade privacy | ❌ | ✅ | ✅ | +| Failed trade privacy | ❌ | ❌ | ✅ | +| Complete privacy | ❌ | ❌ | ❌ | +| Finality | ❌ | ❌ | ❌ | -| Command | Description | -| :-----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. | -| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. 
It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. | -| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | -| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | -| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://eth.wiki/json-rpc/API) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. | -| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://eth.wiki/en/fundamentals/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | -| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. | +### Why MEV-Geth? -## Running `geth` +We believe that without the adoption of neutral, public, open-source infrastructure for permissionless MEV extraction, MEV risks becoming an insiders' game. 
We commit as an organization to releasing reference implementations for participation in fair, ethical, and politically neutral MEV extraction. By doing so, we hope to prevent the properties of Ethereum from being eroded by trust-based dark pools or proprietary channels which are key points of security weakness. We thus release MEV-Geth with the dual goal of creating an ecosystem for MEV extraction that preserves Ethereum properties, as well as starting conversations with the community around our research and development roadmap. -Going through all the possible command line flags is out of scope here (please consult our -[CLI Wiki page](https://geth.ethereum.org/docs/interface/command-line-options)), -but we've enumerated a few common parameter combos to get you up to speed quickly -on how you can run your own `geth` instance. +### Design goals -### Full node on the main Ethereum network +- **Permissionless** + A permissionless design implies there is no trusted intermediary which can censor transactions. +- **Efficient** + An efficient design implies MEV extraction is performed without causing unnecessary network or chain congestion. +- **Pre-trade privacy** + Pre-trade privacy implies transactions only become publicly known after they have been included in a block. Note, this type of privacy does not exclude privileged actors such as transaction aggregators / gateways / miners. +- **Failed trade privacy** + Failed trade privacy implies losing bids are never included in a block, thus never exposed to the public. Failed trade privacy is tightly coupled to extraction efficiency. +- **Complete privacy** + Complete privacy implies there are no privileged actors such as transaction aggregators / gateways / miners who can observe incoming transactions. +- **Finality** + Finality implies it is infeasible for MEV extraction to be reversed once included in a block. This would protect against time-bandit chain re-org attacks.
-By far the most common scenario is people wanting to simply interact with the Ethereum -network: create accounts; transfer funds; deploy and interact with contracts. For this -particular use-case the user doesn't care about years-old historical data, so we can -fast-sync quickly to the current state of the network. To do so: +The MEV-Geth proof of concept relies on the fact that searchers can withhold bids from certain miners in order to disincentivize bad behavior like stealing a profitable strategy. We expect a complete privacy design to necessitate some sort of private computation solution like SGX, ZKP, or MPC to withhold the transaction content from miners until it is mined in a block. One of the core objective of the Flashbots organization is to incentivize and produce research in this direction. -```shell -$ geth console -``` - -This command will: - * Start `geth` in fast sync mode (default, can be changed with the `--syncmode` flag), - causing it to download more data in exchange for avoiding processing the entire history - of the Ethereum network, which is very CPU intensive. - * Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console), - (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://web3js.readthedocs.io/en/) - as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server). - This tool is optional and if you leave it out you can always attach to an already running - `geth` instance with `geth attach`. - -### A Full node on the Görli test network - -Transitioning towards developers, if you'd like to play around with creating Ethereum -contracts, you almost certainly would like to do that without any real money involved until -you get the hang of the entire system. 
In other words, instead of attaching to the main -network, you want to join the **test** network with your node, which is fully equivalent to -the main network, but with play-Ether only. - -```shell -$ geth --goerli console -``` +The MEV-Geth proof of concept does not provide any finality guarantees. We expect the solution to this problem to require post-trade execution privacy through private chain state or strong economic infeasibility. The design of a system with strong finality is the second core objective of the MEV-Geth research effort. -The `console` subcommand has the exact same meaning as above and they are equally -useful on the testnet too. Please, see above for their explanations if you've skipped here. +### How it works -Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit: +MEV-Geth introduces the concepts of "searchers", "transaction bundles", and "block template" to Ethereum. Effectively, MEV-Geth provides a way for miners to delegate the task of finding and ordering transactions to third parties called "searchers". These searchers compete with each other to find the most profitable ordering and bid for its inclusion in the next block using a standardized template called a "transaction bundle". These bundles are evaluated in a sealed-bid auction hosted by miners to produce a "block template" which holds the [information about transaction order required to begin mining](https://ethereum.stackexchange.com/questions/268/ethereum-block-architecture). - * Instead of connecting the main Ethereum network, the client will connect to the Görli - test network, which uses different P2P bootnodes, different network IDs and genesis - states. - * Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth` - will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on - Linux). 
Note, on OSX and Linux this also means that attaching to a running testnet node - requires the use of a custom endpoint since `geth attach` will try to attach to a - production node endpoint by default, e.g., - `geth attach /goerli/geth.ipc`. Windows users are not affected by - this. +![](https://hackmd.io/_uploads/B1fWz7rcD.png) -*Note: Although there are some internal protective measures to prevent transactions from -crossing over between the main network and test network, you should make sure to always -use separate accounts for play-money and real-money. Unless you manually move -accounts, `geth` will by default correctly separate the two networks and will not make any -accounts available between them.* +The MEV-Geth proof of concept is compatible with any regular Ethereum client. The Flashbots core devs are maintaining [a reference implementation](https://github.com/flashbots/mev-geth) for the go-ethereum client. -### Full node on the Rinkeby test network +### Differences between MEV-Geth and [_vanilla_ geth](https://github.com/ethereum/go-ethereum) -Go Ethereum also supports connecting to the older proof-of-authority based test network -called [*Rinkeby*](https://www.rinkeby.io) which is operated by members of the community. +The entire patch can be broken down into four modules: -```shell -$ geth --rinkeby console -``` +1. bundle worker and `eth_sendBundle` rpc (commits [8104d5d7b0a54bd98b3a08479a1fde685eb53c29](https://github.com/flashbots/mev-geth/commit/8104d5d7b0a54bd98b3a08479a1fde685eb53c29) and [c2b5b4029b2b748a6f1a9d5668f12096f096563d](https://github.com/flashbots/mev-geth/commit/c2b5b4029b2b748a6f1a9d5668f12096f096563d)) +2. profit switcher (commit [aa5840d22f4882f91ecba0eb20ef35a702b134d5](https://github.com/flashbots/mev-geth/commit/aa5840d22f4882f91ecba0eb20ef35a702b134d5)) +3. 
`eth_callBundle` simulation rpc (commits [9199d2e13d484df7a634fad12343ed2b46d5d4c3](https://github.com/flashbots/mev-geth/commit/9199d2e13d484df7a634fad12343ed2b46d5d4c3) and [a99dfc198817dd171128cc22439c81896e876619](https://github.com/flashbots/mev-geth/commit/a99dfc198817dd171128cc22439c81896e876619)) +4. Documentation (this file) and CI/infrastructure configuration (commit [035109807944f7a446467aa27ca8ec98d109a465](https://github.com/flashbots/mev-geth/commit/035109807944f7a446467aa27ca8ec98d109a465)) -### Full node on the Ropsten test network - -In addition to Görli and Rinkeby, Geth also supports the ancient Ropsten testnet. The -Ropsten test network is based on the Ethash proof-of-work consensus algorithm. As such, -it has certain extra overhead and is more susceptible to reorganization attacks due to the -network's low difficulty/security. - -```shell -$ geth --ropsten console -``` - -*Note: Older Geth configurations store the Ropsten database in the `testnet` subdirectory.* - -### Configuration - -As an alternative to passing the numerous flags to the `geth` binary, you can also pass a -configuration file via: - -```shell -$ geth --config /path/to/your_config.toml -``` +followed by v0.1.1 and v0.2 changes -To get an idea how the file should look like you can use the `dumpconfig` subcommand to -export your existing configuration: +5. v0.1.1 improvement to reorganizations handling (commit [a9204599292d21c7e3d61710bb3d53d49142255e](https://github.com/flashbots/mev-geth/commit/a9204599292d21c7e3d61710bb3d53d49142255e)) +6. v0.2 change to the MEV equivalent gas price when comparing bundles (commit [910d412be36a8c8ac53df717f4fa85863c7463fa](https://github.com/flashbots/mev-geth/commit/910d412be36a8c8ac53df717f4fa85863c7463fa)) +7. 
v0.2 discarding transactions with reverts (commit [1ca66fa1e422570729c44ed88df5261c22e5762a](https://github.com/flashbots/mev-geth/commit/df05284b80c23814e5033e8e1ef802fe251762a1)) -```shell -$ geth --your-favourite-flags dumpconfig -``` +The entire changeset can be viewed inspecting the [diff](https://github.com/ethereum/go-ethereum/compare/master...flashbots:master). -*Note: This works only with `geth` v1.6.0 and above.* +In summary: -#### Docker quick start +- Geth’s txpool is modified to also contain a `mevBundles` field, which stores a list of MEV bundles. Each MEV bundle is an array of transactions, along with a min/max timestamp for their inclusion. +- A new `eth_sendBundle` API is exposed which allows adding an MEV Bundle to the txpool. During the Flashbots Alpha, this is only called by MEV-relay. + - The transactions submitted to the bundle are “eth_sendRawTransaction-style” RLP encoded signed transactions along with the min/max block of inclusion + - This API is a no-op when run in light mode +- Geth’s miner is modified as follows: + - While in the event loop, before adding all the pending txpool “normal” transactions to the block, it: + - Finds the best bundles and merges them as long as they are more profitable than normal block transactions: + - It compares bundles by their coinbase payment per unit of gas + - computeBundleGas: Returns MEV equivalent gas price ((coinbase_after - coinbase_before)) / \sum{gasused_i}) + - Commits the merged bundle (remember: Bundle transactions are not ordered by nonce or gas price). For each transaction in the merged bundle, it: + - `Prepare`’s it against the state + - CommitsTransaction with trackProfit = true + w.current.profit += coinbase_after_tx - coinbase_before_tx + - If a block is found where the w.current.profit is more than the previous profit, it switches mining to that block. +- A new `eth_callBundle` API is exposed that enables simulation of transaction bundles. 
+- Documentation and CI/infrastructure files are added. -One of the quickest ways to get Ethereum up and running on your machine is by using -Docker: +### MEV-Geth for miners -```shell -docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \ - -p 8545:8545 -p 30303:30303 \ - ethereum/client-go -``` +Miners can start mining MEV blocks by running MEV-Geth, or by implementing their own fork that matches the specification. -This will start `geth` in fast-sync mode with a DB memory allowance of 1GB just as the -above command does. It will also create a persistent volume in your home directory for -saving your blockchain as well as map the default ports. There is also an `alpine` tag -available for a slim version of the image. - -Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers -and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not -accessible from the outside. - -### Programmatically interfacing `geth` nodes - -As a developer, sooner rather than later you'll want to start interacting with `geth` and the -Ethereum network via your own programs and not manually through the console. To aid -this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://eth.wiki/json-rpc/API) -and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)). -These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based -platforms, and named pipes on Windows). - -The IPC interface is enabled by default and exposes all the APIs supported by `geth`, -whereas the HTTP and WS interfaces need to manually be enabled and only expose a -subset of APIs due to security reasons. These can be turned on/off and configured as -you'd expect. 
- -HTTP based JSON-RPC API options: - - * `--http` Enable the HTTP-RPC server - * `--http.addr` HTTP-RPC server listening interface (default: `localhost`) - * `--http.port` HTTP-RPC server listening port (default: `8545`) - * `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`) - * `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced) - * `--ws` Enable the WS-RPC server - * `--ws.addr` WS-RPC server listening interface (default: `localhost`) - * `--ws.port` WS-RPC server listening port (default: `8546`) - * `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`) - * `--ws.origins` Origins from which to accept websockets requests - * `--ipcdisable` Disable the IPC-RPC server - * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`) - * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it) - -You'll need to use your own programming environments' capabilities (libraries, tools, etc) to -connect via HTTP, WS or IPC to a `geth` node configured with the above flags and you'll -need to speak [JSON-RPC](https://www.jsonrpc.org/specification) on all transports. You -can reuse the same connection for multiple requests! - -**Note: Please understand the security implications of opening up an HTTP/WS based -transport before doing so! Hackers on the internet are actively trying to subvert -Ethereum nodes with exposed APIs! Further, all browser tabs can access locally -running web servers, so malicious web pages could try to subvert locally available -APIs!** - -### Operating a private network - -Maintaining your own private network is more involved as a lot of configurations taken for -granted in the official networks need to be manually set up. 
- -#### Defining the private genesis state - -First, you'll need to create the genesis state of your networks, which all nodes need to be -aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`): - -```json -{ - "config": { - "chainId": , - "homesteadBlock": 0, - "eip150Block": 0, - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0 - }, - "alloc": {}, - "coinbase": "0x0000000000000000000000000000000000000000", - "difficulty": "0x20000", - "extraData": "", - "gasLimit": "0x2fefd8", - "nonce": "0x0000000000000042", - "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0x00" -} -``` +While only the bundle worker and `eth_sendBundle` module (1) is necessary to mine flashbots blocks, we recommend also running the profit switcher module (2) to guarantee mining rewards are maximized. The `eth_callBundle` simulation rpc module (3) is not needed for the alpha. The suggested configuration is implemented in the `master` branch of this repository, which also includes the documentation module (4). -The above fields should be fine for most purposes, although we'd recommend changing -the `nonce` to some random value so you prevent unknown remote nodes from being able -to connect to you. If you'd like to pre-fund some accounts for easier testing, create -the accounts and populate the `alloc` field with their addresses. - -```json -"alloc": { - "0x0000000000000000000000000000000000000001": { - "balance": "111111111" - }, - "0x0000000000000000000000000000000000000002": { - "balance": "222222222" - } -} -``` +We issue and maintain [releases](https://github.com/flashbots/mev-geth/releases) for the recommended configuration for the current and immediately prior versions of geth. 
-With the genesis state defined in the above JSON file, you'll need to initialize **every** -`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly -set: +In order to see the diff of the recommended patch, run: -```shell -$ geth init path/to/genesis.json ``` - -#### Creating the rendezvous point - -With all nodes that you want to run initialized to the desired genesis state, you'll need to -start a bootstrap node that others can use to find each other in your network and/or over -the internet. The clean way is to configure and run a dedicated bootnode: - -```shell -$ bootnode --genkey=boot.key -$ bootnode --nodekey=boot.key + git diff master~8..master~1 ``` -With the bootnode online, it will display an [`enode` URL](https://eth.wiki/en/fundamentals/enode-url-format) -that other nodes can use to connect to it and exchange peer information. Make sure to -replace the displayed IP address information (most probably `[::]`) with your externally -accessible IP to get the actual `enode` URL. - -*Note: You could also use a full-fledged `geth` node as a bootnode, but it's the less -recommended way.* - -#### Starting up your member nodes - -With the bootnode operational and externally reachable (you can try -`telnet ` to ensure it's indeed reachable), start every subsequent `geth` -node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will -probably also be desirable to keep the data directory of your private network separated, so -do also specify a custom `--datadir` flag. +Alternatively, the `master-barebones` branch includes only modules (1) and (4), leaving the profit switching logic to miners. While this usage is discouraged, it entails a much smaller change in the code. -```shell -$ geth --datadir=path/to/custom/data/folder --bootnodes= -``` +At this stage, we recommend only receiving bundles via a relay, to prevent abuse via denial-of-service attacks. 
We have [implemented](https://github.com/flashbots/mev-relay) and currently run such relay. This relay performs basic rate limiting and miner profitability checks, but does otherwise not interfere with submitted bundles in any way, and is open for everybody to participate. We invite you to try the [Flashbots Alpha](https://github.com/flashbots/pm#flashbots-alpha) and start receiving MEV revenue by following these steps: -*Note: Since your network will be completely cut off from the main and test networks, you'll -also need to configure a miner to process transactions and create new blocks for you.* +1. Fill out this [form](https://forms.gle/78JS52d22dwrgabi6) to indicate your interest in participating in the Alpha and be added to the MEV-Relay miner whitelist. +2. You will receive an onboarding email from Flashbots to help [set up](https://github.com/flashbots/mev-geth/blob/master/README.md#quick-start) your MEV-Geth node and protect it with a [reverse proxy](https://github.com/flashbots/mev-relay-js/blob/master/miner/proxy.js) to open the `eth_sendBundle` RPC. +3. Respond to Flashbots' email with your MEV-Geth node endpoint to be added to the Flashbots hosted [MEV-relay](https://github.com/flashbots/mev-relay-js) gateway. MEV-Relay is needed during the alpha to aggregate bundle requests from all users, prevent spam and DOS attacks on participating miner(s)/mining pool(s), and collect system health metrics. +4. After receiving a confirmation email that your MEV-Geth node's endpoint has been added to the relay, you will immediately start receiving Flashbots transaction bundles with associated MEV revenue paid to you. -#### Running a private miner +### MEV-Geth for searchers -Mining on the public Ethereum network is a complex task as it's only feasible using GPUs, -requiring an OpenCL or CUDA enabled `ethminer` instance. 
For information on such a -setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/) -and the [ethminer](https://github.com/ethereum-mining/ethminer) repository. +You do _not_ need to run MEV-Geth as a searcher, but, instead, to monitor the Ethereum state and transaction pool for MEV opportunities and produce transaction bundles that extract that MEV. Anyone can become a searcher. In fact, the bundles produced by searchers don't need to extract MEV at all, but we expect the most valuable bundles will. -In a private network setting, however a single CPU miner instance is more than enough for -practical purposes as it can produce a stable stream of blocks at the correct intervals -without needing heavy resources (consider running on a single thread, no need for multiple -ones either). To start a `geth` instance for mining, run it with all your usual flags, extended -by: +An MEV-Geth bundle is a standard message template composed of an array of valid ethereum transactions, a blockheight, and an optional timestamp range over which the bundle is valid. -```shell -$ geth --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000 +```jsonld +{ + "signedTransactions": ['...'], // RLP encoded signed transaction array + "blocknumber": "0x386526", // hex string + "minTimestamp": 12345, // optional uint64 + "maxTimestamp": 12345 // optional uint64 +} ``` -Which will start mining blocks and transactions on a single CPU thread, crediting all -proceedings to the account specified by `--miner.etherbase`. You can further tune the mining -by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price -transactions are accepted at (`--miner.gasprice`). - -## Contribution +The `signedTransactions` can be any valid ethereum transactions. Care must be taken to place transaction nonces in correct order. -Thank you for considering to help out with the source code! 
We welcome contributions -from anyone on the internet, and are grateful for even the smallest of fixes! +The `blocknumber` defines the block height at which the bundle is to be included. A bundle will only be evaluated for the provided blockheight and immediately evicted if not selected. -If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request -for the maintainers to review and merge into the main code base. If you wish to submit -more complex changes though, please check up with the core devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) -to ensure those changes are in line with the general philosophy of the project and/or get -some early feedback which can make both your efforts much lighter as well as our review -and merge procedures quick and simple. +The `minTimestamp` and `maxTimestamp` are optional conditions to further restrict bundle validity within a time range. -Please make sure your contributions adhere to our coding guidelines: +MEV-Geth miners select the most profitable bundle per unit of gas used and place it at the beginning of the list of transactions of the block template at a given blockheight. Miners determine the value of a bundle based on the following equation: the total eth sent to the coinbase divided by the total gas used by the bundle. This equation completely ignores gas fees from the transactions. - * Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) - guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). - * Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) - guidelines. - * Pull requests need to be based on and opened against the `master` branch. - * Commit messages should be prefixed with the package(s) they modify. - * E.g. 
"eth, rpc: make trace configs optional" +To submit a bundle, the searcher sends the bundle directly to the miner using the rpc method `eth_sendBundle`. Since MEV-Geth requires direct communication between searchers and miners, a searcher can configure the list of miners where they want to send their bundle. -Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide) -for more details on configuring your environment, managing project dependencies, and -testing procedures. +### Feature requests and bug reports -## License +If you are a user of MEV-Geth and have suggestions on how to make integration with your current setup easier, or would like to submit a bug report, we encourage you to open an issue in this repository with the `enhancement` or `bug` labels respectively. If you need help getting started, please ask in the dedicated [#⛏️miners](https://discord.gg/rcgADN9qFX) channel in our Discord. -The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the -[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), -also included in our repository in the `COPYING.LESSER` file. +### Moving beyond proof of concept -The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the -[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also -included in our repository in the `COPYING` file. +We provide the MEV-Geth proof of concept as a first milestone on the path to mitigating the negative externalities caused by MEV. We hope to discuss with the community the merits of adopting MEV-Geth in its current form. Our preliminary research indicates it could free at least 2.5% of the current chain congestion by eliminating the use of frontrunning and backrunning and provide uplift of up to 18% on miner rewards from Ethereum. 
That being said, we believe a sustainable solution to MEV existential risks requires complete privacy and finality, which the proof of concept does not address. We hope to engage community feedback throughout the development of this complete version of MEV-Geth. diff --git a/README.original.md b/README.original.md new file mode 100644 index 000000000000..ddb885dfdc36 --- /dev/null +++ b/README.original.md @@ -0,0 +1,359 @@ +## Go Ethereum + +Official Golang implementation of the Ethereum protocol. + +[![API Reference]( +https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 +)](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc) +[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum) +[![Travis](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum) +[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv) + +Automated builds are available for stable releases and the unstable master branch. Binary +archives are published at https://geth.ethereum.org/downloads/. + +## Building the source + +For prerequisites and detailed build instructions please read the [Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum) on the wiki. + +Building `geth` requires both a Go (version 1.13 or later) and a C compiler. You can install +them using your favourite package manager. Once the dependencies are installed, run + +```shell +make geth +``` + +or, to build the full suite of utilities: + +```shell +make all +``` + +## Executables + +The go-ethereum project comes with several wrappers/executables found in the `cmd` +directory. 
+ +| Command | Description | +| :-----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. | +| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. | +| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. 
| +| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | +| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. | +| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | +| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. | + +## Running `geth` + +Going through all the possible command line flags is out of scope here (please consult our +[CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options)), +but we've enumerated a few common parameter combos to get you up to speed quickly +on how you can run your own `geth` instance. + +### Full node on the main Ethereum network + +By far the most common scenario is people wanting to simply interact with the Ethereum +network: create accounts; transfer funds; deploy and interact with contracts. For this +particular use-case the user doesn't care about years-old historical data, so we can +fast-sync quickly to the current state of the network. 
To do so: + +```shell +$ geth console +``` + +This command will: + * Start `geth` in fast sync mode (default, can be changed with the `--syncmode` flag), + causing it to download more data in exchange for avoiding processing the entire history + of the Ethereum network, which is very CPU intensive. + * Start up `geth`'s built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console), + (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API) + as well as `geth`'s own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs). + This tool is optional and if you leave it out you can always attach to an already running + `geth` instance with `geth attach`. + +### A Full node on the Görli test network + +Transitioning towards developers, if you'd like to play around with creating Ethereum +contracts, you almost certainly would like to do that without any real money involved until +you get the hang of the entire system. In other words, instead of attaching to the main +network, you want to join the **test** network with your node, which is fully equivalent to +the main network, but with play-Ether only. + +```shell +$ geth --goerli console +``` + +The `console` subcommand has the exact same meaning as above and they are equally +useful on the testnet too. Please, see above for their explanations if you've skipped here. + +Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit: + + * Instead of connecting the main Ethereum network, the client will connect to the Görli + test network, which uses different P2P bootnodes, different network IDs and genesis + states. + * Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth` + will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on + Linux). 
Note, on OSX and Linux this also means that attaching to a running testnet node + requires the use of a custom endpoint since `geth attach` will try to attach to a + production node endpoint by default, e.g., + `geth attach /goerli/geth.ipc`. Windows users are not affected by + this. + +*Note: Although there are some internal protective measures to prevent transactions from +crossing over between the main network and test network, you should make sure to always +use separate accounts for play-money and real-money. Unless you manually move +accounts, `geth` will by default correctly separate the two networks and will not make any +accounts available between them.* + +### Full node on the Rinkeby test network + +Go Ethereum also supports connecting to the older proof-of-authority based test network +called [*Rinkeby*](https://www.rinkeby.io) which is operated by members of the community. + +```shell +$ geth --rinkeby console +``` + +### Full node on the Ropsten test network + +In addition to Görli and Rinkeby, Geth also supports the ancient Ropsten testnet. The +Ropsten test network is based on the Ethash proof-of-work consensus algorithm. As such, +it has certain extra overhead and is more susceptible to reorganization attacks due to the +network's low difficulty/security. 
+ +```shell +$ geth --ropsten console +``` + +*Note: Older Geth configurations store the Ropsten database in the `testnet` subdirectory.* + +### Configuration + +As an alternative to passing the numerous flags to the `geth` binary, you can also pass a +configuration file via: + +```shell +$ geth --config /path/to/your_config.toml +``` + +To get an idea how the file should look like you can use the `dumpconfig` subcommand to +export your existing configuration: + +```shell +$ geth --your-favourite-flags dumpconfig +``` + +*Note: This works only with `geth` v1.6.0 and above.* + +#### Docker quick start + +One of the quickest ways to get Ethereum up and running on your machine is by using +Docker: + +```shell +docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \ + -p 8545:8545 -p 30303:30303 \ + ethereum/client-go +``` + +This will start `geth` in fast-sync mode with a DB memory allowance of 1GB just as the +above command does. It will also create a persistent volume in your home directory for +saving your blockchain as well as map the default ports. There is also an `alpine` tag +available for a slim version of the image. + +Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers +and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not +accessible from the outside. + +### Programmatically interfacing `geth` nodes + +As a developer, sooner rather than later you'll want to start interacting with `geth` and the +Ethereum network via your own programs and not manually through the console. To aid +this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://github.com/ethereum/wiki/wiki/JSON-RPC) +and [`geth` specific APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs)). +These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based +platforms, and named pipes on Windows). 
+ +The IPC interface is enabled by default and exposes all the APIs supported by `geth`, +whereas the HTTP and WS interfaces need to manually be enabled and only expose a +subset of APIs due to security reasons. These can be turned on/off and configured as +you'd expect. + +HTTP based JSON-RPC API options: + + * `--http` Enable the HTTP-RPC server + * `--http.addr` HTTP-RPC server listening interface (default: `localhost`) + * `--http.port` HTTP-RPC server listening port (default: `8545`) + * `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`) + * `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced) + * `--ws` Enable the WS-RPC server + * `--ws.addr` WS-RPC server listening interface (default: `localhost`) + * `--ws.port` WS-RPC server listening port (default: `8546`) + * `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`) + * `--ws.origins` Origins from which to accept websockets requests + * `--ipcdisable` Disable the IPC-RPC server + * `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,shh,txpool,web3`) + * `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it) + +You'll need to use your own programming environments' capabilities (libraries, tools, etc) to +connect via HTTP, WS or IPC to a `geth` node configured with the above flags and you'll +need to speak [JSON-RPC](https://www.jsonrpc.org/specification) on all transports. You +can reuse the same connection for multiple requests! + +**Note: Please understand the security implications of opening up an HTTP/WS based +transport before doing so! Hackers on the internet are actively trying to subvert +Ethereum nodes with exposed APIs! 
Further, all browser tabs can access locally +running web servers, so malicious web pages could try to subvert locally available +APIs!** + +### Operating a private network + +Maintaining your own private network is more involved as a lot of configurations taken for +granted in the official networks need to be manually set up. + +#### Defining the private genesis state + +First, you'll need to create the genesis state of your networks, which all nodes need to be +aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`): + +```json +{ + "config": { + "chainId": , + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0 + }, + "alloc": {}, + "coinbase": "0x0000000000000000000000000000000000000000", + "difficulty": "0x20000", + "extraData": "", + "gasLimit": "0x2fefd8", + "nonce": "0x0000000000000042", + "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "timestamp": "0x00" +} +``` + +The above fields should be fine for most purposes, although we'd recommend changing +the `nonce` to some random value so you prevent unknown remote nodes from being able +to connect to you. If you'd like to pre-fund some accounts for easier testing, create +the accounts and populate the `alloc` field with their addresses. 
+ +```json +"alloc": { + "0x0000000000000000000000000000000000000001": { + "balance": "111111111" + }, + "0x0000000000000000000000000000000000000002": { + "balance": "222222222" + } +} +``` + +With the genesis state defined in the above JSON file, you'll need to initialize **every** +`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly +set: + +```shell +$ geth init path/to/genesis.json +``` + +#### Creating the rendezvous point + +With all nodes that you want to run initialized to the desired genesis state, you'll need to +start a bootstrap node that others can use to find each other in your network and/or over +the internet. The clean way is to configure and run a dedicated bootnode: + +```shell +$ bootnode --genkey=boot.key +$ bootnode --nodekey=boot.key +``` + +With the bootnode online, it will display an [`enode` URL](https://github.com/ethereum/wiki/wiki/enode-url-format) +that other nodes can use to connect to it and exchange peer information. Make sure to +replace the displayed IP address information (most probably `[::]`) with your externally +accessible IP to get the actual `enode` URL. + +*Note: You could also use a full-fledged `geth` node as a bootnode, but it's the less +recommended way.* + +#### Starting up your member nodes + +With the bootnode operational and externally reachable (you can try +`telnet ` to ensure it's indeed reachable), start every subsequent `geth` +node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will +probably also be desirable to keep the data directory of your private network separated, so +do also specify a custom `--datadir` flag. 
+ +```shell +$ geth --datadir=path/to/custom/data/folder --bootnodes= +``` + +*Note: Since your network will be completely cut off from the main and test networks, you'll +also need to configure a miner to process transactions and create new blocks for you.* + +#### Running a private miner + +Mining on the public Ethereum network is a complex task as it's only feasible using GPUs, +requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a +setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/) +and the [ethminer](https://github.com/ethereum-mining/ethminer) repository. + +In a private network setting, however a single CPU miner instance is more than enough for +practical purposes as it can produce a stable stream of blocks at the correct intervals +without needing heavy resources (consider running on a single thread, no need for multiple +ones either). To start a `geth` instance for mining, run it with all your usual flags, extended +by: + +```shell +$ geth --mine --miner.threads=1 --etherbase=0x0000000000000000000000000000000000000000 +``` + +Which will start mining blocks and transactions on a single CPU thread, crediting all +proceedings to the account specified by `--etherbase`. You can further tune the mining +by changing the default gas limit blocks converge to (`--targetgaslimit`) and the price +transactions are accepted at (`--gasprice`). + +## Contribution + +Thank you for considering to help out with the source code! We welcome contributions +from anyone on the internet, and are grateful for even the smallest of fixes! + +If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request +for the maintainers to review and merge into the main code base. 
If you wish to submit +more complex changes though, please check up with the core devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) +to ensure those changes are in line with the general philosophy of the project and/or get +some early feedback which can make both your efforts much lighter as well as our review +and merge procedures quick and simple. + +Please make sure your contributions adhere to our coding guidelines: + + * Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) + guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). + * Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) + guidelines. + * Pull requests need to be based on and opened against the `master` branch. + * Commit messages should be prefixed with the package(s) they modify. + * E.g. "eth, rpc: make trace configs optional" + +Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide) +for more details on configuring your environment, managing project dependencies, and +testing procedures. + +## License + +The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the +[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), +also included in our repository in the `COPYING.LESSER` file. + +The go-ethereum binaries (i.e. all code inside of the `cmd` directory) is licensed under the +[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also +included in our repository in the `COPYING` file. diff --git a/core/mev_bundle.go b/core/mev_bundle.go new file mode 100644 index 000000000000..257411708e74 --- /dev/null +++ b/core/mev_bundle.go @@ -0,0 +1,30 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package core + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/core/types" +) + +type mevBundle struct { + txs types.Transactions + blockNumber *big.Int + minTimestamp uint64 + maxTimestamp uint64 +} diff --git a/core/state_processor.go b/core/state_processor.go index 40a953f0d4f8..f4b1d4dad85c 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -134,6 +134,51 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon return receipt, err } +func applyTransactionWithResult(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, *ExecutionResult, error) { + // Create a new context to be used in the EVM environment. + txContext := NewEVMTxContext(msg) + evm.Reset(txContext, statedb) + + // Apply the transaction to the current state (included in the env). + result, err := ApplyMessage(evm, msg, gp) + if err != nil { + return nil, nil, err + } + + // Update the state with pending changes. 
+ var root []byte + if config.IsByzantium(header.Number) { + statedb.Finalise(true) + } else { + root = statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes() + } + *usedGas += result.UsedGas + + // Create a new receipt for the transaction, storing the intermediate root and gas used + // by the tx. + receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: *usedGas} + if result.Failed() { + receipt.Status = types.ReceiptStatusFailed + } else { + receipt.Status = types.ReceiptStatusSuccessful + } + receipt.TxHash = tx.Hash() + receipt.GasUsed = result.UsedGas + + // If the transaction created a contract, store the creation address in the receipt. + if msg.To() == nil { + receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) + } + + // Set the receipt logs and create the bloom filter. + receipt.Logs = statedb.GetLogs(tx.Hash()) + receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) + receipt.BlockHash = statedb.BlockHash() + receipt.BlockNumber = header.Number + receipt.TransactionIndex = uint(statedb.TxIndex()) + return receipt, result, err +} + // ApplyTransaction attempts to apply a transaction to the given state database // and uses the input parameters for its environment. 
It returns the receipt // for the transaction, gas used and an error if the transaction failed, @@ -148,3 +193,14 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) return applyTransaction(msg, config, bc, author, gp, statedb, header, tx, usedGas, vmenv) } + +func ApplyTransactionWithResult(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, *ExecutionResult, error) { + msg, err := tx.AsMessage(types.MakeSigner(config, header.Number)) + if err != nil { + return nil, nil, err + } + // Create a new context to be used in the EVM environment + blockContext := NewEVMBlockContext(header, bc, author) + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) + return applyTransactionWithResult(msg, config, bc, author, gp, statedb, header, tx, usedGas, vmenv) +} diff --git a/core/tx_pool.go b/core/tx_pool.go index 5db1d3df329d..8a93c774916d 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -238,11 +238,12 @@ type TxPool struct { locals *accountSet // Set of local transaction to exempt from eviction rules journal *txJournal // Journal of local transaction to back up to disk - pending map[common.Address]*txList // All currently processable transactions - queue map[common.Address]*txList // Queued but non-processable transactions - beats map[common.Address]time.Time // Last heartbeat from each known account - all *txLookup // All transactions to allow lookups - priced *txPricedList // All transactions sorted by price + pending map[common.Address]*txList // All currently processable transactions + queue map[common.Address]*txList // Queued but non-processable transactions + beats map[common.Address]time.Time // Last heartbeat from each known account + mevBundles []mevBundle + all *txLookup // All transactions to 
allow lookups + priced *txPricedList // All transactions sorted by price chainHeadCh chan ChainHeadEvent chainHeadSub event.Subscription @@ -496,6 +497,58 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) { return pending, nil } +/// AllMevBundles returns all the MEV Bundles currently in the pool +func (pool *TxPool) AllMevBundles() []mevBundle { + return pool.mevBundles +} + +// MevBundles returns a list of bundles valid for the given blockNumber/blockTimestamp +// also prunes bundles that are outdated +func (pool *TxPool) MevBundles(blockNumber *big.Int, blockTimestamp uint64) ([]types.Transactions, error) { + pool.mu.Lock() + defer pool.mu.Unlock() + + // returned values + var txBundles []types.Transactions + // rolled over values + var bundles []mevBundle + + for _, bundle := range pool.mevBundles { + // Prune outdated bundles + if (bundle.maxTimestamp != 0 && blockTimestamp > bundle.maxTimestamp) || blockNumber.Cmp(bundle.blockNumber) > 0 { + continue + } + + // Roll over future bundles + if (bundle.minTimestamp != 0 && blockTimestamp < bundle.minTimestamp) || blockNumber.Cmp(bundle.blockNumber) < 0 { + bundles = append(bundles, bundle) + continue + } + + // return the ones which are in time + txBundles = append(txBundles, bundle.txs) + // keep the bundles around internally until they need to be pruned + bundles = append(bundles, bundle) + } + + pool.mevBundles = bundles + return txBundles, nil +} + +// AddMevBundle adds a mev bundle to the pool +func (pool *TxPool) AddMevBundle(txs types.Transactions, blockNumber *big.Int, minTimestamp, maxTimestamp uint64) error { + pool.mu.Lock() + defer pool.mu.Unlock() + + pool.mevBundles = append(pool.mevBundles, mevBundle{ + txs: txs, + blockNumber: blockNumber, + minTimestamp: minTimestamp, + maxTimestamp: maxTimestamp, + }) + return nil +} + // Locals retrieves the accounts currently considered local by the pool. 
func (pool *TxPool) Locals() []common.Address { pool.mu.Lock() diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index 5d555f5a9cfd..e5e19a06c7b2 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -2052,3 +2052,13 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { pool.Stop() } } + +func checkBundles(t *testing.T, pool *TxPool, block int64, timestamp uint64, expectedRes int, expectedRemaining int) { + res, _ := pool.MevBundles(big.NewInt(block), timestamp) + if len(res) != expectedRes { + t.Fatalf("expected returned bundles did not match got %d, expected %d", len(res), expectedRes) + } + if len(pool.mevBundles) != expectedRemaining { + t.Fatalf("expected remaining bundles did not match got %d, expected %d", len(pool.mevBundles), expectedRemaining) + } +} diff --git a/eth/api_backend.go b/eth/api_backend.go index 7ac1f82a863a..5b53bb29391c 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -230,6 +230,10 @@ func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) return b.eth.txPool.AddLocal(signedTx) } +func (b *EthAPIBackend) SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64) error { + return b.eth.txPool.AddMevBundle(txs, big.NewInt(blockNumber.Int64()), minTimestamp, maxTimestamp) +} + func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) { pending, err := b.eth.txPool.Pending() if err != nil { diff --git a/eth/backend.go b/eth/backend.go index 9cf8b8566304..711d440e2709 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -287,7 +287,7 @@ func makeExtraData(extra []byte) []byte { // APIs return the collection of RPC services the ethereum package offers. // NOTE, some of these services probably need to be moved to somewhere else. 
func (s *Ethereum) APIs() []rpc.API { - apis := ethapi.GetAPIs(s.APIBackend) + apis := ethapi.GetAPIs(s.APIBackend, s.BlockChain()) // Append any APIs exposed explicitly by the consensus engine apis = append(apis, s.engine.APIs(s.BlockChain())...) diff --git a/infra/Dockerfile.node b/infra/Dockerfile.node new file mode 100644 index 000000000000..db8e99ac937e --- /dev/null +++ b/infra/Dockerfile.node @@ -0,0 +1,23 @@ +# Build Geth in a stock Go builder container +FROM golang:1.15-alpine as builder + +RUN apk add --no-cache make gcc musl-dev linux-headers git + +ADD . /go-ethereum +RUN cd /go-ethereum && make geth + +# Pull Geth into a second stage deploy alpine container +FROM alpine:latest + +ENV PYTHONUNBUFFERED=1 +RUN apk add --update --no-cache groff less python3 curl jq ca-certificates && ln -sf python3 /usr/bin/python +RUN python3 -m ensurepip +RUN pip3 install --no-cache --upgrade pip setuptools awscli + +COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/ + +COPY ./infra/start-mev-geth-node.sh /root/start-mev-geth-node.sh +RUN chmod 755 /root/start-mev-geth-node.sh + +EXPOSE 8545 8546 30303 30303/udp +ENTRYPOINT ["/root/start-mev-geth-node.sh"] diff --git a/infra/Dockerfile.updater b/infra/Dockerfile.updater new file mode 100644 index 000000000000..d3099d19ce1a --- /dev/null +++ b/infra/Dockerfile.updater @@ -0,0 +1,23 @@ +# Build Geth in a stock Go builder container +FROM golang:1.15-alpine as builder + +RUN apk add --no-cache make gcc musl-dev linux-headers git + +ADD . 
/go-ethereum +RUN cd /go-ethereum && make geth + +# Pull Geth into a second stage deploy alpine container +FROM alpine:latest + +ENV PYTHONUNBUFFERED=1 +RUN apk add --update --no-cache groff less python3 curl jq ca-certificates && ln -sf python3 /usr/bin/python +RUN python3 -m ensurepip +RUN pip3 install --no-cache --upgrade pip setuptools awscli + +COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/ + +COPY ./infra/start-mev-geth-updater.sh /root/start-mev-geth-updater.sh +RUN chmod 755 /root/start-mev-geth-updater.sh + +EXPOSE 8545 8546 30303 30303/udp +ENTRYPOINT ["/root/start-mev-geth-updater.sh"] diff --git a/infra/mev-geth-nodes-arm64.yaml b/infra/mev-geth-nodes-arm64.yaml new file mode 100644 index 000000000000..af76b6aada82 --- /dev/null +++ b/infra/mev-geth-nodes-arm64.yaml @@ -0,0 +1,979 @@ +--- +AWSTemplateFormatVersion: 2010-09-09 + +Description: > + This template creates an automated continuous deployment pipeline to Amazon Elastic Container Service (ECS) + Created by Luke Youngblood, luke@blockscale.net + +Parameters: + +# GitHub Parameters + + GitHubUser: + Type: String + Default: lyoungblood + Description: Your team or username on GitHub. + + NodeGitHubRepo: + Type: String + Default: mev-geth + Description: The repo name of the node service. + + NodeGitHubBranch: + Type: String + Default: master + Description: The branch of the node repo to continuously deploy. + + GitHubToken: + Type: String + NoEcho: true + Description: > + Token for the team or user specified above. 
(https://github.com/settings/tokens) + +# VPC Parameters + + VPC: + Type: AWS::EC2::VPC::Id + + Subnets: + Type: List + + VpcCIDR: + Type: String + Default: 172.31.0.0/16 + +# ECS Parameters + + InstanceType: + Type: String + Default: m6gd.large + + MemoryLimit: + Type: Number + Default: 6144 + + KeyPair: + Type: AWS::EC2::KeyPair::KeyName + + SpotPrice: + Type: Number + Default: 0.0904 + + ClusterSize: + Type: Number + Default: 5 + + Bandwidth: + Type: Number + Default: 2048 + + BandwidthCeiling: + Type: Number + Default: 4096 + + NodeDesiredCount: + Type: Number + Default: 0 + + NodeTaskName: + Type: String + Default: mev-geth-node + + ECSAMI: + Type: AWS::SSM::Parameter::Value + Default: /aws/service/ecs/optimized-ami/amazon-linux-2/arm64/recommended/image_id + +# SNS Parameters + + SNSSubscriptionEndpoint: + Type: String + Default: https://events.pagerduty.com/integration/44cbdb66f22b4f3caf5dd15741c7eb17/enqueue + + SNSSubscriptionProtocol: + Type: String + Default: HTTPS + +# CloudWatch Alarm Parameters + + CPUAlarmThreshold: + Type: Number + Default: 80 + + MemoryAlarmThreshold: + Type: Number + Default: 80 + +# Mev-Geth Parameters + + Network: + Type: String + Default: mainnet + AllowedValues: + - mainnet + - goerli + + SyncMode: + Type: String + Default: fast + AllowedValues: + - full + - fast + - light + + Connections: + Type: Number + Default: 50 + + RpcPort: + Type: Number + Default: 8545 + + WsPort: + Type: Number + Default: 8546 + + NetPort: + Type: Number + Default: 30303 + +Metadata: + + AWS::CloudFormation::Interface: + ParameterLabels: + GitHubUser: + default: "User" + NodeGitHubRepo: + default: "Node Repo" + NodeGitHubBranch: + default: "Node Branch" + GitHubToken: + default: "Personal Access Token" + VPC: + default: "Choose which VPC the autoscaling group should be deployed to" + Subnets: + default: "Choose which subnets the autoscaling group should be deployed to" + VpcCIDR: + default: "VPC CIDR Block" + InstanceType: + default: "Which instance 
type should we use to build the ECS cluster?" + MemoryLimit: + default: "How much memory should be reserved for each task. Set to greater than 50% of instance memory capacity." + KeyPair: + default: "Which keypair should be used to allow SSH to the nodes?" + ClusterSize: + default: "How many ECS hosts do you want to initially deploy?" + SpotPrice: + default: "The maximum spot price to pay for instances - this should normally be set to the on demand price." + Bandwidth: + default: "How much bandwidth, in kb/sec., should be allocated to Ethereum peers (upload) per EC2 instance" + BandwidthCeiling: + default: "How much bandwidth, in kb/sec., should be allocated to Ethereum peers as a ceiling (max. upload)" + NodeDesiredCount: + default: "How many ECS Tasks do you want to initially execute?" + NodeTaskName: + default: "The name of the node ECS Task" + ECSAMI: + default: "The ECS AMI ID populated from SSM." + Network: + default: "The Ethereum network you will be connecting to" + SyncMode: + default: "The synchronization mode that Mev-Geth should use (full, fast, or light)" + Connections: + default: "The number of desired connections on the Mev-Geth node" + RpcPort: + default: "The RPC port used for communication with the local Mev-Geth node" + WsPort: + default: "The Websockets port used for communication with the local Mev-Geth node" + NetPort: + default: "The TCP port used for connectivity to other Ethereum peer nodes" + ParameterGroups: + - Label: + default: GitHub Configuration + Parameters: + - NodeGitHubRepo + - NodeGitHubBranch + - GitHubUser + - GitHubToken + - Label: + default: VPC Configuration + Parameters: + - VPC + - Subnets + - VpcCIDR + - Label: + default: ECS Configuration + Parameters: + - InstanceType + - MemoryLimit + - KeyPair + - SpotPrice + - ClusterSize + - Bandwidth + - BandwidthCeiling + - NodeDesiredCount + - NodeTaskName + - ECSAMI + - Label: + default: Mev-Geth Configuration + Parameters: + - Network + - SyncMode + - Connections + - RpcPort + 
- WsPort + - NetPort + - Label: + default: PagerDuty Endpoint Configuration + Parameters: + - SNSSubscriptionEndpoint + - SNSSubscriptionProtocol + - Label: + default: CloudWatch Alarms Configuration + Parameters: + - CPUAlarmThreshold + - MemoryAlarmThreshold + +# Mappings + +Mappings: + + RegionMap: + us-east-2: + mainnet: mev-geth-updater-fast-chainbucket-17p2xhnhcydlz + goerli: mev-geth-updater-fast-goerli-chainbucket-j6dujg8apbna + #us-west-2: + # mainnet: + # goerli: + +Resources: + +# ECS Resources + + Cluster: + Type: AWS::ECS::Cluster + Properties: + ClusterName: !Ref AWS::StackName + + SecurityGroup: + Type: "AWS::EC2::SecurityGroup" + Properties: + GroupDescription: !Sub ${AWS::StackName}-sg + VpcId: !Ref VPC + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref RpcPort + ToPort: !Ref RpcPort + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref WsPort + ToPort: !Ref WsPort + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + + ECSAutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + VPCZoneIdentifier: !Ref Subnets + LaunchConfigurationName: !Ref ECSLaunchConfiguration + MinSize: !Ref ClusterSize + MaxSize: !Ref ClusterSize + DesiredCapacity: !Ref ClusterSize + Tags: + - Key: Name + Value: !Sub ${AWS::StackName} ECS host + PropagateAtLaunch: true + CreationPolicy: + ResourceSignal: + Timeout: PT15M + UpdatePolicy: + AutoScalingRollingUpdate: + MinInstancesInService: 2 + MaxBatchSize: 1 + PauseTime: PT15M + SuspendProcesses: + - HealthCheck + - ReplaceUnhealthy + - AZRebalance + - AlarmNotification + - ScheduledActions + 
WaitOnResourceSignals: true + + ECSLaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ECSAMI + InstanceType: !Ref InstanceType + KeyName: !Ref KeyPair + AssociatePublicIpAddress: True + # Uncomment if you would like to use Spot instances (subject to unexpected termination) + # SpotPrice: !Ref SpotPrice + SecurityGroups: + - !Ref SecurityGroup + IamInstanceProfile: !Ref ECSInstanceProfile + UserData: + "Fn::Base64": !Sub | + #!/bin/bash + yum install -y aws-cfn-bootstrap hibagent rsync awscli + yum update -y + service amazon-ssm-agent restart + + # determine if we have an NVMe SSD attached + find /dev/nvme1 + if [ $? -eq 0 ] + then + mount_point=/var/lib/docker + + # copy existing files from mount point + service docker stop + echo 'DOCKER_STORAGE_OPTIONS="--storage-driver overlay2"' > /etc/sysconfig/docker-storage + mkdir -p /tmp$mount_point + rsync -val $mount_point/ /tmp/$mount_point/ + + # make a new filesystem and mount it + mkfs -t ext4 /dev/nvme1n1 + mkdir -p $mount_point + mount -t ext4 -o noatime /dev/nvme1n1 $mount_point + + # Copy files back to new mount point + rsync -val /tmp/$mount_point/ $mount_point/ + rm -rf /tmp$mount_point + service docker start + + # Make raid appear on reboot + echo >> /etc/fstab + echo "/dev/nvme1n1 $mount_point ext4 noatime 0 0" | tee -a /etc/fstab + fi + + # Set Linux traffic control to limit outbound bandwidth usage of peering + #tc qdisc add dev eth0 root handle 1:0 htb default 1 + #tc class add dev eth0 parent 1:0 classid 1:10 htb rate ${Bandwidth}kbit ceil ${BandwidthCeiling}kbit prio 0 + #tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip dport 30303 0xffff flowid 1:10 + + /opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + /opt/aws/bin/cfn-signal -e $? 
--region ${AWS::Region} --stack ${AWS::StackName} --resource ECSAutoScalingGroup + /usr/bin/enable-ec2-spot-hibernation + + # Attach an EIP from the pool of available EIPs in scope "vpc" + alloc=`aws ec2 describe-addresses --region ${AWS::Region} --output text | grep -v eni | head -1 | cut -f 2` + instanceid=`curl --silent 169.254.169.254/latest/meta-data/instance-id` + aws ec2 associate-address --region ${AWS::Region} --allocation-id $alloc --instance-id $instanceid + echo "ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=5m" >> /etc/ecs/ecs.config + + reboot + + Metadata: + AWS::CloudFormation::Init: + config: + packages: + yum: + awslogs: [] + + commands: + 01_add_instance_to_cluster: + command: !Sub echo ECS_CLUSTER=${Cluster} >> /etc/ecs/ecs.config + files: + "/etc/cfn/cfn-hup.conf": + mode: 000400 + owner: root + group: root + content: !Sub | + [main] + stack=${AWS::StackId} + region=${AWS::Region} + + "/etc/cfn/hooks.d/cfn-auto-reloader.conf": + content: !Sub | + [cfn-auto-reloader-hook] + triggers=post.update + path=Resources.ECSLaunchConfiguration.Metadata.AWS::CloudFormation::Init + action=/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + + services: + sysvinit: + cfn-hup: + enabled: true + ensureRunning: true + files: + - /etc/cfn/cfn-hup.conf + - /etc/cfn/hooks.d/cfn-auto-reloader.conf + + NodeLoadBalancer: + Type: AWS::ElasticLoadBalancingV2::LoadBalancer + Properties: + Name: !Sub ${AWS::StackName}-node-NLB + Type: network + Scheme: internal + Subnets: !Ref Subnets + Tags: + - Key: Name + Value: !Sub ${AWS::StackName}-node-NLB + + NodeTargetGroup: + Type: AWS::ElasticLoadBalancingV2::TargetGroup + DependsOn: NodeLoadBalancer + Properties: + VpcId: !Ref VPC + Port: !Ref RpcPort + Protocol: TCP + TargetGroupAttributes: + - Key: deregistration_delay.timeout_seconds + Value: 120 + + NodeListener: + Type: AWS::ElasticLoadBalancingV2::Listener + Properties: + DefaultActions: + - Type: forward + 
TargetGroupArn: !Ref NodeTargetGroup + LoadBalancerArn: !Ref NodeLoadBalancer + Port: !Ref RpcPort + Protocol: TCP + + NodeWsTargetGroup: + Type: AWS::ElasticLoadBalancingV2::TargetGroup + DependsOn: NodeLoadBalancer + Properties: + VpcId: !Ref VPC + Port: !Ref WsPort + Protocol: TCP + TargetGroupAttributes: + - Key: deregistration_delay.timeout_seconds + Value: 120 + + NodeWsListener: + Type: AWS::ElasticLoadBalancingV2::Listener + Properties: + DefaultActions: + - Type: forward + TargetGroupArn: !Ref NodeWsTargetGroup + LoadBalancerArn: !Ref NodeLoadBalancer + Port: !Ref WsPort + Protocol: TCP + + # This IAM Role is attached to all of the ECS hosts. It is based on the default role + # published here: + # http://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html + # + # You can add other IAM policy statements here to allow access from your ECS hosts + # to other AWS services. + + ECSRole: + Type: AWS::IAM::Role + Properties: + Path: / + RoleName: !Sub ${AWS::StackName}-ECSRole-${AWS::Region} + AssumeRolePolicyDocument: | + { + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + } + }] + } + Policies: + - PolicyName: ecs-service + PolicyDocument: | + { + "Statement": [{ + "Effect": "Allow", + "Action": [ + "ecs:CreateCluster", + "ecs:DeregisterContainerInstance", + "ecs:DiscoverPollEndpoint", + "ecs:Poll", + "ecs:RegisterContainerInstance", + "ecs:StartTelemetrySession", + "ecs:Submit*", + "logs:CreateLogStream", + "logs:PutLogEvents", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken", + "ssm:DescribeAssociation", + "ssm:GetDeployablePatchSnapshotForInstance", + "ssm:GetDocument", + "ssm:GetManifest", + "ssm:GetParameters", + "ssm:ListAssociations", + "ssm:ListInstanceAssociations", + "ssm:PutInventory", + "ssm:PutComplianceItems", + "ssm:PutConfigurePackageResult", + "ssm:PutParameter", + 
"ssm:UpdateAssociationStatus", + "ssm:UpdateInstanceAssociationStatus", + "ssm:UpdateInstanceInformation", + "ec2messages:AcknowledgeMessage", + "ec2messages:DeleteMessage", + "ec2messages:FailMessage", + "ec2messages:GetEndpoint", + "ec2messages:GetMessages", + "ec2messages:SendReply", + "cloudwatch:PutMetricData", + "ec2:DescribeInstanceStatus", + "ds:CreateComputer", + "ds:DescribeDirectories", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "firehose:PutRecord", + "firehose:PutRecordBatch", + "ec2:DescribeAddresses", + "ec2:DescribeInstances", + "ec2:AssociateAddress" + ], + "Resource": "*" + }] + } + + ECSInstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Path: / + Roles: + - !Ref ECSRole + + ECSServiceAutoScalingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + Action: + - 'sts:AssumeRole' + Effect: Allow + Principal: + Service: + - application-autoscaling.amazonaws.com + Path: / + Policies: + - PolicyName: ecs-service-autoscaling + PolicyDocument: + Statement: + Effect: Allow + Action: + - application-autoscaling:* + - cloudwatch:DescribeAlarms + - cloudwatch:PutMetricAlarm + - ecs:DescribeServices + - ecs:UpdateService + Resource: "*" + + NodeTaskExecutionRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + Service: ecs-tasks.amazonaws.com + ManagedPolicyArns: + - arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy + Policies: + - PolicyName: !Sub ecs-task-S3-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - + Effect: Allow + Action: + - "s3:Get*" + - "s3:List*" + 
Resource: + - Fn::Join: + - "" + - + - "arn:aws:s3:::" + - !FindInMap + - RegionMap + - !Ref 'AWS::Region' + - !Ref Network + + NodeLogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: !Sub /ecs/${AWS::StackName}-node + RetentionInDays: 14 + + NodeECSService: + Type: AWS::ECS::Service + DependsOn: NodeListener + Properties: + Cluster: !Ref Cluster + DesiredCount: !Ref NodeDesiredCount + HealthCheckGracePeriodSeconds: 3600 + TaskDefinition: !Ref NodeTaskDefinition + LaunchType: EC2 + DeploymentConfiguration: + MaximumPercent: 150 + MinimumHealthyPercent: 50 + LoadBalancers: + - ContainerName: !Ref NodeTaskName + ContainerPort: !Ref RpcPort + TargetGroupArn: !Ref NodeTargetGroup + - ContainerName: !Ref NodeTaskName + ContainerPort: !Ref WsPort + TargetGroupArn: !Ref NodeWsTargetGroup + + NodeTaskDefinition: + Type: AWS::ECS::TaskDefinition + Properties: + Family: !Ref NodeTaskName + RequiresCompatibilities: + - EC2 + NetworkMode: host + ExecutionRoleArn: !Ref NodeTaskExecutionRole + ContainerDefinitions: + - Name: !Ref NodeTaskName + Image: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${NodeRepository} + Essential: true + MemoryReservation: !Ref MemoryLimit + Environment: + - Name: "region" + Value: !Ref AWS::Region + - Name: "network" + Value: !Ref Network + - Name: "syncmode" + Value: !Ref SyncMode + - Name: "connections" + Value: !Ref Connections + - Name: "rpcport" + Value: !Ref RpcPort + - Name: "wsport" + Value: !Ref WsPort + - Name: "netport" + Value: !Ref NetPort + - Name: "chainbucket" + Value: !FindInMap + - RegionMap + - !Ref 'AWS::Region' + - !Ref Network + - Name: "s3key" + Value: node + PortMappings: + - ContainerPort: !Ref RpcPort + - ContainerPort: !Ref WsPort + - ContainerPort: !Ref NetPort + LogConfiguration: + LogDriver: awslogs + Options: + awslogs-region: !Ref AWS::Region + awslogs-group: !Ref NodeLogGroup + awslogs-stream-prefix: !Ref AWS::StackName + #HealthCheck: + # Command: + # - CMD-SHELL + # - '[ `echo 
"eth.syncing.highestBlock - eth.syncing.currentBlock"|geth attach|head -10|tail -1` -lt 200 ] || exit 1' + # Interval: 300 + # Timeout: 60 + # Retries: 10 + # StartPeriod: 300 + +# CodePipeline Resources + + NodeRepository: + Type: AWS::ECR::Repository + + NodeCodeBuildServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codebuild.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: "*" + Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + - ecr:GetAuthorizationToken + - Resource: !Sub arn:aws:s3:::${NodeArtifactBucket}/* + Effect: Allow + Action: + - s3:GetObject + - s3:PutObject + - s3:GetObjectVersion + - Resource: !Sub arn:aws:ecr:${AWS::Region}:${AWS::AccountId}:repository/${NodeRepository} + Effect: Allow + Action: + - ecr:GetDownloadUrlForLayer + - ecr:BatchGetImage + - ecr:BatchCheckLayerAvailability + - ecr:PutImage + - ecr:InitiateLayerUpload + - ecr:UploadLayerPart + - ecr:CompleteLayerUpload + + NodeCodePipelineServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codepipeline.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: + - !Sub arn:aws:s3:::${NodeArtifactBucket}/* + Effect: Allow + Action: + - s3:PutObject + - s3:GetObject + - s3:GetObjectVersion + - s3:GetBucketVersioning + - Resource: "*" + Effect: Allow + Action: + - ecs:DescribeServices + - ecs:DescribeTaskDefinition + - ecs:DescribeTasks + - ecs:ListTasks + - ecs:RegisterTaskDefinition + - ecs:UpdateService + - codebuild:StartBuild + - codebuild:BatchGetBuilds + - iam:PassRole + + NodeArtifactBucket: + Type: AWS::S3::Bucket + + NodeCodeBuildProject: + Type: 
AWS::CodeBuild::Project + Properties: + Artifacts: + Type: CODEPIPELINE + Source: + Type: CODEPIPELINE + BuildSpec: | + version: 0.2 + phases: + install: + runtime-versions: + docker: 19 + pre_build: + commands: + - $(aws ecr get-login --no-include-email) + - TAG="$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | head -c 8)" + - IMAGE_URI="${REPOSITORY_URI}:${TAG}" + - cp infra/Dockerfile.node ./Dockerfile + build: + commands: + - docker build --tag "$IMAGE_URI" . + - docker build --tag "${REPOSITORY_URI}:latest" . + post_build: + commands: + - docker push "$IMAGE_URI" + - docker push "${REPOSITORY_URI}:latest" + - printf '[{"name":"mev-geth-node","imageUri":"%s"}]' "$IMAGE_URI" > images.json + artifacts: + files: images.json + Environment: + ComputeType: BUILD_GENERAL1_LARGE + Image: aws/codebuild/amazonlinux2-aarch64-standard:1.0 + Type: ARM_CONTAINER + PrivilegedMode: true + EnvironmentVariables: + - Name: AWS_DEFAULT_REGION + Value: !Ref AWS::Region + - Name: REPOSITORY_URI + Value: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${NodeRepository} + Cache: + Type: S3 + Location: !Sub ${NodeArtifactBucket}/buildcache + Name: !Sub ${AWS::StackName}-node + ServiceRole: !Ref NodeCodeBuildServiceRole + + NodePipeline: + Type: AWS::CodePipeline::Pipeline + Properties: + RoleArn: !GetAtt NodeCodePipelineServiceRole.Arn + ArtifactStore: + Type: S3 + Location: !Ref NodeArtifactBucket + Stages: + - Name: Source + Actions: + - Name: App + ActionTypeId: + Category: Source + Owner: ThirdParty + Version: 1 + Provider: GitHub + Configuration: + Owner: !Ref GitHubUser + Repo: !Ref NodeGitHubRepo + Branch: !Ref NodeGitHubBranch + OAuthToken: !Ref GitHubToken + OutputArtifacts: + - Name: App + RunOrder: 1 + - Name: Build + Actions: + - Name: Build + ActionTypeId: + Category: Build + Owner: AWS + Version: 1 + Provider: CodeBuild + Configuration: + ProjectName: !Ref NodeCodeBuildProject + InputArtifacts: + - Name: App + OutputArtifacts: + - Name: BuildOutput + RunOrder: 1 + 
- Name: Deploy + Actions: + - Name: Deploy + ActionTypeId: + Category: Deploy + Owner: AWS + Version: 1 + Provider: ECS + Configuration: + ClusterName: !Ref Cluster + ServiceName: !Ref NodeECSService + FileName: images.json + InputArtifacts: + - Name: BuildOutput + RunOrder: 1 + +# SNS Resources + + SNSTopic: + Type: AWS::SNS::Topic + Properties: + DisplayName: String + Subscription: + - + Endpoint: !Ref SNSSubscriptionEndpoint + Protocol: !Ref SNSSubscriptionProtocol + TopicName: !Ref AWS::StackName + +# CloudWatch Resources + + CPUAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub ${AWS::StackName} average CPU utilization greater than threshold. + AlarmDescription: Alarm if CPU utilization is greater than threshold. + Namespace: AWS/ECS + MetricName: CPUUtilization + Dimensions: + - Name: ClusterName + Value: !Ref Cluster + Statistic: Average + Period: '60' + EvaluationPeriods: '3' + Threshold: !Ref CPUAlarmThreshold + ComparisonOperator: GreaterThanThreshold + AlarmActions: + - Ref: SNSTopic + OKActions: + - Ref: SNSTopic + + MemoryAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub ${AWS::StackName} average memory utilization greater than threshold. + AlarmDescription: Alarm if memory utilization is greater than threshold. + Namespace: AWS/ECS + MetricName: MemoryUtilization + Dimensions: + - Name: ClusterName + Value: !Ref Cluster + Statistic: Average + Period: '60' + EvaluationPeriods: '3' + Threshold: !Ref MemoryAlarmThreshold + ComparisonOperator: GreaterThanThreshold + AlarmActions: + - Ref: SNSTopic + OKActions: + - Ref: SNSTopic + + HealthyHostAlarm: + Type: 'AWS::CloudWatch::Alarm' + Properties: + AlarmName: !Sub ${AWS::StackName} alarm no healthy hosts connected to ELB. + AlarmDescription: Alarm if no healthy hosts connected to ELB. 
+ MetricName: HealthyHostCount + Namespace: AWS/NetworkELB + Statistic: Average + Period: '60' + EvaluationPeriods: '3' + Threshold: '1' + ComparisonOperator: LessThanThreshold + Dimensions: + - Name: TargetGroup + Value: !GetAtt NodeTargetGroup.TargetGroupFullName + - Name: LoadBalancer + Value: !GetAtt NodeLoadBalancer.LoadBalancerFullName + AlarmActions: + - Ref: SNSTopic + OKActions: + - Ref: SNSTopic + +Outputs: + ClusterName: + Value: !Ref Cluster + NodeService: + Value: !Ref NodeECSService + NodePipelineUrl: + Value: !Sub https://console.aws.amazon.com/codepipeline/home?region=${AWS::Region}#/view/${NodePipeline} + NodeTargetGroup: + Value: !Ref NodeTargetGroup + NodeServiceUrl: + Description: URL of the load balancer for the node service. + Value: !Sub http://${NodeLoadBalancer.DNSName} diff --git a/infra/mev-geth-nodes-x86-64.yaml b/infra/mev-geth-nodes-x86-64.yaml new file mode 100644 index 000000000000..bf7a196caa52 --- /dev/null +++ b/infra/mev-geth-nodes-x86-64.yaml @@ -0,0 +1,972 @@ +--- +AWSTemplateFormatVersion: 2010-09-09 + +Description: > + This template creates an automated continuous deployment pipeline to Amazon Elastic Container Service (ECS) + Created by Luke Youngblood, luke@blockscale.net + +Parameters: + # GitHub Parameters + + GitHubUser: + Type: String + Default: lyoungblood + Description: Your team or username on GitHub. + + NodeGitHubRepo: + Type: String + Default: mev-geth + Description: The repo name of the node service. + + NodeGitHubBranch: + Type: String + Default: master + Description: The branch of the node repo to continuously deploy. + + GitHubToken: + Type: String + NoEcho: true + Description: > + Token for the team or user specified above. 
(https://github.com/settings/tokens) + + # VPC Parameters + + VPC: + Type: AWS::EC2::VPC::Id + + Subnets: + Type: List<AWS::EC2::Subnet::Id> + + VpcCIDR: + Type: String + Default: 172.31.0.0/16 + + # ECS Parameters + + InstanceType: + Type: String + Default: i3en.large + + MemoryLimit: + Type: Number + Default: 6144 + + KeyPair: + Type: AWS::EC2::KeyPair::KeyName + + SpotPrice: + Type: Number + Default: 0.0904 + + ClusterSize: + Type: Number + Default: 5 + + Bandwidth: + Type: Number + Default: 2048 + + BandwidthCeiling: + Type: Number + Default: 4096 + + NodeDesiredCount: + Type: Number + Default: 0 + + NodeTaskName: + Type: String + Default: mev-geth-node + + ECSAMI: + Type: AWS::SSM::Parameter::Value<AWS::EC2::Image::Id> + Default: /aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id + + # SNS Parameters + + SNSSubscriptionEndpoint: + Type: String + Default: https://events.pagerduty.com/integration/44cbdb66f22b4f3caf5dd15741c7eb17/enqueue + + SNSSubscriptionProtocol: + Type: String + Default: HTTPS + + # CloudWatch Alarm Parameters + + CPUAlarmThreshold: + Type: Number + Default: 80 + + MemoryAlarmThreshold: + Type: Number + Default: 80 + + # Mev-Geth Parameters + + Network: + Type: String + Default: mainnet + AllowedValues: + - mainnet + - goerli + + SyncMode: + Type: String + Default: fast + AllowedValues: + - full + - fast + - light + + Connections: + Type: Number + Default: 50 + + RpcPort: + Type: Number + Default: 8545 + + WsPort: + Type: Number + Default: 8546 + + NetPort: + Type: Number + Default: 30303 + +Metadata: + AWS::CloudFormation::Interface: + ParameterLabels: + GitHubUser: + default: "User" + NodeGitHubRepo: + default: "Node Repo" + NodeGitHubBranch: + default: "Node Branch" + GitHubToken: + default: "Personal Access Token" + VPC: + default: "Choose which VPC the autoscaling group should be deployed to" + Subnets: + default: "Choose which subnets the autoscaling group should be deployed to" + VpcCIDR: + default: "VPC CIDR Block" + InstanceType: + default: "Which instance 
type should we use to build the ECS cluster?" + MemoryLimit: + default: "How much memory should be reserved for each task. Set to greater than 50% of instance memory capacity." + KeyPair: + default: "Which keypair should be used to allow SSH to the nodes?" + ClusterSize: + default: "How many ECS hosts do you want to initially deploy?" + SpotPrice: + default: "The maximum spot price to pay for instances - this should normally be set to the on demand price." + Bandwidth: + default: "How much bandwidth, in kb/sec., should be allocated to Ethereum peers (upload) per EC2 instance" + BandwidthCeiling: + default: "How much bandwidth, in kb/sec., should be allocated to Ethereum peers as a ceiling (max. upload)" + NodeDesiredCount: + default: "How many ECS Tasks do you want to initially execute?" + NodeTaskName: + default: "The name of the node ECS Task" + ECSAMI: + default: "The ECS AMI ID populated from SSM." + Network: + default: "The Ethereum network you will be connecting to" + SyncMode: + default: "The synchronization mode that Mev-Geth should use (full, fast, or light)" + Connections: + default: "The number of desired connections on the Mev-Geth node" + RpcPort: + default: "The RPC port used for communication with the local Mev-Geth node" + WsPort: + default: "The Websockets port used for communication with the local Mev-Geth node" + NetPort: + default: "The TCP port used for connectivity to other Ethereum peer nodes" + ParameterGroups: + - Label: + default: GitHub Configuration + Parameters: + - NodeGitHubRepo + - NodeGitHubBranch + - GitHubUser + - GitHubToken + - Label: + default: VPC Configuration + Parameters: + - VPC + - Subnets + - VpcCIDR + - Label: + default: ECS Configuration + Parameters: + - InstanceType + - MemoryLimit + - KeyPair + - SpotPrice + - ClusterSize + - Bandwidth + - BandwidthCeiling + - NodeDesiredCount + - NodeTaskName + - ECSAMI + - Label: + default: Mev-Geth Configuration + Parameters: + - Network + - SyncMode + - Connections + - RpcPort + 
- WsPort + - NetPort + - Label: + default: PagerDuty Endpoint Configuration + Parameters: + - SNSSubscriptionEndpoint + - SNSSubscriptionProtocol + - Label: + default: CloudWatch Alarms Configuration + Parameters: + - CPUAlarmThreshold + - MemoryAlarmThreshold + +# Mappings + +Mappings: + RegionMap: + us-east-2: + mainnet: mev-geth-updater-fast-chainbucket-17p2xhnhcydlz + goerli: mev-geth-updater-fast-goerli-chainbucket-j6dujg8apbna + #us-west-2: + # mainnet: + # goerli: + +Resources: + # ECS Resources + + Cluster: + Type: AWS::ECS::Cluster + Properties: + ClusterName: !Ref AWS::StackName + + SecurityGroup: + Type: "AWS::EC2::SecurityGroup" + Properties: + GroupDescription: !Sub ${AWS::StackName}-sg + VpcId: !Ref VPC + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref RpcPort + ToPort: !Ref RpcPort + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref WsPort + ToPort: !Ref WsPort + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + + ECSAutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + VPCZoneIdentifier: !Ref Subnets + LaunchConfigurationName: !Ref ECSLaunchConfiguration + MinSize: !Ref ClusterSize + MaxSize: !Ref ClusterSize + DesiredCapacity: !Ref ClusterSize + Tags: + - Key: Name + Value: !Sub ${AWS::StackName} ECS host + PropagateAtLaunch: true + CreationPolicy: + ResourceSignal: + Timeout: PT15M + UpdatePolicy: + AutoScalingRollingUpdate: + MinInstancesInService: 2 + MaxBatchSize: 1 + PauseTime: PT15M + SuspendProcesses: + - HealthCheck + - ReplaceUnhealthy + - AZRebalance + - AlarmNotification + - ScheduledActions + 
WaitOnResourceSignals: true + + ECSLaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ECSAMI + InstanceType: !Ref InstanceType + KeyName: !Ref KeyPair + AssociatePublicIpAddress: True + # Uncomment if you would like to use Spot instances (subject to unexpected termination) + # SpotPrice: !Ref SpotPrice + SecurityGroups: + - !Ref SecurityGroup + IamInstanceProfile: !Ref ECSInstanceProfile + UserData: + "Fn::Base64": !Sub | + #!/bin/bash + yum install -y aws-cfn-bootstrap hibagent rsync awscli + yum update -y + service amazon-ssm-agent restart + + # determine if we have an NVMe SSD attached + find /dev/nvme1 + if [ $? -eq 0 ] + then + mount_point=/var/lib/docker + + # copy existing files from mount point + service docker stop + echo 'DOCKER_STORAGE_OPTIONS="--storage-driver overlay2"' > /etc/sysconfig/docker-storage + mkdir -p /tmp$mount_point + rsync -val $mount_point/ /tmp/$mount_point/ + + # make a new filesystem and mount it + mkfs -t ext4 /dev/nvme1n1 + mkdir -p $mount_point + mount -t ext4 -o noatime /dev/nvme1n1 $mount_point + + # Copy files back to new mount point + rsync -val /tmp/$mount_point/ $mount_point/ + rm -rf /tmp$mount_point + service docker start + + # Make raid appear on reboot + echo >> /etc/fstab + echo "/dev/nvme1n1 $mount_point ext4 noatime 0 0" | tee -a /etc/fstab + fi + + # Set Linux traffic control to limit outbound bandwidth usage of peering + #tc qdisc add dev eth0 root handle 1:0 htb default 1 + #tc class add dev eth0 parent 1:0 classid 1:10 htb rate ${Bandwidth}kbit ceil ${BandwidthCeiling}kbit prio 0 + #tc filter add dev eth0 protocol ip parent 1:0 prio 1 u32 match ip dport 30303 0xffff flowid 1:10 + + /opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + /opt/aws/bin/cfn-signal -e $? 
--region ${AWS::Region} --stack ${AWS::StackName} --resource ECSAutoScalingGroup + /usr/bin/enable-ec2-spot-hibernation + + # Attach an EIP from the pool of available EIPs in scope "vpc" + alloc=`aws ec2 describe-addresses --region ${AWS::Region} --output text | grep -v eni | head -1 | cut -f 2` + instanceid=`curl --silent 169.254.169.254/latest/meta-data/instance-id` + aws ec2 associate-address --region ${AWS::Region} --allocation-id $alloc --instance-id $instanceid + echo "ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=5m" >> /etc/ecs/ecs.config + + reboot + + Metadata: + AWS::CloudFormation::Init: + config: + packages: + yum: + awslogs: [] + + commands: + 01_add_instance_to_cluster: + command: !Sub echo ECS_CLUSTER=${Cluster} >> /etc/ecs/ecs.config + files: + "/etc/cfn/cfn-hup.conf": + mode: 000400 + owner: root + group: root + content: !Sub | + [main] + stack=${AWS::StackId} + region=${AWS::Region} + + "/etc/cfn/hooks.d/cfn-auto-reloader.conf": + content: !Sub | + [cfn-auto-reloader-hook] + triggers=post.update + path=Resources.ECSLaunchConfiguration.Metadata.AWS::CloudFormation::Init + action=/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + + services: + sysvinit: + cfn-hup: + enabled: true + ensureRunning: true + files: + - /etc/cfn/cfn-hup.conf + - /etc/cfn/hooks.d/cfn-auto-reloader.conf + + NodeLoadBalancer: + Type: AWS::ElasticLoadBalancingV2::LoadBalancer + Properties: + Name: !Sub ${AWS::StackName}-node-NLB + Type: network + Scheme: internal + Subnets: !Ref Subnets + Tags: + - Key: Name + Value: !Sub ${AWS::StackName}-node-NLB + + NodeTargetGroup: + Type: AWS::ElasticLoadBalancingV2::TargetGroup + DependsOn: NodeLoadBalancer + Properties: + VpcId: !Ref VPC + Port: !Ref RpcPort + Protocol: TCP + TargetGroupAttributes: + - Key: deregistration_delay.timeout_seconds + Value: 120 + + NodeListener: + Type: AWS::ElasticLoadBalancingV2::Listener + Properties: + DefaultActions: + - Type: forward + 
TargetGroupArn: !Ref NodeTargetGroup + LoadBalancerArn: !Ref NodeLoadBalancer + Port: !Ref RpcPort + Protocol: TCP + + NodeWsTargetGroup: + Type: AWS::ElasticLoadBalancingV2::TargetGroup + DependsOn: NodeLoadBalancer + Properties: + VpcId: !Ref VPC + Port: !Ref WsPort + Protocol: TCP + TargetGroupAttributes: + - Key: deregistration_delay.timeout_seconds + Value: 120 + + NodeWsListener: + Type: AWS::ElasticLoadBalancingV2::Listener + Properties: + DefaultActions: + - Type: forward + TargetGroupArn: !Ref NodeWsTargetGroup + LoadBalancerArn: !Ref NodeLoadBalancer + Port: !Ref WsPort + Protocol: TCP + + # This IAM Role is attached to all of the ECS hosts. It is based on the default role + # published here: + # http://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html + # + # You can add other IAM policy statements here to allow access from your ECS hosts + # to other AWS services. + + ECSRole: + Type: AWS::IAM::Role + Properties: + Path: / + RoleName: !Sub ${AWS::StackName}-ECSRole-${AWS::Region} + AssumeRolePolicyDocument: | + { + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + } + }] + } + Policies: + - PolicyName: ecs-service + PolicyDocument: | + { + "Statement": [{ + "Effect": "Allow", + "Action": [ + "ecs:CreateCluster", + "ecs:DeregisterContainerInstance", + "ecs:DiscoverPollEndpoint", + "ecs:Poll", + "ecs:RegisterContainerInstance", + "ecs:StartTelemetrySession", + "ecs:Submit*", + "logs:CreateLogStream", + "logs:PutLogEvents", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken", + "ssm:DescribeAssociation", + "ssm:GetDeployablePatchSnapshotForInstance", + "ssm:GetDocument", + "ssm:GetManifest", + "ssm:GetParameters", + "ssm:ListAssociations", + "ssm:ListInstanceAssociations", + "ssm:PutInventory", + "ssm:PutComplianceItems", + "ssm:PutConfigurePackageResult", + "ssm:PutParameter", + 
"ssm:UpdateAssociationStatus", + "ssm:UpdateInstanceAssociationStatus", + "ssm:UpdateInstanceInformation", + "ec2messages:AcknowledgeMessage", + "ec2messages:DeleteMessage", + "ec2messages:FailMessage", + "ec2messages:GetEndpoint", + "ec2messages:GetMessages", + "ec2messages:SendReply", + "cloudwatch:PutMetricData", + "ec2:DescribeInstanceStatus", + "ds:CreateComputer", + "ds:DescribeDirectories", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:AbortMultipartUpload", + "s3:ListMultipartUploadParts", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "firehose:PutRecord", + "firehose:PutRecordBatch", + "ec2:DescribeAddresses", + "ec2:DescribeInstances", + "ec2:AssociateAddress" + ], + "Resource": "*" + }] + } + + ECSInstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Path: / + Roles: + - !Ref ECSRole + + ECSServiceAutoScalingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + Action: + - "sts:AssumeRole" + Effect: Allow + Principal: + Service: + - application-autoscaling.amazonaws.com + Path: / + Policies: + - PolicyName: ecs-service-autoscaling + PolicyDocument: + Statement: + Effect: Allow + Action: + - application-autoscaling:* + - cloudwatch:DescribeAlarms + - cloudwatch:PutMetricAlarm + - ecs:DescribeServices + - ecs:UpdateService + Resource: "*" + + NodeTaskExecutionRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + Service: ecs-tasks.amazonaws.com + ManagedPolicyArns: + - arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy + Policies: + - PolicyName: !Sub ecs-task-S3-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - "s3:Get*" + - "s3:List*" + 
Resource: + - Fn::Join: + - "" + - - "arn:aws:s3:::" + - !FindInMap + - RegionMap + - !Ref "AWS::Region" + - !Ref Network + + NodeLogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: !Sub /ecs/${AWS::StackName}-node + RetentionInDays: 14 + + NodeECSService: + Type: AWS::ECS::Service + DependsOn: NodeListener + Properties: + Cluster: !Ref Cluster + DesiredCount: !Ref NodeDesiredCount + HealthCheckGracePeriodSeconds: 3600 + TaskDefinition: !Ref NodeTaskDefinition + LaunchType: EC2 + DeploymentConfiguration: + MaximumPercent: 150 + MinimumHealthyPercent: 50 + LoadBalancers: + - ContainerName: !Ref NodeTaskName + ContainerPort: !Ref RpcPort + TargetGroupArn: !Ref NodeTargetGroup + - ContainerName: !Ref NodeTaskName + ContainerPort: !Ref WsPort + TargetGroupArn: !Ref NodeWsTargetGroup + + NodeTaskDefinition: + Type: AWS::ECS::TaskDefinition + Properties: + Family: !Ref NodeTaskName + RequiresCompatibilities: + - EC2 + NetworkMode: host + ExecutionRoleArn: !Ref NodeTaskExecutionRole + ContainerDefinitions: + - Name: !Ref NodeTaskName + Image: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${NodeRepository} + Essential: true + MemoryReservation: !Ref MemoryLimit + Environment: + - Name: "region" + Value: !Ref AWS::Region + - Name: "network" + Value: !Ref Network + - Name: "syncmode" + Value: !Ref SyncMode + - Name: "connections" + Value: !Ref Connections + - Name: "rpcport" + Value: !Ref RpcPort + - Name: "wsport" + Value: !Ref WsPort + - Name: "netport" + Value: !Ref NetPort + - Name: "chainbucket" + Value: !FindInMap + - RegionMap + - !Ref "AWS::Region" + - !Ref Network + - Name: "s3key" + Value: node + PortMappings: + - ContainerPort: !Ref RpcPort + - ContainerPort: !Ref WsPort + - ContainerPort: !Ref NetPort + LogConfiguration: + LogDriver: awslogs + Options: + awslogs-region: !Ref AWS::Region + awslogs-group: !Ref NodeLogGroup + awslogs-stream-prefix: !Ref AWS::StackName + #HealthCheck: + # Command: + # - CMD-SHELL + # - '[ `echo 
"eth.syncing.highestBlock - eth.syncing.currentBlock"|geth attach|head -10|tail -1` -lt 200 ] || exit 1' + # Interval: 300 + # Timeout: 60 + # Retries: 10 + # StartPeriod: 300 + + # CodePipeline Resources + + NodeRepository: + Type: AWS::ECR::Repository + + NodeCodeBuildServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codebuild.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: "*" + Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + - ecr:GetAuthorizationToken + - Resource: !Sub arn:aws:s3:::${NodeArtifactBucket}/* + Effect: Allow + Action: + - s3:GetObject + - s3:PutObject + - s3:GetObjectVersion + - Resource: !Sub arn:aws:ecr:${AWS::Region}:${AWS::AccountId}:repository/${NodeRepository} + Effect: Allow + Action: + - ecr:GetDownloadUrlForLayer + - ecr:BatchGetImage + - ecr:BatchCheckLayerAvailability + - ecr:PutImage + - ecr:InitiateLayerUpload + - ecr:UploadLayerPart + - ecr:CompleteLayerUpload + + NodeCodePipelineServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codepipeline.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: + - !Sub arn:aws:s3:::${NodeArtifactBucket}/* + Effect: Allow + Action: + - s3:PutObject + - s3:GetObject + - s3:GetObjectVersion + - s3:GetBucketVersioning + - Resource: "*" + Effect: Allow + Action: + - ecs:DescribeServices + - ecs:DescribeTaskDefinition + - ecs:DescribeTasks + - ecs:ListTasks + - ecs:RegisterTaskDefinition + - ecs:UpdateService + - codebuild:StartBuild + - codebuild:BatchGetBuilds + - iam:PassRole + + NodeArtifactBucket: + Type: AWS::S3::Bucket + + NodeCodeBuildProject: + 
Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: CODEPIPELINE + Source: + Type: CODEPIPELINE + BuildSpec: | + version: 0.2 + phases: + install: + runtime-versions: + docker: 19 + pre_build: + commands: + - $(aws ecr get-login --no-include-email) + - TAG="$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | head -c 8)" + - IMAGE_URI="${REPOSITORY_URI}:${TAG}" + - cp infra/Dockerfile.node ./Dockerfile + build: + commands: + - docker build --tag "$IMAGE_URI" . + - docker build --tag "${REPOSITORY_URI}:latest" . + post_build: + commands: + - docker push "$IMAGE_URI" + - docker push "${REPOSITORY_URI}:latest" + - printf '[{"name":"mev-geth-node","imageUri":"%s"}]' "$IMAGE_URI" > images.json + artifacts: + files: images.json + Environment: + ComputeType: BUILD_GENERAL1_SMALL + Image: aws/codebuild/docker:17.09.0 + Type: LINUX_CONTAINER + PrivilegedMode: true + EnvironmentVariables: + - Name: AWS_DEFAULT_REGION + Value: !Ref AWS::Region + - Name: REPOSITORY_URI + Value: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${NodeRepository} + Cache: + Type: S3 + Location: !Sub ${NodeArtifactBucket}/buildcache + Name: !Sub ${AWS::StackName}-node + ServiceRole: !Ref NodeCodeBuildServiceRole + + NodePipeline: + Type: AWS::CodePipeline::Pipeline + Properties: + RoleArn: !GetAtt NodeCodePipelineServiceRole.Arn + ArtifactStore: + Type: S3 + Location: !Ref NodeArtifactBucket + Stages: + - Name: Source + Actions: + - Name: App + ActionTypeId: + Category: Source + Owner: ThirdParty + Version: 1 + Provider: GitHub + Configuration: + Owner: !Ref GitHubUser + Repo: !Ref NodeGitHubRepo + Branch: !Ref NodeGitHubBranch + OAuthToken: !Ref GitHubToken + OutputArtifacts: + - Name: App + RunOrder: 1 + - Name: Build + Actions: + - Name: Build + ActionTypeId: + Category: Build + Owner: AWS + Version: 1 + Provider: CodeBuild + Configuration: + ProjectName: !Ref NodeCodeBuildProject + InputArtifacts: + - Name: App + OutputArtifacts: + - Name: BuildOutput + RunOrder: 1 + - Name: 
Deploy + Actions: + - Name: Deploy + ActionTypeId: + Category: Deploy + Owner: AWS + Version: 1 + Provider: ECS + Configuration: + ClusterName: !Ref Cluster + ServiceName: !Ref NodeECSService + FileName: images.json + InputArtifacts: + - Name: BuildOutput + RunOrder: 1 + + # SNS Resources + + SNSTopic: + Type: AWS::SNS::Topic + Properties: + DisplayName: String + Subscription: + - Endpoint: !Ref SNSSubscriptionEndpoint + Protocol: !Ref SNSSubscriptionProtocol + TopicName: !Ref AWS::StackName + + # CloudWatch Resources + + CPUAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub ${AWS::StackName} average CPU utilization greater than threshold. + AlarmDescription: Alarm if CPU utilization is greater than threshold. + Namespace: AWS/ECS + MetricName: CPUUtilization + Dimensions: + - Name: ClusterName + Value: !Ref Cluster + Statistic: Average + Period: "60" + EvaluationPeriods: "3" + Threshold: !Ref CPUAlarmThreshold + ComparisonOperator: GreaterThanThreshold + AlarmActions: + - Ref: SNSTopic + OKActions: + - Ref: SNSTopic + + MemoryAlarm: + Type: AWS::CloudWatch::Alarm + Properties: + AlarmName: !Sub ${AWS::StackName} average memory utilization greater than threshold. + AlarmDescription: Alarm if memory utilization is greater than threshold. + Namespace: AWS/ECS + MetricName: MemoryUtilization + Dimensions: + - Name: ClusterName + Value: !Ref Cluster + Statistic: Average + Period: "60" + EvaluationPeriods: "3" + Threshold: !Ref MemoryAlarmThreshold + ComparisonOperator: GreaterThanThreshold + AlarmActions: + - Ref: SNSTopic + OKActions: + - Ref: SNSTopic + + HealthyHostAlarm: + Type: "AWS::CloudWatch::Alarm" + Properties: + AlarmName: !Sub ${AWS::StackName} alarm no healthy hosts connected to ELB. + AlarmDescription: Alarm if no healthy hosts connected to ELB. 
+ MetricName: HealthyHostCount + Namespace: AWS/NetworkELB + Statistic: Average + Period: "60" + EvaluationPeriods: "3" + Threshold: "1" + ComparisonOperator: LessThanThreshold + Dimensions: + - Name: TargetGroup + Value: !GetAtt NodeTargetGroup.TargetGroupFullName + - Name: LoadBalancer + Value: !GetAtt NodeLoadBalancer.LoadBalancerFullName + AlarmActions: + - Ref: SNSTopic + OKActions: + - Ref: SNSTopic + +Outputs: + ClusterName: + Value: !Ref Cluster + NodeService: + Value: !Ref NodeECSService + NodePipelineUrl: + Value: !Sub https://console.aws.amazon.com/codepipeline/home?region=${AWS::Region}#/view/${NodePipeline} + NodeTargetGroup: + Value: !Ref NodeTargetGroup + NodeServiceUrl: + Description: URL of the load balancer for the node service. + Value: !Sub http://${NodeLoadBalancer.DNSName} diff --git a/infra/mev-geth-updater-arm64.yaml b/infra/mev-geth-updater-arm64.yaml new file mode 100644 index 000000000000..ad81ece1b034 --- /dev/null +++ b/infra/mev-geth-updater-arm64.yaml @@ -0,0 +1,749 @@ +--- +AWSTemplateFormatVersion: 2010-09-09 + +Description: > + This template creates an automated continuous deployment pipeline to Amazon Elastic Container Service (ECS) + Created by Luke Youngblood, luke@blockscale.net + +Parameters: + +# GitHub Parameters + + GitHubUser: + Type: String + Default: lyoungblood + Description: Your team or username on GitHub. + + GitHubRepo: + Type: String + Default: mev-geth + Description: The repo name of the baker service. + + GitHubBranch: + Type: String + Default: master + Description: The branch of the repo to continuously deploy. + + GitHubToken: + Type: String + NoEcho: true + Description: > + Token for the team or user specified above. 
(https://github.com/settings/tokens) + +# VPC Parameters + + VPC: + Type: AWS::EC2::VPC::Id + + Subnets: + Type: List + + VpcCIDR: + Type: String + Default: 172.31.0.0/16 + +# ECS Parameters + + InstanceType: + Type: String + Default: m6gd.large + + KeyPair: + Type: AWS::EC2::KeyPair::KeyName + + ClusterSize: + Type: Number + Default: 1 + + DesiredCount: + Type: Number + Default: 0 + + TaskName: + Type: String + Default: mev-geth-updater + + ECSAMI: + Type: AWS::SSM::Parameter::Value + Default: /aws/service/ecs/optimized-ami/amazon-linux-2/arm64/recommended/image_id + +# Mev-Geth Parameters + + Network: + Type: String + Default: mainnet + AllowedValues: + - mainnet + - goerli + + SyncMode: + Type: String + Default: fast + AllowedValues: + - full + - fast + - light + + Connections: + Type: Number + Default: 50 + + NetPort: + Type: Number + Default: 30303 + +Metadata: + + AWS::CloudFormation::Interface: + ParameterLabels: + GitHubUser: + default: "User" + GitHubRepo: + default: "Mev-Geth GitHub Repository" + GitHubBranch: + default: "Branch in GitHub repository" + GitHubToken: + default: "Personal Access Token" + VPC: + default: "Choose which VPC the autoscaling group should be deployed to" + Subnets: + default: "Choose which subnets the autoscaling group should be deployed to" + VpcCIDR: + default: "VPC CIDR Block" + InstanceType: + default: "Which instance type should we use to build the ECS cluster?" + KeyPair: + default: "Which keypair should be used for access to the ECS cluster?" + ClusterSize: + default: "How many ECS hosts do you want to initially deploy?" + DesiredCount: + default: "How many Updater tasks do you want to initially execute?" + TaskName: + default: "The name of the Updater ECS Task" + ECSAMI: + default: "The ECS AMI ID populated from SSM." 
+ Network: + default: "The network the Mev-Geth node should join" + SyncMode: + default: "The synchronization mode that Mev-Geth should use (full, fast, or light)" + Connections: + default: "The number of connections the Mev-Geth node should be configured with" + NetPort: + default: "The TCP/UDP port used for Mev-Geth connectivity to other Ethereum peer nodes" + ParameterGroups: + - Label: + default: GitHub Configuration + Parameters: + - GitHubRepo + - GitHubBranch + - GitHubUser + - GitHubToken + - Label: + default: VPC Configuration + Parameters: + - VPC + - Subnets + - VpcCIDR + - Label: + default: ECS Configuration + Parameters: + - InstanceType + - KeyPair + - ClusterSize + - DesiredCount + - TaskName + - ECSAMI + - Label: + default: Mev-Geth Configuration + Parameters: + - Network + - SyncMode + - Connections + - NetPort + +Resources: + +# ECS Resources + + ChainBucket: + Type: AWS::S3::Bucket + + ChainBucketPolicy: + Type: AWS::S3::BucketPolicy + Properties: + Bucket: !Ref ChainBucket + PolicyDocument: + Statement: + - + Action: + - s3:GetObject + - s3:ListBucket + Effect: Allow + Resource: + - Fn::Join: + - "" + - + - "arn:aws:s3:::" + - + Ref: "ChainBucket" + - "/*" + - Fn::Join: + - "" + - + - "arn:aws:s3:::" + - + Ref: "ChainBucket" + Principal: + AWS: "*" + + Cluster: + Type: AWS::ECS::Cluster + Properties: + ClusterName: !Ref AWS::StackName + + SecurityGroup: + Type: "AWS::EC2::SecurityGroup" + Properties: + GroupDescription: !Sub ${AWS::StackName}-sg + VpcId: !Ref VPC + Tags: + - + Key: Name + Value: !Sub ${AWS::StackName}-sg + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + 
CidrIpv6: ::/0 + + ECSAutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + VPCZoneIdentifier: !Ref Subnets + LaunchConfigurationName: !Ref ECSLaunchConfiguration + MinSize: !Ref ClusterSize + MaxSize: !Ref ClusterSize + DesiredCapacity: !Ref ClusterSize + Tags: + - Key: Name + Value: !Sub ${AWS::StackName} ECS host + PropagateAtLaunch: true + CreationPolicy: + ResourceSignal: + Timeout: PT15M + UpdatePolicy: + AutoScalingRollingUpdate: + MinInstancesInService: 0 + MaxBatchSize: 1 + PauseTime: PT15M + SuspendProcesses: + - HealthCheck + - ReplaceUnhealthy + - AZRebalance + - AlarmNotification + - ScheduledActions + WaitOnResourceSignals: true + + ECSLaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ECSAMI + InstanceType: !Ref InstanceType + KeyName: !Ref KeyPair + SecurityGroups: + - !Ref SecurityGroup + IamInstanceProfile: !Ref ECSInstanceProfile + UserData: + "Fn::Base64": !Sub | + #!/bin/bash + yum install -y aws-cfn-bootstrap hibagent rsync awscli + yum update -y + service amazon-ssm-agent restart + + # determine if we have an NVMe SSD attached + find /dev/nvme1 + if [ $? -eq 0 ] + then + mount_point=/var/lib/docker + + # copy existing files from mount point + service docker stop + echo 'DOCKER_STORAGE_OPTIONS="--storage-driver overlay2"' > /etc/sysconfig/docker-storage + mkdir -p /tmp$mount_point + rsync -val $mount_point/ /tmp/$mount_point/ + + # make a new filesystem and mount it + mkfs -t ext4 /dev/nvme1n1 + mkdir -p $mount_point + mount -t ext4 -o noatime /dev/nvme1n1 $mount_point + + # Copy files back to new mount point + rsync -val /tmp/$mount_point/ $mount_point/ + rm -rf /tmp$mount_point + service docker start + + # Make raid appear on reboot + echo >> /etc/fstab + echo "/dev/nvme1n1 $mount_point ext4 noatime 0 0" | tee -a /etc/fstab + fi + + /opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + /opt/aws/bin/cfn-signal -e $? 
--region ${AWS::Region} --stack ${AWS::StackName} --resource ECSAutoScalingGroup + /usr/bin/enable-ec2-spot-hibernation + echo "ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=5m" >> /etc/ecs/ecs.config + + reboot + + Metadata: + AWS::CloudFormation::Init: + config: + packages: + yum: + awslogs: [] + + commands: + 01_add_instance_to_cluster: + command: !Sub echo ECS_CLUSTER=${Cluster} >> /etc/ecs/ecs.config + files: + "/etc/cfn/cfn-hup.conf": + mode: 000400 + owner: root + group: root + content: !Sub | + [main] + stack=${AWS::StackId} + region=${AWS::Region} + + "/etc/cfn/hooks.d/cfn-auto-reloader.conf": + content: !Sub | + [cfn-auto-reloader-hook] + triggers=post.update + path=Resources.ECSLaunchConfiguration.Metadata.AWS::CloudFormation::Init + action=/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + + "/etc/awslogs/awscli.conf": + content: !Sub | + [plugins] + cwlogs = cwlogs + [default] + region = ${AWS::Region} + + services: + sysvinit: + cfn-hup: + enabled: true + ensureRunning: true + files: + - /etc/cfn/cfn-hup.conf + - /etc/cfn/hooks.d/cfn-auto-reloader.conf + + # This IAM Role is attached to all of the ECS hosts. It is based on the default role + # published here: + # http://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html + # + # You can add other IAM policy statements here to allow access from your ECS hosts + # to other AWS services. 
+ + ECSRole: + Type: AWS::IAM::Role + Properties: + Path: / + RoleName: !Sub ${AWS::StackName}-ECSRole-${AWS::Region} + AssumeRolePolicyDocument: | + { + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + } + }] + } + Policies: + - PolicyName: ecs-service + PolicyDocument: | + { + "Statement": [{ + "Effect": "Allow", + "Action": [ + "ecs:CreateCluster", + "ecs:DeregisterContainerInstance", + "ecs:DiscoverPollEndpoint", + "ecs:Poll", + "ecs:RegisterContainerInstance", + "ecs:StartTelemetrySession", + "ecs:Submit*", + "logs:CreateLogStream", + "logs:PutLogEvents", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken", + "ssm:DescribeAssociation", + "ssm:GetDeployablePatchSnapshotForInstance", + "ssm:GetDocument", + "ssm:GetManifest", + "ssm:GetParameters", + "ssm:ListAssociations", + "ssm:ListInstanceAssociations", + "ssm:PutInventory", + "ssm:PutComplianceItems", + "ssm:PutConfigurePackageResult", + "ssm:PutParameter", + "ssm:UpdateAssociationStatus", + "ssm:UpdateInstanceAssociationStatus", + "ssm:UpdateInstanceInformation", + "ec2messages:AcknowledgeMessage", + "ec2messages:DeleteMessage", + "ec2messages:FailMessage", + "ec2messages:GetEndpoint", + "ec2messages:GetMessages", + "ec2messages:SendReply", + "cloudwatch:PutMetricData", + "ec2:DescribeInstanceStatus", + "ds:CreateComputer", + "ds:DescribeDirectories", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "s3:*" + ], + "Resource": "*" + }] + } + + ECSInstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Path: / + Roles: + - !Ref ECSRole + + ECSServiceAutoScalingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + Action: + - 'sts:AssumeRole' + Effect: Allow + Principal: + Service: + - application-autoscaling.amazonaws.com + 
Path: / + Policies: + - PolicyName: ecs-service-autoscaling + PolicyDocument: + Statement: + Effect: Allow + Action: + - application-autoscaling:* + - cloudwatch:DescribeAlarms + - cloudwatch:PutMetricAlarm + - ecs:DescribeServices + - ecs:UpdateService + Resource: "*" + + TaskExecutionRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + Service: ecs-tasks.amazonaws.com + ManagedPolicyArns: + - arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy + Policies: + - PolicyName: !Sub ecs-task-S3-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - + Effect: Allow + Action: + - "s3:Get*" + - "s3:List*" + - "s3:Put*" + Resource: + - !GetAtt ChainBucket.Arn + - PolicyName: !Sub ecs-task-SSM-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - + Effect: Allow + Action: + - "ssm:DescribeParameters" + - "ssm:PutParameter" + - "ssm:GetParameters" + Resource: + - !Sub "arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter/${AWS::StackName}/*" + + LogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: !Sub /ecs/${AWS::StackName} + RetentionInDays: 14 + + ECSService: + Type: AWS::ECS::Service + Properties: + Cluster: !Ref Cluster + DesiredCount: !Ref DesiredCount + TaskDefinition: !Ref TaskDefinition + LaunchType: EC2 + DeploymentConfiguration: + MaximumPercent: 100 + MinimumHealthyPercent: 0 + + TaskDefinition: + Type: AWS::ECS::TaskDefinition + Properties: + Family: !Sub ${AWS::StackName}-${TaskName} + RequiresCompatibilities: + - EC2 + NetworkMode: host + ExecutionRoleArn: !Ref TaskExecutionRole + ContainerDefinitions: + - Name: !Ref TaskName + Image: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${Repository} + Essential: true + MemoryReservation: 6144 + Environment: + - Name: "network" + Value: !Ref Network + - Name: "syncmode" + Value: !Ref SyncMode + - Name: 
"connections" + Value: !Ref Connections + - Name: "netport" + Value: !Ref NetPort + - Name: "region" + Value: !Ref AWS::Region + - Name: "chainbucket" + Value: !Ref ChainBucket + - Name: "s3key" + Value: node + PortMappings: + - ContainerPort: !Ref NetPort + LogConfiguration: + LogDriver: awslogs + Options: + awslogs-region: !Ref AWS::Region + awslogs-group: !Ref LogGroup + awslogs-stream-prefix: !Ref AWS::StackName + +# CodePipeline Resources + + Repository: + Type: AWS::ECR::Repository + + CodeBuildServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codebuild.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: "*" + Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + - ecr:GetAuthorizationToken + - Resource: !Sub arn:aws:s3:::${ArtifactBucket}/* + Effect: Allow + Action: + - s3:GetObject + - s3:PutObject + - s3:GetObjectVersion + - Resource: !Sub arn:aws:ecr:${AWS::Region}:${AWS::AccountId}:repository/${Repository} + Effect: Allow + Action: + - ecr:GetDownloadUrlForLayer + - ecr:BatchGetImage + - ecr:BatchCheckLayerAvailability + - ecr:PutImage + - ecr:InitiateLayerUpload + - ecr:UploadLayerPart + - ecr:CompleteLayerUpload + + CodePipelineServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codepipeline.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: + - !Sub arn:aws:s3:::${ArtifactBucket}/* + Effect: Allow + Action: + - s3:PutObject + - s3:GetObject + - s3:GetObjectVersion + - s3:GetBucketVersioning + - Resource: "*" + Effect: Allow + Action: + - ecs:DescribeServices + - ecs:DescribeTaskDefinition + - ecs:DescribeTasks + - 
ecs:ListTasks + - ecs:RegisterTaskDefinition + - ecs:UpdateService + - codebuild:StartBuild + - codebuild:BatchGetBuilds + - iam:PassRole + + ArtifactBucket: + Type: AWS::S3::Bucket + + CodeBuildProject: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: CODEPIPELINE + Source: + Type: CODEPIPELINE + BuildSpec: | + version: 0.2 + phases: + install: + runtime-versions: + docker: 19 + pre_build: + commands: + - $(aws ecr get-login --no-include-email) + - TAG="$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | head -c 8)" + - IMAGE_URI="${REPOSITORY_URI}:${TAG}" + - cp infra/Dockerfile.updater ./Dockerfile + build: + commands: + - docker build --tag "$IMAGE_URI" . + - docker build --tag "${REPOSITORY_URI}:latest" . + post_build: + commands: + - docker push "$IMAGE_URI" + - docker push "${REPOSITORY_URI}:latest" + - printf '[{"name":"mev-geth-updater","imageUri":"%s"}]' "$IMAGE_URI" > images.json + artifacts: + files: images.json + Environment: + ComputeType: BUILD_GENERAL1_LARGE + Image: aws/codebuild/amazonlinux2-aarch64-standard:1.0 + Type: ARM_CONTAINER + PrivilegedMode: true + EnvironmentVariables: + - Name: AWS_DEFAULT_REGION + Value: !Ref AWS::Region + - Name: REPOSITORY_URI + Value: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${Repository} + Name: !Ref AWS::StackName + ServiceRole: !Ref CodeBuildServiceRole + + Pipeline: + Type: AWS::CodePipeline::Pipeline + Properties: + RoleArn: !GetAtt CodePipelineServiceRole.Arn + ArtifactStore: + Type: S3 + Location: !Ref ArtifactBucket + Stages: + - Name: Source + Actions: + - Name: App + ActionTypeId: + Category: Source + Owner: ThirdParty + Version: 1 + Provider: GitHub + Configuration: + Owner: !Ref GitHubUser + Repo: !Ref GitHubRepo + Branch: !Ref GitHubBranch + OAuthToken: !Ref GitHubToken + OutputArtifacts: + - Name: App + RunOrder: 1 + - Name: Build + Actions: + - Name: Build + ActionTypeId: + Category: Build + Owner: AWS + Version: 1 + Provider: CodeBuild + Configuration: + ProjectName: 
!Ref CodeBuildProject + InputArtifacts: + - Name: App + OutputArtifacts: + - Name: BuildOutput + RunOrder: 1 + - Name: Deploy + Actions: + - Name: Deploy + ActionTypeId: + Category: Deploy + Owner: AWS + Version: 1 + Provider: ECS + Configuration: + ClusterName: !Ref Cluster + ServiceName: !Ref ECSService + FileName: images.json + InputArtifacts: + - Name: BuildOutput + RunOrder: 1 + +Outputs: + + ClusterName: + Value: !Ref Cluster + Service: + Value: !Ref ECSService + PipelineUrl: + Value: !Sub https://console.aws.amazon.com/codepipeline/home?region=${AWS::Region}#/view/${Pipeline} \ No newline at end of file diff --git a/infra/mev-geth-updater-x86-64.yaml b/infra/mev-geth-updater-x86-64.yaml new file mode 100644 index 000000000000..a69d1bb10d18 --- /dev/null +++ b/infra/mev-geth-updater-x86-64.yaml @@ -0,0 +1,737 @@ +--- +AWSTemplateFormatVersion: 2010-09-09 + +Description: > + This template creates an automated continuous deployment pipeline to Amazon Elastic Container Service (ECS) + Created by Luke Youngblood, luke@blockscale.net + +Parameters: + # GitHub Parameters + + GitHubUser: + Type: String + Default: lyoungblood + Description: Your team or username on GitHub. + + GitHubRepo: + Type: String + Default: mev-geth + Description: The repo name of the baker service. + + GitHubBranch: + Type: String + Default: master + Description: The branch of the repo to continuously deploy. + + GitHubToken: + Type: String + NoEcho: true + Description: > + Token for the team or user specified above. 
(https://github.com/settings/tokens) + + # VPC Parameters + + VPC: + Type: AWS::EC2::VPC::Id + + Subnets: + Type: List + + VpcCIDR: + Type: String + Default: 172.31.0.0/16 + + # ECS Parameters + + InstanceType: + Type: String + Default: i3en.large + + KeyPair: + Type: AWS::EC2::KeyPair::KeyName + + ClusterSize: + Type: Number + Default: 1 + + DesiredCount: + Type: Number + Default: 0 + + TaskName: + Type: String + Default: mev-geth-updater + + ECSAMI: + Type: AWS::SSM::Parameter::Value + Default: /aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id + + # Mev-Geth Parameters + + Network: + Type: String + Default: mainnet + AllowedValues: + - mainnet + - goerli + + SyncMode: + Type: String + Default: fast + AllowedValues: + - full + - fast + - light + + Connections: + Type: Number + Default: 50 + + NetPort: + Type: Number + Default: 30303 + +Metadata: + AWS::CloudFormation::Interface: + ParameterLabels: + GitHubUser: + default: "User" + GitHubRepo: + default: "Mev-Geth GitHub Repository" + GitHubBranch: + default: "Branch in GitHub repository" + GitHubToken: + default: "Personal Access Token" + VPC: + default: "Choose which VPC the autoscaling group should be deployed to" + Subnets: + default: "Choose which subnets the autoscaling group should be deployed to" + VpcCIDR: + default: "VPC CIDR Block" + InstanceType: + default: "Which instance type should we use to build the ECS cluster?" + KeyPair: + default: "Which keypair should be used for access to the ECS cluster?" + ClusterSize: + default: "How many ECS hosts do you want to initially deploy?" + DesiredCount: + default: "How many Updater tasks do you want to initially execute?" + TaskName: + default: "The name of the Updater ECS Task" + ECSAMI: + default: "The ECS AMI ID populated from SSM." 
+ Network: + default: "The network the Mev-Geth node should join" + SyncMode: + default: "The synchronization mode that Mev-Geth should use (full, fast, or light)" + Connections: + default: "The number of connections the Mev-Geth node should be configured with" + NetPort: + default: "The TCP/UDP port used for Mev-Geth connectivity to other Ethereum peer nodes" + ParameterGroups: + - Label: + default: GitHub Configuration + Parameters: + - GitHubRepo + - GitHubBranch + - GitHubUser + - GitHubToken + - Label: + default: VPC Configuration + Parameters: + - VPC + - Subnets + - VpcCIDR + - Label: + default: ECS Configuration + Parameters: + - InstanceType + - KeyPair + - ClusterSize + - DesiredCount + - TaskName + - ECSAMI + - Label: + default: Mev-Geth Configuration + Parameters: + - Network + - SyncMode + - Connections + - NetPort + +Resources: + # ECS Resources + + ChainBucket: + Type: AWS::S3::Bucket + + ChainBucketPolicy: + Type: AWS::S3::BucketPolicy + Properties: + Bucket: !Ref ChainBucket + PolicyDocument: + Statement: + - Action: + - s3:GetObject + - s3:ListBucket + Effect: Allow + Resource: + - Fn::Join: + - "" + - - "arn:aws:s3:::" + - Ref: "ChainBucket" + - "/*" + - Fn::Join: + - "" + - - "arn:aws:s3:::" + - Ref: "ChainBucket" + Principal: + AWS: "*" + + Cluster: + Type: AWS::ECS::Cluster + Properties: + ClusterName: !Ref AWS::StackName + + SecurityGroup: + Type: "AWS::EC2::SecurityGroup" + Properties: + GroupDescription: !Sub ${AWS::StackName}-sg + VpcId: !Ref VPC + Tags: + - Key: Name + Value: !Sub ${AWS::StackName}-sg + SecurityGroupIngress: + - IpProtocol: tcp + FromPort: 22 + ToPort: 22 + CidrIp: !Ref VpcCIDR + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: tcp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIp: 0.0.0.0/0 + - IpProtocol: udp + FromPort: !Ref NetPort + ToPort: !Ref NetPort + CidrIpv6: ::/0 + + 
ECSAutoScalingGroup: + Type: AWS::AutoScaling::AutoScalingGroup + Properties: + VPCZoneIdentifier: !Ref Subnets + LaunchConfigurationName: !Ref ECSLaunchConfiguration + MinSize: !Ref ClusterSize + MaxSize: !Ref ClusterSize + DesiredCapacity: !Ref ClusterSize + Tags: + - Key: Name + Value: !Sub ${AWS::StackName} ECS host + PropagateAtLaunch: true + CreationPolicy: + ResourceSignal: + Timeout: PT15M + UpdatePolicy: + AutoScalingRollingUpdate: + MinInstancesInService: 0 + MaxBatchSize: 1 + PauseTime: PT15M + SuspendProcesses: + - HealthCheck + - ReplaceUnhealthy + - AZRebalance + - AlarmNotification + - ScheduledActions + WaitOnResourceSignals: true + + ECSLaunchConfiguration: + Type: AWS::AutoScaling::LaunchConfiguration + Properties: + ImageId: !Ref ECSAMI + InstanceType: !Ref InstanceType + KeyName: !Ref KeyPair + SecurityGroups: + - !Ref SecurityGroup + IamInstanceProfile: !Ref ECSInstanceProfile + UserData: + "Fn::Base64": !Sub | + #!/bin/bash + yum install -y aws-cfn-bootstrap hibagent rsync awscli + yum update -y + service amazon-ssm-agent restart + + # determine if we have an NVMe SSD attached + find /dev/nvme1 + if [ $? -eq 0 ] + then + mount_point=/var/lib/docker + + # copy existing files from mount point + service docker stop + echo 'DOCKER_STORAGE_OPTIONS="--storage-driver overlay2"' > /etc/sysconfig/docker-storage + mkdir -p /tmp$mount_point + rsync -val $mount_point/ /tmp/$mount_point/ + + # make a new filesystem and mount it + mkfs -t ext4 /dev/nvme1n1 + mkdir -p $mount_point + mount -t ext4 -o noatime /dev/nvme1n1 $mount_point + + # Copy files back to new mount point + rsync -val /tmp/$mount_point/ $mount_point/ + rm -rf /tmp$mount_point + service docker start + + # Make raid appear on reboot + echo >> /etc/fstab + echo "/dev/nvme1n1 $mount_point ext4 noatime 0 0" | tee -a /etc/fstab + fi + + /opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + /opt/aws/bin/cfn-signal -e $? 
--region ${AWS::Region} --stack ${AWS::StackName} --resource ECSAutoScalingGroup + /usr/bin/enable-ec2-spot-hibernation + echo "ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION=5m" >> /etc/ecs/ecs.config + + reboot + + Metadata: + AWS::CloudFormation::Init: + config: + packages: + yum: + awslogs: [] + + commands: + 01_add_instance_to_cluster: + command: !Sub echo ECS_CLUSTER=${Cluster} >> /etc/ecs/ecs.config + files: + "/etc/cfn/cfn-hup.conf": + mode: 000400 + owner: root + group: root + content: !Sub | + [main] + stack=${AWS::StackId} + region=${AWS::Region} + + "/etc/cfn/hooks.d/cfn-auto-reloader.conf": + content: !Sub | + [cfn-auto-reloader-hook] + triggers=post.update + path=Resources.ECSLaunchConfiguration.Metadata.AWS::CloudFormation::Init + action=/opt/aws/bin/cfn-init -v --region ${AWS::Region} --stack ${AWS::StackName} --resource ECSLaunchConfiguration + + "/etc/awslogs/awscli.conf": + content: !Sub | + [plugins] + cwlogs = cwlogs + [default] + region = ${AWS::Region} + + services: + sysvinit: + cfn-hup: + enabled: true + ensureRunning: true + files: + - /etc/cfn/cfn-hup.conf + - /etc/cfn/hooks.d/cfn-auto-reloader.conf + + # This IAM Role is attached to all of the ECS hosts. It is based on the default role + # published here: + # http://docs.aws.amazon.com/AmazonECS/latest/developerguide/instance_IAM_role.html + # + # You can add other IAM policy statements here to allow access from your ECS hosts + # to other AWS services. 
+ + ECSRole: + Type: AWS::IAM::Role + Properties: + Path: / + RoleName: !Sub ${AWS::StackName}-ECSRole-${AWS::Region} + AssumeRolePolicyDocument: | + { + "Statement": [{ + "Action": "sts:AssumeRole", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + } + }] + } + Policies: + - PolicyName: ecs-service + PolicyDocument: | + { + "Statement": [{ + "Effect": "Allow", + "Action": [ + "ecs:CreateCluster", + "ecs:DeregisterContainerInstance", + "ecs:DiscoverPollEndpoint", + "ecs:Poll", + "ecs:RegisterContainerInstance", + "ecs:StartTelemetrySession", + "ecs:Submit*", + "logs:CreateLogStream", + "logs:PutLogEvents", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken", + "ssm:DescribeAssociation", + "ssm:GetDeployablePatchSnapshotForInstance", + "ssm:GetDocument", + "ssm:GetManifest", + "ssm:GetParameters", + "ssm:ListAssociations", + "ssm:ListInstanceAssociations", + "ssm:PutInventory", + "ssm:PutComplianceItems", + "ssm:PutConfigurePackageResult", + "ssm:PutParameter", + "ssm:UpdateAssociationStatus", + "ssm:UpdateInstanceAssociationStatus", + "ssm:UpdateInstanceInformation", + "ec2messages:AcknowledgeMessage", + "ec2messages:DeleteMessage", + "ec2messages:FailMessage", + "ec2messages:GetEndpoint", + "ec2messages:GetMessages", + "ec2messages:SendReply", + "cloudwatch:PutMetricData", + "ec2:DescribeInstanceStatus", + "ds:CreateComputer", + "ds:DescribeDirectories", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "s3:*" + ], + "Resource": "*" + }] + } + + ECSInstanceProfile: + Type: AWS::IAM::InstanceProfile + Properties: + Path: / + Roles: + - !Ref ECSRole + + ECSServiceAutoScalingRole: + Type: AWS::IAM::Role + Properties: + AssumeRolePolicyDocument: + Version: "2012-10-17" + Statement: + Action: + - "sts:AssumeRole" + Effect: Allow + Principal: + Service: + - application-autoscaling.amazonaws.com + 
Path: / + Policies: + - PolicyName: ecs-service-autoscaling + PolicyDocument: + Statement: + Effect: Allow + Action: + - application-autoscaling:* + - cloudwatch:DescribeAlarms + - cloudwatch:PutMetricAlarm + - ecs:DescribeServices + - ecs:UpdateService + Resource: "*" + + TaskExecutionRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Action: sts:AssumeRole + Effect: Allow + Principal: + Service: ecs-tasks.amazonaws.com + ManagedPolicyArns: + - arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy + Policies: + - PolicyName: !Sub ecs-task-S3-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - "s3:Get*" + - "s3:List*" + - "s3:Put*" + Resource: + - !GetAtt ChainBucket.Arn + - PolicyName: !Sub ecs-task-SSM-${AWS::StackName} + PolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Action: + - "ssm:DescribeParameters" + - "ssm:PutParameter" + - "ssm:GetParameters" + Resource: + - !Sub "arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter/${AWS::StackName}/*" + + LogGroup: + Type: AWS::Logs::LogGroup + Properties: + LogGroupName: !Sub /ecs/${AWS::StackName} + RetentionInDays: 14 + + ECSService: + Type: AWS::ECS::Service + Properties: + Cluster: !Ref Cluster + DesiredCount: !Ref DesiredCount + TaskDefinition: !Ref TaskDefinition + LaunchType: EC2 + DeploymentConfiguration: + MaximumPercent: 100 + MinimumHealthyPercent: 0 + + TaskDefinition: + Type: AWS::ECS::TaskDefinition + Properties: + Family: !Sub ${AWS::StackName}-${TaskName} + RequiresCompatibilities: + - EC2 + NetworkMode: host + ExecutionRoleArn: !Ref TaskExecutionRole + ContainerDefinitions: + - Name: !Ref TaskName + Image: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${Repository} + Essential: true + MemoryReservation: 6144 + Environment: + - Name: "network" + Value: !Ref Network + - Name: "syncmode" + Value: !Ref SyncMode + - Name: 
"connections" + Value: !Ref Connections + - Name: "netport" + Value: !Ref NetPort + - Name: "region" + Value: !Ref AWS::Region + - Name: "chainbucket" + Value: !Ref ChainBucket + - Name: "s3key" + Value: node + PortMappings: + - ContainerPort: !Ref NetPort + LogConfiguration: + LogDriver: awslogs + Options: + awslogs-region: !Ref AWS::Region + awslogs-group: !Ref LogGroup + awslogs-stream-prefix: !Ref AWS::StackName + + # CodePipeline Resources + + Repository: + Type: AWS::ECR::Repository + + CodeBuildServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codebuild.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: "*" + Effect: Allow + Action: + - logs:CreateLogGroup + - logs:CreateLogStream + - logs:PutLogEvents + - ecr:GetAuthorizationToken + - Resource: !Sub arn:aws:s3:::${ArtifactBucket}/* + Effect: Allow + Action: + - s3:GetObject + - s3:PutObject + - s3:GetObjectVersion + - Resource: !Sub arn:aws:ecr:${AWS::Region}:${AWS::AccountId}:repository/${Repository} + Effect: Allow + Action: + - ecr:GetDownloadUrlForLayer + - ecr:BatchGetImage + - ecr:BatchCheckLayerAvailability + - ecr:PutImage + - ecr:InitiateLayerUpload + - ecr:UploadLayerPart + - ecr:CompleteLayerUpload + + CodePipelineServiceRole: + Type: AWS::IAM::Role + Properties: + Path: / + AssumeRolePolicyDocument: + Version: 2012-10-17 + Statement: + - Effect: Allow + Principal: + Service: codepipeline.amazonaws.com + Action: sts:AssumeRole + Policies: + - PolicyName: root + PolicyDocument: + Version: 2012-10-17 + Statement: + - Resource: + - !Sub arn:aws:s3:::${ArtifactBucket}/* + Effect: Allow + Action: + - s3:PutObject + - s3:GetObject + - s3:GetObjectVersion + - s3:GetBucketVersioning + - Resource: "*" + Effect: Allow + Action: + - ecs:DescribeServices + - ecs:DescribeTaskDefinition + - ecs:DescribeTasks + 
- ecs:ListTasks + - ecs:RegisterTaskDefinition + - ecs:UpdateService + - codebuild:StartBuild + - codebuild:BatchGetBuilds + - iam:PassRole + + ArtifactBucket: + Type: AWS::S3::Bucket + + CodeBuildProject: + Type: AWS::CodeBuild::Project + Properties: + Artifacts: + Type: CODEPIPELINE + Source: + Type: CODEPIPELINE + BuildSpec: | + version: 0.2 + phases: + install: + runtime-versions: + docker: 19 + pre_build: + commands: + - $(aws ecr get-login --no-include-email) + - TAG="$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | head -c 8)" + - IMAGE_URI="${REPOSITORY_URI}:${TAG}" + - cp infra/Dockerfile.updater ./Dockerfile + build: + commands: + - docker build --tag "$IMAGE_URI" . + - docker build --tag "${REPOSITORY_URI}:latest" . + post_build: + commands: + - docker push "$IMAGE_URI" + - docker push "${REPOSITORY_URI}:latest" + - printf '[{"name":"mev-geth-updater","imageUri":"%s"}]' "$IMAGE_URI" > images.json + artifacts: + files: images.json + Environment: + ComputeType: BUILD_GENERAL1_SMALL + Image: aws/codebuild/docker:17.09.0 + Type: LINUX_CONTAINER + PrivilegedMode: true + EnvironmentVariables: + - Name: AWS_DEFAULT_REGION + Value: !Ref AWS::Region + - Name: REPOSITORY_URI + Value: !Sub ${AWS::AccountId}.dkr.ecr.${AWS::Region}.amazonaws.com/${Repository} + Name: !Ref AWS::StackName + ServiceRole: !Ref CodeBuildServiceRole + + Pipeline: + Type: AWS::CodePipeline::Pipeline + Properties: + RoleArn: !GetAtt CodePipelineServiceRole.Arn + ArtifactStore: + Type: S3 + Location: !Ref ArtifactBucket + Stages: + - Name: Source + Actions: + - Name: App + ActionTypeId: + Category: Source + Owner: ThirdParty + Version: 1 + Provider: GitHub + Configuration: + Owner: !Ref GitHubUser + Repo: !Ref GitHubRepo + Branch: !Ref GitHubBranch + OAuthToken: !Ref GitHubToken + OutputArtifacts: + - Name: App + RunOrder: 1 + - Name: Build + Actions: + - Name: Build + ActionTypeId: + Category: Build + Owner: AWS + Version: 1 + Provider: CodeBuild + Configuration: + ProjectName: !Ref 
CodeBuildProject + InputArtifacts: + - Name: App + OutputArtifacts: + - Name: BuildOutput + RunOrder: 1 + - Name: Deploy + Actions: + - Name: Deploy + ActionTypeId: + Category: Deploy + Owner: AWS + Version: 1 + Provider: ECS + Configuration: + ClusterName: !Ref Cluster + ServiceName: !Ref ECSService + FileName: images.json + InputArtifacts: + - Name: BuildOutput + RunOrder: 1 + +Outputs: + ClusterName: + Value: !Ref Cluster + Service: + Value: !Ref ECSService + PipelineUrl: + Value: !Sub https://console.aws.amazon.com/codepipeline/home?region=${AWS::Region}#/view/${Pipeline} diff --git a/infra/start-mev-geth-node.sh b/infra/start-mev-geth-node.sh new file mode 100755 index 000000000000..0740445f5f73 --- /dev/null +++ b/infra/start-mev-geth-node.sh @@ -0,0 +1,98 @@ +#!/bin/sh -x +# Starts the Mev-Geth node client +# Written by Luke Youngblood, luke@blockscale.net + +# network=mainnet # normally set by environment +# syncmode=fast # normally set by environment +# rpcport=8545 # normally set by environment +# wsport=8546 # normally set by environment +# netport=30303 # normally set by environment + +init_node() { + # You can put any commands you would like to run to initialize the node here. + echo Initializing node... +} + +start_node() { + if [ $network = "goerli" ] + then + geth \ + --port $netport \ + --http \ + --http.addr 0.0.0.0 \ + --http.port $rpcport \ + --http.api eth,net,web3 \ + --http.vhosts '*' \ + --http.corsdomain '*' \ + --graphql \ + --graphql.corsdomain '*' \ + --graphql.vhosts '*' \ + --ws \ + --ws.addr 0.0.0.0 \ + --ws.port $wsport \ + --ws.api eth,net,web3 \ + --ws.origins '*' \ + --syncmode $syncmode \ + --gcmode archive \ + --cache 4096 \ + --maxpeers $connections \ + --goerli + if [ $? -ne 0 ] + then + echo "Node failed to start; exiting." 
+ exit 1 + fi + else + geth \ + --port $netport \ + --http \ + --http.addr 0.0.0.0 \ + --http.port $rpcport \ + --http.api eth,net,web3 \ + --http.vhosts '*' \ + --http.corsdomain '*' \ + --graphql \ + --graphql.corsdomain '*' \ + --graphql.vhosts '*' \ + --ws \ + --ws.addr 0.0.0.0 \ + --ws.port $wsport \ + --ws.api eth,net,web3 \ + --ws.origins '*' \ + --syncmode $syncmode \ + --gcmode archive \ + --cache 4096 \ + --maxpeers $connections + if [ $? -ne 0 ] + then + echo "Node failed to start; exiting." + exit 1 + fi + fi +} + +s3_sync() { + # Determine data directory + if [ $network = "goerli" ] + then + datadir=/root/.ethereum/goerli/geth/chaindata + else + datadir=/root/.ethereum/geth/chaindata + fi + # If the current1 key exists, node1 is the most current set of blockchain data + echo "A 404 error below is expected and nothing to be concerned with." + aws s3api head-object --request-payer requester --bucket $chainbucket --key current1 + if [ $? -eq 0 ] + then + s3key=node1 + else + s3key=node2 + fi + aws s3 sync --only-show-errors --request-payer requester --region $region s3://$chainbucket/$s3key $datadir +} + +# main + +init_node +s3_sync +start_node diff --git a/infra/start-mev-geth-updater.sh b/infra/start-mev-geth-updater.sh new file mode 100755 index 000000000000..abad72fab9bb --- /dev/null +++ b/infra/start-mev-geth-updater.sh @@ -0,0 +1,183 @@ +#!/bin/sh -x +# Starts the Mev-Geth updater client +# Written by Luke Youngblood, luke@blockscale.net + +# netport=30303 # normally set by environment + +init_node() { + # Initialization steps can go here + echo Initializing node... + aws configure set default.s3.max_concurrent_requests 64 + aws configure set default.s3.max_queue_size 20000 +} + +start_node() { + if [ $network = "goerli" ] + then + geth \ + --port $netport \ + --syncmode $syncmode \ + --cache 4096 \ + --gcmode archive \ + --maxpeers $connections \ + --goerli & + if [ $? -ne 0 ] + then + echo "Node failed to start; exiting." 
+ exit 1 + fi + else + geth \ + --port $netport \ + --syncmode $syncmode \ + --cache 4096 \ + --gcmode archive \ + --maxpeers $connections & + if [ $? -ne 0 ] + then + echo "Node failed to start; exiting." + exit 1 + fi + fi +} + +s3_sync_down() { + # Determine data directory + if [ $network = "goerli" ] + then + datadir=/root/.ethereum/goerli/geth/chaindata + else + datadir=/root/.ethereum/geth/chaindata + fi + + # If the current1 object exists, node1 is the key we should download + echo "A 404 error below is expected and nothing to be concerned with." + aws s3api head-object --bucket $chainbucket --key current1 + if [ $? -eq 0 ] + then + echo "current1 key exists; downloading node1" + s3key=node1 + else + echo "current1 key doesn't exist; downloading node2" + s3key=node2 + fi + + aws s3 sync --region $region --only-show-errors s3://$chainbucket/$s3key $datadir + if [ $? -ne 0 ] + then + echo "aws s3 sync command failed; exiting." + exit 2 + fi +} + +kill_node() { + tries=0 + while [ ! -z `ps -ef |grep geth|grep -v geth-updater|grep -v grep|awk '{print $1}'` ] + do + ps -ef |grep geth|grep -v geth-updater|grep -v grep + pid=`ps -ef |grep geth|grep -v geth-updater|grep -v grep|awk '{print $1}'` + kill $pid + sleep 30 + echo "Waiting for the node to shutdown cleanly... try number $tries" + let "tries+=1" + if [ $tries -gt 29 ] + then + echo "Node has not stopped cleanly after $tries, forcibly killing." + ps -ef |grep geth|grep -v geth-updater|grep -v grep + pid=`ps -ef |grep geth|grep -v geth-updater|grep -v grep|awk '{print $1}'` + kill -9 $pid + fi + if [ $tries -gt 30 ] + then + echo "Node has not stopped cleanly after $tries, exiting..." 
+ exit 3 + fi + done +} + +s3_sync_up() { + # Determine data directory + if [ $network = "goerli" ] + then + datadir=/root/.ethereum/goerli/geth/chaindata + else + datadir=/root/.ethereum/geth/chaindata + fi + + # If the current1 object exists, node1 is the folder that clients will download, so we should update node2 + aws s3api head-object --bucket $chainbucket --key current1 + if [ $? -eq 0 ] + then + echo "current1 key exists; updating node2" + s3key=node2 + else + echo "current1 key doesn't exist; updating node1" + s3key=node1 + fi + + aws s3 sync --delete --region $region --only-show-errors --acl public-read $datadir s3://$chainbucket/$s3key + if [ $? -ne 0 ] + then + echo "aws s3 sync upload command failed; exiting." + exit 4 + fi + + if [ "$s3key" = "node2" ] + then + echo "Removing current1 key, as the node2 key was just updated." + aws s3 rm --region $region s3://$chainbucket/current1 + if [ $? -ne 0 ] + then + echo "aws s3 rm command failed; retrying." + sleep 5 + aws s3 rm --region $region s3://$chainbucket/current1 + if [ $? -ne 0 ] + then + echo "aws s3 rm command failed; exiting." + exit 5 + fi + fi + else + echo "Touching current1 key, as the node1 key was just updated." + touch ~/current1 + aws s3 cp --region $region --acl public-read ~/current1 s3://$chainbucket/ + if [ $? -ne 0 ] + then + echo "aws s3 cp command failed; retrying." + sleep 5 + aws s3 cp --region $region --acl public-read ~/current1 s3://$chainbucket/ + if [ $? -ne 0 ] + then + echo "aws s3 cp command failed; exiting." + exit 6 + fi + fi + fi +} + +continuous() { + # This function continuously stops the node every hour + # and syncs the chain data with S3, then restarts the node. + while true + do + echo "Sleeping for 60 minutes at `date`..." + sleep 3600 + echo "Cleanly shutting down the node so we can update S3 with the latest chaindata at `date`..." + kill_node + echo "Syncing chain data to S3 at `date`..." 
+ s3_sync_up + echo "Restarting the node after syncing to S3 at `date`..." + start_node + done +} + +# main + +echo "Initializing the node at `date`..." +init_node +echo "Syncing initial chain data with stored chain data in S3 at `date`..." +s3_sync_down +echo "Starting the node at `date`..." +start_node +echo "Starting the continuous loop at `date`..." +continuous diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index fe5c3388b57e..1a2864af3c39 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -19,6 +19,7 @@ package ethapi import ( "bytes" "context" + "encoding/hex" "errors" "fmt" "math/big" @@ -45,6 +46,7 @@ import ( "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" "github.com/tyler-smith/go-bip39" + "golang.org/x/crypto/sha3" ) // PublicEthereumAPI provides an API to access Ethereum related information. @@ -2100,3 +2102,186 @@ func toHexSlice(b [][]byte) []string { } return r } + +// ---------------------------------------------------------------- FlashBots ---------------------------------------------------------------- + +// PrivateTxBundleAPI offers an API for accepting bundled transactions +type PrivateTxBundleAPI struct { + b Backend +} + +// NewPrivateTxBundleAPI creates a new Tx Bundle API instance. +func NewPrivateTxBundleAPI(b Backend) *PrivateTxBundleAPI { + return &PrivateTxBundleAPI{b} +} + +// SendBundle will add the signed transaction to the transaction pool. 
+// The sender is responsible for signing the transaction and using the correct nonce and ensuring validity +func (s *PrivateTxBundleAPI) SendBundle(ctx context.Context, encodedTxs []hexutil.Bytes, blockNumber rpc.BlockNumber, minTimestampPtr, maxTimestampPtr *uint64) error { + var txs types.Transactions + + for _, encodedTx := range encodedTxs { + tx := new(types.Transaction) + if err := rlp.DecodeBytes(encodedTx, tx); err != nil { + return err + } + txs = append(txs, tx) + } + + var minTimestamp, maxTimestamp uint64 + if minTimestampPtr != nil { + minTimestamp = *minTimestampPtr + } + if maxTimestampPtr != nil { + maxTimestamp = *maxTimestampPtr + } + + return s.b.SendBundle(ctx, txs, blockNumber, minTimestamp, maxTimestamp) +} + +// BundleAPI offers an API for accepting bundled transactions +type BundleAPI struct { + b Backend + chain *core.BlockChain +} + +// NewBundleAPI creates a new Tx Bundle API instance. +func NewBundleAPI(b Backend, chain *core.BlockChain) *BundleAPI { + return &BundleAPI{b, chain} +} + +// CallBundle will simulate a bundle of transactions at the top of a given block +// number with the state of another (or the same) block. This can be used to +// simulate future blocks with the current state, or it can be used to simulate +// a past block. 
+// The sender is responsible for signing the transactions and using the correct +// nonce and ensuring validity +func (s *BundleAPI) CallBundle(ctx context.Context, encodedTxs []hexutil.Bytes, blockNr rpc.BlockNumber, stateBlockNumberOrHash rpc.BlockNumberOrHash, coinbaseArg *string, blockTimestamp *uint64, timeoutMilliSecondsPtr *int64) (map[string]interface{}, error) { + if len(encodedTxs) == 0 { + return nil, nil + } + var txs types.Transactions + + for _, encodedTx := range encodedTxs { + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(encodedTx); err != nil { + return nil, err + } + txs = append(txs, tx) + } + defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) + + timeoutMilliSeconds := int64(5000) + if timeoutMilliSecondsPtr != nil { + timeoutMilliSeconds = *timeoutMilliSecondsPtr + } + timeout := time.Millisecond * time.Duration(timeoutMilliSeconds) + state, parent, err := s.b.StateAndHeaderByNumberOrHash(ctx, stateBlockNumberOrHash) + if state == nil || err != nil { + return nil, err + } + blockNumber := big.NewInt(int64(blockNr)) + + timestamp := parent.Time + 1 + if blockTimestamp != nil { + timestamp = *blockTimestamp + } + coinbase := parent.Coinbase + if coinbaseArg != nil { + coinbase = common.HexToAddress(*coinbaseArg) + } + header := &types.Header{ + ParentHash: parent.Hash(), + Number: blockNumber, + GasLimit: parent.GasLimit, + Time: timestamp, + Difficulty: parent.Difficulty, + Coinbase: coinbase, + } + + // Setup context so it may be cancelled the call has completed + // or, in case of unmetered gas, setup a context with a timeout. + var cancel context.CancelFunc + if timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + // Make sure the context is cancelled when the call has completed + // this makes sure resources are cleaned up. 
+ defer cancel() + + vmconfig := vm.Config{} + + // Setup the gas pool (also for unmetered requests) + // and apply the message. + gp := new(core.GasPool).AddGas(math.MaxUint64) + + results := []map[string]interface{}{} + coinbaseBalanceBefore := state.GetBalance(coinbase) + + bundleHash := sha3.NewLegacyKeccak256() + signer := types.MakeSigner(s.b.ChainConfig(), blockNumber) + var totalGasUsed uint64 + gasFees := new(big.Int) + for i, tx := range txs { + coinbaseBalanceBeforeTx := state.GetBalance(coinbase) + state.Prepare(tx.Hash(), common.Hash{}, i) + + receipt, result, err := core.ApplyTransactionWithResult(s.b.ChainConfig(), s.chain, &coinbase, gp, state, header, tx, &header.GasUsed, vmconfig) + if err != nil { + return nil, fmt.Errorf("err: %w; txhash %s", err, tx.Hash()) + } + + txHash := tx.Hash().String() + from, err := types.Sender(signer, tx) + if err != nil { + return nil, fmt.Errorf("err: %w; txhash %s", err, tx.Hash()) + } + to := "0x" + if tx.To() != nil { + to = tx.To().String() + } + jsonResult := map[string]interface{}{ + "txHash": txHash, + "gasUsed": receipt.GasUsed, + "fromAddress": from.String(), + "toAddress": to, + } + totalGasUsed += receipt.GasUsed + gasFeesTx := new(big.Int).Mul(big.NewInt(int64(receipt.GasUsed)), tx.GasPrice()) + gasFees.Add(gasFees, gasFeesTx) + bundleHash.Write(tx.Hash().Bytes()) + if result.Err != nil { + jsonResult["error"] = result.Err.Error() + revert := result.Revert() + if len(revert) > 0 { + jsonResult["revert"] = string(revert) + } + } else { + dst := make([]byte, hex.EncodedLen(len(result.Return()))) + hex.Encode(dst, result.Return()) + jsonResult["value"] = "0x" + string(dst) + } + coinbaseDiffTx := new(big.Int).Sub(state.GetBalance(coinbase), coinbaseBalanceBeforeTx) + jsonResult["coinbaseDiff"] = coinbaseDiffTx.String() + jsonResult["gasFees"] = gasFeesTx.String() + jsonResult["ethSentToCoinbase"] = new(big.Int).Sub(coinbaseDiffTx, gasFeesTx).String() + jsonResult["gasPrice"] = 
new(big.Int).Div(coinbaseDiffTx, big.NewInt(int64(receipt.GasUsed))).String() + jsonResult["gasUsed"] = receipt.GasUsed + results = append(results, jsonResult) + } + + ret := map[string]interface{}{} + ret["results"] = results + coinbaseDiff := new(big.Int).Sub(state.GetBalance(coinbase), coinbaseBalanceBefore) + ret["coinbaseDiff"] = coinbaseDiff.String() + ret["gasFees"] = gasFees.String() + ret["ethSentToCoinbase"] = new(big.Int).Sub(coinbaseDiff, gasFees).String() + ret["bundleGasPrice"] = new(big.Int).Div(coinbaseDiff, big.NewInt(int64(totalGasUsed))).String() + ret["totalGasUsed"] = totalGasUsed + ret["stateBlockNumber"] = parent.Number.Int64() + + ret["bundleHash"] = "0x" + common.Bytes2Hex(bundleHash.Sum(nil)) + return ret, nil +} diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 07e76583f317..8ebdd2b43880 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -70,6 +70,7 @@ type Backend interface { // Transaction pool API SendTx(ctx context.Context, signedTx *types.Transaction) error + SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64) error GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) GetPoolTransactions() (types.Transactions, error) GetPoolTransaction(txHash common.Hash) *types.Transaction @@ -90,7 +91,7 @@ type Backend interface { Engine() consensus.Engine } -func GetAPIs(apiBackend Backend) []rpc.API { +func GetAPIs(apiBackend Backend, chain *core.BlockChain) []rpc.API { nonceLock := new(AddrLocker) return []rpc.API{ { @@ -132,6 +133,16 @@ func GetAPIs(apiBackend Backend) []rpc.API { Version: "1.0", Service: NewPrivateAccountAPI(apiBackend, nonceLock), Public: false, + }, { + Namespace: "eth", + Version: "1.0", + Service: NewPrivateTxBundleAPI(apiBackend), + Public: true, + }, { + Namespace: "eth", + Version: "1.0", + Service: NewBundleAPI(apiBackend, chain), + 
Public: true, }, } } diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 1934412c9014..8570b5868840 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -581,6 +581,16 @@ web3._extend({ params: 2, inputFormatter: [null, web3._extend.formatters.inputBlockNumberFormatter], }), + new web3._extend.Method({ + name: 'sendBundle', + call: 'eth_sendBundle', + params: 4 + }), + new web3._extend.Method({ + name: 'callBundle', + call: 'eth_callBundle', + params: 6 + }), ], properties: [ new web3._extend.Property({ diff --git a/les/api_backend.go b/les/api_backend.go index 60c64a8bdf3c..b9b8fa6e7541 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -187,6 +187,9 @@ func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) func (b *LesApiBackend) RemoveTx(txHash common.Hash) { b.eth.txPool.RemoveTx(txHash) } +func (b *LesApiBackend) SendBundle(ctx context.Context, txs types.Transactions, blockNumber rpc.BlockNumber, minTimestamp uint64, maxTimestamp uint64) error { + return b.eth.txPool.AddMevBundle(txs, big.NewInt(blockNumber.Int64()), minTimestamp, maxTimestamp) +} func (b *LesApiBackend) GetPoolTransactions() (types.Transactions, error) { return b.eth.txPool.GetTransactions() diff --git a/les/client.go b/les/client.go index 7534eb3ea0b9..bd46dba70028 100644 --- a/les/client.go +++ b/les/client.go @@ -285,7 +285,7 @@ func (s *LightDummyAPI) Mining() bool { // APIs returns the collection of RPC services the ethereum package offers. // NOTE, some of these services probably need to be moved to somewhere else. func (s *LightEthereum) APIs() []rpc.API { - apis := ethapi.GetAPIs(s.ApiBackend) + apis := ethapi.GetAPIs(s.ApiBackend, nil) apis = append(apis, s.engine.APIs(s.BlockChain().HeaderChain())...) 
return append(apis, []rpc.API{ { diff --git a/light/txpool.go b/light/txpool.go index 1296389e3b11..bb21a5475ea8 100644 --- a/light/txpool.go +++ b/light/txpool.go @@ -531,3 +531,14 @@ func (pool *TxPool) RemoveTx(hash common.Hash) { pool.chainDb.Delete(hash[:]) pool.relay.Discard([]common.Hash{hash}) } + +// MevBundles returns a list of bundles valid for the given blockNumber/blockTimestamp +// also prunes bundles that are outdated +func (pool *TxPool) MevBundles(blockNumber *big.Int, blockTimestamp uint64) ([]types.Transactions, error) { + return nil, nil +} + +// AddMevBundle adds a mev bundle to the pool +func (pool *TxPool) AddMevBundle(txs types.Transactions, blockNumber *big.Int, minTimestamp uint64, maxTimestamp uint64) error { + return nil +} diff --git a/miner/miner.go b/miner/miner.go index 00c3d0cb5cfc..794444bfce42 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -56,7 +56,7 @@ type Config struct { // Miner creates blocks and searches for proof-of-work values. type Miner struct { mux *event.TypeMux - worker *worker + worker *multiWorker coinbase common.Address eth Backend engine consensus.Engine @@ -73,7 +73,7 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even exitCh: make(chan struct{}), startCh: make(chan common.Address), stopCh: make(chan struct{}), - worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true), + worker: newMultiWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true), } go miner.update() @@ -182,7 +182,7 @@ func (miner *Miner) SetRecommitInterval(interval time.Duration) { // Pending returns the currently pending block and associated state. func (miner *Miner) Pending() (*types.Block, *state.StateDB) { - return miner.worker.pending() + return miner.worker.regularWorker.pending() } // PendingBlock returns the currently pending block. 
@@ -191,7 +191,7 @@ func (miner *Miner) Pending() (*types.Block, *state.StateDB) { // simultaneously, please use Pending(), as the pending state can // change between multiple method calls func (miner *Miner) PendingBlock() *types.Block { - return miner.worker.pendingBlock() + return miner.worker.regularWorker.pendingBlock() } func (miner *Miner) SetEtherbase(addr common.Address) { @@ -219,5 +219,5 @@ func (miner *Miner) DisablePreseal() { // SubscribePendingLogs starts delivering logs from pending transactions // to the given channel. func (miner *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription { - return miner.worker.pendingLogsFeed.Subscribe(ch) + return miner.worker.regularWorker.pendingLogsFeed.Subscribe(ch) } diff --git a/miner/multi_worker.go b/miner/multi_worker.go new file mode 100644 index 000000000000..ea2b2e4f6299 --- /dev/null +++ b/miner/multi_worker.go @@ -0,0 +1,80 @@ +package miner + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/params" +) + +type multiWorker struct { + regularWorker *worker + flashbotsWorker *worker +} + +func (w *multiWorker) stop() { + w.regularWorker.stop() + w.flashbotsWorker.stop() +} + +func (w *multiWorker) start() { + w.regularWorker.start() + w.flashbotsWorker.start() +} + +func (w *multiWorker) close() { + w.regularWorker.close() + w.flashbotsWorker.close() +} + +func (w *multiWorker) isRunning() bool { + return w.regularWorker.isRunning() || w.flashbotsWorker.isRunning() +} + +func (w *multiWorker) setExtra(extra []byte) { + w.regularWorker.setExtra(extra) + w.flashbotsWorker.setExtra(extra) +} + +func (w *multiWorker) setRecommitInterval(interval time.Duration) { + w.regularWorker.setRecommitInterval(interval) + w.flashbotsWorker.setRecommitInterval(interval) +} + +func (w *multiWorker) setEtherbase(addr 
common.Address) { + w.regularWorker.setEtherbase(addr) + w.flashbotsWorker.setEtherbase(addr) +} + +func (w *multiWorker) enablePreseal() { + w.regularWorker.enablePreseal() + w.flashbotsWorker.enablePreseal() +} + +func (w *multiWorker) disablePreseal() { + w.regularWorker.disablePreseal() + w.flashbotsWorker.disablePreseal() +} + +func newMultiWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *multiWorker { + queue := make(chan *task) + + return &multiWorker{ + regularWorker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, init, &flashbotsData{ + isFlashbots: false, + queue: queue, + }), + flashbotsWorker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, init, &flashbotsData{ + isFlashbots: true, + queue: queue, + }), + } +} + +type flashbotsData struct { + isFlashbots bool + queue chan *task +} diff --git a/miner/worker.go b/miner/worker.go index 2cee6af0c326..ac3b99ce89a2 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -87,6 +87,7 @@ type environment struct { uncles mapset.Set // uncle set tcount int // tx count in cycle gasPool *core.GasPool // available gas used to pack transactions + profit *big.Int header *types.Header txs []*types.Transaction @@ -99,6 +100,9 @@ type task struct { state *state.StateDB block *types.Block createdAt time.Time + + profit *big.Int + isFlashbots bool } const ( @@ -180,6 +184,8 @@ type worker struct { // External functions isLocalBlock func(block *types.Block) bool // Function used to determine whether the specified block is mined by local miner. + flashbots *flashbotsData + // Test hooks newTaskHook func(*task) // Method to call upon receiving a new sealing task. skipSealHook func(*task) bool // Method to decide whether skipping the sealing. @@ -187,7 +193,30 @@ type worker struct { resubmitHook func(time.Duration, time.Duration) // Method to call upon updating resubmitting interval. 
} -func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *worker { +func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool, flashbots *flashbotsData) *worker { + exitCh := make(chan struct{}) + taskCh := make(chan *task) + if flashbots.isFlashbots { + // publish to the flashbots queue + taskCh = flashbots.queue + } else { + // read from the flashbots queue + go func() { + for { + select { + case flashbotsTask := <-flashbots.queue: + select { + case taskCh <- flashbotsTask: + case <-exitCh: + return + } + case <-exitCh: + return + } + } + }() + } + worker := &worker{ config: config, chainConfig: chainConfig, @@ -204,12 +233,13 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), newWorkCh: make(chan *newWorkReq), - taskCh: make(chan *task), + taskCh: taskCh, resultCh: make(chan *types.Block, resultQueueSize), - exitCh: make(chan struct{}), + exitCh: exitCh, startCh: make(chan struct{}, 1), resubmitIntervalCh: make(chan time.Duration), resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize), + flashbots: flashbots, } // Subscribe NewTxsEvent for tx pool worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) @@ -226,8 +256,11 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus go worker.mainLoop() go worker.newWorkLoop(recommit) - go worker.resultLoop() - go worker.taskLoop() + if !flashbots.isFlashbots { + // only mine if not flashbots + go worker.resultLoop() + go worker.taskLoop() + } // Submit first work to initialize pending state. 
if init { @@ -536,6 +569,9 @@ func (w *worker) taskLoop() { var ( stopCh chan struct{} prev common.Hash + + prevParentHash common.Hash + prevProfit *big.Int ) // interrupt aborts the in-flight sealing task. @@ -556,10 +592,20 @@ func (w *worker) taskLoop() { if sealHash == prev { continue } + + taskParentHash := task.block.Header().ParentHash + // reject new tasks which don't profit + if taskParentHash == prevParentHash && + prevProfit != nil && task.profit.Cmp(prevProfit) < 0 { + continue + } + prevParentHash = taskParentHash + prevProfit = task.profit + // Interrupt previous sealing operation interrupt() stopCh, prev = make(chan struct{}), sealHash - + log.Info("Proposed miner block", "blockNumber", task.block.Number(), "profit", prevProfit, "isFlashbots", task.isFlashbots, "sealhash", sealHash, "parentHash", prevParentHash) if w.skipSealHook != nil && w.skipSealHook(task) { continue } @@ -643,15 +689,11 @@ func (w *worker) resultLoop() { } } -// makeCurrent creates a new environment for the current cycle. 
-func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
-	// Retrieve the parent state to execute on top and start a prefetcher for
-	// the miner to speed block sealing up a bit
+func (w *worker) generateEnv(parent *types.Block, header *types.Header) (*environment, error) {
 	state, err := w.chain.StateAt(parent.Root())
 	if err != nil {
-		return err
+		return nil, err
 	}
-	state.StartPrefetcher("miner")
 
 	env := &environment{
 		signer:    types.MakeSigner(w.chainConfig, header.Number),
@@ -660,6 +702,7 @@ func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
 		family:    mapset.NewSet(),
 		uncles:    mapset.NewSet(),
 		header:    header,
+		profit:    new(big.Int),
 	}
 	// when 08 is processed ancestors contain 07 (quick block)
 	for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) {
@@ -671,6 +714,17 @@ func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
 	}
 	// Keep track of transactions which return errors so they can be removed
 	env.tcount = 0
+	env.gasPool = new(core.GasPool).AddGas(header.GasLimit)
+	return env, nil
+}
+
+// makeCurrent creates a new environment for the current cycle.
+func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error {
+	env, err := w.generateEnv(parent, header)
+	if err != nil {
+		return err
+	}
+	env.state.StartPrefetcher("miner")
 	// Swap out the old work with the new one, terminating any leftover prefetcher
 	// processes in the mean time and starting a new one.
@@ -733,8 +787,9 @@ func (w *worker) updateSnapshot() { w.snapshotState = w.current.state.Copy() } -func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { +func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Address, trackProfit bool) ([]*types.Log, error) { snap := w.current.state.Snapshot() + initialBalance := w.current.state.GetBalance(w.coinbase) receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &coinbase, w.current.gasPool, w.current.state, w.current.header, tx, &w.current.header.GasUsed, *w.chain.GetVMConfig()) if err != nil { @@ -744,17 +799,128 @@ func (w *worker) commitTransaction(tx *types.Transaction, coinbase common.Addres w.current.txs = append(w.current.txs, tx) w.current.receipts = append(w.current.receipts, receipt) + // coinbase balance difference already contains gas fee + if trackProfit { + finalBalance := w.current.state.GetBalance(w.coinbase) + w.current.profit.Add(w.current.profit, new(big.Int).Sub(finalBalance, initialBalance)) + } else { + gasUsed := new(big.Int).SetUint64(receipt.GasUsed) + w.current.profit.Add(w.current.profit, gasUsed.Mul(gasUsed, tx.GasPrice())) + } + return receipt.Logs, nil } -func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool { +func (w *worker) commitBundle(txs types.Transactions, coinbase common.Address, interrupt *int32) bool { // Short circuit if current is nil if w.current == nil { return true } - if w.current.gasPool == nil { - w.current.gasPool = new(core.GasPool).AddGas(w.current.header.GasLimit) + var coalescedLogs []*types.Log + + for _, tx := range txs { + // In the following three cases, we will interrupt the execution of the transaction. 
+ // (1) new head block event arrival, the interrupt signal is 1 + // (2) worker start or restart, the interrupt signal is 1 + // (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2. + // For the first two cases, the semi-finished work will be discarded. + // For the third case, the semi-finished work will be submitted to the consensus engine. + if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone { + // Notify resubmit loop to increase resubmitting interval due to too frequent commits. + if atomic.LoadInt32(interrupt) == commitInterruptResubmit { + ratio := float64(w.current.header.GasLimit-w.current.gasPool.Gas()) / float64(w.current.header.GasLimit) + if ratio < 0.1 { + ratio = 0.1 + } + w.resubmitAdjustCh <- &intervalAdjust{ + ratio: ratio, + inc: true, + } + } + return atomic.LoadInt32(interrupt) == commitInterruptNewHead + } + // If we don't have enough gas for any further transactions then we're done + if w.current.gasPool.Gas() < params.TxGas { + log.Trace("Not enough gas for further transactions", "have", w.current.gasPool, "want", params.TxGas) + break + } + if tx == nil { + log.Error("Unexpected nil transaction in bundle") + return true + } + // Error may be ignored here. The error has already been checked + // during transaction acceptance is the transaction pool. + // + // We use the eip155 signer regardless of the current hf. + from, _ := types.Sender(w.current.signer, tx) + // Check whether the tx is replay protected. If we're not in the EIP155 hf + // phase, start ignoring the sender until we do. 
+ if tx.Protected() && !w.chainConfig.IsEIP155(w.current.header.Number) { + log.Debug("Unexpected protected transaction in bundle") + return true + } + // Start executing the transaction + w.current.state.Prepare(tx.Hash(), common.Hash{}, w.current.tcount) + + logs, err := w.commitTransaction(tx, coinbase, true) + switch err { + case core.ErrGasLimitReached: + // Pop the current out-of-gas transaction without shifting in the next from the account + log.Error("Unexpected gas limit exceeded for current block in the bundle", "sender", from) + return true + + case core.ErrNonceTooLow: + // New head notification data race between the transaction pool and miner, shift + log.Error("Transaction with low nonce in the bundle", "sender", from, "nonce", tx.Nonce()) + return true + + case core.ErrNonceTooHigh: + // Reorg notification data race between the transaction pool and miner, skip account = + log.Error("Account with high nonce in the bundle", "sender", from, "nonce", tx.Nonce()) + return true + + case nil: + // Everything ok, collect the logs and shift in the next transaction from the same account + coalescedLogs = append(coalescedLogs, logs...) + w.current.tcount++ + continue + + default: + // Strange error, discard the transaction and get the next in line (note, the + // nonce-too-high clause will prevent us from executing in vain). + log.Error("Transaction failed in the bundle", "hash", tx.Hash(), "err", err) + return true + } + } + + if !w.isRunning() && len(coalescedLogs) > 0 { + // We don't push the pendingLogsEvent while we are mining. The reason is that + // when we are mining, the worker will regenerate a mining block every 3 seconds. + // In order to avoid pushing the repeated pendingLog, we disable the pending log pushing. + + // make a copy, the state caches the logs and these logs get "upgraded" from pending to mined + // logs by filling in the block hash when the block was mined by the local miner. 
This can + // cause a race condition if a log was "upgraded" before the PendingLogsEvent is processed. + cpy := make([]*types.Log, len(coalescedLogs)) + for i, l := range coalescedLogs { + cpy[i] = new(types.Log) + *cpy[i] = *l + } + w.pendingLogsFeed.Send(cpy) + } + // Notify resubmit loop to decrease resubmitting interval if current interval is larger + // than the user-specified one. + if interrupt != nil { + w.resubmitAdjustCh <- &intervalAdjust{inc: false} + } + return false +} + +func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coinbase common.Address, interrupt *int32) bool { + // Short circuit if current is nil + if w.current == nil { + return true } var coalescedLogs []*types.Log @@ -806,7 +972,7 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin // Start executing the transaction w.current.state.Prepare(tx.Hash(), common.Hash{}, w.current.tcount) - logs, err := w.commitTransaction(tx, coinbase) + logs, err := w.commitTransaction(tx, coinbase, false) switch { case errors.Is(err, core.ErrGasLimitReached): // Pop the current out-of-gas transaction without shifting in the next from the account @@ -869,7 +1035,6 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) { w.mu.RLock() defer w.mu.RUnlock() - tstart := time.Now() parent := w.chain.CurrentBlock() @@ -957,10 +1122,14 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) log.Error("Failed to fetch pending transactions", "err", err) return } - // Short circuit if there is no available pending transactions. + // Short circuit if there is no available pending transactions or bundles. // But if we disable empty precommit already, ignore it. Since // empty block is necessary to keep the liveness of the network. 
- if len(pending) == 0 && atomic.LoadUint32(&w.noempty) == 0 { + noBundles := true + if w.flashbots.isFlashbots && len(w.eth.TxPool().AllMevBundles()) > 0 { + noBundles = false + } + if len(pending) == 0 && atomic.LoadUint32(&w.noempty) == 0 && noBundles { w.updateSnapshot() return } @@ -972,6 +1141,18 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) localTxs[account] = txs } } + if w.flashbots.isFlashbots { + bundles, err := w.eth.TxPool().MevBundles(header.Number, header.Time) + if err != nil { + log.Error("Failed to fetch pending transactions", "err", err) + return + } + maxBundle, bundlePrice, ethToCoinbase, gasUsed := w.findMostProfitableBundle(bundles, w.coinbase, parent, header) + log.Info("Flashbots bundle", "ethToCoinbase", ethToCoinbase, "gasUsed", gasUsed, "bundlePrice", bundlePrice, "bundleLength", len(maxBundle)) + if w.commitBundle(maxBundle, w.coinbase, interrupt) { + return + } + } if len(localTxs) > 0 { txs := types.NewTransactionsByPriceAndNonce(w.current.signer, localTxs) if w.commitTransactions(txs, w.coinbase, interrupt) { @@ -1002,12 +1183,13 @@ func (w *worker) commit(uncles []*types.Header, interval func(), update bool, st interval() } select { - case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now()}: + case w.taskCh <- &task{receipts: receipts, state: s, block: block, createdAt: time.Now(), profit: w.current.profit, isFlashbots: w.flashbots.isFlashbots}: w.unconfirmed.Shift(block.NumberU64() - 1) log.Info("Commit new mining work", "number", block.Number(), "sealhash", w.engine.SealHash(block.Header()), "uncles", len(uncles), "txs", w.current.tcount, "gas", block.GasUsed(), "fees", totalFees(block, receipts), - "elapsed", common.PrettyDuration(time.Since(start))) + "elapsed", common.PrettyDuration(time.Since(start)), + "isFlashbots", w.flashbots.isFlashbots) case <-w.exitCh: log.Info("Worker has exited") @@ -1019,6 +1201,66 @@ func (w *worker) commit(uncles []*types.Header, 
interval func(), update bool, st
 	return nil
 }
 
+func (w *worker) findMostProfitableBundle(bundles []types.Transactions, coinbase common.Address, parent *types.Block, header *types.Header) (types.Transactions, *big.Int, *big.Int, uint64) {
+	maxBundlePrice := new(big.Int)
+	maxTotalEth := new(big.Int)
+	var maxTotalGasUsed uint64
+	maxBundle := types.Transactions{}
+	for _, bundle := range bundles {
+		if len(bundle) == 0 {
+			continue
+		}
+		totalEth, totalGasUsed, err := w.computeBundleGas(bundle, parent, header)
+
+		if err != nil {
+			log.Debug("Error computing gas for a bundle", "error", err)
+			continue
+		}
+
+		mevGasPrice := new(big.Int).Div(totalEth, new(big.Int).SetUint64(totalGasUsed))
+		if mevGasPrice.Cmp(maxBundlePrice) > 0 {
+			maxBundle = bundle
+			maxBundlePrice = mevGasPrice
+			maxTotalEth = totalEth
+			maxTotalGasUsed = totalGasUsed
+		}
+	}
+
+	return maxBundle, maxBundlePrice, maxTotalEth, maxTotalGasUsed
+}
+
+// Compute the adjusted gas price for a whole bundle
+// Done by calculating all gas spent, adding transfers to the coinbase, and then dividing by gas used
+func (w *worker) computeBundleGas(bundle types.Transactions, parent *types.Block, header *types.Header) (*big.Int, uint64, error) {
+	env, err := w.generateEnv(parent, header)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	var totalGasUsed uint64 = 0
+	var tempGasUsed uint64
+	gasFees := new(big.Int)
+
+	coinbaseBalanceBefore := env.state.GetBalance(w.coinbase)
+
+	for _, tx := range bundle {
+		receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, &w.coinbase, env.gasPool, env.state, env.header, tx, &tempGasUsed, *w.chain.GetVMConfig())
+		if err != nil {
+			return nil, 0, err
+		}
+		if receipt.Status == types.ReceiptStatusFailed {
+			return nil, 0, errors.New("revert")
+		}
+
+		totalGasUsed += receipt.GasUsed
+		gasFees.Add(gasFees, new(big.Int).Mul(new(big.Int).SetUint64(receipt.GasUsed), tx.GasPrice()))
+	}
+	coinbaseBalanceAfter := env.state.GetBalance(w.coinbase)
+	coinbaseDiff := 
new(big.Int).Sub(new(big.Int).Sub(coinbaseBalanceAfter, gasFees), coinbaseBalanceBefore) + + return coinbaseDiff, totalGasUsed, nil +} + // copyReceipts makes a deep copy of the given receipts. func copyReceipts(receipts []*types.Receipt) []*types.Receipt { result := make([]*types.Receipt, len(receipts)) diff --git a/miner/worker_test.go b/miner/worker_test.go index 0fe62316e1f1..87b2eb7ad6e4 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -193,7 +193,10 @@ func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, *testWorkerBackend) { backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) backend.txPool.AddLocals(pendingTxs) - w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false) + w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false, &flashbotsData{ + isFlashbots: false, + queue: nil, + }) w.setEtherbase(testBankAddress) return w, backend }