diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml index 8e1838bd5a3e..fae29198caaa 100644 --- a/.github/workflows/build-test.yml +++ b/.github/workflows/build-test.yml @@ -10,7 +10,7 @@ jobs: unit-test: strategy: matrix: - go-version: [1.19.x] + go-version: [1.20.x] os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 8a19355213e6..9d7c33719e22 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -10,7 +10,7 @@ jobs: golang-lint: strategy: matrix: - go-version: [1.19.x] + go-version: [1.20.x] os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ae15237310a8..6f3304b8b396 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -16,7 +16,7 @@ jobs: name: Build Release strategy: matrix: - go-version: [1.19.x] + go-version: [1.20.x] os: [ubuntu-20.04, macos-latest] # windows-latest runs-on: ${{ matrix.os }} steps: @@ -131,7 +131,7 @@ jobs: id: changelog run: | chmod 755 ./.github/generate_change_log.sh - CHANGELOG=$(./.github/generate_change_log.sh ${{ env.RELEASE_VERSION}}) + CHANGELOG=$(./.github/generate_change_log.sh ${{ env.RELEASE_VERSION }}) echo "CHANGELOG<<EOF" >> $GITHUB_ENV echo "$CHANGELOG" >> $GITHUB_ENV echo "EOF" >> $GITHUB_ENV diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index 2df40c876b4b..53cc1b94c116 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -10,7 +10,7 @@ jobs: unit-test: strategy: matrix: - go-version: [1.19.x] + go-version: [1.20.x] os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: diff --git a/.golangci.yml b/.golangci.yml index 2b746fe31f59..e7d4e36d705a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -51,9 +51,6 @@ issues: - path: core/state/metrics.go linters: - unused - - path: core/state/statedb_fuzz_test.go - linters: - - unused - path: core/txpool/legacypool/list.go linters: - staticcheck diff --git a/CHANGELOG.md b/CHANGELOG.md index a8fb1d6a03c3..f647e2ece609 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,28 @@ # Changelog +## v1.3.2 +BUGFIX +fix: remove sharedPool + +## v1.3.1 +FEATURE +* [\#1881](https://github.com/bnb-chain/bsc/pull/1881) feat: active pbss +* [\#1882](https://github.com/bnb-chain/bsc/pull/1882) cmd/geth: add hbss to pbss convert tool +* [\#1916](https://github.com/bnb-chain/bsc/pull/1916) feat: cherry-pick pbss patch commits from eth repo in v1.13.2 +* [\#1939](https://github.com/bnb-chain/bsc/pull/1939) dependency: go version to 1.20 and some dependencies in go.mod +* [\#1955](https://github.com/bnb-chain/bsc/pull/1955) eth, trie/triedb/pathdb: pbss patches +* [\#1962](https://github.com/bnb-chain/bsc/pull/1962) cherry pick pbss patches from go-ethereum + +BUGFIX +* [\#1923](https://github.com/bnb-chain/bsc/pull/1923) consensus/parlia: fix nextForkHash in Extra field of block header +* [\#1950](https://github.com/bnb-chain/bsc/pull/1950) fix: 2 APIs of get receipt related +* [\#1951](https://github.com/bnb-chain/bsc/pull/1951) txpool: fix a potential crash issue in shutdown +* [\#1963](https://github.com/bnb-chain/bsc/pull/1963) fix: revert trie committed flag after deleting statedb mpt cache + +IMPROVEMENT +* [\#1948](https://github.com/bnb-chain/bsc/pull/1948) performance: commitTrie concurrently +* [\#1949](https://github.com/bnb-chain/bsc/pull/1949) code: remove accountTrieCache and storageTrieCache +* 
[\#1954](https://github.com/bnb-chain/bsc/pull/1954) trie: keep trie prefetch during validation phase + ## v1.3.0 #### RPC * [internal/ethapi: add debug_getRawReceipts RPC method (#24773)](https://github.com/bnb-chain/bsc/pull/1840/commits/ae7d834bc752a2d94fef9d354ee78fcb9425f3d1) diff --git a/README.md b/README.md index 4b71f372fb50..cad7b1896433 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ Many of the below are the same as or similar to go-ethereum. For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/getting-started/installing-geth). -Building `geth` requires both a Go (version 1.19 or later) and a C compiler (GCC 5 or higher). You can install +Building `geth` requires both a Go (version 1.20 or later) and a C compiler (GCC 5 or higher). You can install them using your favourite package manager. Once the dependencies are installed, run ```shell diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index 4d65858f1ee0..7ccc09e05204 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -2194,7 +2194,7 @@ func TestGolangBindings(t *testing.T) { t.Fatalf("failed to replace cometbft dependency to bnb-chain source: %v\n%s", err, out) } - tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.19") + tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.20") tidier.Dir = pkg if out, err := tidier.CombinedOutput(); err != nil { t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) diff --git a/cmd/evm/blockrunner.go b/cmd/evm/blockrunner.go index ffd3165f5569..0be5f6971138 100644 --- a/cmd/evm/blockrunner.go +++ b/cmd/evm/blockrunner.go @@ -22,6 +22,7 @@ import ( "fmt" "os" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" @@ -64,7 +65,7 @@ func blockTestCmd(ctx *cli.Context) error { return err } for i, test := range tests { - if err := test.Run(false, tracer); err != nil { + if err := test.Run(false, rawdb.HashScheme, tracer); err != nil { return fmt.Errorf("test %v: %w", i, err) } } diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index b6f85a968d97..55c044c34044 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -43,6 +43,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" "github.com/urfave/cli/v2" ) @@ -143,12 +144,23 @@ func runCmd(ctx *cli.Context) error { gen := readGenesis(ctx.String(GenesisFlag.Name)) genesisConfig = gen db := rawdb.NewMemoryDatabase() - genesis := gen.MustCommit(db) - sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: preimages}) + triedb := trie.NewDatabase(db, &trie.Config{ + Preimages: preimages, + HashDB: hashdb.Defaults, + }) + defer triedb.Close() + genesis := gen.MustCommit(db, triedb) + sdb := state.NewDatabaseWithNodeDB(db, triedb) statedb, _ = state.New(genesis.Root(), sdb, nil) chainConfig = gen.Config } else { - sdb := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: preimages}) + db := rawdb.NewMemoryDatabase() + triedb := trie.NewDatabase(db, &trie.Config{ + Preimages: preimages, + HashDB: hashdb.Defaults, + }) + defer triedb.Close() + sdb := state.NewDatabaseWithNodeDB(db, triedb) statedb, _ = state.New(types.EmptyRootHash, sdb, nil) genesisConfig = new(core.Genesis) } diff --git 
a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index 231830f6d78b..a29c4b18f6f5 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -23,7 +23,9 @@ import ( "os" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/log" @@ -104,25 +106,22 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error { for _, st := range test.Subtests() { // Run the test and aggregate the result result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true} - _, s, err := test.Run(st, cfg, false) - // print state root for evmlab tracing - if s != nil { - root := s.IntermediateRoot(false) - result.Root = &root - if jsonOut { - fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root) + test.Run(st, cfg, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) { + if err != nil { + // Test failed, mark as so and dump any state to aid debugging + result.Pass, result.Error = false, err.Error() + if dump { + dump := state.RawDump(nil) + result.State = &dump + } + } else { + root := state.IntermediateRoot(false) + result.Root = &root + if jsonOut { + fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root) + } } - } - if err != nil { - // Test failed, mark as so and dump any state to aid debugging - result.Pass, result.Error = false, err.Error() - if dump && s != nil { - s, _ = state.New(*result.Root, s.Database(), nil) - dump := s.RawDump(nil) - result.State = &dump - } - } - + }) results = append(results, *result) } } diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 9ea39fe1e00b..2d3c7d0d0d53 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -44,7 +44,6 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/trie" "github.com/urfave/cli/v2" ) @@ -54,7 +53,10 @@ var ( Name: "init", Usage: "Bootstrap and initialize a new genesis block", ArgsUsage: "", - Flags: flags.Merge([]cli.Flag{utils.CachePreimagesFlag}, utils.DatabasePathFlags), + Flags: flags.Merge([]cli.Flag{ + utils.CachePreimagesFlag, + utils.StateSchemeFlag, + }, utils.DatabasePathFlags), Description: ` The init command initializes a new genesis block and definition for the network. This is a destructive action and changes the network in which you will be @@ -116,6 +118,9 @@ if one is set. Otherwise it prints the genesis from the datadir.`, utils.MetricsInfluxDBBucketFlag, utils.MetricsInfluxDBOrganizationFlag, utils.TxLookupLimitFlag, + utils.TransactionHistoryFlag, + utils.StateSchemeFlag, + utils.StateHistoryFlag, }, utils.DatabasePathFlags), Description: ` The import command imports blocks from an RLP-encoded form. The form can be one file @@ -132,6 +137,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`, Flags: flags.Merge([]cli.Flag{ utils.CacheFlag, utils.SyncModeFlag, + utils.StateSchemeFlag, }, utils.DatabasePathFlags), Description: ` Requires a first argument of the file to write to. @@ -181,6 +187,7 @@ It's deprecated, please use "geth db export" instead. 
utils.IncludeIncompletesFlag, utils.StartKeyFlag, utils.DumpLimitFlag, + utils.StateSchemeFlag, }, utils.DatabasePathFlags), Description: ` This command dumps out the state for a given block (or latest, if none provided). @@ -217,14 +224,15 @@ func initGenesis(ctx *cli.Context) error { if err != nil { utils.Fatalf("Failed to open database: %v", err) } - triedb := trie.NewDatabaseWithConfig(chaindb, &trie.Config{ - Preimages: ctx.Bool(utils.CachePreimagesFlag.Name), - }) + defer chaindb.Close() + + triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false) + defer triedb.Close() + _, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis) if err != nil { utils.Fatalf("Failed to write genesis block: %v", err) } - chaindb.Close() log.Info("Successfully wrote genesis state", "database", name, "hash", hash) } return nil @@ -423,7 +431,7 @@ func dumpGenesis(ctx *cli.Context) error { if ctx.IsSet(utils.DataDirFlag.Name) { utils.Fatalf("no existing datadir at %s", stack.Config().DataDir) } - utils.Fatalf("no network preset provided. no exisiting genesis in the default datadir") + utils.Fatalf("no network preset provided, no existing genesis in the default datadir") return nil } @@ -647,10 +655,10 @@ func dump(ctx *cli.Context) error { if err != nil { return err } - config := &trie.Config{ - Preimages: true, // always enable preimage lookup - } - state, err := state.New(root, state.NewDatabaseWithConfig(db, config), nil) + triedb := utils.MakeTrieDatabase(ctx, db, true, false) // always enable preimage lookup + defer triedb.Close() + + state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil) if err != nil { return err } diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 7f957a7ba29f..25e6798d78d5 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -19,6 +19,7 @@ package main import ( "bytes" "fmt" + "math" "os" "os/signal" "path/filepath" @@ -72,6 +73,9 @@ Remove blockchain and state databases`, // no legacy stored receipts for bsc // dbMigrateFreezerCmd, dbCheckStateContentCmd, + dbHbss2PbssCmd, + dbTrieGetCmd, + dbTrieDeleteCmd, }, } dbInspectCmd = &cli.Command{ @@ -94,6 +98,45 @@ Remove blockchain and state databases`, For each trie node encountered, it checks that the key corresponds to the keccak256(value). 
If this is not true, this indicates a data corruption.`, } + dbHbss2PbssCmd = &cli.Command{ + Action: hbss2pbss, + Name: "hbss-to-pbss", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.ForceFlag, + utils.AncientFlag, + }, + Usage: "Convert hash-based trie nodes to path-based trie nodes.", + Description: `This command iterates the entire trie node database and converts each hash-based node to a path-based node.`, + } + dbTrieGetCmd = &cli.Command{ + Action: dbTrieGet, + Name: "trie-get", + Usage: "Show the value of a trie node path key", + ArgsUsage: "[trie owner] ", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.StateSchemeFlag, + }, + Description: "This command looks up the specified trie node key from the database.", + } + dbTrieDeleteCmd = &cli.Command{ + Action: dbTrieDelete, + Name: "trie-delete", + Usage: "Delete the specified trie node", + ArgsUsage: "[trie owner] | ", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.StateSchemeFlag, + }, + Description: "This command deletes the specified trie node from the database.", + } dbStatCmd = &cli.Command{ Action: dbStats, Name: "stats", @@ -154,6 +197,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, ArgsUsage: " ", Flags: flags.Merge([]cli.Flag{ utils.SyncModeFlag, + utils.StateSchemeFlag, }, utils.NetworkFlags, utils.DatabasePathFlags), Description: "This command looks up the specified database key from the database.", } @@ -432,6 +476,133 @@ func dbGet(ctx *cli.Context) error { return nil } +// dbTrieGet shows the value of a given trie node key +func dbTrieGet(ctx *cli.Context) error { + if ctx.NArg() < 1 || ctx.NArg() > 2 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, false, false) + defer db.Close() + + scheme := ctx.String(utils.StateSchemeFlag.Name) + if scheme == "" { + scheme = rawdb.HashScheme + } + + if scheme == rawdb.PathScheme { + var ( + pathKey []byte + owner []byte + err error + ) + if ctx.NArg() == 1 { + pathKey, err = hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the value", "error", err) + return err + } + nodeVal, hash := rawdb.ReadAccountTrieNode(db, pathKey) + log.Info("TrieGet result ", "PathKey", common.Bytes2Hex(pathKey), "Hash: ", hash, "node: ", trie.NodeString(hash.Bytes(), nodeVal)) + } else if ctx.NArg() == 2 { + owner, err = hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the value", "error", err) + return err + } + pathKey, err = hexutil.Decode(ctx.Args().Get(1)) + if err != nil { + log.Info("Could not decode the value", "error", err) + return err + } + + nodeVal, hash := rawdb.ReadStorageTrieNode(db, common.BytesToHash(owner), pathKey) + log.Info("TrieGet result ", "PathKey: ", common.Bytes2Hex(pathKey), "Owner: ", common.BytesToHash(owner), "Hash: ", hash, "node: ", trie.NodeString(hash.Bytes(), nodeVal)) + } + } else if scheme == rawdb.HashScheme { + if ctx.NArg() == 1 { + hashKey, err := hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the value", "error", err) + return err + } + val, err := db.Get(hashKey) + if err != nil { + log.Error("db get failed", "error", err) + return err + } + log.Info("TrieGet result ", "HashKey: ", common.BytesToHash(hashKey), "node: ", trie.NodeString(hashKey, val)) + } else { + log.Error("too many arguments") + } 
+ } + + return nil +} + +// dbTrieDelete deletes the trie node for a given key +func dbTrieDelete(ctx *cli.Context) error { + if ctx.NArg() < 1 || ctx.NArg() > 2 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, false, false) + defer db.Close() + + scheme := ctx.String(utils.StateSchemeFlag.Name) + if scheme == "" { + scheme = rawdb.HashScheme + } + + if scheme == rawdb.PathScheme { + var ( + pathKey []byte + owner []byte + err error + ) + if ctx.NArg() == 1 { + pathKey, err = hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the value", "error", err) + return err + } + rawdb.DeleteAccountTrieNode(db, pathKey) + } else if ctx.NArg() == 2 { + owner, err = hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the value", "error", err) + return err + } + pathKey, err = hexutil.Decode(ctx.Args().Get(1)) + if err != nil { + log.Info("Could not decode the value", "error", err) + return err + } + rawdb.DeleteStorageTrieNode(db, common.BytesToHash(owner), pathKey) + } + } else if scheme == rawdb.HashScheme { + if ctx.NArg() == 1 { + hashKey, err := hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the value", "error", err) + return err + } + err = db.Delete(hashKey) + if err != nil { + log.Error("db delete failed", "err", err) + return err + } + } else { + log.Error("too many arguments") + } + } + return nil +} + // dbDelete deletes a key from the database func dbDelete(ctx *cli.Context) error { if ctx.NArg() != 1 { @@ -504,6 +675,9 @@ func dbDumpTrie(ctx *cli.Context) error { db := utils.MakeChainDatabase(ctx, stack, true, false) defer db.Close() + triedb := utils.MakeTrieDatabase(ctx, db, false, true) + defer triedb.Close() + var ( state []byte storage []byte @@ -537,7 +711,7 @@ func dbDumpTrie(ctx *cli.Context) error { } } id := trie.StorageTrieID(common.BytesToHash(state), common.BytesToHash(account), common.BytesToHash(storage)) - theTrie, err := trie.New(id, trie.NewDatabase(db)) + theTrie, err := trie.New(id, triedb) if err != nil { return err } @@ -739,3 +913,101 @@ func showMetaData(ctx *cli.Context) error { table.Render() return nil } + +func hbss2pbss(ctx *cli.Context) error { + if ctx.NArg() > 1 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + + var jobnum uint64 + var err error + if ctx.NArg() == 1 { + jobnum, err = strconv.ParseUint(ctx.Args().Get(0), 10, 64) + if err != nil { + return fmt.Errorf("failed to parse jobnum, Args[0]: %v, err: %v", ctx.Args().Get(0), err) + } + } else { + // by default + jobnum = 1000 + } + + force := ctx.Bool(utils.ForceFlag.Name) + + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + db := utils.MakeChainDatabase(ctx, stack, false, false) + db.Sync() + defer db.Close() + + // convert hbss trie node to pbss trie node + lastStateID := rawdb.ReadPersistentStateID(db) + if lastStateID == 0 || force { + config := trie.HashDefaults + triedb := trie.NewDatabase(db, config) + triedb.Cap(0) + log.Info("hbss2pbss triedb", "scheme", triedb.Scheme()) + defer triedb.Close() + + headerHash := rawdb.ReadHeadHeaderHash(db) + blockNumber := rawdb.ReadHeaderNumber(db, headerHash) + if blockNumber == nil { + log.Error("read header number failed.") + return fmt.Errorf("read header number failed") + } + + log.Info("hbss2pbss converting", "HeaderHash: ", headerHash.String(), ", blockNumber: ", *blockNumber) + + var 
headerBlockHash common.Hash + var trieRootHash common.Hash + + if *blockNumber != math.MaxUint64 { + headerBlockHash = rawdb.ReadCanonicalHash(db, *blockNumber) + if headerBlockHash == (common.Hash{}) { + return fmt.Errorf("ReadHeadBlockHash empty hash") + } + blockHeader := rawdb.ReadHeader(db, headerBlockHash, *blockNumber) + trieRootHash = blockHeader.Root + fmt.Println("Canonical Hash: ", headerBlockHash.String(), ", TrieRootHash: ", trieRootHash.String()) + } + if (trieRootHash == common.Hash{}) { + log.Error("Empty root hash") + return fmt.Errorf("Empty root hash.") + } + + id := trie.StateTrieID(trieRootHash) + theTrie, err := trie.New(id, triedb) + if err != nil { + log.Error("fail to new trie tree", "err", err, "rootHash", err, trieRootHash.String()) + return err + } + + h2p, err := trie.NewHbss2Pbss(theTrie, triedb, trieRootHash, *blockNumber, jobnum) + if err != nil { + log.Error("fail to new hash2pbss", "err", err, "rootHash", err, trieRootHash.String()) + return err + } + h2p.Run() + } else { + log.Info("Convert hbss to pbss success. Nothing to do.") + } + + // repair state ancient offset + lastStateID = rawdb.ReadPersistentStateID(db) + if lastStateID == 0 { + log.Error("Convert hbss to pbss trie node error. The last state id is still 0") + } + ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name)) + err = rawdb.ResetStateFreezerTableOffset(ancient, lastStateID) + if err != nil { + log.Error("Reset state freezer table offset failed", "error", err) + return err + } + // prune hbss trie node + err = rawdb.PruneHashTrieNodeInDataBase(db) + if err != nil { + log.Error("Prune Hash trie node in database failed", "error", err) + return err + } + return nil +} diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go index fcfaa6e975b4..5a505b802213 100644 --- a/cmd/geth/genesis_test.go +++ b/cmd/geth/genesis_test.go @@ -176,12 +176,12 @@ func TestCustomBackend(t *testing.T) { { // Can't start pebble on top of leveldb initArgs: []string{"--db.engine", "leveldb"}, execArgs: []string{"--db.engine", "pebble"}, - execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`, + execExpect: `Fatal: Could not open database: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`, }, { // Can't start leveldb on top of pebble initArgs: []string{"--db.engine", "pebble"}, execArgs: []string{"--db.engine", "leveldb"}, - execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`, + execExpect: `Fatal: Could not open database: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`, }, { // Reject invalid backend choice initArgs: []string{"--db.engine", "mssql"}, diff --git a/cmd/geth/main.go b/cmd/geth/main.go index dd16e052b59d..c0c8df9933eb 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -96,6 +96,10 @@ var ( utils.GCModeFlag, utils.SnapshotFlag, utils.TxLookupLimitFlag, + utils.TransactionHistoryFlag, + utils.StateSchemeFlag, + utils.StateHistoryFlag, + utils.PathDBSyncFlag, utils.LightServeFlag, utils.LightIngressFlag, utils.LightEgressFlag, diff --git a/cmd/geth/pruneblock_test.go b/cmd/geth/pruneblock_test.go index a20ba01fab6a..836155921222 100644 --- a/cmd/geth/pruneblock_test.go +++ b/cmd/geth/pruneblock_test.go @@ -41,6 +41,7 @@ import ( 
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -136,7 +137,11 @@ func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemai t.Fatalf("failed to create database with ancient backend") } defer db.Close() - genesis := gspec.MustCommit(db) + + triedb := trie.NewDatabase(db, nil) + defer triedb.Close() + + genesis := gspec.MustCommit(db, triedb) // Initialize a fresh chain with only a genesis block blockchain, err := core.NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index 6e4a27007c77..c6ce0c784092 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -23,6 +23,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "time" "github.com/prometheus/tsdb/fileutil" @@ -71,10 +72,7 @@ two version states are available: genesis and the specific one. The default pruning target is the HEAD-127 state. -WARNING: It's necessary to delete the trie clean cache after the pruning. -If you specify another directory for the trie clean cache via "--cache.trie.journal" -during the use of Geth, please also specify it here for correct deletion. Otherwise -the trie clean cache with default directory will be deleted. +WARNING: it's only supported in hash mode(--state.scheme=hash)". `, }, { @@ -106,7 +104,9 @@ so it's very necessary to do block data prune, this feature will handle it. Usage: "Recalculate state hash based on the snapshot for verification", ArgsUsage: "", Action: verifyState, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge([]cli.Flag{ + utils.StateSchemeFlag, + }, utils.NetworkFlags, utils.DatabasePathFlags), Description: ` geth snapshot verify-state will traverse the whole accounts and storages set based on the specified @@ -164,7 +164,9 @@ information about the specified address. Usage: "Traverse the state with given root hash and perform quick verification", ArgsUsage: "", Action: traverseState, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge([]cli.Flag{ + utils.StateSchemeFlag, + }, utils.NetworkFlags, utils.DatabasePathFlags), Description: ` geth snapshot traverse-state will traverse the whole state from the given state root and will abort if any @@ -179,7 +181,9 @@ It's also usable without snapshot enabled. Usage: "Traverse the state with given root hash and perform detailed verification", ArgsUsage: "", Action: traverseRawState, - Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags), + Flags: flags.Merge([]cli.Flag{ + utils.StateSchemeFlag, + }, utils.NetworkFlags, utils.DatabasePathFlags), Description: ` geth snapshot traverse-rawstate will traverse the whole state from the given root and will abort if any referenced @@ -201,6 +205,7 @@ It's also usable without snapshot enabled. 
utils.StartKeyFlag, utils.DumpLimitFlag, utils.TriesInMemoryFlag, + utils.StateSchemeFlag, }, utils.NetworkFlags, utils.DatabasePathFlags), Description: ` This command is semantically equivalent to 'geth dump', but uses the snapshots @@ -235,7 +240,7 @@ func accessDb(ctx *cli.Context, stack *node.Node) (ethdb.Database, error) { NoBuild: true, AsyncBuild: false, } - snaptree, err := snapshot.New(snapconfig, chaindb, trie.NewDatabase(chaindb), headBlock.Root(), TriesInMemory, false) + snaptree, err := snapshot.New(snapconfig, chaindb, trie.NewDatabase(chaindb, nil), headBlock.Root(), TriesInMemory, false) if err != nil { log.Error("snaptree error", "err", err) return nil, err // The relevant snapshot(s) might not exist @@ -356,7 +361,21 @@ func pruneBlock(ctx *cli.Context) error { if path == "" { return errors.New("prune failed, did not specify the AncientPath") } - newAncientPath = filepath.Join(path, "ancient_back") + newVersionPath := false + files, err := os.ReadDir(oldAncientPath) + if err != nil { + return err + } + for _, file := range files { + if file.IsDir() && file.Name() == "chain" { + newVersionPath = true + } + } + if newVersionPath && !strings.HasSuffix(oldAncientPath, "geth/chaindata/ancient/chain") { + log.Error("datadir.ancient subdirectory incorrect", "got path", oldAncientPath, "want subdirectory", "geth/chaindata/ancient/chain/") + return errors.New("datadir.ancient subdirectory incorrect") + } + newAncientPath = filepath.Join(path, "chain_back") blockpruner = pruner.NewBlockPruner(chaindb, stack, oldAncientPath, newAncientPath, blockAmountReserved) @@ -413,6 +432,9 @@ func pruneState(ctx *cli.Context) error { chaindb := utils.MakeChainDatabase(ctx, stack, false, false) defer chaindb.Close() + if rawdb.ReadStateScheme(chaindb) != rawdb.HashScheme { + log.Crit("Offline pruning is not required for path scheme") + } prunerconfig := pruner.Config{ Datadir: stack.ResolvePath(""), BloomSize: ctx.Uint64(utils.BloomFilterSizeFlag.Name), @@ -471,6 +493,7 @@ func pruneAllState(ctx *cli.Context) error { } chaindb := utils.MakeChainDatabase(ctx, stack, false, false) + defer chaindb.Close() pruner, err := pruner.NewAllPruner(chaindb) if err != nil { log.Error("Failed to open snapshot tree", "err", err) @@ -488,18 +511,22 @@ func verifyState(ctx *cli.Context) error { defer stack.Close() chaindb := utils.MakeChainDatabase(ctx, stack, true, false) + defer chaindb.Close() headBlock := rawdb.ReadHeadBlock(chaindb) if headBlock == nil { log.Error("Failed to load head block") return errors.New("no head block") } - snapconfig := snapshot.Config{ + triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true) + defer triedb.Close() + + snapConfig := snapshot.Config{ CacheSize: 256, Recovery: false, NoBuild: true, AsyncBuild: false, } - snaptree, err := snapshot.New(snapconfig, chaindb, trie.NewDatabase(chaindb), headBlock.Root(), 128, false) + snaptree, err := snapshot.New(snapConfig, chaindb, triedb, headBlock.Root(), 128, false) if err != nil { log.Error("Failed to open snapshot tree", "err", err) return err @@ -541,6 +568,11 @@ func traverseState(ctx *cli.Context) error { defer stack.Close() chaindb := utils.MakeChainDatabase(ctx, stack, true, false) + defer chaindb.Close() + + triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true) + defer triedb.Close() + headBlock := rawdb.ReadHeadBlock(chaindb) if headBlock == nil { log.Error("Failed to load head block") @@ -565,7 +597,6 @@ func traverseState(ctx *cli.Context) error { root = headBlock.Root() log.Info("Start traversing the state", 
"root", root, "number", headBlock.NumberU64()) } - triedb := trie.NewDatabase(chaindb) t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb) if err != nil { log.Error("Failed to open trie", "root", root, "err", err) @@ -641,6 +672,11 @@ func traverseRawState(ctx *cli.Context) error { defer stack.Close() chaindb := utils.MakeChainDatabase(ctx, stack, true, false) + defer chaindb.Close() + + triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true) + defer triedb.Close() + headBlock := rawdb.ReadHeadBlock(chaindb) if headBlock == nil { log.Error("Failed to load head block") @@ -665,7 +701,6 @@ func traverseRawState(ctx *cli.Context) error { root = headBlock.Root() log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) } - triedb := trie.NewDatabase(chaindb) t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb) if err != nil { log.Error("Failed to open trie", "root", root, "err", err) @@ -686,6 +721,11 @@ func traverseRawState(ctx *cli.Context) error { log.Error("Failed to open iterator", "root", root, "err", err) return err } + reader, err := triedb.Reader(root) + if err != nil { + log.Error("State is non-existent", "root", root) + return nil + } for accIter.Next(true) { nodes += 1 node := accIter.Hash() @@ -693,7 +733,7 @@ func traverseRawState(ctx *cli.Context) error { // Check the present for non-empty hash node(embedded node doesn't // have their own hash). if node != (common.Hash{}) { - blob := rawdb.ReadLegacyTrieNode(chaindb, node) + blob, _ := reader.Node(common.Hash{}, accIter.Path(), node) if len(blob) == 0 { log.Error("Missing trie node(account)", "hash", node) return errors.New("missing account") @@ -734,7 +774,7 @@ func traverseRawState(ctx *cli.Context) error { // Check the presence for non-empty hash node(embedded node doesn't // have their own hash). if node != (common.Hash{}) { - blob := rawdb.ReadLegacyTrieNode(chaindb, node) + blob, _ := reader.Node(common.BytesToHash(accIter.LeafKey()), storageIter.Path(), node) if len(blob) == 0 { log.Error("Missing trie node(storage)", "hash", node) return errors.New("missing storage") @@ -794,6 +834,9 @@ func dumpState(ctx *cli.Context) error { if err != nil { return err } + triedb := utils.MakeTrieDatabase(ctx, db, false, true) + defer triedb.Close() + snapConfig := snapshot.Config{ CacheSize: 256, Recovery: false, @@ -801,7 +844,7 @@ func dumpState(ctx *cli.Context) error { AsyncBuild: false, } triesInMemory := ctx.Uint64(utils.TriesInMemoryFlag.Name) - snaptree, err := snapshot.New(snapConfig, db, trie.NewDatabase(db), root, int(triesInMemory), false) + snaptree, err := snapshot.New(snapConfig, db, trie.NewDatabase(db, nil), root, int(triesInMemory), false) if err != nil { return err } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 1522a2631adf..36455d86b2d8 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -51,6 +51,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/filters" @@ -75,6 +76,9 @@ import ( "github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) // These are all the command line flags we support. 
@@ -203,7 +207,12 @@ var ( Usage: "Exits after block synchronisation completes", Category: flags.EthCategory, } - + // hbss2pbss command options + ForceFlag = &cli.BoolFlag{ + Name: "force", + Usage: "Force convert hbss trie nodes to pbss trie nodes. Ignore any metadata", + Value: false, + } // Dump command options. IterativeOutputFlag = &cli.BoolFlag{ Name: "iterative", @@ -234,30 +243,12 @@ var ( } defaultSyncMode = ethconfig.Defaults.SyncMode - SyncModeFlag = &flags.TextMarshalerFlag{ - Name: "syncmode", - Usage: `Blockchain sync mode ("snap", "full" or "light")`, - Value: &defaultSyncMode, - Category: flags.EthCategory, - } - GCModeFlag = &cli.StringFlag{ - Name: "gcmode", - Usage: `Blockchain garbage collection mode ("full", "archive")`, - Value: "full", - Category: flags.EthCategory, - } - SnapshotFlag = &cli.BoolFlag{ + SnapshotFlag = &cli.BoolFlag{ Name: "snapshot", Usage: `Enables snapshot-database mode (default = enable)`, Value: true, Category: flags.EthCategory, } - TxLookupLimitFlag = &cli.Uint64Flag{ - Name: "txlookuplimit", - Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain)", - Value: ethconfig.Defaults.TxLookupLimit, - Category: flags.EthCategory, - } LightKDFFlag = &cli.BoolFlag{ Name: "lightkdf", Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength", @@ -305,6 +296,42 @@ var ( Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } + SyncModeFlag = &flags.TextMarshalerFlag{ + Name: "syncmode", + Usage: `Blockchain sync mode ("snap", "full" or "light")`, + Value: &defaultSyncMode, + Category: flags.StateCategory, + } + GCModeFlag = &cli.StringFlag{ + Name: "gcmode", + Usage: `Blockchain garbage collection mode, only relevant in state.scheme=hash ("full", "archive")`, + Value: "full", + Category: flags.StateCategory, + } + StateSchemeFlag = &cli.StringFlag{ + Name: "state.scheme", + Usage: "Scheme to use for storing ethereum state ('hash' or 'path')", + Value: rawdb.HashScheme, + Category: flags.StateCategory, + } + PathDBSyncFlag = &cli.BoolFlag{ + Name: "pathdb.sync", + Usage: "Sync flush the trie node cache to disk in path scheme", + Value: false, + Category: flags.StateCategory, + } + StateHistoryFlag = &cli.Uint64Flag{ + Name: "history.state", + Usage: "Number of recent blocks to retain state history for (default = 90,000 blocks, 0 = entire chain)", + Value: ethconfig.Defaults.StateHistory, + Category: flags.StateCategory, + } + TransactionHistoryFlag = &cli.Uint64Flag{ + Name: "history.transactions", + Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain)", + Value: ethconfig.Defaults.TransactionHistory, + Category: flags.StateCategory, + } // Light server and client settings LightServeFlag = &cli.IntFlag{ Name: "light.serve", @@ -1818,13 +1845,8 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { CheckExclusive(ctx, MainnetFlag, DeveloperFlag) CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light") CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer - if ctx.String(GCModeFlag.Name) == "archive" && ctx.Uint64(TxLookupLimitFlag.Name) != 0 { - ctx.Set(TxLookupLimitFlag.Name, "0") - log.Warn("Disable transaction unindexing for archive node") - } - if ctx.IsSet(LightServeFlag.Name) && ctx.Uint64(TxLookupLimitFlag.Name) != 0 { - log.Warn("LES server cannot serve old transaction status and 
cannot connect below les/4 protocol version if transaction lookup index is limited") - } + + // Set configurations from CLI flags setEtherbase(ctx, cfg) setGPO(ctx, &cfg.GPO, ctx.String(SyncModeFlag.Name) == "light") setTxPool(ctx, &cfg.TxPool) @@ -1911,8 +1933,39 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { cfg.Preimages = true log.Info("Enabling recording of key preimages since archive mode is used") } - if ctx.IsSet(TxLookupLimitFlag.Name) { - cfg.TxLookupLimit = ctx.Uint64(TxLookupLimitFlag.Name) + if ctx.IsSet(StateHistoryFlag.Name) { + cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name) + } + // Parse state scheme, abort the process if it's not compatible. + chaindb := tryMakeReadOnlyDatabase(ctx, stack) + scheme, err := ParseStateScheme(ctx, chaindb) + chaindb.Close() + if err != nil { + Fatalf("%v", err) + } + cfg.StateScheme = scheme + + // Parse transaction history flag, if user is still using legacy config + // file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'. + if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit { + log.Warn("The config option 'TxLookupLimit' is deprecated and will be removed, please use 'TransactionHistory'") + cfg.TransactionHistory = cfg.TxLookupLimit + } + if ctx.IsSet(TransactionHistoryFlag.Name) { + cfg.TransactionHistory = ctx.Uint64(TransactionHistoryFlag.Name) + } else if ctx.IsSet(TxLookupLimitFlag.Name) { + log.Warn("The flag --txlookuplimit is deprecated and will be removed, please use --history.transactions") + cfg.TransactionHistory = ctx.Uint64(TransactionHistoryFlag.Name) + } + if ctx.IsSet(PathDBSyncFlag.Name) { + cfg.PathSyncFlush = true + } + if ctx.String(GCModeFlag.Name) == "archive" && cfg.TransactionHistory != 0 { + cfg.TransactionHistory = 0 + log.Warn("Disabled transaction unindexing for archive node") + } + if ctx.IsSet(LightServeFlag.Name) && cfg.TransactionHistory != 0 { + log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited") } if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) { cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100 @@ -2296,6 +2349,18 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly, disableFree return chainDb } +// tryMakeReadOnlyDatabase try to open the chain database in read-only mode, +// or fallback to write mode if the database is not initialized. +func tryMakeReadOnlyDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { + // If datadir doesn't exist we need to open db in write-mode + // so database engine can create files. 
+ readonly := true + if !common.FileExist(stack.ResolvePath("chaindata")) { + readonly = false + } + return MakeChainDatabase(ctx, stack, readonly, false) +} + func IsNetworkPreset(ctx *cli.Context) bool { for _, flag := range NetworkFlags { bFlag, _ := flag.(*cli.BoolFlag) @@ -2358,14 +2423,21 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) } + scheme, err := ParseStateScheme(ctx, chainDb) + if err != nil { + Fatalf("%v", err) + } cache := &core.CacheConfig{ - TrieCleanLimit: ethconfig.Defaults.TrieCleanCache, - TrieDirtyLimit: ethconfig.Defaults.TrieDirtyCache, - TrieDirtyDisabled: ctx.String(GCModeFlag.Name) == "archive", - TrieTimeLimit: ethconfig.Defaults.TrieTimeout, - TriesInMemory: ethconfig.Defaults.TriesInMemory, - SnapshotLimit: ethconfig.Defaults.SnapshotCache, - Preimages: ctx.Bool(CachePreimagesFlag.Name), + TrieCleanLimit: ethconfig.Defaults.TrieCleanCache, + TrieCleanNoPrefetch: ctx.Bool(CacheNoPrefetchFlag.Name), + TrieDirtyLimit: ethconfig.Defaults.TrieDirtyCache, + TrieDirtyDisabled: ctx.String(GCModeFlag.Name) == "archive", + TrieTimeLimit: ethconfig.Defaults.TrieTimeout, + TriesInMemory: ethconfig.Defaults.TriesInMemory, + SnapshotLimit: ethconfig.Defaults.SnapshotCache, + Preimages: ctx.Bool(CachePreimagesFlag.Name), + StateScheme: scheme, + StateHistory: ctx.Uint64(StateHistoryFlag.Name), } if cache.TrieDirtyDisabled && !cache.Preimages { cache.Preimages = true @@ -2413,3 +2485,62 @@ func MakeConsolePreloads(ctx *cli.Context) []string { } return preloads } + +// ParseStateScheme resolves scheme identifier from CLI flag. If the provided +// state scheme is not compatible with the one of persistent scheme, an error +// will be returned. +// +// - none: use the scheme consistent with persistent state, or fallback +// to hash-based scheme if state is empty. +// - hash: use hash-based scheme or error out if not compatible with +// persistent state scheme. +// - path: use path-based scheme or error out if not compatible with +// persistent state scheme. +func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) { + // If state scheme is not specified, use the scheme consistent + // with persistent state, or fallback to hash mode if database + // is empty. + stored := rawdb.ReadStateScheme(disk) + if !ctx.IsSet(StateSchemeFlag.Name) { + if stored == "" { + // use default scheme for empty database, flip it when + // path mode is chosen as default + log.Info("State schema set to default", "scheme", "hash") + return rawdb.HashScheme, nil + } + log.Info("State scheme set to already existing", "scheme", stored) + return stored, nil // reuse scheme of persistent scheme + } + // If state scheme is specified, ensure it's compatible with + // persistent state. + scheme := ctx.String(StateSchemeFlag.Name) + if stored == "" || scheme == stored { + log.Info("State scheme set by user", "scheme", scheme) + return scheme, nil + } + return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, scheme) +} + +// MakeTrieDatabase constructs a trie database based on the configured scheme. 
+func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database { + config := &trie.Config{ + Preimages: preimage, + } + scheme, err := ParseStateScheme(ctx, disk) + if err != nil { + Fatalf("%v", err) + } + if scheme == rawdb.HashScheme { + // Read-only mode is not implemented in hash mode, + // ignore the parameter silently. TODO(rjl493456442) + // please config it if read mode is implemented. + config.HashDB = hashdb.Defaults + return trie.NewDatabase(disk, config) + } + if readOnly { + config.PathDB = pathdb.ReadOnly + } else { + config.PathDB = pathdb.Defaults + } + return trie.NewDatabase(disk, config) +} diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go index cf51b519361a..6669ff176fdd 100644 --- a/cmd/utils/flags_legacy.go +++ b/cmd/utils/flags_legacy.go @@ -19,6 +19,7 @@ package utils import ( "fmt" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/internal/flags" "github.com/urfave/cli/v2" ) @@ -37,6 +38,7 @@ var DeprecatedFlags = []cli.Flag{ CacheTrieJournalFlag, CacheTrieRejournalFlag, LegacyDiscoveryV5Flag, + TxLookupLimitFlag, } var ( @@ -68,6 +70,13 @@ var ( Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism (deprecated, use --discv5 instead)", Category: flags.DeprecatedCategory, } + // Deprecated August 2023 + TxLookupLimitFlag = &cli.Uint64Flag{ + Name: "txlookuplimit", + Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain) (deprecated, use history.transactions instead)", + Value: ethconfig.Defaults.TransactionHistory, + Category: flags.DeprecatedCategory, + } ) // showDeprecated displays deprecated flags that will be soon removed from the codebase. diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index b607a7b4703d..0a4ab52e2d57 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -951,7 +951,7 @@ func (p *Parlia) Prepare(chain consensus.ChainHeaderReader, header *types.Header } header.Extra = header.Extra[:extraVanity-nextForkHashSize] - nextForkHash := forkid.NewID(p.chainConfig, p.genesisHash, number, header.Time).Hash + nextForkHash := forkid.NextForkHash(p.chainConfig, p.genesisHash, number, header.Time) header.Extra = append(header.Extra, nextForkHash[:]...) if err := p.prepareValidators(header); err != nil { @@ -1085,7 +1085,7 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Heade if err != nil { return err } - nextForkHash := forkid.NewID(p.chainConfig, p.genesisHash, number, header.Time).Hash + nextForkHash := forkid.NextForkHash(p.chainConfig, p.genesisHash, number, header.Time) if !snap.isMajorityFork(hex.EncodeToString(nextForkHash[:])) { log.Debug("there is a possible fork, and your client is not the majority. 
Please check...", "nextForkHash", hex.EncodeToString(nextForkHash[:])) } diff --git a/consensus/parlia/parlia_test.go b/consensus/parlia/parlia_test.go index 1d438986806a..b03f74753722 100644 --- a/consensus/parlia/parlia_test.go +++ b/consensus/parlia/parlia_test.go @@ -1,8 +1,9 @@ package parlia import ( + "crypto/rand" "fmt" - "math/rand" + mrand "math/rand" "testing" "golang.org/x/crypto/sha3" @@ -47,7 +48,7 @@ func simulateValidatorOutOfService(totalValidators int, downValidators int) { validators[i] = true down[i] = i } - rand.Shuffle(totalValidators, func(i, j int) { + mrand.Shuffle(totalValidators, func(i, j int) { down[i], down[j] = down[j], down[i] }) for i := 0; i < downValidators; i++ { @@ -125,8 +126,8 @@ func simulateValidatorOutOfService(totalValidators int, downValidators int) { } func producerBlockDelay(candidates map[int]bool, height, numOfValidators int) (int, uint64) { - s := rand.NewSource(int64(height)) - r := rand.New(s) + s := mrand.NewSource(int64(height)) + r := mrand.New(s) n := numOfValidators backOffSteps := make([]int, 0, n) for idx := 0; idx < n; idx++ { diff --git a/core/block_validator_test.go b/core/block_validator_test.go index b623bfe0558f..614676c28243 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -35,6 +35,11 @@ import ( // Tests that simple header verification works, for both good and bad blocks. func TestHeaderVerification(t *testing.T) { + testHeaderVerification(t, rawdb.HashScheme) + testHeaderVerification(t, rawdb.PathScheme) +} + +func testHeaderVerification(t *testing.T, scheme string) { // Create a simple chain to verify var ( gspec = &Genesis{Config: params.TestChainConfig} @@ -45,7 +50,7 @@ func TestHeaderVerification(t *testing.T) { headers[i] = block.Header() } // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces - chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer chain.Stop() for i := 0; i < len(blocks); i++ { diff --git a/core/blockchain.go b/core/blockchain.go index c415e909b246..39d14f77ef91 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -55,6 +55,8 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" "golang.org/x/exp/slices" ) @@ -143,7 +145,7 @@ const ( ) // CacheConfig contains the configuration values for the trie database -// that's resident in a blockchain. +// and state snapshot these are resident in a blockchain. type CacheConfig struct { TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks @@ -154,11 +156,37 @@ type CacheConfig struct { Preimages bool // Whether to store preimage of trie key to the disk TriesInMemory uint64 // How many tries keeps in memory NoTries bool // Insecure settings. Do not have any tries in databases if enabled. + StateHistory uint64 // Number of blocks from head whose state histories are reserved. + StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top + PathSyncFlush bool // Whether sync flush the trienodebuffer of pathdb to disk. 
SnapshotNoBuild bool // Whether the background generation is allowed SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it } +// triedbConfig derives the configures for trie database. +func (c *CacheConfig) triedbConfig() *trie.Config { + config := &trie.Config{ + Cache: c.TrieCleanLimit, + Preimages: c.Preimages, + NoTries: c.NoTries, + } + if c.StateScheme == rawdb.HashScheme { + config.HashDB = &hashdb.Config{ + CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, + } + } + if c.StateScheme == rawdb.PathScheme { + config.PathDB = &pathdb.Config{ + SyncFlush: c.PathSyncFlush, + StateHistory: c.StateHistory, + CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, + DirtyCacheSize: c.TrieDirtyLimit * 1024 * 1024, + } + } + return config +} + // defaultCacheConfig are the default caching values if none are specified by the // user (also used during testing). var defaultCacheConfig = &CacheConfig{ @@ -168,6 +196,15 @@ var defaultCacheConfig = &CacheConfig{ SnapshotLimit: 256, TriesInMemory: 128, SnapshotWait: true, + StateScheme: rawdb.HashScheme, +} + +// DefaultCacheConfigWithScheme returns a deep copied default cache config with +// a provided trie node scheme. +func DefaultCacheConfigWithScheme(scheme string) *CacheConfig { + config := *defaultCacheConfig + config.StateScheme = scheme + return &config } type BlockChainOption func(*BlockChain) (*BlockChain, error) @@ -281,11 +318,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis diffLayerChanCache, _ := exlru.New(diffLayerCacheLimit) // Open trie database with provided config - triedb := trie.NewDatabaseWithConfig(db, &trie.Config{ - Cache: cacheConfig.TrieCleanLimit, - Preimages: cacheConfig.Preimages, - NoTries: cacheConfig.NoTries, - }) + triedb := trie.NewDatabase(db, cacheConfig.triedbConfig()) // Setup the genesis block, commit the provided genesis specification // to database if the genesis block is not present yet, or load the // stored one from database. 
@@ -369,8 +402,14 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis if bc.cacheConfig.SnapshotLimit > 0 { diskRoot = rawdb.ReadSnapshotRoot(bc.db) } + if bc.triedb.Scheme() == rawdb.PathScheme { + recoverable, _ := bc.triedb.Recoverable(diskRoot) + if !bc.HasState(diskRoot) && !recoverable { + diskRoot = bc.triedb.Head() + } + } if diskRoot != (common.Hash{}) { - log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "snaproot", diskRoot) + log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "diskRoot", diskRoot) snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true) if err != nil { @@ -724,18 +763,18 @@ func (bc *BlockChain) loadLastState() error { blockTd = bc.GetTd(headBlock.Hash(), headBlock.NumberU64()) ) if headHeader.Hash() != headBlock.Hash() { - log.Info("Loaded most recent local header", "number", headHeader.Number, "hash", headHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(headHeader.Time), 0))) + log.Info("Loaded most recent local header", "number", headHeader.Number, "hash", headHeader.Hash(), "hash", headHeader.Root, "td", headerTd, "age", common.PrettyAge(time.Unix(int64(headHeader.Time), 0))) } - log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0))) + log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "root", headBlock.Root(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0))) if headBlock.Hash() != currentSnapBlock.Hash() { snapTd := bc.GetTd(currentSnapBlock.Hash(), currentSnapBlock.Number.Uint64()) - log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "td", snapTd, "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0))) + log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "root", currentSnapBlock.Root, "td", snapTd, "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0))) } if posa, ok := bc.engine.(consensus.PoSA); ok { if currentFinalizedHeader := posa.GetFinalizedHeader(bc, headHeader); currentFinalizedHeader != nil { if currentFinalizedBlock := bc.GetBlockByHash(currentFinalizedHeader.Hash()); currentFinalizedBlock != nil { finalTd := bc.GetTd(currentFinalizedBlock.Hash(), currentFinalizedBlock.NumberU64()) - log.Info("Loaded most recent local finalized block", "number", currentFinalizedBlock.Number(), "hash", currentFinalizedBlock.Hash(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalizedBlock.Time()), 0))) + log.Info("Loaded most recent local finalized block", "number", currentFinalizedBlock.Number(), "hash", currentFinalizedBlock.Hash(), "root", currentFinalizedBlock.Root(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalizedBlock.Time()), 0))) } } } @@ -835,33 +874,48 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha pivot := rawdb.ReadLastPivotNumber(bc.db) frozen, _ := bc.db.Ancients() + // resetState resets the persistent state to genesis if it's not available. + resetState := func() { + log.Info("Reset to block with genesis state", "number", bc.genesisBlock.NumberU64(), "hash", bc.genesisBlock.Hash()) + // Short circuit if the genesis state is already present. 
+ if bc.HasState(bc.genesisBlock.Root()) { + return + } + // Reset the state database to empty for committing genesis state. + // Note, it should only happen in path-based scheme and Reset function + // is also only call-able in this mode. + if bc.triedb.Scheme() == rawdb.PathScheme { + if err := bc.triedb.Reset(types.EmptyRootHash); err != nil { + log.Crit("Failed to clean state", "err", err) // Shouldn't happen + } + } + // Write genesis state into database. + if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil { + log.Crit("Failed to commit genesis state", "err", err) + } + } updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) { // Rewind the blockchain, ensuring we don't end up with a stateless head // block. Note, depth equality is permitted to allow using SetHead as a // chain reparation mechanism without deleting any data! if currentBlock := bc.CurrentBlock(); currentBlock != nil && header.Number.Uint64() <= currentBlock.Number.Uint64() { newHeadBlock := bc.GetBlock(header.Hash(), header.Number.Uint64()) - lastBlockNum := header.Number.Uint64() if newHeadBlock == nil { log.Error("Gap in the chain, rewinding to genesis", "number", header.Number, "hash", header.Hash()) newHeadBlock = bc.genesisBlock + resetState() } else { // Block exists, keep rewinding until we find one with state, // keeping rewinding until we exceed the optional threshold // root hash beyondRoot := (root == common.Hash{}) // Flag whether we're beyond the requested root (no root, always true) - enoughBeyondCount := false - beyondCount := 0 + for { - beyondCount++ // If a root threshold was requested but not yet crossed, check if root != (common.Hash{}) && !beyondRoot && newHeadBlock.Root() == root { beyondRoot, rootNumber = true, newHeadBlock.NumberU64() } - - enoughBeyondCount = beyondCount > maxBeyondBlocks - - if !bc.HasState(newHeadBlock.Root()) { + if !bc.HasState(newHeadBlock.Root()) && !bc.stateRecoverable(newHeadBlock.Root()) { log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) if pivot == nil || newHeadBlock.NumberU64() > *pivot { parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) @@ -872,38 +926,21 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, time uint64, root common.Ha log.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash()) newHeadBlock = bc.genesisBlock } else { - log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot) + log.Info("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot) newHeadBlock = bc.genesisBlock } } - if beyondRoot || (enoughBeyondCount && root != common.Hash{}) || newHeadBlock.NumberU64() == 0 { - if enoughBeyondCount && (root != common.Hash{}) && rootNumber == 0 { - for { - lastBlockNum++ - block := bc.GetBlockByNumber(lastBlockNum) - if block == nil { - break - } - if block.Root() == root { - rootNumber = block.NumberU64() - break - } - } - } + if beyondRoot || newHeadBlock.NumberU64() == 0 { if newHeadBlock.NumberU64() == 0 { - // Recommit the genesis state into disk in case the rewinding destination - // is genesis block and the relevant state is gone. In the future this - // rewinding destination can be the earliest block stored in the chain - // if the historical chain pruning is enabled. 
In that case the logic - // needs to be improved here. - if !bc.HasState(bc.genesisBlock.Root()) { - if err := CommitGenesisState(bc.db, bc.triedb, bc.genesisBlock.Hash()); err != nil { - log.Crit("Failed to commit genesis state", "err", err) - } - log.Debug("Recommitted genesis state to disk") + resetState() + } else if !bc.HasState(newHeadBlock.Root()) { + // Rewind to a block with recoverable state. If the state is + // missing, run the state recovery here. + if err := bc.triedb.Recover(newHeadBlock.Root()); err != nil { + log.Crit("Failed to rollback state", "err", err) // Shouldn't happen } } - log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) + log.Info("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) break } log.Debug("Skipping block with threshold state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "root", newHeadBlock.Root()) @@ -1007,7 +1044,13 @@ func (bc *BlockChain) SnapSyncCommitHead(hash common.Hash) error { if block == nil { return fmt.Errorf("non existent block [%x..]", hash[:4]) } + // Reset the trie database with the fresh snap synced state. root := block.Root() + if bc.triedb.Scheme() == rawdb.PathScheme { + if err := bc.triedb.Reset(root); err != nil { + return err + } + } if !bc.HasState(root) { return fmt.Errorf("non existent state [%x..]", root[:4]) } @@ -1183,49 +1226,62 @@ func (bc *BlockChain) Stop() { log.Error("Failed to journal state snapshot", "err", err) } } + if bc.triedb.Scheme() == rawdb.PathScheme { + // Ensure that the in-memory trie nodes are journaled to disk properly. + if err := bc.triedb.Journal(bc.CurrentBlock().Root); err != nil { + log.Info("Failed to journal in-memory trie nodes", "err", err) + } + } else { + // Ensure the state of a recent block is also stored to disk before exiting. + // We're writing three different states to catch different restart scenarios: + // - HEAD: So we don't need to reprocess any blocks in the general case + // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle + // - HEAD-127: So we have a hard limit on the number of blocks reexecuted + if !bc.cacheConfig.TrieDirtyDisabled { + triedb := bc.triedb + var once sync.Once + for _, offset := range []uint64{0, 1, TriesInMemory - 1} { + if number := bc.CurrentBlock().Number.Uint64(); number > offset { + recent := bc.GetBlockByNumber(number - offset) + log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) + if err := triedb.Commit(recent.Root(), true); err != nil { + log.Error("Failed to commit recent state trie", "err", err) + } else { + rawdb.WriteSafePointBlockNumber(bc.db, recent.NumberU64()) + once.Do(func() { + rawdb.WriteHeadBlockHash(bc.db, recent.Hash()) + }) + } + } + } + if snapBase != (common.Hash{}) { + log.Info("Writing snapshot state to disk", "root", snapBase) + if err := triedb.Commit(snapBase, true); err != nil { + log.Error("Failed to commit recent state trie", "err", err) + } else { + rawdb.WriteSafePointBlockNumber(bc.db, bc.CurrentBlock().Number.Uint64()) + } + } - // Ensure the state of a recent block is also stored to disk before exiting. 
- // We're writing three different states to catch different restart scenarios: - // - HEAD: So we don't need to reprocess any blocks in the general case - // - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle - // - HEAD-127: So we have a hard limit on the number of blocks reexecuted - if !bc.cacheConfig.TrieDirtyDisabled { - triedb := bc.triedb - var once sync.Once - - for _, offset := range []uint64{0, 1, bc.triesInMemory - 1} { - if number := bc.CurrentBlock().Number.Uint64(); number > offset { - recent := bc.GetBlockByNumber(number - offset) - - log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root()) - if err := triedb.Commit(recent.Root(), true); err != nil { + if snapBase != (common.Hash{}) { + log.Info("Writing snapshot state to disk", "root", snapBase) + if err := bc.triedb.Commit(snapBase, true); err != nil { log.Error("Failed to commit recent state trie", "err", err) } else { - rawdb.WriteSafePointBlockNumber(bc.db, recent.NumberU64()) - once.Do(func() { - rawdb.WriteHeadBlockHash(bc.db, recent.Hash()) - }) + rawdb.WriteSafePointBlockNumber(bc.db, bc.CurrentBlock().Number.Uint64()) } } - } - if snapBase != (common.Hash{}) { - log.Info("Writing snapshot state to disk", "root", snapBase) - if err := triedb.Commit(snapBase, true); err != nil { - log.Error("Failed to commit recent state trie", "err", err) - } else { - rawdb.WriteSafePointBlockNumber(bc.db, bc.CurrentBlock().Number.Uint64()) + for !bc.triegc.Empty() { + triedb.Dereference(bc.triegc.PopItem()) + } + if _, size, _, _ := triedb.Size(); size != 0 { + log.Error("Dangling trie nodes after full cleanup") } - } - for !bc.triegc.Empty() { - triedb.Dereference(bc.triegc.PopItem()) - } - if size, _ := triedb.Size(); size != 0 { - log.Error("Dangling trie nodes after full cleanup") } } - // Flush the collected preimages to disk - if err := bc.stateCache.TrieDB().Close(); err != nil { - log.Error("Failed to close trie db", "err", err) + // Close the trie database, release all the held resources as the last step. + if err := bc.triedb.Close(); err != nil { + log.Error("Failed to close trie database", "err", err) } log.Info("Blockchain stopped") } @@ -1601,6 +1657,12 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. bc.commitLock.Lock() defer bc.commitLock.Unlock() + // If node is running in path mode, skip explicit gc operation + // which is unnecessary in this mode. + if bc.triedb.Scheme() == rawdb.PathScheme { + return nil + } + triedb := bc.stateCache.TrieDB() // If we're running an archive node, always flush if bc.cacheConfig.TrieDirtyDisabled { @@ -1616,8 +1678,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. 
if current := block.NumberU64(); current > bc.triesInMemory { // If we exceeded our memory allowance, flush matured singleton nodes to disk var ( - nodes, imgs = triedb.Size() - limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 + _, nodes, _, imgs = triedb.Size() + limit = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024 ) if nodes > limit || imgs > 4*1024*1024 { triedb.Cap(limit - ethdb.IdealBatchSize) @@ -2124,8 +2186,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) stats.processed++ stats.usedGas += usedGas - dirty, _ := bc.triedb.Size() - stats.report(chain, it.index, dirty, setHead) + trieDiffNodes, trieBufNodes, trieImmutableBufNodes, _ := bc.triedb.Size() + stats.report(chain, it.index, trieDiffNodes, trieBufNodes, trieImmutableBufNodes, setHead) if !setHead { // After merge we expect few side chains. Simply count @@ -2299,6 +2361,12 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i ) parent := it.previous() for parent != nil && !bc.HasState(parent.Root) { + if bc.stateRecoverable(parent.Root) { + if err := bc.triedb.Recover(parent.Root); err != nil { + return 0, err + } + break + } hashes = append(hashes, parent.Hash()) numbers = append(numbers, parent.Number.Uint64()) @@ -2358,6 +2426,12 @@ func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error) parent = block ) for parent != nil && !bc.HasState(parent.Root()) { + if bc.stateRecoverable(parent.Root()) { + if err := bc.triedb.Recover(parent.Root()); err != nil { + return common.Hash{}, err + } + break + } hashes = append(hashes, parent.Hash()) numbers = append(numbers, parent.NumberU64()) parent = bc.GetBlock(parent.ParentHash(), parent.NumberU64()-1) @@ -2871,6 +2945,7 @@ func (bc *BlockChain) maintainTxIndex() { return } defer sub.Unsubscribe() + log.Info("Initialized transaction indexer", "limit", bc.TxLookupLimit()) for { select { diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go index ffe2d6501c4d..520feaf277cf 100644 --- a/core/blockchain_insert.go +++ b/core/blockchain_insert.go @@ -39,7 +39,7 @@ const statsReportLimit = 8 * time.Second // report prints statistics if some number of blocks have been processed // or more than a few seconds have passed since the last message. -func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize, setHead bool) { +func (st *insertStats) report(chain []*types.Block, index int, trieDiffNodes, trieBufNodes, trieImmutableBufNodes common.StorageSize, setHead bool) { // Fetch the timings for the batch var ( now = mclock.Now() @@ -63,7 +63,13 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute { context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) } - context = append(context, []interface{}{"dirty", dirty}...) + if trieDiffNodes != 0 { // pathdb + context = append(context, []interface{}{"triediffs", trieDiffNodes}...) + context = append(context, []interface{}{"triedirty", trieBufNodes}...) + context = append(context, []interface{}{"trieimutabledirty", trieImmutableBufNodes}...) + } else { + context = append(context, []interface{}{"triedirty", trieBufNodes}...) + } if st.queued > 0 { context = append(context, []interface{}{"queued", st.queued}...) 
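The recovery paths added to insertSideChain and recoverAncestors share one shape: walk back through parent roots, stop at the first root whose state is already present, and if a root is merely recoverable, roll the path database back to it instead of queueing every block for re-execution. Below is a minimal, self-contained sketch of that loop; the stateDB interface and fakeDB type are illustrative stand-ins, not types from this patch.

```go
package main

import (
	"errors"
	"fmt"
)

// stateDB is a stand-in for the subset of the trie database used by the
// recovery loop: HasState reports whether a root is already persisted,
// Recoverable reports whether it can be rebuilt from state history, and
// Recover performs that rollback.
type stateDB interface {
	HasState(root string) bool
	Recoverable(root string) bool
	Recover(root string) error
}

// recoverOrCollect walks the given roots (child first) and either recovers
// the first recoverable root or returns the roots that must be re-executed,
// mirroring the loop shape in recoverAncestors.
func recoverOrCollect(db stateDB, roots []string) (missing []string, err error) {
	for _, root := range roots {
		if db.HasState(root) {
			break // found a block with live state
		}
		if db.Recoverable(root) {
			// Roll the path database back to this root instead of
			// re-executing everything on top of it.
			return missing, db.Recover(root)
		}
		missing = append(missing, root)
	}
	return missing, nil
}

type fakeDB struct{ live, recoverable map[string]bool }

func (f fakeDB) HasState(r string) bool    { return f.live[r] }
func (f fakeDB) Recoverable(r string) bool { return f.recoverable[r] }
func (f fakeDB) Recover(r string) error {
	if !f.recoverable[r] {
		return errors.New("not recoverable")
	}
	return nil
}

func main() {
	db := fakeDB{live: map[string]bool{}, recoverable: map[string]bool{"0xb2": true}}
	missing, err := recoverOrCollect(db, []string{"0xb4", "0xb3", "0xb2", "0xb1"})
	fmt.Println(missing, err) // [0xb4 0xb3] <nil>
}
```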
diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 1cae3a054510..802b979a10bb 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -319,10 +319,16 @@ func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool { return bc.HasState(block.Root()) } -// TrieNode retrieves a blob of data associated with a trie node -// either from ephemeral in-memory cache, or from persistent storage. -func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) { - return bc.stateCache.TrieDB().Node(hash) +// stateRecoverable checks if the specified state is recoverable. +// Note, this function assumes the state is not present, because +// state is not treated as recoverable if it's available, thus +// false will be returned in this case. +func (bc *BlockChain) stateRecoverable(root common.Hash) bool { + if bc.triedb.Scheme() == rawdb.HashScheme { + return false + } + result, _ := bc.triedb.Recoverable(root) + return result } // ContractCodeWithPrefix retrieves a blob of data associated with a contract diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index 439a3d706f0f..acef2e2d59a6 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -22,6 +22,7 @@ package core import ( "math/big" + "path" "testing" "time" @@ -1749,16 +1750,23 @@ func testLongReorgedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { } func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + testRepairWithScheme(t, tt, snapshots, scheme) + } +} + +func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) { // It's hard to follow the test case, visualize the input //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) // fmt.Println(tt.dump(true)) // Create a temporary persistent database datadir := t.TempDir() + ancient := path.Join(datadir, "ancient") db, err := rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, }) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) @@ -1777,6 +1785,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { TrieDirtyLimit: 256, TrieTimeLimit: 5 * time.Minute, SnapshotLimit: 0, // Disable snapshot by default + StateScheme: scheme, } ) defer engine.Close() @@ -1807,7 +1816,9 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { t.Fatalf("Failed to import canonical chain start: %v", err) } if tt.commitBlock > 0 { - chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), false) + if err := chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false); err != nil { + t.Fatalf("Failed to flush trie state: %v", err) + } if snapshots { if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil { t.Fatalf("Failed to flatten snapshots: %v", err) @@ -1830,21 +1841,21 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { rawdb.WriteLastPivotNumber(db, *tt.pivotBlock) } // Pull the plug on the database, simulating a hard crash + chain.triedb.Close() db.Close() chain.stopWithoutSaving() // Start a new blockchain back up and see where the repair leads us db, err = rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, }) - if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } defer db.Close() - newChain, 
err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil) + newChain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -1887,17 +1898,22 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { // In this case the snapshot layer of B3 is not created because of existent // state. func TestIssue23496(t *testing.T) { + testIssue23496(t, rawdb.HashScheme) + testIssue23496(t, rawdb.PathScheme) +} + +func testIssue23496(t *testing.T, scheme string) { // It's hard to follow the test case, visualize the input //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) // Create a temporary persistent database datadir := t.TempDir() + ancient := path.Join(datadir, "ancient") db, err := rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, }) - if err != nil { t.Fatalf("Failed to create persistent database: %v", err) } @@ -1910,16 +1926,8 @@ func TestIssue23496(t *testing.T) { BaseFee: big.NewInt(params.InitialBaseFee), } engine = ethash.NewFullFaker() - config = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TriesInMemory: 128, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 256, - SnapshotWait: true, - } ) - chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) } @@ -1932,7 +1940,7 @@ func TestIssue23496(t *testing.T) { if _, err := chain.InsertChain(blocks[:1]); err != nil { t.Fatalf("Failed to import canonical chain start: %v", err) } - chain.stateCache.TrieDB().Commit(blocks[0].Root(), false) + chain.triedb.Commit(blocks[0].Root(), false) // Insert block B2 and commit the snapshot into disk if _, err := chain.InsertChain(blocks[1:2]); err != nil { @@ -1946,7 +1954,7 @@ func TestIssue23496(t *testing.T) { if _, err := chain.InsertChain(blocks[2:3]); err != nil { t.Fatalf("Failed to import canonical chain start: %v", err) } - chain.stateCache.TrieDB().Commit(blocks[2].Root(), false) + chain.triedb.Commit(blocks[2].Root(), false) // Insert the remaining blocks if _, err := chain.InsertChain(blocks[3:]); err != nil { @@ -1954,20 +1962,21 @@ func TestIssue23496(t *testing.T) { } // Pull the plug on the database, simulating a hard crash + chain.triedb.Close() db.Close() chain.stopWithoutSaving() // Start a new blockchain back up and see where the repair leads us db, err = rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, }) if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } defer db.Close() - chain, err = NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -1979,8 +1988,12 @@ func TestIssue23496(t *testing.T) { if head := chain.CurrentSnapBlock(); head.Number.Uint64() != uint64(4) { t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, uint64(4)) } - if head := chain.CurrentBlock(); head.Number.Uint64() != uint64(1) { - t.Errorf("Head block mismatch: have %d, want %d", head.Number, uint64(1)) + expHead := uint64(1) + if scheme == rawdb.PathScheme { + expHead = 
uint64(2) + } + if head := chain.CurrentBlock(); head.Number.Uint64() != expHead { + t.Errorf("Head block mismatch: have %d, want %d", head.Number, expHead) } // Reinsert B2-B4 diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go index b34b14b838ae..72c1e56db250 100644 --- a/core/blockchain_sethead_test.go +++ b/core/blockchain_sethead_test.go @@ -22,6 +22,7 @@ package core import ( "fmt" "math/big" + "path" "strings" "testing" "time" @@ -29,9 +30,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) // rewindTest is a test case for chain rollback upon user request. @@ -1949,16 +1954,23 @@ func testLongReorgedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { } func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + testSetHeadWithScheme(t, tt, snapshots, scheme) + } +} + +func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) { // It's hard to follow the test case, visualize the input // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) // fmt.Println(tt.dump(false)) // Create a temporary persistent database datadir := t.TempDir() + ancient := path.Join(datadir, "ancient") db, err := rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, }) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) @@ -1977,6 +1989,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { TrieDirtyLimit: 256, TrieTimeLimit: 5 * time.Minute, SnapshotLimit: 0, // Disable snapshot + StateScheme: scheme, } ) if snapshots { @@ -2008,7 +2021,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { t.Fatalf("Failed to import canonical chain start: %v", err) } if tt.commitBlock > 0 { - chain.stateCache.TrieDB().Commit(canonblocks[tt.commitBlock-1].Root(), false) + chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false) if snapshots { if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil { t.Fatalf("Failed to flatten snapshots: %v", err) @@ -2018,14 +2031,17 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil { t.Fatalf("Failed to import canonical chain tail: %v", err) } - // Manually dereference anything not committed to not have to work with 128+ tries - for _, block := range sideblocks { - chain.stateCache.TrieDB().Dereference(block.Root()) - } - for _, block := range canonblocks { - chain.stateCache.TrieDB().Dereference(block.Root()) + // Reopen the trie database without persisting in-memory dirty nodes. 
+ chain.triedb.Close() + dbconfig := &trie.Config{} + if scheme == rawdb.PathScheme { + dbconfig.PathDB = pathdb.Defaults + } else { + dbconfig.HashDB = hashdb.Defaults } - chain.stateCache.Purge() + chain.triedb = trie.NewDatabase(chain.db, dbconfig) + chain.stateCache = state.NewDatabaseWithNodeDB(chain.db, chain.triedb) + // Force run a freeze cycle type freezer interface { Freeze(threshold uint64) error diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index 4f02c4305180..14d030998879 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -24,6 +24,7 @@ import ( "fmt" "math/big" "os" + "path" "strings" "testing" "time" @@ -39,6 +40,7 @@ import ( // snapshotTestBasic wraps the common testing fields in the snapshot tests. type snapshotTestBasic struct { + scheme string // Disk scheme used for storing trie nodes chainBlocks int // Number of blocks to generate for the canonical chain snapshotBlock uint64 // Block number of the relevant snapshot disk layer commitBlock uint64 // Block number for which to commit the state to disk @@ -51,6 +53,7 @@ type snapshotTestBasic struct { // share fields, set in runtime datadir string + ancient string db ethdb.Database genDb ethdb.Database engine consensus.Engine @@ -60,10 +63,11 @@ type snapshotTestBasic struct { func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) { // Create a temporary persistent database datadir := t.TempDir() + ancient := path.Join(datadir, "ancient") db, err := rawdb.Open(rawdb.OpenOptions{ Directory: datadir, - AncientsDirectory: datadir, + AncientsDirectory: ancient, }) if err != nil { t.Fatalf("Failed to create persistent database: %v", err) @@ -75,13 +79,8 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo Config: params.AllEthashProtocolChanges, } engine = ethash.NewFullFaker() - - // Snapshot is enabled, the first snapshot is created from the Genesis. - // The snapshot memory allowance is 256MB, it means no snapshot flush - // will happen during the block insertion. - cacheConfig = defaultCacheConfig ) - chain, err := NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, nil, nil) + chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to create chain: %v", err) } @@ -102,7 +101,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo startPoint = point if basic.commitBlock > 0 && basic.commitBlock == point { - chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), false) + chain.TrieDB().Commit(blocks[point-1].Root(), false) } if basic.snapshotBlock > 0 && basic.snapshotBlock == point { // Flushing the entire snap tree into the disk, the @@ -121,6 +120,7 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo // Set runtime fields basic.datadir = datadir + basic.ancient = ancient basic.db = db basic.genDb = genDb basic.engine = engine @@ -210,6 +210,7 @@ func (basic *snapshotTestBasic) teardown() { basic.db.Close() basic.genDb.Close() os.RemoveAll(basic.datadir) + os.RemoveAll(basic.ancient) } // snapshotTest is a test case type for normal snapshot recovery. 
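The converted tests repeatedly map a scheme name onto a trie.Config (hashdb.Defaults for the legacy layout, pathdb.Defaults for the path layout) before opening the trie database, as above and again in newDbConfig further down. A small helper in that spirit, written against the packages this diff already imports; newTrieDatabase is an illustrative name, not something the patch adds.

```go
package main

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)

// newTrieDatabase opens a trie database on top of db using the requested
// state scheme, falling back to the hash-based layout for any other value.
func newTrieDatabase(db ethdb.Database, scheme string) *trie.Database {
	cfg := &trie.Config{}
	if scheme == rawdb.PathScheme {
		cfg.PathDB = pathdb.Defaults
	} else {
		cfg.HashDB = hashdb.Defaults
	}
	return trie.NewDatabase(db, cfg)
}

func main() {
	memdb := rawdb.NewMemoryDatabase()
	triedb := newTrieDatabase(memdb, rawdb.PathScheme)
	defer triedb.Close()
}
```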
@@ -226,7 +227,7 @@ func (snaptest *snapshotTest) test(t *testing.T) { // Restart the chain normally chain.Stop() - newchain, err := NewBlockChain(snaptest.db, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -235,7 +236,7 @@ func (snaptest *snapshotTest) test(t *testing.T) { snaptest.verify(t, newchain, blocks) } -// crashSnapshotTest is a test case type for innormal snapshot recovery. +// crashSnapshotTest is a test case type for irregular snapshot recovery. // It can be used for testing that restart Geth after the crash. type crashSnapshotTest struct { snapshotTestBasic @@ -251,13 +252,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { db := chain.db db.Close() chain.stopWithoutSaving() + chain.triedb.Close() // Start a new blockchain back up and see where the repair leads us newdb, err := rawdb.Open(rawdb.OpenOptions{ Directory: snaptest.datadir, - AncientsDirectory: snaptest.datadir, + AncientsDirectory: snaptest.ancient, }) - if err != nil { t.Fatalf("Failed to reopen persistent database: %v", err) } @@ -267,13 +268,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { // the crash, we do restart twice here: one after the crash and one // after the normal stop. It's used to ensure the broken snapshot // can be detected all the time. - newchain, err := NewBlockChain(newdb, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } newchain.Stop() - newchain, err = NewBlockChain(newdb, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -300,7 +301,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { // Insert blocks without enabling snapshot if gapping is required. 
chain.Stop() - gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.gapped, func(i int, b *BlockGen) {}) + gappedBlocks, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.gapped, func(i int, b *BlockGen) {}) // Insert a few more blocks without enabling snapshot var cacheConfig = &CacheConfig{ @@ -309,6 +310,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { TrieTimeLimit: 5 * time.Minute, TriesInMemory: 128, SnapshotLimit: 0, + StateScheme: snaptest.scheme, } newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { @@ -318,7 +320,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { newchain.Stop() // Restart the chain with enabling the snapshot - newchain, err = NewBlockChain(snaptest.db, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -346,7 +348,7 @@ func (snaptest *setHeadSnapshotTest) test(t *testing.T) { chain.SetHead(snaptest.setHead) chain.Stop() - newchain, err := NewBlockChain(snaptest.db, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -381,16 +383,17 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { TrieTimeLimit: 5 * time.Minute, SnapshotLimit: 0, TriesInMemory: 128, + StateScheme: snaptest.scheme, } newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } - newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.newBlocks, func(i int, b *BlockGen) {}) + newBlocks, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.newBlocks, func(i int, b *BlockGen) {}) newchain.InsertChain(newBlocks) newchain.Stop() - // Restart the chain, the wiper should starts working + // Restart the chain, the wiper should start working config = &CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, @@ -398,6 +401,7 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { SnapshotLimit: 256, SnapshotWait: false, // Don't wait rebuild TriesInMemory: 128, + StateScheme: snaptest.scheme, } tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { @@ -405,14 +409,15 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { } // Simulate the blockchain crash. + tmp.triedb.Close() tmp.stopWithoutSaving() - newchain, err = NewBlockChain(snaptest.db, nil, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } - defer newchain.Stop() snaptest.verify(t, newchain, blocks) + newchain.Stop() } // Tests a Geth restart with valid snapshot. 
Before the shutdown, all snapshot @@ -436,20 +441,23 @@ func TestRestartWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : C8 // Expected snapshot disk : G - test := &snapshotTest{ - snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 8, - expSnapshotBottom: 0, // Initial disk layer built from genesis - }, - } - test.test(t) - test.teardown() + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + test := &snapshotTest{ + snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 8, + expSnapshotBottom: 0, // Initial disk layer built from genesis + }, + } + test.test(t) + test.teardown() + } } // Tests a Geth was crashed and restarts with a broken snapshot. In this case the @@ -475,20 +483,24 @@ func TestNoCommitCrashWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : G // Expected snapshot disk : C4 - test := &crashSnapshotTest{ - snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 0, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, - } - test.test(t) - test.teardown() + //for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + for _, scheme := range []string{rawdb.HashScheme} { + test := &crashSnapshotTest{ + snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 0, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, + } + test.test(t) + test.teardown() + } } // Tests a Geth was crashed and restarts with a broken snapshot. In this case the @@ -514,20 +526,24 @@ func TestLowCommitCrashWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : C2 // Expected snapshot disk : C4 - test := &crashSnapshotTest{ - snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 2, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 2, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, - } - test.test(t) - test.teardown() + //for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + for _, scheme := range []string{rawdb.HashScheme} { + test := &crashSnapshotTest{ + snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 2, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 2, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, + } + test.test(t) + test.teardown() + } } // Tests a Geth was crashed and restarts with a broken snapshot. 
In this case @@ -553,20 +569,27 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : G // Expected snapshot disk : C4 - test := &crashSnapshotTest{ - snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 6, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, - } - test.test(t) - test.teardown() + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + expHead := uint64(0) + if scheme == rawdb.PathScheme { + expHead = uint64(4) + } + test := &crashSnapshotTest{ + snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 6, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: expHead, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, + } + test.test(t) + test.teardown() + } } // Tests a Geth was running with snapshot enabled. Then restarts without @@ -590,21 +613,24 @@ func TestGappedNewSnapshot(t *testing.T) { // Expected head fast block: C10 // Expected head block : C10 // Expected snapshot disk : C10 - test := &gappedSnapshotTest{ - snapshotTestBasic: snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 10, - expHeadHeader: 10, - expHeadFastBlock: 10, - expHeadBlock: 10, - expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD - }, - gapped: 2, - } - test.test(t) - test.teardown() + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + test := &gappedSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 10, + expHeadHeader: 10, + expHeadFastBlock: 10, + expHeadBlock: 10, + expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD + }, + gapped: 2, + } + test.test(t) + test.teardown() + } } // Tests the Geth was running with snapshot enabled and resetHead is applied. 
@@ -628,21 +654,24 @@ func TestSetHeadWithNewSnapshot(t *testing.T) { // Expected head fast block: C4 // Expected head block : C4 // Expected snapshot disk : G - test := &setHeadSnapshotTest{ - snapshotTestBasic: snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 4, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - expSnapshotBottom: 0, // The initial disk layer is built from the genesis - }, - setHead: 4, - } - test.test(t) - test.teardown() + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + test := &setHeadSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 4, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + expSnapshotBottom: 0, // The initial disk layer is built from the genesis + }, + setHead: 4, + } + test.test(t) + test.teardown() + } } // Tests the Geth was running with a complete snapshot and then imports a few @@ -666,19 +695,22 @@ func TestRecoverSnapshotFromWipingCrash(t *testing.T) { // Expected head fast block: C10 // Expected head block : C8 // Expected snapshot disk : C10 - test := &wipeCrashSnapshotTest{ - snapshotTestBasic: snapshotTestBasic{ - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 0, - expCanonicalBlocks: 10, - expHeadHeader: 10, - expHeadFastBlock: 10, - expHeadBlock: 10, - expSnapshotBottom: 10, - }, - newBlocks: 2, - } - test.test(t) - test.teardown() + for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { + test := &wipeCrashSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + scheme: scheme, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 0, + expCanonicalBlocks: 10, + expHeadHeader: 10, + expHeadFastBlock: 10, + expHeadBlock: 10, + expSnapshotBottom: 10, + }, + newBlocks: 2, + } + test.test(t) + test.teardown() + } } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 56a5c7763e5e..6bf6d2d8819a 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -1807,7 +1807,7 @@ func TestTrieForkGC(t *testing.T) { chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) } - if nodes, _ := chain.TrieDB().Size(); nodes > 0 { + if _, nodes, _, _ := chain.TrieDB().Size(); nodes > 0 { t.Fatalf("stale tries still alive after garbase collection") } } diff --git a/core/chain_makers.go b/core/chain_makers.go index 3143954cedb7..905ac1ead523 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -281,7 +281,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse } blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) chainreader := &fakeChainReader{config: config} - genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) { + genblock := func(i int, parent *types.Block, triedb *trie.Database, statedb *state.StateDB) (*types.Block, types.Receipts) { b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} b.header = makeHeader(chainreader, parent, statedb, b.engine) @@ -325,19 +325,23 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse if err != nil { panic(fmt.Sprintf("state write error: %v", err)) } - if err := statedb.Database().TrieDB().Commit(root, false); err != nil { + if err = triedb.Commit(root, false); err != nil { panic(fmt.Sprintf("trie write error: %v", err)) } 
return block, b.receipts } return nil, nil } + // Forcibly use hash-based state scheme for retaining all nodes in disk. + triedb := trie.NewDatabase(db, trie.HashDefaults) + defer triedb.Close() + for i := 0; i < n; i++ { - statedb, err := state.New(parent.Root(), state.NewDatabase(db), nil) + statedb, err := state.New(parent.Root(), state.NewDatabaseWithNodeDB(db, triedb), nil) if err != nil { panic(err) } - block, receipt := genblock(i, parent, statedb) + block, receipt := genblock(i, parent, triedb, statedb) blocks[i] = block receipts[i] = receipt parent = block @@ -350,7 +354,9 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse // then generate chain on top. func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts) { db := rawdb.NewMemoryDatabase() - _, err := genesis.Commit(db, trie.NewDatabase(db)) + triedb := trie.NewDatabase(db, trie.HashDefaults) + defer triedb.Close() + _, err := genesis.Commit(db, triedb) if err != nil { panic(err) } diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go index 4f7355527553..db220cf24a7f 100644 --- a/core/chain_makers_test.go +++ b/core/chain_makers_test.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) func TestGenerateWithdrawalChain(t *testing.T) { @@ -74,8 +75,7 @@ func TestGenerateWithdrawalChain(t *testing.T) { Storage: storage, Code: common.Hex2Bytes("600154600354"), } - - genesis := gspec.MustCommit(gendb) + genesis := gspec.MustCommit(gendb, trie.NewDatabase(gendb, trie.HashDefaults)) chain, _ := GenerateChain(gspec.Config, genesis, beacon.NewFaker(), gendb, 4, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(address), address, big.NewInt(1000), params.TxGas, new(big.Int).Add(gen.BaseFee(), common.Big1), nil), signer, key) @@ -146,6 +146,7 @@ func ExampleGenerateChain() { addr2 = crypto.PubkeyToAddress(key2.PublicKey) addr3 = crypto.PubkeyToAddress(key3.PublicKey) db = rawdb.NewMemoryDatabase() + genDb = rawdb.NewMemoryDatabase() ) // Ensure that key1 has some funds in the genesis block. @@ -153,13 +154,13 @@ func ExampleGenerateChain() { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}}, } - genesis := gspec.MustCommit(db) + genesis := gspec.MustCommit(genDb, trie.NewDatabase(genDb, trie.HashDefaults)) // This call generates a chain of 5 blocks. The function runs for // each block and adds different features to gen based on the // block index. signer := types.HomesteadSigner{} - chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 5, func(i int, gen *BlockGen) { + chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), genDb, 5, func(i int, gen *BlockGen) { switch i { case 0: // In block 1, addr1 sends addr2 some ether. @@ -188,7 +189,7 @@ func ExampleGenerateChain() { }) // Import the chain. This runs all block validation rules. 
- blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) + blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer blockchain.Stop() if i, err := blockchain.InsertChain(chain); err != nil { diff --git a/core/dao_test.go b/core/dao_test.go index f2e8dfe8f9aa..b9a899ef2f92 100644 --- a/core/dao_test.go +++ b/core/dao_test.go @@ -83,7 +83,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { if _, err := bc.InsertChain(blocks); err != nil { t.Fatalf("failed to import contra-fork chain for expansion: %v", err) } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil { + if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { t.Fatalf("failed to commit contra-fork head for expansion: %v", err) } bc.Stop() @@ -106,7 +106,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { if _, err := bc.InsertChain(blocks); err != nil { t.Fatalf("failed to import pro-fork chain for expansion: %v", err) } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil { + if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { t.Fatalf("failed to commit pro-fork head for expansion: %v", err) } bc.Stop() @@ -131,7 +131,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { if _, err := bc.InsertChain(blocks); err != nil { t.Fatalf("failed to import contra-fork chain for expansion: %v", err) } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil { + if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { t.Fatalf("failed to commit contra-fork head for expansion: %v", err) } blocks, _ = GenerateChain(&proConf, conBc.GetBlockByHash(conBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) @@ -149,7 +149,7 @@ func TestDAOForkRangeExtradata(t *testing.T) { if _, err := bc.InsertChain(blocks); err != nil { t.Fatalf("failed to import pro-fork chain for expansion: %v", err) } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, false); err != nil { + if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { t.Fatalf("failed to commit pro-fork head for expansion: %v", err) } blocks, _ = GenerateChain(&conConf, proBc.GetBlockByHash(proBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) diff --git a/core/eip3529tests/eip3529_test_util.go b/core/eip3529tests/eip3529_test_util.go index abae9fb8950c..c2d63f349d6b 100644 --- a/core/eip3529tests/eip3529_test_util.go +++ b/core/eip3529tests/eip3529_test_util.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) func newGwei(n int64) *big.Int { @@ -42,7 +43,7 @@ func TestGasUsage(t *testing.T, config *params.ChainConfig, engine consensus.Eng }, }, } - genesis = gspec.MustCommit(db) + genesis = gspec.MustCommit(db, trie.NewDatabase(db, nil)) ) blocks, _ := core.GenerateChain(gspec.Config, genesis, engine, db, 1, func(i int, b *core.BlockGen) { @@ -61,7 +62,7 @@ func TestGasUsage(t *testing.T, config *params.ChainConfig, engine consensus.Eng // Import the canonical chain diskdb := rawdb.NewMemoryDatabase() - gspec.MustCommit(diskdb) + gspec.MustCommit(diskdb, trie.NewDatabase(diskdb, nil)) chain, err := core.NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { 
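All of these call sites follow the same migration: MustCommit no longer builds a hash-based trie database implicitly, so each test constructs one and passes it in. A hedged, minimal setup sketch using only identifiers that appear in this diff; the funded address and balance are placeholders.

```go
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	gspec := &core.Genesis{
		Config:  params.TestChainConfig,
		Alloc:   core.GenesisAlloc{common.HexToAddress("0x01"): {Balance: big.NewInt(1)}},
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	// MustCommit now takes the trie database explicitly; trie.HashDefaults
	// keeps the legacy hash-based layout used by most of these tests.
	genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
	_ = genesis
}
```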
diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go index 8964558845d3..85b0daa96b84 100644 --- a/core/forkid/forkid.go +++ b/core/forkid/forkid.go @@ -98,6 +98,32 @@ func NewID(config *params.ChainConfig, genesis common.Hash, head, time uint64) I return ID{Hash: checksumToBytes(hash), Next: 0} } +// NextForkHash calculates the forkHash from genesis to the next fork block number or time +func NextForkHash(config *params.ChainConfig, genesis common.Hash, head uint64, time uint64) [4]byte { + // Calculate the starting checksum from the genesis hash + hash := crc32.ChecksumIEEE(genesis[:]) + + // Calculate the next fork checksum + forksByBlock, forksByTime := gatherForks(config) + for _, fork := range forksByBlock { + if fork > head { + // Checksum the previous hash and nextFork number and return + return checksumToBytes(checksumUpdate(hash, fork)) + } + // Fork already passed, checksum the previous hash and the fork number + hash = checksumUpdate(hash, fork) + } + for _, fork := range forksByTime { + if fork > time { + // Checksum the previous hash and nextFork time and return + return checksumToBytes(checksumUpdate(hash, fork)) + } + // Fork already passed, checksum the previous hash and the fork time + hash = checksumUpdate(hash, fork) + } + return checksumToBytes(hash) +} + // NewIDWithChain calculates the Ethereum fork ID from an existing chain instance. func NewIDWithChain(chain Blockchain) ID { head := chain.CurrentHeader() diff --git a/core/genesis.go b/core/genesis.go index 2e576b544289..c613c44545d9 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -331,10 +331,12 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen applyOverrides(genesis.Config) return genesis.Config, block.Hash(), nil } - // We have the genesis block in database(perhaps in ancient database) - // but the corresponding state is missing. + // The genesis block is present(perhaps in ancient database) while the + // state database is not initialized yet. It can happen that the node + // is initialized with an external ancient store. Commit genesis state + // in this case. header := rawdb.ReadHeader(db, stored, 0) - if header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { + if header.Root != types.EmptyRootHash && !triedb.Initialized(header.Root) { if genesis == nil { genesis = DefaultGenesisBlock() } @@ -587,10 +589,8 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block // MustCommit writes the genesis block and state to db, panicking on error. // The block is committed as the canonical head block. -// Note the state changes will be committed in hash-based scheme, use Commit -// if path-scheme is preferred. 
-func (g *Genesis) MustCommit(db ethdb.Database) *types.Block { - block, err := g.Commit(db, trie.NewDatabase(db)) +func (g *Genesis) MustCommit(db ethdb.Database, triedb *trie.Database) *types.Block { + block, err := g.Commit(db, triedb) if err != nil { panic(err) } diff --git a/core/genesis_test.go b/core/genesis_test.go index d07ccaf7dd01..9902c5a3062c 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -30,9 +30,15 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) func TestSetupGenesis(t *testing.T) { + testSetupGenesis(t, rawdb.HashScheme) + testSetupGenesis(t, rawdb.PathScheme) +} + +func testSetupGenesis(t *testing.T, scheme string) { var ( customghash = common.HexToHash("0x89c99d90b79719238d2645c7642f2c9295246e80775b38cfd162b696817fbd50") customg = Genesis{ @@ -44,6 +50,7 @@ func TestSetupGenesis(t *testing.T) { oldcustomg = customg ) oldcustomg.Config = ¶ms.ChainConfig{HomesteadBlock: big.NewInt(2)} + tests := []struct { name string fn func(ethdb.Database) (*params.ChainConfig, common.Hash, error) @@ -54,7 +61,7 @@ func TestSetupGenesis(t *testing.T) { { name: "genesis without ChainConfig", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - return SetupGenesisBlock(db, trie.NewDatabase(db), new(Genesis)) + return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), new(Genesis)) }, wantErr: errGenesisNoConfig, wantConfig: params.AllEthashProtocolChanges, @@ -62,7 +69,7 @@ func TestSetupGenesis(t *testing.T) { { name: "no block in DB, genesis == nil", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - return SetupGenesisBlock(db, trie.NewDatabase(db), nil) + return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil) }, wantHash: params.MainnetGenesisHash, wantConfig: params.MainnetChainConfig, @@ -70,8 +77,8 @@ func TestSetupGenesis(t *testing.T) { { name: "mainnet block in DB, genesis == nil", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - DefaultGenesisBlock().MustCommit(db) - return SetupGenesisBlock(db, trie.NewDatabase(db), nil) + DefaultGenesisBlock().MustCommit(db, trie.NewDatabase(db, newDbConfig(scheme))) + return SetupGenesisBlock(db, trie.NewDatabase(db, newDbConfig(scheme)), nil) }, wantHash: params.MainnetGenesisHash, wantConfig: params.MainnetChainConfig, @@ -79,8 +86,9 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == nil", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - customg.MustCommit(db) - return SetupGenesisBlock(db, trie.NewDatabase(db), nil) + tdb := trie.NewDatabase(db, newDbConfig(scheme)) + customg.Commit(db, tdb) + return SetupGenesisBlock(db, tdb, nil) }, wantHash: customghash, wantConfig: customg.Config, @@ -88,8 +96,9 @@ func TestSetupGenesis(t *testing.T) { { name: "compatible config in DB", fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - oldcustomg.MustCommit(db) - return SetupGenesisBlock(db, trie.NewDatabase(db), &customg) + tdb := trie.NewDatabase(db, newDbConfig(scheme)) + oldcustomg.Commit(db, tdb) + return SetupGenesisBlock(db, tdb, &customg) }, wantHash: customghash, wantConfig: customg.Config, @@ -99,16 +108,17 @@ func TestSetupGenesis(t *testing.T) { fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { // Commit the 'old' genesis block with Homestead transition at #2. 
// Advance to block #4, past the homestead transition block of customg. - genesis := oldcustomg.MustCommit(db) + tdb := trie.NewDatabase(db, newDbConfig(scheme)) + oldcustomg.Commit(db, tdb) - bc, _ := NewBlockChain(db, nil, &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil) + bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil) defer bc.Stop() - blocks, _ := GenerateChain(oldcustomg.Config, genesis, ethash.NewFaker(), db, 4, nil) + _, blocks, _ := GenerateChainWithGenesis(&oldcustomg, ethash.NewFaker(), 4, nil) bc.InsertChain(blocks) // This should return a compatibility error. - return SetupGenesisBlock(db, trie.NewDatabase(db), &customg) + return SetupGenesisBlock(db, tdb, &customg) }, wantHash: customghash, wantConfig: customg.Config, @@ -154,7 +164,8 @@ func TestGenesisHashes(t *testing.T) { {DefaultGenesisBlock(), params.MainnetGenesisHash}, } { // Test via MustCommit - if have := c.genesis.MustCommit(rawdb.NewMemoryDatabase()).Hash(); have != c.want { + db := rawdb.NewMemoryDatabase() + if have := c.genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)).Hash(); have != c.want { t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex()) } // Test via ToBlock @@ -172,7 +183,7 @@ func TestGenesis_Commit(t *testing.T) { } db := rawdb.NewMemoryDatabase() - genesisBlock := genesis.MustCommit(db) + genesisBlock := genesis.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) if genesis.Difficulty != nil { t.Fatalf("assumption wrong") @@ -221,6 +232,7 @@ func TestReadWriteGenesisAlloc(t *testing.T) { } } } + func TestConfigOrDefault(t *testing.T) { defaultGenesis := DefaultGenesisBlock() if defaultGenesis.Config.PlanckBlock != nil { @@ -273,3 +285,10 @@ func TestSetDefaultBlockValues(t *testing.T) { t.Errorf("ChainID should not have been modified. ChainID = %v", genesis.Config.ChainID) } } + +func newDbConfig(scheme string) *trie.Config { + if scheme == rawdb.HashScheme { + return trie.HashDefaults + } + return &trie.Config{PathDB: pathdb.Defaults} +} diff --git a/core/headerchain_test.go b/core/headerchain_test.go index 08d19f695072..2c0323e6f74d 100644 --- a/core/headerchain_test.go +++ b/core/headerchain_test.go @@ -73,7 +73,7 @@ func TestHeaderInsertion(t *testing.T) { db = rawdb.NewMemoryDatabase() gspec = &Genesis{BaseFee: big.NewInt(params.InitialBaseFee), Config: params.AllEthashProtocolChanges} ) - gspec.Commit(db, trie.NewDatabase(db)) + gspec.Commit(db, trie.NewDatabase(db, nil)) hc, err := NewHeaderChain(db, gspec.Config, ethash.NewFaker(), func() bool { return false }) if err != nil { t.Fatal(err) diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index f607dd534e09..ae49932fc25f 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -435,12 +435,12 @@ func checkReceiptsRLP(have, want types.Receipts) error { func TestAncientStorage(t *testing.T) { // Freezer style fast import the chain. 
frdir := t.TempDir() - db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false, false, false, false) if err != nil { t.Fatalf("failed to create database with ancient backend") } defer db.Close() + // Create a test block block := types.NewBlockWithHeader(&types.Header{ Number: big.NewInt(0), diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go index 9ce58e7d27b9..da0659658348 100644 --- a/core/rawdb/accessors_state.go +++ b/core/rawdb/accessors_state.go @@ -256,11 +256,31 @@ func ReadStateHistory(db ethdb.AncientReaderOp, id uint64) ([]byte, []byte, []by // history starts from one(zero for initial state). func WriteStateHistory(db ethdb.AncientWriter, id uint64, meta []byte, accountIndex []byte, storageIndex []byte, accounts []byte, storages []byte) { db.ModifyAncients(func(op ethdb.AncientWriteOp) error { - op.AppendRaw(stateHistoryMeta, id-1, meta) - op.AppendRaw(stateHistoryAccountIndex, id-1, accountIndex) - op.AppendRaw(stateHistoryStorageIndex, id-1, storageIndex) - op.AppendRaw(stateHistoryAccountData, id-1, accounts) - op.AppendRaw(stateHistoryStorageData, id-1, storages) + err := op.AppendRaw(stateHistoryMeta, id-1, meta) + if err != nil { + log.Error("WriteStateHistory failed", "err", err) + return err + } + err = op.AppendRaw(stateHistoryAccountIndex, id-1, accountIndex) + if err != nil { + log.Error("WriteStateHistory failed", "err", err) + return err + } + err = op.AppendRaw(stateHistoryStorageIndex, id-1, storageIndex) + if err != nil { + log.Error("WriteStateHistory failed", "err", err) + return err + } + err = op.AppendRaw(stateHistoryAccountData, id-1, accounts) + if err != nil { + log.Error("WriteStateHistory failed", "err", err) + return err + } + err = op.AppendRaw(stateHistoryStorageData, id-1, storages) + if err != nil { + log.Error("WriteStateHistory failed", "err", err) + return err + } return nil }) } diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go index 12f1ecdf833e..ea437b8114eb 100644 --- a/core/rawdb/accessors_trie.go +++ b/core/rawdb/accessors_trie.go @@ -36,7 +36,7 @@ import ( // // Now this scheme is still kept for backward compatibility, and it will be used // for archive node and some other tries(e.g. light trie). -const HashScheme = "hashScheme" +const HashScheme = "hash" // PathScheme is the new path-based state scheme with which trie nodes are stored // in the disk with node path as the database key. This scheme will only store one @@ -44,7 +44,7 @@ const HashScheme = "hashScheme" // is native. At the same time, this scheme will put adjacent trie nodes in the same // area of the disk with good data locality property. But this scheme needs to rely // on extra state diffs to survive deep reorg. -const PathScheme = "pathScheme" +const PathScheme = "path" // hasher is used to compute the sha256 hash of the provided data. type hasher struct{ sha crypto.KeccakState } @@ -89,6 +89,16 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash) return h.hash(data) == hash } +// ExistsAccountTrieNode checks the presence of the account trie node with the +// specified node path, regardless of the node hash. +func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool { + has, err := db.Has(accountTrieNodeKey(path)) + if err != nil { + return false + } + return has +} + // WriteAccountTrieNode writes the provided account trie node into database. 
func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) { if err := db.Put(accountTrieNodeKey(path), node); err != nil { @@ -127,6 +137,16 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [ return h.hash(data) == hash } +// ExistsStorageTrieNode checks the presence of the storage trie node with the +// specified account hash and node path, regardless of the node hash. +func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool { + has, err := db.Has(storageTrieNodeKey(accountHash, path)) + if err != nil { + return false + } + return has +} + // WriteStorageTrieNode writes the provided storage trie node into database. func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) { if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil { @@ -263,3 +283,25 @@ func DeleteTrieNode(db ethdb.KeyValueWriter, owner common.Hash, path []byte, has panic(fmt.Sprintf("Unknown scheme %v", scheme)) } } + +// ReadStateScheme reads the state scheme of persistent state, or none +// if the state is not present in database. +func ReadStateScheme(db ethdb.Reader) string { + // Check if state in path-based scheme is present + blob, _ := ReadAccountTrieNode(db, nil) + if len(blob) != 0 { + return PathScheme + } + // In a hash-based scheme, the genesis state is consistently stored + // on the disk. To assess the scheme of the persistent state, it + // suffices to inspect the scheme of the genesis state. + header := ReadHeader(db, ReadCanonicalHash(db, 0), 0) + if header == nil { + return "" // empty datadir + } + blob = ReadLegacyTrieNode(db, header.Root) + if len(blob) == 0 { + return "" // no state in disk + } + return HashScheme +} diff --git a/core/rawdb/ancient_scheme.go b/core/rawdb/ancient_scheme.go index 6432a3486bc0..1f22a5c34276 100644 --- a/core/rawdb/ancient_scheme.go +++ b/core/rawdb/ancient_scheme.go @@ -58,7 +58,7 @@ const ( stateHistoryStorageData = "storage.data" ) -var stateHistoryFreezerNoSnappy = map[string]bool{ +var stateFreezerNoSnappy = map[string]bool{ stateHistoryMeta: true, stateHistoryAccountIndex: false, stateHistoryStorageIndex: false, @@ -75,7 +75,7 @@ var ( // freezers the collections of all builtin freezers. var freezers = []string{chainFreezerName, stateFreezerName} -// NewStateHistoryFreezer initializes the freezer for state history. -func NewStateHistoryFreezer(ancientDir string, readOnly bool, offset uint64) (*ResettableFreezer, error) { - return NewResettableFreezer(filepath.Join(ancientDir, stateFreezerName), "eth/db/state", readOnly, offset, stateHistoryTableSize, stateHistoryFreezerNoSnappy) +// NewStateFreezer initializes the freezer for state history. 
+func NewStateFreezer(ancientDir string, readOnly bool, offset uint64) (*ResettableFreezer, error) { + return NewResettableFreezer(filepath.Join(ancientDir, stateFreezerName), "eth/db/state", readOnly, offset, stateHistoryTableSize, stateFreezerNoSnappy) } diff --git a/core/rawdb/ancient_utils.go b/core/rawdb/ancient_utils.go index 363a911aeea7..392ac7963144 100644 --- a/core/rawdb/ancient_utils.go +++ b/core/rawdb/ancient_utils.go @@ -18,9 +18,12 @@ package rawdb import ( "fmt" + "path/filepath" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" ) type tableSize struct { @@ -50,36 +53,61 @@ func (info *freezerInfo) size() common.StorageSize { return total } +func inspect(name string, order map[string]bool, reader ethdb.AncientReader) (freezerInfo, error) { + info := freezerInfo{name: name} + for t := range order { + size, err := reader.AncientSize(t) + if err != nil { + return freezerInfo{}, err + } + info.sizes = append(info.sizes, tableSize{name: t, size: common.StorageSize(size)}) + } + // Retrieve the number of last stored item + ancients, err := reader.Ancients() + if err != nil { + return freezerInfo{}, err + } + info.head = ancients - 1 + + // Retrieve the number of first stored item + tail, err := reader.Tail() + if err != nil { + return freezerInfo{}, err + } + info.tail = tail + return info, nil +} + // inspectFreezers inspects all freezers registered in the system. func inspectFreezers(db ethdb.Database) ([]freezerInfo, error) { var infos []freezerInfo for _, freezer := range freezers { switch freezer { case chainFreezerName: - // Chain ancient store is a bit special. It's always opened along - // with the key-value store, inspect the chain store directly. - info := freezerInfo{name: freezer} - // Retrieve storage size of every contained table. 
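The renamed NewStateFreezer is also what the inspection path below opens read-only when the state freezer is not attached to the live database. A hedged sketch of that pattern; the ancient directory path is a placeholder.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// Placeholder path; in practice this is the node's ancient directory.
	ancientDir := "/tmp/geth/chaindata/ancient"

	// Open the state-history freezer read-only so a concurrently running
	// node is never repaired or truncated by the inspection.
	f, err := rawdb.NewStateFreezer(ancientDir, true, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	items, _ := f.Ancients() // number of stored state-history entries
	tail, _ := f.Tail()      // id of the first retained entry
	fmt.Println("state history entries:", items, "tail:", tail)
}
```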
- for table := range chainFreezerNoSnappy { - size, err := db.AncientSize(table) - if err != nil { - return nil, err - } - info.sizes = append(info.sizes, tableSize{name: table, size: common.StorageSize(size)}) + info, err := inspect(chainFreezerName, chainFreezerNoSnappy, db) + if err != nil { + return nil, err + } + infos = append(infos, info) + + case stateFreezerName: + if ReadStateScheme(db) != PathScheme { + continue + } + datadir, err := db.AncientDatadir() + if err != nil { + return nil, err } - // Retrieve the number of last stored item - ancients, err := db.Ancients() + f, err := NewStateFreezer(datadir, true, 0) if err != nil { return nil, err } - info.head = ancients - 1 + defer f.Close() - // Retrieve the number of first stored item - tail, err := db.Tail() + info, err := inspect(stateFreezerName, stateFreezerNoSnappy, f) if err != nil { return nil, err } - info.tail = tail infos = append(infos, info) default: @@ -119,3 +147,23 @@ func InspectFreezerTable(ancient string, freezerName string, tableName string, s table.dumpIndexStdout(start, end) return nil } + +func ResetStateFreezerTableOffset(ancient string, virtualTail uint64) error { + path, tables := filepath.Join(ancient, stateFreezerName), stateFreezerNoSnappy + + for name, disableSnappy := range tables { + log.Info("Handle table", "name", name, "disableSnappy", disableSnappy) + table, err := newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, false) + if err != nil { + log.Error("New table failed", "error", err) + return err + } + // Reset the metadata of the freezer table + err = table.ResetItemsOffset(virtualTail) + if err != nil { + log.Error("Reset items offset of the table", "name", name, "error", err) + return err + } + } + return nil +} diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 3416b79b55ce..40a034923d15 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -581,6 +581,28 @@ func AncientInspect(db ethdb.Database) error { return nil } +func PruneHashTrieNodeInDataBase(db ethdb.Database) error { + it := db.NewIterator([]byte{}, []byte{}) + defer it.Release() + + total_num := 0 + for it.Next() { + var key = it.Key() + switch { + case IsLegacyTrieNode(key, it.Value()): + db.Delete(key) + total_num++ + if total_num%100000 == 0 { + log.Info("Pruning hash-base state trie nodes", "Complete progress: ", total_num) + } + default: + continue + } + } + log.Info("Pruning hash-base state trie nodes", "Complete progress", total_num) + return nil +} + // InspectDatabase traverses the entire database and checks the size // of all different categories of data. 
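PruneHashTrieNodeInDataBase above walks the whole key-value store and drops every legacy hash-scheme trie node. A hedged sketch of invoking it after a hash-to-path conversion; the scheme guard is my assumption, not part of the patch, and the memory database stands in for chaindata.

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase() // stands in for the node's chaindata

	// Guard (my assumption): only prune once the persistent state is
	// path-based, otherwise the hash-scheme nodes are still the live state.
	if rawdb.ReadStateScheme(db) != rawdb.PathScheme {
		log.Fatal("refusing to prune: persistent state is not path-based")
	}
	if err := rawdb.PruneHashTrieNodeInDataBase(db); err != nil {
		log.Fatal(err)
	}
	log.Println("legacy hash-scheme trie nodes removed")
}
```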
func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { @@ -599,7 +621,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { tds stat numHashPairings stat hashNumPairings stat - tries stat + legacyTries stat + stateLookups stat + accountTries stat + storageTries stat codes stat txLookups stat accountSnaps stat @@ -640,8 +665,14 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { numHashPairings.Add(size) case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength): hashNumPairings.Add(size) - case len(key) == common.HashLength: - tries.Add(size) + case IsLegacyTrieNode(key, it.Value()): + legacyTries.Add(size) + case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength: + stateLookups.Add(size) + case IsAccountTrieNode(key): + accountTries.Add(size) + case IsStorageTrieNode(key): + storageTries.Add(size) case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength: codes.Add(size) case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength): @@ -678,7 +709,8 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, - uncleanShutdownKey, badBlockKey, transitionStatusKey, + uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey, + persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, } { if bytes.Equal(key, meta) { metadata.Add(size) @@ -707,7 +739,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { {"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()}, {"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()}, {"Key-Value store", "Contract codes", codes.Size(), codes.Count()}, - {"Key-Value store", "Trie nodes", tries.Size(), tries.Count()}, + {"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()}, + {"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()}, + {"Key-Value store", "Path trie account nodes", accountTries.Size(), accountTries.Count()}, + {"Key-Value store", "Path trie storage nodes", storageTries.Size(), storageTries.Count()}, {"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()}, {"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()}, {"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()}, diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index 369fd4b694ab..87b33240b815 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -220,6 +220,9 @@ func (t *freezerTable) repair() error { } // Ensure the index is a multiple of indexEntrySize bytes if overflow := stat.Size() % indexEntrySize; overflow != 0 { + if t.readonly { + return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize) + } truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path } // Retrieve the file sizes and prepare for truncation @@ -278,6 +281,9 @@ func (t *freezerTable) repair() error { // Keep truncating both files until they come in sync contentExp = int64(lastIndex.offset) for 
contentExp != contentSize { + if t.readonly { + return fmt.Errorf("freezer table(path: %s, name: %s, num: %d) is corrupted", t.path, t.name, lastIndex.filenum) + } verbose = true // Truncate the head file to the last offset pointer if contentExp < contentSize { @@ -949,3 +955,33 @@ func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) { } fmt.Fprintf(w, "|--------------------------|\n") } + +func (t *freezerTable) ResetItemsOffset(virtualTail uint64) error { + stat, err := t.index.Stat() + if err != nil { + return err + } + + if stat.Size() == 0 { + return fmt.Errorf("Stat size is zero when ResetVirtualTail.") + } + + var firstIndex indexEntry + + buffer := make([]byte, indexEntrySize) + + t.index.ReadAt(buffer, 0) + firstIndex.unmarshalBinary(buffer) + + firstIndex.offset = uint32(virtualTail) + t.index.WriteAt(firstIndex.append(nil), 0) + + var firstIndex2 indexEntry + buffer2 := make([]byte, indexEntrySize) + t.index.ReadAt(buffer2, 0) + firstIndex2.unmarshalBinary(buffer2) + + log.Info("Reset Index", "filenum", t.index.Name(), "offset", firstIndex2.offset) + + return nil +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 1de8500ace4c..9e7f8500cbda 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -67,6 +67,9 @@ var ( // snapshotSyncStatusKey tracks the snapshot sync status across restarts. snapshotSyncStatusKey = []byte("SnapshotSyncStatus") + // skeletonSyncStatusKey tracks the skeleton sync status across restarts. + skeletonSyncStatusKey = []byte("SkeletonSyncStatus") + // trieJournalKey tracks the in-memory trie node layers across restarts. trieJournalKey = []byte("TrieJournal") @@ -212,7 +215,11 @@ func accountSnapshotKey(hash common.Hash) []byte { // storageSnapshotKey = SnapshotStoragePrefix + account hash + storage hash func storageSnapshotKey(accountHash, storageHash common.Hash) []byte { - return append(append(SnapshotStoragePrefix, accountHash.Bytes()...), storageHash.Bytes()...) + buf := make([]byte, len(SnapshotStoragePrefix)+common.HashLength+common.HashLength) + n := copy(buf, SnapshotStoragePrefix) + n += copy(buf[n:], accountHash.Bytes()) + copy(buf[n:], storageHash.Bytes()) + return buf } // storageSnapshotsKey = SnapshotStoragePrefix + account hash + storage hash @@ -271,7 +278,11 @@ func accountTrieNodeKey(path []byte) []byte { // storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath. func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte { - return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...) + buf := make([]byte, len(trieNodeStoragePrefix)+common.HashLength+len(path)) + n := copy(buf, trieNodeStoragePrefix) + n += copy(buf[n:], accountHash.Bytes()) + copy(buf[n:], path) + return buf } // IsLegacyTrieNode reports whether a provided database entry is a legacy trie @@ -285,9 +296,10 @@ func IsLegacyTrieNode(key []byte, val []byte) bool { return bytes.Equal(key, crypto.Keccak256(val)) } -// IsAccountTrieNode reports whether a provided database entry is an account -// trie node in path-based state scheme. -func IsAccountTrieNode(key []byte) (bool, []byte) { +// ResolveAccountTrieNodeKey reports whether a provided database entry is an +// account trie node in path-based state scheme, and returns the resolved +// node path if so. 
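A side note on the key-construction change in schema.go above (storageSnapshotKey and storageTrieNodeKey, with the key-classification helpers continuing below): the nested append chain is replaced by one exact-size allocation plus copies, which avoids per-append reallocation and any risk of writing into the prefix's backing array should it ever carry spare capacity. A small self-contained sketch of the pattern, with mock prefix and lengths:

```go
package main

import "fmt"

// storagePrefix stands in for a package-level key prefix such as
// SnapshotStoragePrefix or trieNodeStoragePrefix.
var storagePrefix = []byte("o")

// storageKey builds prefix + accountHash + storageHash with one exact-size
// allocation, mirroring the copy-based construction adopted in schema.go.
func storageKey(accountHash, storageHash [32]byte) []byte {
	buf := make([]byte, len(storagePrefix)+32+32)
	n := copy(buf, storagePrefix)
	n += copy(buf[n:], accountHash[:])
	copy(buf[n:], storageHash[:])
	return buf
}

func main() {
	var acct, slot [32]byte
	acct[31], slot[31] = 0x01, 0x02
	fmt.Printf("%x\n", storageKey(acct, slot))
}
```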
+func ResolveAccountTrieNodeKey(key []byte) (bool, []byte) { if !bytes.HasPrefix(key, trieNodeAccountPrefix) { return false, nil } @@ -300,9 +312,17 @@ func IsAccountTrieNode(key []byte) (bool, []byte) { return true, key[len(trieNodeAccountPrefix):] } -// IsStorageTrieNode reports whether a provided database entry is a storage +// IsAccountTrieNode reports whether a provided database entry is an account // trie node in path-based state scheme. -func IsStorageTrieNode(key []byte) (bool, common.Hash, []byte) { +func IsAccountTrieNode(key []byte) bool { + ok, _ := ResolveAccountTrieNodeKey(key) + return ok +} + +// ResolveStorageTrieNode reports whether a provided database entry is a storage +// trie node in path-based state scheme, and returns the resolved account hash +// and node path if so. +func ResolveStorageTrieNode(key []byte) (bool, common.Hash, []byte) { if !bytes.HasPrefix(key, trieNodeStoragePrefix) { return false, common.Hash{}, nil } @@ -318,3 +338,10 @@ func IsStorageTrieNode(key []byte) (bool, common.Hash, []byte) { accountHash := common.BytesToHash(key[len(trieNodeStoragePrefix) : len(trieNodeStoragePrefix)+common.HashLength]) return true, accountHash, key[len(trieNodeStoragePrefix)+common.HashLength:] } + +// IsStorageTrieNode reports whether a provided database entry is a storage +// trie node in path-based state scheme. +func IsStorageTrieNode(key []byte) bool { + ok, _, _ := ResolveStorageTrieNode(key) + return ok +} diff --git a/core/remote_state_verifier.go b/core/remote_state_verifier.go index fe1617544c1b..c2c10c2be38a 100644 --- a/core/remote_state_verifier.go +++ b/core/remote_state_verifier.go @@ -351,8 +351,9 @@ func (vt *verifyTask) sendVerifyRequest(n int) { } if n < len(validPeers) && n > 0 { - rand.Seed(time.Now().UnixNano()) - rand.Shuffle(len(validPeers), func(i, j int) { validPeers[i], validPeers[j] = validPeers[j], validPeers[i] }) + // rand.Seed(time.Now().UnixNano()) + r := rand.New(rand.NewSource(time.Now().UnixNano())) + r.Shuffle(len(validPeers), func(i, j int) { validPeers[i], validPeers[j] = validPeers[j], validPeers[i] }) } else { n = len(validPeers) } diff --git a/core/state/database.go b/core/state/database.go index a7081b717ffd..071881b43ea9 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -19,7 +19,6 @@ package state import ( "errors" "fmt" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/lru" @@ -29,25 +28,14 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" - exlru "github.com/hashicorp/golang-lru" //ex: external ) const ( // Number of codehash->size associations to keep. codeSizeCacheSize = 100000 - // Number of state trie in cache - accountTrieCacheSize = 32 - - // Number of storage Trie in cache - storageTrieCacheSize = 2000 - // Cache size granted for caching clean code. codeCacheSize = 64 * 1024 * 1024 - - purgeInterval = 600 - - maxAccountTrieSize = 1024 * 1024 ) // Database wraps access to tries and contract code. @@ -70,18 +58,9 @@ type Database interface { // DiskDB returns the underlying key-value disk database. DiskDB() ethdb.KeyValueStore - // TrieDB retrieves the low level trie database used for data storage. + // TrieDB returns the underlying trie database for managing trie nodes. 
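The remote_state_verifier change above drops the rand.Seed call, which is deprecated since Go 1.20, in favour of a locally seeded generator. A minimal sketch of that pattern with an illustrative peer slice:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	peers := []string{"peer-a", "peer-b", "peer-c", "peer-d"}

	// A privately seeded source replaces the deprecated rand.Seed call and
	// leaves the package-global generator untouched.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	r.Shuffle(len(peers), func(i, j int) { peers[i], peers[j] = peers[j], peers[i] })

	fmt.Println(peers)
}
```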
TrieDB() *trie.Database - // Cache the account trie tree - CacheAccount(root common.Hash, t Trie) - - // Cache the storage trie tree - CacheStorage(addrHash common.Hash, root common.Hash, t Trie) - - // Purge cache - Purge() - // NoTries returns whether the database has tries storage. NoTries() bool } @@ -173,7 +152,7 @@ func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { disk: db, codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), - triedb: trie.NewDatabaseWithConfig(db, config), + triedb: trie.NewDatabase(db, config), noTries: noTries, } } @@ -191,53 +170,12 @@ func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database { } } -func NewDatabaseWithConfigAndCache(db ethdb.Database, config *trie.Config) Database { - atc, _ := exlru.New(accountTrieCacheSize) - stc, _ := exlru.New(storageTrieCacheSize) - noTries := config != nil && config.NoTries - - database := &cachingDB{ - disk: db, - codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), - codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), - triedb: trie.NewDatabaseWithConfig(db, config), - accountTrieCache: atc, - storageTrieCache: stc, - noTries: noTries, - } - if !noTries { - go database.purgeLoop() - } - return database -} - type cachingDB struct { - disk ethdb.KeyValueStore - codeSizeCache *lru.Cache[common.Hash, int] - codeCache *lru.SizeConstrainedCache[common.Hash, []byte] - triedb *trie.Database - accountTrieCache *exlru.Cache - storageTrieCache *exlru.Cache - noTries bool -} - -type triePair struct { - root common.Hash - trie Trie -} - -func (db *cachingDB) purgeLoop() { - for { - time.Sleep(purgeInterval * time.Second) - _, accounts, ok := db.accountTrieCache.GetOldest() - if !ok { - continue - } - tr := accounts.(*trie.SecureTrie).GetRawTrie() - if tr.Size() > maxAccountTrieSize { - db.Purge() - } - } + disk ethdb.KeyValueStore + codeSizeCache *lru.Cache[common.Hash, int] + codeCache *lru.SizeConstrainedCache[common.Hash, []byte] + triedb *trie.Database + noTries bool } // OpenTrie opens the main account trie at a specific root hash. 
@@ -245,11 +183,6 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { if db.noTries { return trie.NewEmptyTrie(), nil } - if db.accountTrieCache != nil { - if tr, exist := db.accountTrieCache.Get(root); exist { - return tr.(Trie).(*trie.SecureTrie).Copy(), nil - } - } tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb) if err != nil { return nil, err @@ -262,16 +195,6 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre if db.noTries { return trie.NewEmptyTrie(), nil } - if db.storageTrieCache != nil { - if tries, exist := db.storageTrieCache.Get(crypto.Keccak256Hash(address.Bytes())); exist { - triesPairs := tries.([3]*triePair) - for _, triePair := range triesPairs { - if triePair != nil && triePair.root == root { - return triePair.trie.(*trie.SecureTrie).Copy(), nil - } - } - } - } tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.triedb) if err != nil { @@ -280,46 +203,10 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre return tr, nil } -func (db *cachingDB) CacheAccount(root common.Hash, t Trie) { - if db.accountTrieCache == nil { - return - } - tr := t.(*trie.SecureTrie) - db.accountTrieCache.Add(root, tr.ResetCopy()) -} - -func (db *cachingDB) CacheStorage(addrHash common.Hash, root common.Hash, t Trie) { - if db.storageTrieCache == nil { - return - } - tr := t.(*trie.SecureTrie) - if tries, exist := db.storageTrieCache.Get(addrHash); exist { - triesArray := tries.([3]*triePair) - newTriesArray := [3]*triePair{ - {root: root, trie: tr.ResetCopy()}, - triesArray[0], - triesArray[1], - } - db.storageTrieCache.Add(addrHash, newTriesArray) - } else { - triesArray := [3]*triePair{{root: root, trie: tr.ResetCopy()}, nil, nil} - db.storageTrieCache.Add(addrHash, triesArray) - } -} - func (db *cachingDB) NoTries() bool { return db.noTries } -func (db *cachingDB) Purge() { - if db.storageTrieCache != nil { - db.storageTrieCache.Purge() - } - if db.accountTrieCache != nil { - db.accountTrieCache.Purge() - } -} - // CopyTrie returns an independent copy of the given trie. func (db *cachingDB) CopyTrie(t Trie) Trie { if t == nil { diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go index c8e1a181eb6f..73cc22490b6e 100644 --- a/core/state/iterator_test.go +++ b/core/state/iterator_test.go @@ -26,9 +26,14 @@ import ( // Tests that the node iterator indeed walks over the entire database contents. 
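With the shared account and storage trie caches removed above, cachingDB now only wraps a trie.Database built through the two-argument constructor. A hedged sketch of wiring a state database over an explicitly configured trie database; the path-scheme choice here is illustrative.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)

func main() {
	disk := rawdb.NewMemoryDatabase()

	// One explicitly configured trie database owns node storage; the state
	// database is layered on top of it instead of building its own.
	triedb := trie.NewDatabase(disk, &trie.Config{PathDB: pathdb.Defaults})
	defer triedb.Close()

	sdb := state.NewDatabaseWithNodeDB(disk, triedb)
	statedb, err := state.New(types.EmptyRootHash, sdb, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("empty root:", statedb.IntermediateRoot(false))
}
```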
func TestNodeIteratorCoverage(t *testing.T) { + testNodeIteratorCoverage(t, rawdb.HashScheme) + testNodeIteratorCoverage(t, rawdb.PathScheme) +} + +func testNodeIteratorCoverage(t *testing.T, scheme string) { // Create some arbitrary test state to iterate - db, sdb, root, _ := makeTestState() - sdb.TrieDB().Commit(root, false) + db, sdb, ndb, root, _ := makeTestState(scheme) + ndb.Commit(root, false) state, err := New(root, sdb, nil) if err != nil { @@ -48,7 +53,7 @@ func TestNodeIteratorCoverage(t *testing.T) { ) it := db.NewIterator(nil, nil) for it.Next() { - ok, hash := isTrieNode(sdb.TrieDB().Scheme(), it.Key(), it.Value()) + ok, hash := isTrieNode(scheme, it.Key(), it.Value()) if !ok { continue } @@ -90,11 +95,11 @@ func isTrieNode(scheme string, key, val []byte) (bool, common.Hash) { return true, common.BytesToHash(key) } } else { - ok, _ := rawdb.IsAccountTrieNode(key) + ok := rawdb.IsAccountTrieNode(key) if ok { return true, crypto.Keccak256Hash(val) } - ok, _, _ = rawdb.IsStorageTrieNode(key) + ok = rawdb.IsStorageTrieNode(key) if ok { return true, crypto.Keccak256Hash(val) } diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index a84caa5c4901..9d47b7020ab0 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -101,13 +101,16 @@ func NewPruner(db ethdb.Database, config Config, triesInMemory uint64) (*Pruner, if headBlock == nil { return nil, errors.New("failed to load head block") } + // Offline pruning is only supported in legacy hash based scheme. + triedb := trie.NewDatabase(db, trie.HashDefaults) + snapconfig := snapshot.Config{ CacheSize: 256, Recovery: false, NoBuild: true, AsyncBuild: false, } - snaptree, err := snapshot.New(snapconfig, db, trie.NewDatabase(db), headBlock.Root(), int(triesInMemory), false) + snaptree, err := snapshot.New(snapconfig, db, triedb, headBlock.Root(), int(triesInMemory), false) if err != nil { return nil, err // The relevant snapshot(s) might not exist } @@ -685,7 +688,9 @@ func RecoverPruning(datadir string, db ethdb.Database, triesInMemory uint64) err NoBuild: true, AsyncBuild: false, } - snaptree, err := snapshot.New(snapconfig, db, trie.NewDatabase(db), headBlock.Root(), int(triesInMemory), false) + // Offline pruning is only supported in legacy hash based scheme. 
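The iterator test above switches to the boolean IsAccountTrieNode / IsStorageTrieNode helpers, while the Resolve* variants remain available when the decoded owner or path is needed. A hedged sketch of classifying raw keys the way the inspection code does, with fabricated entries:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func classify(key, val []byte) string {
	switch {
	case rawdb.IsLegacyTrieNode(key, val):
		return "hash-scheme trie node"
	case rawdb.IsAccountTrieNode(key):
		_, path := rawdb.ResolveAccountTrieNodeKey(key)
		return fmt.Sprintf("path-scheme account node (path %x)", path)
	case rawdb.IsStorageTrieNode(key):
		_, owner, path := rawdb.ResolveStorageTrieNode(key)
		return fmt.Sprintf("path-scheme storage node (owner %x, path %x)", owner[:4], path)
	default:
		return "other"
	}
}

func main() {
	db := rawdb.NewMemoryDatabase()
	rawdb.WriteAccountTrieNode(db, []byte{0x1, 0x2}, []byte{0xaa})
	rawdb.WriteStorageTrieNode(db, common.HexToHash("0x01"), []byte{0x3}, []byte{0xbb})

	it := db.NewIterator(nil, nil)
	defer it.Release()
	for it.Next() {
		fmt.Printf("%x -> %s\n", it.Key(), classify(it.Key(), it.Value()))
	}
}
```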
+ triedb := trie.NewDatabase(db, trie.HashDefaults) + snaptree, err := snapshot.New(snapconfig, db, triedb, headBlock.Root(), int(triesInMemory), false) if err != nil { return err // The relevant snapshot(s) might not exist } @@ -727,7 +732,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { if genesis == nil { return errors.New("missing genesis block") } - t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db)) + t, err := trie.NewStateTrie(trie.StateTrieID(genesis.Root()), trie.NewDatabase(db, trie.HashDefaults)) if err != nil { return err } @@ -751,7 +756,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { } if acc.Root != types.EmptyRootHash { id := trie.StorageTrieID(genesis.Root(), common.BytesToHash(accIter.LeafKey()), acc.Root) - storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db)) + storageTrie, err := trie.NewStateTrie(id, trie.NewDatabase(db, trie.HashDefaults)) if err != nil { return err } diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index b1ae98758113..fbd83e3b5231 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -356,7 +356,8 @@ func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefi var resolver trie.NodeResolver if len(result.keys) > 0 { mdb := rawdb.NewMemoryDatabase() - tdb := trie.NewDatabase(mdb) + tdb := trie.NewDatabase(mdb, trie.HashDefaults) + defer tdb.Close() snapTrie := trie.NewEmpty(tdb) for i, key := range result.keys { snapTrie.Update(key, result.vals[i]) diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go index c503676241f9..07016b675ce8 100644 --- a/core/state/snapshot/generate_test.go +++ b/core/state/snapshot/generate_test.go @@ -30,6 +30,8 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/trie/trienode" "golang.org/x/crypto/sha3" ) @@ -45,10 +47,15 @@ func hashData(input []byte) common.Hash { // Tests that snapshot generation from an empty database. func TestGeneration(t *testing.T) { + testGeneration(t, rawdb.HashScheme) + testGeneration(t, rawdb.PathScheme) +} + +func testGeneration(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. - var helper = newHelper() + var helper = newHelper(scheme) stRoot := helper.makeStorageTrie(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false) helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) @@ -79,10 +86,15 @@ func TestGeneration(t *testing.T) { // Tests that snapshot generation with existent flat state. func TestGenerateExistentState(t *testing.T) { + testGenerateExistentState(t, rawdb.HashScheme) + testGenerateExistentState(t, rawdb.PathScheme) +} + +func testGenerateExistentState(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. 
- var helper = newHelper() + var helper = newHelper(scheme) stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) @@ -148,9 +160,15 @@ type testHelper struct { nodes *trienode.MergedNodeSet } -func newHelper() *testHelper { +func newHelper(scheme string) *testHelper { diskdb := rawdb.NewMemoryDatabase() - triedb := trie.NewDatabase(diskdb) + config := &trie.Config{} + if scheme == rawdb.PathScheme { + config.PathDB = &pathdb.Config{} // disable caching + } else { + config.HashDB = &hashdb.Config{} // disable caching + } + triedb := trie.NewDatabase(diskdb, config) accTrie, _ := trie.NewStateTrie(trie.StateTrieID(types.EmptyRootHash), triedb) return &testHelper{ diskdb: diskdb, @@ -233,7 +251,12 @@ func (t *testHelper) CommitAndGenerate() (common.Hash, *diskLayer) { // - extra slots in the middle // - extra slots in the end func TestGenerateExistentStateWithWrongStorage(t *testing.T) { - helper := newHelper() + testGenerateExistentStateWithWrongStorage(t, rawdb.HashScheme) + testGenerateExistentStateWithWrongStorage(t, rawdb.PathScheme) +} + +func testGenerateExistentStateWithWrongStorage(t *testing.T, scheme string) { + helper := newHelper(scheme) // Account one, empty root but non-empty database helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) @@ -325,7 +348,12 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { // - wrong accounts // - extra accounts func TestGenerateExistentStateWithWrongAccounts(t *testing.T) { - helper := newHelper() + testGenerateExistentStateWithWrongAccounts(t, rawdb.HashScheme) + testGenerateExistentStateWithWrongAccounts(t, rawdb.PathScheme) +} + +func testGenerateExistentStateWithWrongAccounts(t *testing.T, scheme string) { + helper := newHelper(scheme) helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) @@ -380,10 +408,15 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) { // Tests that snapshot generation errors out correctly in case of a missing trie // node in the account trie. func TestGenerateCorruptAccountTrie(t *testing.T) { + testGenerateCorruptAccountTrie(t, rawdb.HashScheme) + testGenerateCorruptAccountTrie(t, rawdb.PathScheme) +} + +func testGenerateCorruptAccountTrie(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // without any storage slots to keep the test smaller. 
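The reworked test helper above now takes the scheme under test and derives the trie backend from it, using zero-value hashdb/pathdb configs to keep caching disabled. A hedged standalone sketch of that selection; the function name is mine.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)

// newTestTrieDB mirrors the helper's scheme handling: zero-value backend
// configs keep caching disabled so the tests hit the disk layout directly.
func newTestTrieDB(scheme string) *trie.Database {
	config := &trie.Config{}
	if scheme == rawdb.PathScheme {
		config.PathDB = &pathdb.Config{}
	} else {
		config.HashDB = &hashdb.Config{}
	}
	return trie.NewDatabase(rawdb.NewMemoryDatabase(), config)
}

func main() {
	for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} {
		db := newTestTrieDB(scheme)
		fmt.Println(scheme, "->", db.Scheme())
		db.Close()
	}
}
```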
- helper := newHelper() + helper := newHelper(scheme) helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074 helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 @@ -391,9 +424,11 @@ func TestGenerateCorruptAccountTrie(t *testing.T) { root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978 - // Delete an account trie leaf and ensure the generator chokes - helper.triedb.Commit(root, false) - helper.diskdb.Delete(common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7").Bytes()) + // Delete an account trie node and ensure the generator chokes + targetPath := []byte{0xc} + targetHash := common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7") + + rawdb.DeleteTrieNode(helper.diskdb, common.Hash{}, targetPath, targetHash, scheme) snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root) select { @@ -414,11 +449,19 @@ func TestGenerateCorruptAccountTrie(t *testing.T) { // trie node for a storage trie. It's similar to internal corruption but it is // handled differently inside the generator. func TestGenerateMissingStorageTrie(t *testing.T) { + testGenerateMissingStorageTrie(t, rawdb.HashScheme) + testGenerateMissingStorageTrie(t, rawdb.PathScheme) +} + +func testGenerateMissingStorageTrie(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. - helper := newHelper() - + var ( + acc1 = hashData([]byte("acc-1")) + acc3 = hashData([]byte("acc-3")) + helper = newHelper(scheme) + ) stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 @@ -427,8 +470,9 @@ func TestGenerateMissingStorageTrie(t *testing.T) { root := helper.Commit() - // Delete a storage trie root and ensure the generator chokes - helper.diskdb.Delete(stRoot.Bytes()) + // Delete storage trie root of account one and three. + rawdb.DeleteTrieNode(helper.diskdb, acc1, nil, stRoot, scheme) + rawdb.DeleteTrieNode(helper.diskdb, acc3, nil, stRoot, scheme) snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root) select { @@ -448,10 +492,15 @@ func TestGenerateMissingStorageTrie(t *testing.T) { // Tests that snapshot generation errors out correctly in case of a missing trie // node in a storage trie. 
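The corruption tests above now delete nodes through rawdb.DeleteTrieNode instead of raw key deletes, so the same test body works under both layouts. A hedged sketch of that call; owner, path and hash are placeholder values borrowed from the test above.

```go
package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	owner := common.Hash{} // account trie, so no owner
	path := []byte{0xc}    // node path, used by the path-based key
	hash := common.HexToHash("0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7")

	// The same call removes the node under either layout; only the key
	// derivation (hash vs. owner+path) differs per scheme.
	rawdb.DeleteTrieNode(db, owner, path, hash, rawdb.HashScheme)
	rawdb.DeleteTrieNode(db, owner, path, hash, rawdb.PathScheme)
}
```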
func TestGenerateCorruptStorageTrie(t *testing.T) { + testGenerateCorruptStorageTrie(t, rawdb.HashScheme) + testGenerateCorruptStorageTrie(t, rawdb.PathScheme) +} + +func testGenerateCorruptStorageTrie(t *testing.T, scheme string) { // We can't use statedb to make a test trie (circular dependency), so make // a fake one manually. We're going with a small account trie of 3 accounts, // two of which also has the same 3-slot storage trie attached. - helper := newHelper() + helper := newHelper(scheme) stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e @@ -461,8 +510,11 @@ func TestGenerateCorruptStorageTrie(t *testing.T) { root := helper.Commit() - // Delete a storage trie leaf and ensure the generator chokes - helper.diskdb.Delete(common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371").Bytes()) + // Delete a node in the storage trie. + targetPath := []byte{0x4} + targetHash := common.HexToHash("0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371") + rawdb.DeleteTrieNode(helper.diskdb, hashData([]byte("acc-1")), targetPath, targetHash, scheme) + rawdb.DeleteTrieNode(helper.diskdb, hashData([]byte("acc-3")), targetPath, targetHash, scheme) snap := generateSnapshot(helper.diskdb, helper.triedb, 16, root) select { @@ -481,7 +533,12 @@ func TestGenerateCorruptStorageTrie(t *testing.T) { // Tests that snapshot generation when an extra account with storage exists in the snap state. func TestGenerateWithExtraAccounts(t *testing.T) { - helper := newHelper() + testGenerateWithExtraAccounts(t, rawdb.HashScheme) + testGenerateWithExtraAccounts(t, rawdb.PathScheme) +} + +func testGenerateWithExtraAccounts(t *testing.T, scheme string) { + helper := newHelper(scheme) { // Account one in the trie stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), @@ -549,10 +606,15 @@ func enableLogging() { // Tests that snapshot generation when an extra account with storage exists in the snap state. func TestGenerateWithManyExtraAccounts(t *testing.T) { + testGenerateWithManyExtraAccounts(t, rawdb.HashScheme) + testGenerateWithManyExtraAccounts(t, rawdb.PathScheme) +} + +func testGenerateWithManyExtraAccounts(t *testing.T, scheme string) { if false { enableLogging() } - helper := newHelper() + helper := newHelper(scheme) { // Account one in the trie stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), @@ -605,11 +667,16 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) { // So in trie, we iterate 2 entries 0x03, 0x07. We create the 0x07 in the database and abort the procedure, because the trie is exhausted. // But in the database, we still have the stale storage slots 0x04, 0x05. They are not iterated yet, but the procedure is finished. 
func TestGenerateWithExtraBeforeAndAfter(t *testing.T) { + testGenerateWithExtraBeforeAndAfter(t, rawdb.HashScheme) + testGenerateWithExtraBeforeAndAfter(t, rawdb.PathScheme) +} + +func testGenerateWithExtraBeforeAndAfter(t *testing.T, scheme string) { accountCheckRange = 3 if false { enableLogging() } - helper := newHelper() + helper := newHelper(scheme) { acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) @@ -642,11 +709,16 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) { // TestGenerateWithMalformedSnapdata tests what happes if we have some junk // in the snapshot database, which cannot be parsed back to an account func TestGenerateWithMalformedSnapdata(t *testing.T) { + testGenerateWithMalformedSnapdata(t, rawdb.HashScheme) + testGenerateWithMalformedSnapdata(t, rawdb.PathScheme) +} + +func testGenerateWithMalformedSnapdata(t *testing.T, scheme string) { accountCheckRange = 3 if false { enableLogging() } - helper := newHelper() + helper := newHelper(scheme) { acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) @@ -679,10 +751,15 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) { } func TestGenerateFromEmptySnap(t *testing.T) { + testGenerateFromEmptySnap(t, rawdb.HashScheme) + testGenerateFromEmptySnap(t, rawdb.PathScheme) +} + +func testGenerateFromEmptySnap(t *testing.T, scheme string) { //enableLogging() accountCheckRange = 10 storageCheckRange = 20 - helper := newHelper() + helper := newHelper(scheme) // Add 1K accounts to the trie for i := 0; i < 400; i++ { stRoot := helper.makeStorageTrie(hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) @@ -714,8 +791,13 @@ func TestGenerateFromEmptySnap(t *testing.T) { // This hits a case where the snap verification passes, but there are more elements in the trie // which we must also add. func TestGenerateWithIncompleteStorage(t *testing.T) { + testGenerateWithIncompleteStorage(t, rawdb.HashScheme) + testGenerateWithIncompleteStorage(t, rawdb.PathScheme) +} + +func testGenerateWithIncompleteStorage(t *testing.T, scheme string) { storageCheckRange = 4 - helper := newHelper() + helper := newHelper(scheme) stKeys := []string{"1", "2", "3", "4", "5", "6", "7", "8"} stVals := []string{"v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8"} // We add 8 accounts, each one is missing exactly one of the storage slots. This means @@ -813,7 +895,12 @@ func populateDangling(disk ethdb.KeyValueStore) { // // This test will populate some dangling storages to see if they can be cleaned up. func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) { - var helper = newHelper() + testGenerateCompleteSnapshotWithDanglingStorage(t, rawdb.HashScheme) + testGenerateCompleteSnapshotWithDanglingStorage(t, rawdb.PathScheme) +} + +func testGenerateCompleteSnapshotWithDanglingStorage(t *testing.T, scheme string) { + var helper = newHelper(scheme) stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) @@ -848,7 +935,12 @@ func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) { // // This test will populate some dangling storages to see if they can be cleaned up. 
func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) { - var helper = newHelper() + testGenerateBrokenSnapshotWithDanglingStorage(t, rawdb.HashScheme) + testGenerateBrokenSnapshotWithDanglingStorage(t, rawdb.PathScheme) +} + +func testGenerateBrokenSnapshotWithDanglingStorage(t *testing.T, scheme string) { + var helper = newHelper(scheme) stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) diff --git a/core/state/state_object.go b/core/state/state_object.go index fc3c2990b854..704216c988db 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -150,17 +150,17 @@ func (s *stateObject) touch() { func (s *stateObject) getTrie() (Trie, error) { if s.trie == nil { // Try fetching from prefetcher first - if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { - // When the miner is creating the pending state, there is no prefetcher - s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) - } - if s.trie == nil { - tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root) - if err != nil { - return nil, err - } - s.trie = tr + // if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { + // When the miner is creating the pending state, there is no prefetcher + // s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) + // } + // if s.trie == nil { + tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root) + if err != nil { + return nil, err } + s.trie = tr + // } } return s.trie, nil } @@ -470,9 +470,6 @@ func (s *stateObject) commit() (*trienode.NodeSet, error) { return nil, err } s.data.Root = root - if s.data.Root != types.EmptyRootHash { - s.db.db.CacheStorage(s.addrHash, s.data.Root, s.trie) - } // Update original account data after commit s.origin = s.data.Copy() diff --git a/core/state/statedb.go b/core/state/statedb.go index a7c6d5e91a04..18275735e80f 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -43,7 +43,12 @@ import ( "github.com/ethereum/go-ethereum/trie/triestate" ) -const defaultNumOfSlots = 100 +const ( + defaultNumOfSlots = 100 + // storageDeleteLimit denotes the highest permissible memory allocation + // employed for contract storage deletion. + storageDeleteLimit = 512 * 1024 * 1024 +) type revision struct { id int @@ -1358,63 +1363,134 @@ func (s *StateDB) clearJournalAndRefund() { s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries } -// deleteStorage iterates the storage trie belongs to the account and mark all -// slots inside as deleted. -func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) { - start := time.Now() +// fastDeleteStorage is the function that efficiently deletes the storage trie +// of a specific account. It leverages the associated state snapshot for fast +// storage iteration and constructs trie node deletion markers by creating +// stack trie with iterated slots. 
+func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) { + iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{}) + if err != nil { + return false, 0, nil, nil, err + } + defer iter.Release() + + var ( + size common.StorageSize + nodes = trienode.NewNodeSet(addrHash) + slots = make(map[common.Hash][]byte) + ) + stack := trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { + nodes.AddNode(path, trienode.NewDeleted()) + size += common.StorageSize(len(path)) + }) + for iter.Next() { + if size > storageDeleteLimit { + return true, size, nil, nil, nil + } + slot := common.CopyBytes(iter.Slot()) + if err := iter.Error(); err != nil { // error might occur after Slot function + return false, 0, nil, nil, err + } + size += common.StorageSize(common.HashLength + len(slot)) + slots[iter.Hash()] = slot + + if err := stack.Update(iter.Hash().Bytes(), slot); err != nil { + return false, 0, nil, nil, err + } + } + if err := iter.Error(); err != nil { // error might occur during iteration + return false, 0, nil, nil, err + } + if stack.Hash() != root { + return false, 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash()) + } + return false, size, slots, nodes, nil +} + +// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage," +// employed when the associated state snapshot is not available. It iterates the +// storage slots along with all internal trie nodes via trie directly. +func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) { tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root) if err != nil { - return false, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err) + return false, 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err) } // skip deleting storages for EmptyTrie if _, ok := tr.(*trie.EmptyTrie); ok { - return false, nil, nil, nil + return false, 0, nil, nil, nil } it, err := tr.NodeIterator(nil) if err != nil { - return false, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err) + return false, 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err) } var ( - set = trienode.NewNodeSet(addrHash) - slots = make(map[common.Hash][]byte) - stateSize common.StorageSize - nodeSize common.StorageSize + size common.StorageSize + nodes = trienode.NewNodeSet(addrHash) + slots = make(map[common.Hash][]byte) ) for it.Next(true) { - // arbitrary stateSize limit, make it configurable - if stateSize+nodeSize > 512*1024*1024 { - log.Info("Skip large storage deletion", "address", addr.Hex(), "states", stateSize, "nodes", nodeSize) - if metrics.EnabledExpensive { - slotDeletionSkip.Inc(1) - } - return true, nil, nil, nil + if size > storageDeleteLimit { + return true, size, nil, nil, nil } if it.Leaf() { slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob()) - stateSize += common.StorageSize(common.HashLength + len(it.LeafBlob())) + size += common.StorageSize(common.HashLength + len(it.LeafBlob())) continue } if it.Hash() == (common.Hash{}) { continue } - nodeSize += common.StorageSize(len(it.Path())) - set.AddNode(it.Path(), trienode.NewDeleted()) + size += common.StorageSize(len(it.Path())) + nodes.AddNode(it.Path(), trienode.NewDeleted()) } if err := it.Error(); err != nil { + return 
false, 0, nil, nil, err + } + return false, size, slots, nodes, nil +} + +// deleteStorage is designed to delete the storage trie of a designated account. +// It could potentially be terminated if the storage size is excessively large, +// potentially leading to an out-of-memory panic. The function will make an attempt +// to utilize an efficient strategy if the associated state snapshot is reachable; +// otherwise, it will resort to a less-efficient approach. +func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) { + var ( + start = time.Now() + err error + aborted bool + size common.StorageSize + slots map[common.Hash][]byte + nodes *trienode.NodeSet + ) + // The fast approach can be failed if the snapshot is not fully + // generated, or it's internally corrupted. Fallback to the slow + // one just in case. + if s.snap != nil { + aborted, size, slots, nodes, err = s.fastDeleteStorage(addrHash, root) + } + if s.snap == nil || err != nil { + aborted, size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root) + } + if err != nil { return false, nil, nil, err } if metrics.EnabledExpensive { - if int64(len(slots)) > slotDeletionMaxCount.Value() { - slotDeletionMaxCount.Update(int64(len(slots))) + if aborted { + slotDeletionSkip.Inc(1) } - if int64(stateSize+nodeSize) > slotDeletionMaxSize.Value() { - slotDeletionMaxSize.Update(int64(stateSize + nodeSize)) + n := int64(len(slots)) + if n > slotDeletionMaxCount.Value() { + slotDeletionMaxCount.Update(n) + } + if int64(size) > slotDeletionMaxSize.Value() { + slotDeletionMaxSize.Update(int64(size)) } slotDeletionTimer.UpdateSince(start) - slotDeletionCount.Mark(int64(len(slots))) - slotDeletionSize.Mark(int64(stateSize + nodeSize)) + slotDeletionCount.Mark(n) + slotDeletionSize.Mark(int64(size)) } - return false, slots, set, nil + return aborted, slots, nodes, nil } // handleDestruction processes all destruction markers and deletes the account @@ -1631,9 +1707,6 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc return err } } - if root != types.EmptyRootHash { - s.db.CacheAccount(root, s.trie) - } origin := s.originalRoot if origin == (common.Hash{}) { @@ -1748,6 +1821,7 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc go commmitTrie() } else { defer s.StopPrefetcher() + commitFuncs = append(commitFuncs, commmitTrie) } commitRes := make(chan error, len(commitFuncs)) for _, f := range commitFuncs { @@ -1763,10 +1837,6 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc return common.Hash{}, nil, r } } - // commitFuncs[1] and commmitTrie concurrent map `storages` iteration and map write - if err := commmitTrie(); err != nil { - return common.Hash{}, nil, err - } root := s.stateRoot if s.pipeCommit { @@ -1777,7 +1847,20 @@ func (s *StateDB) Commit(block uint64, failPostCommitFunc func(), postCommitFunc if root == (common.Hash{}) { root = types.EmptyRootHash } - + //origin := s.originalRoot + //if origin == (common.Hash{}) { + // origin = types.EmptyRootHash + //} + //if root != origin { + // start := time.Now() + // if err := s.db.TrieDB().Update(root, origin, block, nodes, triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)); err != nil { + // return common.Hash{}, nil, err + // } + // s.originalRoot = root + // if metrics.EnabledExpensive { + // s.TrieDBCommits += time.Since(start) + // } + //} // Clear all internal flags at the end of 
commit operation. s.accounts = make(map[common.Hash][]byte) s.storages = make(map[common.Hash]map[common.Hash][]byte) diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go index e877c1a736e7..07a27d7b7a10 100644 --- a/core/state/statedb_fuzz_test.go +++ b/core/state/statedb_fuzz_test.go @@ -31,11 +31,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/firehose" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/trie/triestate" ) @@ -180,16 +182,28 @@ func (test *stateTest) run() bool { storageList = append(storageList, copy2DSet(states.Storages)) } disk = rawdb.NewMemoryDatabase() - tdb = trie.NewDatabaseWithConfig(disk, &trie.Config{OnCommit: onCommit}) + tdb = trie.NewDatabase(disk, &trie.Config{OnCommit: onCommit, PathDB: pathdb.Defaults}) sdb = NewDatabaseWithNodeDB(disk, tdb) byzantium = rand.Intn(2) == 0 ) + defer disk.Close() + defer tdb.Close() + + var snaps *snapshot.Tree + if rand.Intn(3) == 0 { + snaps, _ = snapshot.New(snapshot.Config{ + CacheSize: 1, + Recovery: false, + NoBuild: false, + AsyncBuild: false, + }, disk, tdb, types.EmptyRootHash, 128, false) + } for i, actions := range test.actions { root := types.EmptyRootHash if i != 0 { root = roots[len(roots)-1] } - state, err := New(root, sdb, nil) + state, err := New(root, sdb, snaps) if err != nil { panic(err) } @@ -366,8 +380,7 @@ func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Datab return nil } -// TODO(Nathan): enable this case after enabling pbss -func testStateChanges(t *testing.T) { +func TestStateChanges(t *testing.T) { config := &quick.Config{MaxCount: 1000} err := quick.Check((*stateTest).run, config) if cerr, ok := err.(*quick.CheckError); ok { diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index b53739dcf7d0..b2fad5292adb 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -19,7 +19,6 @@ package state import ( "bytes" "encoding/binary" - "errors" "fmt" "math" "math/big" @@ -37,14 +36,21 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/firehose" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/holiman/uint256" ) // Tests that updating a state trie does not leak any database writes prior to // actually committing the state. 
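The storage-deletion rework above (fastDeleteStorage, slowDeleteStorage and the dispatching deleteStorage) hinges on one trick: iterate the account's slots from the snapshot and replay them into a stack trie, so every committed node path becomes a deletion marker without ever opening the live storage trie. A hedged standalone sketch of that idea, with made-up slot data standing in for the snapshot iterator:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

func main() {
	owner := crypto.Keccak256Hash([]byte("acc-1"))
	nodes := trienode.NewNodeSet(owner)

	// The stack trie's write callback fires for every node it commits; for
	// deletion only the path matters, so a Deleted marker is recorded.
	stack := trie.NewStackTrie(func(_ common.Hash, path []byte, _ common.Hash, _ []byte) {
		nodes.AddNode(path, trienode.NewDeleted())
	})

	// Stand-in for the snapshot storage iterator, which yields hashed slot
	// keys in ascending order (a requirement of the stack trie).
	for i := byte(1); i <= 8; i++ {
		key := common.BytesToHash([]byte{i})
		val := common.BytesToHash([]byte{0xff, i})
		if err := stack.Update(key.Bytes(), val.Bytes()); err != nil {
			panic(err)
		}
	}
	fmt.Println("reconstructed storage root:", stack.Hash())

	count := 0
	nodes.ForEachWithOrder(func(string, *trienode.Node) { count++ })
	fmt.Println("deletion markers:", count)
}
```

In the real flow the reconstructed root is compared against the account's storage root as a sanity check before the markers are applied, and the slow trie-iterator path is used whenever no snapshot is available.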
func TestUpdateLeaks(t *testing.T) { // Create an empty state database - db := rawdb.NewMemoryDatabase() - state, _ := New(types.EmptyRootHash, NewDatabase(db), nil) + var ( + db = rawdb.NewMemoryDatabase() + tdb = trie.NewDatabase(db, nil) + ) + state, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(db, tdb), nil) // Update it with some accounts for i := byte(0); i < 255; i++ { @@ -60,7 +66,7 @@ func TestUpdateLeaks(t *testing.T) { } root := state.IntermediateRoot(false) - if err := state.Database().TrieDB().Commit(root, false); err != nil { + if err := tdb.Commit(root, false); err != nil { t.Errorf("can not commit trie %v to persistent database", root.Hex()) } @@ -78,8 +84,10 @@ func TestIntermediateLeaks(t *testing.T) { // Create two state databases, one transitioning to the final state, the other final from the beginning transDb := rawdb.NewMemoryDatabase() finalDb := rawdb.NewMemoryDatabase() - transState, _ := New(types.EmptyRootHash, NewDatabase(transDb), nil) - finalState, _ := New(types.EmptyRootHash, NewDatabase(finalDb), nil) + transNdb := trie.NewDatabase(transDb, nil) + finalNdb := trie.NewDatabase(finalDb, nil) + transState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(transDb, transNdb), nil) + finalState, _ := New(types.EmptyRootHash, NewDatabaseWithNodeDB(finalDb, finalNdb), nil) modify := func(state *StateDB, addr common.Address, i, tweak byte) { state.SetBalance(addr, big.NewInt(int64(11*i)+int64(tweak)), firehose.NoOpContext, "test") @@ -113,7 +121,7 @@ func TestIntermediateLeaks(t *testing.T) { if err != nil { t.Fatalf("failed to commit transition state: %v", err) } - if err = transState.Database().TrieDB().Commit(transRoot, false); err != nil { + if err = transNdb.Commit(transRoot, false); err != nil { t.Errorf("can not commit trie %v to persistent database", transRoot.Hex()) } @@ -123,7 +131,7 @@ func TestIntermediateLeaks(t *testing.T) { if err != nil { t.Fatalf("failed to commit final state: %v", err) } - if err = finalState.Database().TrieDB().Commit(finalRoot, false); err != nil { + if err = finalNdb.Commit(finalRoot, false); err != nil { t.Errorf("can not commit trie %v to persistent database", finalRoot.Hex()) } @@ -712,9 +720,6 @@ func TestCommitCopy(t *testing.T) { if val := copied.GetCommittedState(addr, skey); val != (common.Hash{}) { t.Fatalf("unexpected storage slot: have %x", val) } - if !errors.Is(copied.Error(), trie.ErrCommitted) { - t.Fatalf("unexpected state error, %v", copied.Error()) - } } // TestDeleteCreateRevert tests a weird state transition corner case that we hit @@ -760,9 +765,28 @@ func TestDeleteCreateRevert(t *testing.T) { // the Commit operation fails with an error // If we are missing trie nodes, we should not continue writing to the trie func TestMissingTrieNodes(t *testing.T) { + testMissingTrieNodes(t, rawdb.HashScheme) + testMissingTrieNodes(t, rawdb.PathScheme) +} + +func testMissingTrieNodes(t *testing.T, scheme string) { // Create an initial state with a few accounts - memDb := rawdb.NewMemoryDatabase() - db := NewDatabase(memDb) + var ( + triedb *trie.Database + memDb = rawdb.NewMemoryDatabase() + ) + if scheme == rawdb.PathScheme { + triedb = trie.NewDatabase(memDb, &trie.Config{PathDB: &pathdb.Config{ + CleanCacheSize: 0, + DirtyCacheSize: 0, + }}) // disable caching + } else { + triedb = trie.NewDatabase(memDb, &trie.Config{HashDB: &hashdb.Config{ + CleanCacheSize: 0, + }}) // disable caching + } + db := NewDatabaseWithNodeDB(memDb, triedb) + var root common.Hash state, _ := New(types.EmptyRootHash, db, nil) addr 
:= common.BytesToAddress([]byte("so")) @@ -777,7 +801,7 @@ func TestMissingTrieNodes(t *testing.T) { root, _, _ = state.Commit(0, nil) t.Logf("root: %x", root) // force-flush - state.Database().TrieDB().Cap(0) + triedb.Commit(root, false) } // Create a new state on the old root state, _ = New(root, db, nil) @@ -986,7 +1010,8 @@ func TestFlushOrderDataLoss(t *testing.T) { // Create a state trie with many accounts and slots var ( memdb = rawdb.NewMemoryDatabase() - statedb = NewDatabase(memdb) + triedb = trie.NewDatabase(memdb, nil) + statedb = NewDatabaseWithNodeDB(memdb, triedb) state, _ = New(types.EmptyRootHash, statedb, nil) ) for a := byte(0); a < 10; a++ { @@ -1000,11 +1025,11 @@ func TestFlushOrderDataLoss(t *testing.T) { if err != nil { t.Fatalf("failed to commit state trie: %v", err) } - statedb.TrieDB().Reference(root, common.Hash{}) - if err := statedb.TrieDB().Cap(1024); err != nil { + triedb.Reference(root, common.Hash{}) + if err := triedb.Cap(1024); err != nil { t.Fatalf("failed to cap trie dirty cache: %v", err) } - if err := statedb.TrieDB().Commit(root, false); err != nil { + if err := triedb.Commit(root, false); err != nil { t.Fatalf("failed to commit state trie: %v", err) } // Reopen the state trie from flushed disk and verify it @@ -1058,7 +1083,7 @@ func TestStateDBTransientStorage(t *testing.T) { func TestResetObject(t *testing.T) { var ( disk = rawdb.NewMemoryDatabase() - tdb = trie.NewDatabase(disk) + tdb = trie.NewDatabase(disk, nil) db = NewDatabaseWithNodeDB(disk, tdb) snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash, 128, false) state, _ = New(types.EmptyRootHash, db, snaps) @@ -1090,3 +1115,57 @@ func TestResetObject(t *testing.T) { t.Fatalf("Unexpected storage slot value %v", slot) } } + +func TestDeleteStorage(t *testing.T) { + var ( + disk = rawdb.NewMemoryDatabase() + tdb = trie.NewDatabase(disk, nil) + db = NewDatabaseWithNodeDB(disk, tdb) + snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, types.EmptyRootHash, 128, false) + state, _ = New(types.EmptyRootHash, db, snaps) + addr = common.HexToAddress("0x1") + ) + // Initialize account and populate storage + state.SetBalance(addr, big.NewInt(1)) + state.CreateAccount(addr) + for i := 0; i < 1000; i++ { + slot := common.Hash(uint256.NewInt(uint64(i)).Bytes32()) + value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32()) + state.SetState(addr, slot, value) + } + root, _, _ := state.Commit(0, nil) + // Init phase done, create two states, one with snap and one without + fastState, _ := New(root, db, snaps) + slowState, _ := New(root, db, nil) + + obj := fastState.GetOrNewStateObject(addr) + storageRoot := obj.data.Root + + _, _, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) + if err != nil { + t.Fatal(err) + } + + _, _, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot) + if err != nil { + t.Fatal(err) + } + check := func(set *trienode.NodeSet) string { + var a []string + set.ForEachWithOrder(func(path string, n *trienode.Node) { + if n.Hash != (common.Hash{}) { + t.Fatal("delete should have empty hashes") + } + if len(n.Blob) != 0 { + t.Fatal("delete should have have empty blobs") + } + a = append(a, fmt.Sprintf("%x", path)) + }) + return strings.Join(a, ",") + } + slowRes := check(slowNodes) + fastRes := check(fastNodes) + if slowRes != fastRes { + t.Fatalf("difference found:\nfast: %v\nslow: %v\n", fastRes, slowRes) + } +} diff --git a/core/state/sync_test.go 
b/core/state/sync_test.go index 608f9b77d8c0..6c566be48d89 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -29,6 +29,8 @@ import ( "github.com/ethereum/go-ethereum/firehose" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) // testAccount is the data associated with an account used by the state tests. @@ -40,10 +42,17 @@ type testAccount struct { } // makeTestState create a sample test state to test node-wise reconstruction. -func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) { +func makeTestState(scheme string) (ethdb.Database, Database, *trie.Database, common.Hash, []*testAccount) { // Create an empty state + config := &trie.Config{Preimages: true} + if scheme == rawdb.PathScheme { + config.PathDB = pathdb.Defaults + } else { + config.HashDB = hashdb.Defaults + } db := rawdb.NewMemoryDatabase() - sdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) + nodeDb := trie.NewDatabase(db, config) + sdb := NewDatabaseWithNodeDB(db, nodeDb) state, _ := New(types.EmptyRootHash, sdb, nil) // Fill it with some arbitrary data @@ -68,7 +77,6 @@ func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) { obj.SetState(hash, hash, firehose.NoOpContext) } } - state.updateStateObject(obj) accounts = append(accounts, acc) } state.Finalise(false) @@ -76,18 +84,22 @@ func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) { root, _, _ := state.Commit(0, nil) // Return the generated state - return db, sdb, root, accounts + return db, sdb, nodeDb, root, accounts } // checkStateAccounts cross references a reconstructed state with an expected // account array. -func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accounts []*testAccount) { +func checkStateAccounts(t *testing.T, db ethdb.Database, scheme string, root common.Hash, accounts []*testAccount) { + var config trie.Config + if scheme == rawdb.PathScheme { + config.PathDB = pathdb.Defaults + } // Check root availability and state contents - state, err := New(root, NewDatabase(db), nil) + state, err := New(root, NewDatabaseWithConfig(db, &config), nil) if err != nil { t.Fatalf("failed to create state trie at %x: %v", root, err) } - if err := checkStateConsistency(db, root); err != nil { + if err := checkStateConsistency(db, scheme, root); err != nil { t.Fatalf("inconsistent state trie at %x: %v", root, err) } for i, acc := range accounts { @@ -104,8 +116,12 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accou } // checkStateConsistency checks that all data of a state root is present. -func checkStateConsistency(db ethdb.Database, root common.Hash) error { - state, err := New(root, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil) +func checkStateConsistency(db ethdb.Database, scheme string, root common.Hash) error { + config := &trie.Config{Preimages: true} + if scheme == rawdb.PathScheme { + config.PathDB = pathdb.Defaults + } + state, err := New(root, NewDatabaseWithConfig(db, config), nil) if err != nil { return err } @@ -117,8 +133,14 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error { // Tests that an empty state is not scheduled for syncing. 
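The makeTestState and checkStateConsistency changes above all build their trie.Config from the scheme string. A minimal sketch of that selection, factored into a standalone helper for illustration (the helper name newTestTrieConfig is ours, not part of the patch; it uses the trie, rawdb, hashdb and pathdb packages this file now imports):

func newTestTrieConfig(scheme string) *trie.Config {
	// Preimages are recorded in both schemes for these tests.
	config := &trie.Config{Preimages: true}
	if scheme == rawdb.PathScheme {
		config.PathDB = pathdb.Defaults // path-based trie node storage
	} else {
		config.HashDB = hashdb.Defaults // legacy hash-based trie node storage
	}
	return config
}

makeTestState then wires this in via trie.NewDatabase(db, config) and NewDatabaseWithNodeDB(db, nodeDb), as shown in the hunk above.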
func TestEmptyStateSync(t *testing.T) { - db := trie.NewDatabase(rawdb.NewMemoryDatabase()) - sync := NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), nil, db.Scheme()) + dbA := trie.NewDatabase(rawdb.NewMemoryDatabase(), nil) + dbB := trie.NewDatabase(rawdb.NewMemoryDatabase(), &trie.Config{PathDB: pathdb.Defaults}) + + sync := NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), nil, dbA.Scheme()) + if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 { + t.Errorf("content requested for empty state: %v, %v, %v", nodes, paths, codes) + } + sync = NewStateSync(types.EmptyRootHash, rawdb.NewMemoryDatabase(), nil, dbB.Scheme()) if paths, nodes, codes := sync.Missing(1); len(paths) != 0 || len(nodes) != 0 || len(codes) != 0 { t.Errorf("content requested for empty state: %v, %v, %v", nodes, paths, codes) } @@ -127,22 +149,28 @@ func TestEmptyStateSync(t *testing.T) { // Tests that given a root hash, a state can sync iteratively on a single thread, // requesting retrieval tasks and returning all of them in one go. func TestIterativeStateSyncIndividual(t *testing.T) { - testIterativeStateSync(t, 1, false, false) + testIterativeStateSync(t, 1, false, false, rawdb.HashScheme) + testIterativeStateSync(t, 1, false, false, rawdb.PathScheme) } func TestIterativeStateSyncBatched(t *testing.T) { - testIterativeStateSync(t, 100, false, false) + testIterativeStateSync(t, 100, false, false, rawdb.HashScheme) + testIterativeStateSync(t, 100, false, false, rawdb.PathScheme) } func TestIterativeStateSyncIndividualFromDisk(t *testing.T) { - testIterativeStateSync(t, 1, true, false) + testIterativeStateSync(t, 1, true, false, rawdb.HashScheme) + testIterativeStateSync(t, 1, true, false, rawdb.PathScheme) } func TestIterativeStateSyncBatchedFromDisk(t *testing.T) { - testIterativeStateSync(t, 100, true, false) + testIterativeStateSync(t, 100, true, false, rawdb.HashScheme) + testIterativeStateSync(t, 100, true, false, rawdb.PathScheme) } func TestIterativeStateSyncIndividualByPath(t *testing.T) { - testIterativeStateSync(t, 1, false, true) + testIterativeStateSync(t, 1, false, true, rawdb.HashScheme) + testIterativeStateSync(t, 1, false, true, rawdb.PathScheme) } func TestIterativeStateSyncBatchedByPath(t *testing.T) { - testIterativeStateSync(t, 100, false, true) + testIterativeStateSync(t, 100, false, true, rawdb.HashScheme) + testIterativeStateSync(t, 100, false, true, rawdb.PathScheme) } // stateElement represents the element in the state trie(bytecode or trie node). 
@@ -153,17 +181,17 @@ type stateElement struct { syncPath trie.SyncPath } -func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { +func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool, scheme string) { // Create a random state to copy - srcDisk, srcDb, srcRoot, srcAccounts := makeTestState() + srcDisk, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme) if commit { - srcDb.TrieDB().Commit(srcRoot, false) + ndb.Commit(srcRoot, false) } - srcTrie, _ := trie.New(trie.StateTrieID(srcRoot), srcDb.TrieDB()) + srcTrie, _ := trie.New(trie.StateTrieID(srcRoot), ndb) // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) + sched := NewStateSync(srcRoot, dstDb, nil, ndb.Scheme()) var ( nodeElements []stateElement @@ -178,9 +206,11 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { }) } for i := 0; i < len(codes); i++ { - codeElements = append(codeElements, stateElement{ - code: codes[i], - }) + codeElements = append(codeElements, stateElement{code: codes[i]}) + } + reader, err := ndb.Reader(srcRoot) + if err != nil { + t.Fatalf("state is not existent, %#x", srcRoot) } for len(nodeElements)+len(codeElements) > 0 { var ( @@ -208,7 +238,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { t.Fatalf("failed to decode account on path %x: %v", node.syncPath[0], err) } id := trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root) - stTrie, err := trie.New(id, srcDb.TrieDB()) + stTrie, err := trie.New(id, ndb) if err != nil { t.Fatalf("failed to retriev storage trie for path %x: %v", node.syncPath[1], err) } @@ -219,7 +249,8 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { nodeResults[i] = trie.NodeSyncResult{Path: node.path, Data: data} } } else { - data, err := srcDb.TrieDB().Node(node.hash) + owner, inner := trie.ResolvePath([]byte(node.path)) + data, err := reader.Node(owner, inner, node.hash) if err != nil { t.Fatalf("failed to retrieve node data for key %v", []byte(node.path)) } @@ -263,18 +294,23 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) { copyPreimages(srcDisk, dstDb) // Cross check that the two states are in sync - checkStateAccounts(t, dstDb, srcRoot, srcAccounts) + checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts) } // Tests that the trie scheduler can correctly reconstruct the state even if only // partial results are returned, and the others sent only later. 
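The iterative sync tests above stop reading trie nodes by hash via srcDb.TrieDB().Node and instead open a reader at the source root, resolving each sync path into an owner/path pair so the lookup works under either scheme. The pattern, with the variable names used in testIterativeStateSync:

// Open a scheme-agnostic reader for the source state once per test.
reader, err := ndb.Reader(srcRoot)
if err != nil {
	t.Fatalf("state is not existent, %#x", srcRoot)
}
// For each queued element, resolve its sync path and fetch the node blob.
owner, inner := trie.ResolvePath([]byte(node.path))
data, err := reader.Node(owner, inner, node.hash)
if err != nil {
	t.Fatalf("failed to retrieve node data for key %v", []byte(node.path))
}

The indirection is what lets the same tests run under rawdb.PathScheme, where nodes cannot be looked up by hash alone.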
func TestIterativeDelayedStateSync(t *testing.T) { + testIterativeDelayedStateSync(t, rawdb.HashScheme) + testIterativeDelayedStateSync(t, rawdb.PathScheme) +} + +func testIterativeDelayedStateSync(t *testing.T, scheme string) { // Create a random state to copy - srcDisk, srcDb, srcRoot, srcAccounts := makeTestState() + srcDisk, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme) // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) + sched := NewStateSync(srcRoot, dstDb, nil, ndb.Scheme()) var ( nodeElements []stateElement @@ -289,9 +325,11 @@ func TestIterativeDelayedStateSync(t *testing.T) { }) } for i := 0; i < len(codes); i++ { - codeElements = append(codeElements, stateElement{ - code: codes[i], - }) + codeElements = append(codeElements, stateElement{code: codes[i]}) + } + reader, err := ndb.Reader(srcRoot) + if err != nil { + t.Fatalf("state is not existent, %#x", srcRoot) } for len(nodeElements)+len(codeElements) > 0 { // Sync only half of the scheduled nodes @@ -316,7 +354,8 @@ func TestIterativeDelayedStateSync(t *testing.T) { if len(nodeElements) > 0 { nodeResults := make([]trie.NodeSyncResult, len(nodeElements)/2+1) for i, element := range nodeElements[:len(nodeResults)] { - data, err := srcDb.TrieDB().Node(element.hash) + owner, inner := trie.ResolvePath([]byte(element.path)) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve contract bytecode for %x", element.code) } @@ -356,22 +395,28 @@ func TestIterativeDelayedStateSync(t *testing.T) { copyPreimages(srcDisk, dstDb) // Cross check that the two states are in sync - checkStateAccounts(t, dstDb, srcRoot, srcAccounts) + checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts) } // Tests that given a root hash, a trie can sync iteratively on a single thread, // requesting retrieval tasks and returning all of them in one go, however in a // random order. 
-func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) } -func TestIterativeRandomStateSyncBatched(t *testing.T) { testIterativeRandomStateSync(t, 100) } +func TestIterativeRandomStateSyncIndividual(t *testing.T) { + testIterativeRandomStateSync(t, 1, rawdb.HashScheme) + testIterativeRandomStateSync(t, 1, rawdb.PathScheme) +} +func TestIterativeRandomStateSyncBatched(t *testing.T) { + testIterativeRandomStateSync(t, 100, rawdb.HashScheme) + testIterativeRandomStateSync(t, 100, rawdb.PathScheme) +} -func testIterativeRandomStateSync(t *testing.T, count int) { +func testIterativeRandomStateSync(t *testing.T, count int, scheme string) { // Create a random state to copy - srcDisk, srcDb, srcRoot, srcAccounts := makeTestState() + srcDisk, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme) // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) + sched := NewStateSync(srcRoot, dstDb, nil, ndb.Scheme()) nodeQueue := make(map[string]stateElement) codeQueue := make(map[common.Hash]struct{}) @@ -386,6 +431,10 @@ func testIterativeRandomStateSync(t *testing.T, count int) { for _, hash := range codes { codeQueue[hash] = struct{}{} } + reader, err := ndb.Reader(srcRoot) + if err != nil { + t.Fatalf("state is not existent, %#x", srcRoot) + } for len(nodeQueue)+len(codeQueue) > 0 { // Fetch all the queued nodes in a random order if len(codeQueue) > 0 { @@ -406,7 +455,8 @@ func testIterativeRandomStateSync(t *testing.T, count int) { if len(nodeQueue) > 0 { results := make([]trie.NodeSyncResult, 0, len(nodeQueue)) for path, element := range nodeQueue { - data, err := srcDb.TrieDB().Node(element.hash) + owner, inner := trie.ResolvePath([]byte(element.path)) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x %v %v", element.hash, []byte(element.path), element.path) } @@ -418,7 +468,6 @@ func testIterativeRandomStateSync(t *testing.T, count int) { } } } - // Feed the retrieved results back and queue new tasks batch := dstDb.NewBatch() if err := sched.Commit(batch); err != nil { t.Fatalf("failed to commit data: %v", err) @@ -444,18 +493,23 @@ func testIterativeRandomStateSync(t *testing.T, count int) { copyPreimages(srcDisk, dstDb) // Cross check that the two states are in sync - checkStateAccounts(t, dstDb, srcRoot, srcAccounts) + checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts) } // Tests that the trie scheduler can correctly reconstruct the state even if only // partial results are returned (Even those randomly), others sent only later. 
func TestIterativeRandomDelayedStateSync(t *testing.T) { + testIterativeRandomDelayedStateSync(t, rawdb.HashScheme) + testIterativeRandomDelayedStateSync(t, rawdb.PathScheme) +} + +func testIterativeRandomDelayedStateSync(t *testing.T, scheme string) { // Create a random state to copy - srcDisk, srcDb, srcRoot, srcAccounts := makeTestState() + srcDisk, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme) // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) + sched := NewStateSync(srcRoot, dstDb, nil, ndb.Scheme()) nodeQueue := make(map[string]stateElement) codeQueue := make(map[common.Hash]struct{}) @@ -470,6 +524,10 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { for _, hash := range codes { codeQueue[hash] = struct{}{} } + reader, err := ndb.Reader(srcRoot) + if err != nil { + t.Fatalf("state is not existent, %#x", srcRoot) + } for len(nodeQueue)+len(codeQueue) > 0 { // Sync only half of the scheduled nodes, even those in random order if len(codeQueue) > 0 { @@ -498,7 +556,8 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { for path, element := range nodeQueue { delete(nodeQueue, path) - data, err := srcDb.TrieDB().Node(element.hash) + owner, inner := trie.ResolvePath([]byte(element.path)) + data, err := reader.Node(owner, inner, element.hash) if err != nil { t.Fatalf("failed to retrieve node data for %x", element.hash) } @@ -538,14 +597,19 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { copyPreimages(srcDisk, dstDb) // Cross check that the two states are in sync - checkStateAccounts(t, dstDb, srcRoot, srcAccounts) + checkStateAccounts(t, dstDb, ndb.Scheme(), srcRoot, srcAccounts) } // Tests that at any point in time during a sync, only complete sub-tries are in // the database. 
func TestIncompleteStateSync(t *testing.T) { + testIncompleteStateSync(t, rawdb.HashScheme) + testIncompleteStateSync(t, rawdb.PathScheme) +} + +func testIncompleteStateSync(t *testing.T, scheme string) { // Create a random state to copy - db, srcDb, srcRoot, srcAccounts := makeTestState() + db, srcDb, ndb, srcRoot, srcAccounts := makeTestState(scheme) // isCodeLookup to save some hashing var isCode = make(map[common.Hash]struct{}) @@ -558,14 +622,14 @@ func TestIncompleteStateSync(t *testing.T) { // Create a destination state and sync with the scheduler dstDb := rawdb.NewMemoryDatabase() - sched := NewStateSync(srcRoot, dstDb, nil, srcDb.TrieDB().Scheme()) + sched := NewStateSync(srcRoot, dstDb, nil, ndb.Scheme()) var ( addedCodes []common.Hash addedPaths []string addedHashes []common.Hash ) - reader, err := srcDb.TrieDB().Reader(srcRoot) + reader, err := ndb.Reader(srcRoot) if err != nil { t.Fatalf("state is not available %x", srcRoot) } @@ -652,12 +716,11 @@ func TestIncompleteStateSync(t *testing.T) { for _, node := range addedCodes { val := rawdb.ReadCode(dstDb, node) rawdb.DeleteCode(dstDb, node) - if err := checkStateConsistency(dstDb, srcRoot); err == nil { + if err := checkStateConsistency(dstDb, ndb.Scheme(), srcRoot); err == nil { t.Errorf("trie inconsistency not caught, missing: %x", node) } rawdb.WriteCode(dstDb, node, val) } - scheme := srcDb.TrieDB().Scheme() for i, path := range addedPaths { owner, inner := trie.ResolvePath([]byte(path)) hash := addedHashes[i] @@ -666,7 +729,7 @@ func TestIncompleteStateSync(t *testing.T) { t.Error("missing trie node") } rawdb.DeleteTrieNode(dstDb, owner, inner, hash, scheme) - if err := checkStateConsistency(dstDb, srcRoot); err == nil { + if err := checkStateConsistency(dstDb, scheme, srcRoot); err == nil { t.Errorf("trie inconsistency not caught, missing: %v", path) } rawdb.WriteTrieNode(dstDb, owner, inner, hash, val, scheme) diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go index 2bdddc75b46d..7588a8082c06 100644 --- a/core/state_prefetcher.go +++ b/core/state_prefetcher.go @@ -60,7 +60,9 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c for i := 0; i < prefetchThread; i++ { go func() { newStatedb := statedb.CopyDoPrefetch() - newStatedb.EnableWriteOnSharedStorage() + if header.Number.Uint64() < 33968300 { + newStatedb.EnableWriteOnSharedStorage() + } gaspool := new(GasPool).AddGas(block.GasLimit()) blockContext := NewEVMBlockContext(header, p.bc, nil) evm := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, *cfg, firehose.NoOpContext) @@ -107,7 +109,9 @@ func (p *statePrefetcher) PrefetchMining(txs TransactionsByPriceAndNonce, header go func(startCh <-chan *types.Transaction, stopCh <-chan struct{}) { idx := 0 newStatedb := statedb.CopyDoPrefetch() - newStatedb.EnableWriteOnSharedStorage() + if header.Number.Uint64() < 33968300 { + newStatedb.EnableWriteOnSharedStorage() + } gaspool := new(GasPool).AddGas(gasLimit) blockContext := NewEVMBlockContext(header, p.bc, nil) evm := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg, firehose.NoOpContext) diff --git a/core/state_prefetcher_test.go b/core/state_prefetcher_test.go index 30d90a489899..b7224a0b36e4 100644 --- a/core/state_prefetcher_test.go +++ b/core/state_prefetcher_test.go @@ -1,16 +1,15 @@ package core import ( - "math/big" - "testing" - "time" - "bytes" "context" "errors" "fmt" + "math/big" "runtime/pprof" "strings" + "testing" + "time" "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/consensus/ethash" @@ -20,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" "github.com/google/pprof/profile" ) @@ -37,7 +37,8 @@ func TestPrefetchLeaking(t *testing.T) { Alloc: GenesisAlloc{address: {Balance: funds}}, BaseFee: big.NewInt(params.InitialBaseFee), } - genesis = gspec.MustCommit(gendb) + triedb = trie.NewDatabase(gendb, nil) + genesis = gspec.MustCommit(gendb, triedb) signer = types.LatestSigner(gspec.Config) ) blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1, func(i int, block *BlockGen) { @@ -51,7 +52,7 @@ func TestPrefetchLeaking(t *testing.T) { } }) archiveDb := rawdb.NewMemoryDatabase() - gspec.MustCommit(archiveDb) + gspec.MustCommit(archiveDb, triedb) archive, _ := NewBlockChain(archiveDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) defer archive.Stop() diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 16ddec683ad5..7d9c2d4e92fd 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -319,11 +319,11 @@ func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending // events to the given channel. func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { - subs := make([]event.Subscription, len(p.subpools)) - for i, subpool := range p.subpools { + subs := make([]event.Subscription, 0, len(p.subpools)) + for _, subpool := range p.subpools { sub := subpool.SubscribeTransactions(ch) if sub != nil { // sub will be nil when subpool have been shut down - subs[i] = sub + subs = append(subs, sub) } } return p.subs.Track(event.JoinSubscriptions(subs...)) @@ -332,11 +332,11 @@ func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscrip // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending // events to the given channel. 
func (p *TxPool) SubscribeReannoTxsEvent(ch chan<- core.ReannoTxsEvent) event.Subscription { - subs := make([]event.Subscription, len(p.subpools)) - for i, subpool := range p.subpools { + subs := make([]event.Subscription, 0, len(p.subpools)) + for _, subpool := range p.subpools { sub := subpool.SubscribeReannoTxsEvent(ch) if sub != nil { // sub will be nil when subpool have been shut down - subs[i] = sub + subs = append(subs, sub) } } return p.subs.Track(event.JoinSubscriptions(subs...)) diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go index c5b9f690d801..d2a98ed7bf55 100644 --- a/core/types/hashing_test.go +++ b/core/types/hashing_test.go @@ -39,7 +39,7 @@ func TestDeriveSha(t *testing.T) { t.Fatal(err) } for len(txs) < 1000 { - exp := types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) + exp := types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) got := types.DeriveSha(txs, trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp) @@ -86,7 +86,7 @@ func BenchmarkDeriveSha200(b *testing.B) { b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { - exp = types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) + exp = types.DeriveSha(txs, trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) } }) @@ -107,7 +107,7 @@ func TestFuzzDeriveSha(t *testing.T) { rndSeed := mrand.Int() for i := 0; i < 10; i++ { seed := rndSeed + i - exp := types.DeriveSha(newDummy(i), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) + exp := types.DeriveSha(newDummy(i), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { printList(newDummy(seed)) @@ -135,7 +135,7 @@ func TestDerivableList(t *testing.T) { }, } for i, tc := range tcs[1:] { - exp := types.DeriveSha(flatList(tc), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))) + exp := types.DeriveSha(flatList(tc), trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil))) got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil)) if !bytes.Equal(got[:], exp[:]) { t.Fatalf("case %d: got %x exp %x", i, got, exp) diff --git a/eth/api_backend.go b/eth/api_backend.go index e02013107b5c..4e2ad716f1db 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -427,7 +427,7 @@ func (b *EthAPIBackend) StartMining() error { } func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) { - return b.eth.StateAtBlock(ctx, block, reexec, base, readOnly, preferDisk) + return b.eth.stateAtBlock(ctx, block, reexec, base, readOnly, preferDisk) } func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { diff --git a/eth/api_debug.go b/eth/api_debug.go index 89a22bc6a4e6..459355ae7813 100644 --- a/eth/api_debug.go +++ b/eth/api_debug.go @@ -323,7 +323,7 @@ func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]c if startBlock.Number().Uint64() >= endBlock.Number().Uint64() { return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64()) } - triedb := api.eth.BlockChain().StateCache().TrieDB() + triedb 
:= api.eth.BlockChain().TrieDB() oldTrie, err := trie.NewStateTrie(trie.StateTrieID(startBlock.Root()), triedb) if err != nil { diff --git a/eth/backend.go b/eth/backend.go index 012586968d20..133b2f2a86a3 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -63,6 +63,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) // Config contains the configuration options of the ETH protocol. @@ -129,7 +130,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { log.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", ethconfig.Defaults.Miner.GasPrice) config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice) } - if config.NoPruning && config.TrieDirtyCache > 0 { + // Redistribute memory allocation from in-memory trie node garbage collection + // to other caches when an archive node is requested. + if config.StateScheme == rawdb.HashScheme && config.NoPruning && config.TrieDirtyCache > 0 { if config.SnapshotCache > 0 { config.TrieCleanCache += config.TrieDirtyCache * 3 / 5 config.SnapshotCache += config.TrieDirtyCache * 2 / 5 @@ -138,6 +141,13 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { } config.TrieDirtyCache = 0 } + // Optimize memory distribution by reallocating surplus allowance from the + // dirty cache to the clean cache. + if config.StateScheme == rawdb.PathScheme && config.TrieDirtyCache > pathdb.MaxDirtyBufferSize/1024/1024 { + log.Info("Capped dirty cache size", "provided", common.StorageSize(config.TrieDirtyCache)*1024*1024, "adjusted", common.StorageSize(pathdb.MaxDirtyBufferSize)) + log.Info("Clean cache size", "provided", common.StorageSize(config.TrieCleanCache)*1024*1024) + config.TrieDirtyCache = pathdb.MaxDirtyBufferSize / 1024 / 1024 + } log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024) // Assemble the Ethereum object @@ -146,8 +156,10 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err != nil { return nil, err } - if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, config.TriesInMemory); err != nil { - log.Error("Failed to recover state", "error", err) + if config.StateScheme == rawdb.HashScheme { + if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, config.TriesInMemory); err != nil { + log.Error("Failed to recover state", "error", err) + } } chainConfig, genesisHash, err := core.LoadChainConfig(chainDb, config.Genesis) if err != nil { @@ -211,6 +223,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { SnapshotLimit: config.SnapshotCache, TriesInMemory: config.TriesInMemory, Preimages: config.Preimages, + StateHistory: config.StateHistory, + StateScheme: config.StateScheme, + PathSyncFlush: config.PathSyncFlush, } ) bcOps := make([]core.BlockChainOption, 0) @@ -234,7 +249,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if config.OverrideVerkle != nil { overrides.OverrideVerkle = config.OverrideVerkle } - eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit, bcOps...) 
+ eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, &config.TransactionHistory, bcOps...) if err != nil { return nil, err } @@ -532,7 +547,7 @@ func (s *Ethereum) StartMining() error { } // If mining is started, we can disable the transaction rejection mechanism // introduced to speed sync times. - s.handler.acceptTxs.Store(true) + s.handler.enableSyncedFeatures() go s.miner.Start() } @@ -566,7 +581,7 @@ func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb } func (s *Ethereum) IsListening() bool { return true } // Always listening func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downloader } func (s *Ethereum) Synced() bool { return s.handler.acceptTxs.Load() } -func (s *Ethereum) SetSynced() { s.handler.acceptTxs.Store(true) } +func (s *Ethereum) SetSynced() { s.handler.enableSyncedFeatures() } func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning } func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer } func (s *Ethereum) Merger() *consensus.Merger { return s.merger } diff --git a/eth/downloader/fetchers_concurrent.go b/eth/downloader/fetchers_concurrent.go index 649aa2761596..2e485d028ded 100644 --- a/eth/downloader/fetchers_concurrent.go +++ b/eth/downloader/fetchers_concurrent.go @@ -92,6 +92,10 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { }() ordering := make(map[*eth.Request]int) timeouts := prque.New[int64, *eth.Request](func(data *eth.Request, index int) { + if index < 0 { + delete(ordering, data) + return + } ordering[data] = index }) @@ -245,14 +249,16 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { req.Close() if index, live := ordering[req]; live { - timeouts.Remove(index) - if index == 0 { - if !timeout.Stop() { - <-timeout.C - } - if timeouts.Size() > 0 { - _, exp := timeouts.Peek() - timeout.Reset(time.Until(time.Unix(0, -exp))) + if index >= 0 && index < timeouts.Size() { + timeouts.Remove(index) + if index == 0 { + if !timeout.Stop() { + <-timeout.C + } + if timeouts.Size() > 0 { + _, exp := timeouts.Peek() + timeout.Reset(time.Until(time.Unix(0, -exp))) + } } } delete(ordering, req) @@ -333,14 +339,16 @@ func (d *Downloader) concurrentFetch(queue typedQueue, beaconMode bool) error { // reschedule the timeout timer. index, live := ordering[res.Req] if live { - timeouts.Remove(index) - if index == 0 { - if !timeout.Stop() { - <-timeout.C - } - if timeouts.Size() > 0 { - _, exp := timeouts.Peek() - timeout.Reset(time.Until(time.Unix(0, -exp))) + if index >= 0 && index < timeouts.Size() { + timeouts.Remove(index) + if index == 0 { + if !timeout.Stop() { + <-timeout.C + } + if timeouts.Size() > 0 { + _, exp := timeouts.Peek() + timeout.Reset(time.Until(time.Unix(0, -exp))) + } } } delete(ordering, res.Req) diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go index d791723b05b6..1bf03411d1d0 100644 --- a/eth/downloader/testchain_test.go +++ b/eth/downloader/testchain_test.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) // Test chain parameters. 
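Several test files in this patch (core/state_prefetcher_test.go above; eth/downloader/testchain_test.go and eth/fetcher/block_fetcher_test.go just below) move to the two-argument Genesis.MustCommit, which now takes an explicit trie.Database. A minimal sketch of the updated idiom for an in-memory, hash-scheme test genesis; note the patch passes trie.HashDefaults at some call sites and a nil config at others for the same default:

db := rawdb.NewMemoryDatabase()
triedb := trie.NewDatabase(db, trie.HashDefaults) // hash-scheme node database
genesis := gspec.MustCommit(db, triedb)           // commit genesis state through triedb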
@@ -43,7 +44,7 @@ var ( Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, BaseFee: big.NewInt(params.InitialBaseFee), } - testGenesis = testGspec.MustCommit(testDB) + testGenesis = testGspec.MustCommit(testDB, trie.NewDatabase(testDB, trie.HashDefaults)) ) // The common prefix of all test chains: diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 499b181d2843..15ed20d70295 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/parlia" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/txpool/blobpool" "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/eth/downloader" @@ -60,6 +61,9 @@ var Defaults = Config{ SyncMode: downloader.SnapSync, NetworkId: 1, TxLookupLimit: 2350000, + TransactionHistory: 2350000, + StateHistory: params.FullImmutabilityThreshold, + StateScheme: rawdb.HashScheme, LightPeers: 100, DatabaseCache: 512, TrieCleanCache: 154, @@ -114,7 +118,12 @@ type Config struct { PipeCommit bool RangeLimit bool - TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. + // Deprecated, use 'TransactionHistory' instead. + TxLookupLimit uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. + TransactionHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose tx indices are reserved. + StateHistory uint64 `toml:",omitempty"` // The maximum number of blocks from head whose state histories are reserved. + StateScheme string `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top + PathSyncFlush bool `toml:",omitempty"` // State scheme used to store ethereum state and merkle trie nodes on top // RequiredBlocks is a set of block number -> hash mappings which must be in the // canonical chain of all remote peers. 
Setting the option makes geth verify the diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 88ded21384cf..9752d6fd8a36 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -33,6 +33,9 @@ func (c Config) MarshalTOML() (interface{}, error) { PipeCommit bool RangeLimit bool TxLookupLimit uint64 `toml:",omitempty"` + TransactionHistory uint64 `toml:",omitempty"` + StateHistory uint64 `toml:",omitempty"` + StateScheme string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` LightServ int `toml:",omitempty"` LightIngress int `toml:",omitempty"` @@ -85,6 +88,9 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.PipeCommit = c.PipeCommit enc.RangeLimit = c.RangeLimit enc.TxLookupLimit = c.TxLookupLimit + enc.TransactionHistory = c.TransactionHistory + enc.StateHistory = c.StateHistory + enc.StateScheme = c.StateScheme enc.RequiredBlocks = c.RequiredBlocks enc.LightServ = c.LightServ enc.LightIngress = c.LightIngress @@ -141,6 +147,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { PipeCommit *bool RangeLimit *bool TxLookupLimit *uint64 `toml:",omitempty"` + TransactionHistory *uint64 `toml:",omitempty"` + StateHistory *uint64 `toml:",omitempty"` + StateScheme *string `toml:",omitempty"` RequiredBlocks map[uint64]common.Hash `toml:"-"` LightServ *int `toml:",omitempty"` LightIngress *int `toml:",omitempty"` @@ -228,6 +237,15 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.TxLookupLimit != nil { c.TxLookupLimit = *dec.TxLookupLimit } + if dec.TransactionHistory != nil { + c.TransactionHistory = *dec.TransactionHistory + } + if dec.StateHistory != nil { + c.StateHistory = *dec.StateHistory + } + if dec.StateScheme != nil { + c.StateScheme = *dec.StateScheme + } if dec.RequiredBlocks != nil { c.RequiredBlocks = dec.RequiredBlocks } diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index e620d2b60856..bc0f308c0e13 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -44,7 +44,7 @@ var ( Alloc: core.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, BaseFee: big.NewInt(params.InitialBaseFee), } - genesis = gspec.MustCommit(testdb) + genesis = gspec.MustCommit(testdb, trie.NewDatabase(testdb, trie.HashDefaults)) unknownBlock = types.NewBlock(&types.Header{Root: types.EmptyRootHash, GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil)) ) diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index bf1ff3804f86..dfd988e04e5b 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -86,7 +86,7 @@ func BenchmarkFilters(b *testing.B) { // The test txs are not properly signed, can't simply create a chain // and then import blocks. TODO(rjl493456442) try to get rid of the // manual database writes. - gspec.MustCommit(db) + gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) for i, block := range chain { rawdb.WriteBlock(db, block) @@ -184,7 +184,7 @@ func TestFilters(t *testing.T) { // Hack: GenerateChainWithGenesis creates a new db. // Commit the genesis manually and use GenerateChain. 
- _, err = gspec.Commit(db, trie.NewDatabase(db)) + _, err = gspec.Commit(db, trie.NewDatabase(db, nil)) if err != nil { t.Fatal(err) } diff --git a/eth/handler.go b/eth/handler.go index 782c930c3c62..e59bbb488404 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -163,6 +163,7 @@ type handler struct { // channels for fetcher, syncer, txsyncLoop quitSync chan struct{} + stopCh chan struct{} chainSync *chainSyncer wg sync.WaitGroup @@ -197,6 +198,7 @@ func newHandler(config *handlerConfig) (*handler, error) { quitSync: make(chan struct{}), handlerDoneCh: make(chan struct{}), handlerStartCh: make(chan struct{}), + stopCh: make(chan struct{}), } if config.Sync == downloader.FullSync { // The database seems empty as the current block is the genesis. Yet the snap @@ -237,8 +239,7 @@ func newHandler(config *handlerConfig) (*handler, error) { // round on next sync cycle if h.snapSync.Load() { log.Info("Snap sync complete, auto disabling") - h.snapSync.Store(false) - } + h.snapSync.Store(false) } // If we've successfully finished a sync cycle, accept transactions from // the network h.acceptTxs.Store(true) @@ -327,7 +328,7 @@ func newHandler(config *handlerConfig) (*handler, error) { } n, err := h.chain.InsertChain(blocks) if err == nil { - h.acceptTxs.Store(true) // Mark initial sync done on any fetcher import + h.enableSyncedFeatures() // Mark initial sync done on any fetcher import } return n, err } @@ -365,6 +366,8 @@ func (h *handler) protoTracker() { <-h.handlerDoneCh } return + case <-h.stopCh: + return } } } @@ -729,6 +732,8 @@ func (h *handler) startMaliciousVoteMonitor() { h.maliciousVoteMonitor.ConflictDetect(event.Vote, pendingBlockNumber) case <-h.voteMonitorSub.Err(): return + case <-h.stopCh: + return } } } @@ -743,7 +748,7 @@ func (h *handler) Stop() { h.voteMonitorSub.Unsubscribe() } } - + close(h.stopCh) // Quit chainSync and txsync64. // After this is done, no new peers will be accepted. close(h.quitSync) @@ -908,10 +913,18 @@ func (h *handler) BroadcastVote(vote *types.VoteEnvelope) { func (h *handler) minedBroadcastLoop() { defer h.wg.Done() - for obj := range h.minedBlockSub.Chan() { - if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok { - h.BroadcastBlock(ev.Block, true) // First propagate block to peers - h.BroadcastBlock(ev.Block, false) // Only then announce to the rest + for { + select { + case obj := <-h.minedBlockSub.Chan(): + if obj == nil { + continue + } + if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok { + h.BroadcastBlock(ev.Block, true) // First propagate block to peers + h.BroadcastBlock(ev.Block, false) // Only then announce to the rest + } + case <-h.stopCh: + return } } } @@ -925,6 +938,8 @@ func (h *handler) txBroadcastLoop() { h.BroadcastTransactions(event.Txs) case <-h.txsSub.Err(): return + case <-h.stopCh: + return } } } @@ -938,6 +953,8 @@ func (h *handler) txReannounceLoop() { h.ReannounceTransactions(event.Txs) case <-h.reannoTxsSub.Err(): return + case <-h.stopCh: + return } } } @@ -956,3 +973,14 @@ func (h *handler) voteBroadcastLoop() { } } } + +// enableSyncedFeatures enables the post-sync functionalities when the initial +// sync is finished. +func (h *handler) enableSyncedFeatures() { + h.acceptTxs.Store(true) + // In the bsc scenario, pathdb.MaxDirtyBufferSize (256MB) will be used. + // The performance is better than DefaultDirtyBufferSize (64MB). 
+ //if h.chain.TrieDB().Scheme() == rawdb.PathScheme { + // h.chain.TrieDB().SetBufferSize(pathdb.DefaultDirtyBufferSize) + //} +} diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 7f51d4f5cdd3..87ed1874e9ed 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" @@ -95,11 +96,15 @@ type TxPool interface { // MakeProtocols constructs the P2P protocol definitions for `eth`. func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol { - protocols := make([]p2p.Protocol, len(ProtocolVersions)) - for i, version := range ProtocolVersions { + protocols := make([]p2p.Protocol, 0, len(ProtocolVersions)) + for _, version := range ProtocolVersions { version := version // Closure - protocols[i] = p2p.Protocol{ + // Path scheme does not support GetNodeData, don't advertise eth66 on it + if version <= ETH66 && backend.Chain().TrieDB().Scheme() == rawdb.PathScheme { + continue + } + protocols = append(protocols, p2p.Protocol{ Name: ProtocolName, Version: version, Length: protocolLengths[version], @@ -119,7 +124,7 @@ func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2 }, Attributes: []enr.Entry{currentENREntry(backend.Chain())}, DialCandidates: dnsdisc, - } + }) } return protocols } diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index 6272ccd84551..f2f8ee2d2b33 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -112,7 +112,7 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, panic(err) } for _, block := range bs { - chain.StateCache().TrieDB().Commit(block.Root(), false) + chain.TrieDB().Commit(block.Root(), false) } txconfig := legacypool.DefaultConfig txconfig.Journal = "" // Don't litter the disk with test journals diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go index f9fbf72b7b1c..cdac0da21a26 100644 --- a/eth/protocols/eth/handlers.go +++ b/eth/protocols/eth/handlers.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -246,6 +247,10 @@ func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error { // ServiceGetNodeDataQuery assembles the response to a node data query. It is // exposed to allow external packages to test protocol behavior. func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [][]byte { + // Request nodes by hash is not supported in path-based scheme. + if chain.TrieDB().Scheme() == rawdb.PathScheme { + return nil + } // Gather state data until the fetch or network limits is reached var ( bytes int @@ -257,7 +262,7 @@ func ServiceGetNodeDataQuery(chain *core.BlockChain, query GetNodeDataPacket) [] break } // Retrieve the requested state entry - entry, err := chain.TrieNode(hash) + entry, err := chain.TrieDB().Node(hash) if len(entry) == 0 || err != nil { // Read the contract code with prefix only to save unnecessary lookups. 
entry, err = chain.ContractCodeWithPrefix(hash) diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go index 1c6d80d354ce..b2fd03766eca 100644 --- a/eth/protocols/snap/handler.go +++ b/eth/protocols/snap/handler.go @@ -284,7 +284,7 @@ func ServiceGetAccountRangeQuery(chain *core.BlockChain, req *GetAccountRangePac req.Bytes = softResponseLimit } // Retrieve the requested state and bail out if non existent - tr, err := trie.New(trie.StateTrieID(req.Root), chain.StateCache().TrieDB()) + tr, err := trie.New(trie.StateTrieID(req.Root), chain.TrieDB()) if err != nil { return nil, nil } @@ -414,7 +414,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP if origin != (common.Hash{}) || (abort && len(storage) > 0) { // Request started at a non-zero hash or was capped prematurely, add // the endpoint Merkle proofs - accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.StateCache().TrieDB()) + accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), chain.TrieDB()) if err != nil { return nil, nil } @@ -423,7 +423,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP return nil, nil } id := trie.StorageTrieID(req.Root, account, acc.Root) - stTrie, err := trie.NewStateTrie(id, chain.StateCache().TrieDB()) + stTrie, err := trie.NewStateTrie(id, chain.TrieDB()) if err != nil { return nil, nil } @@ -487,7 +487,7 @@ func ServiceGetTrieNodesQuery(chain *core.BlockChain, req *GetTrieNodesPacket, s req.Bytes = softResponseLimit } // Make sure we have the state associated with the request - triedb := chain.StateCache().TrieDB() + triedb := chain.TrieDB() accTrie, err := trie.NewStateTrie(trie.StateTrieID(req.Root), triedb) if err != nil { diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go index 0aa6fd8730dd..1514ad4e1344 100644 --- a/eth/protocols/snap/sync_test.go +++ b/eth/protocols/snap/sync_test.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/trie/trienode" "golang.org/x/crypto/sha3" "golang.org/x/exp/slices" @@ -561,6 +562,11 @@ func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Has func TestSyncBloatedProof(t *testing.T) { t.Parallel() + testSyncBloatedProof(t, rawdb.HashScheme) + testSyncBloatedProof(t, rawdb.PathScheme) +} + +func testSyncBloatedProof(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -570,7 +576,7 @@ func TestSyncBloatedProof(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme) source := newTestPeer("source", t, term) source.accountTrie = sourceAccountTrie.Copy() source.accountValues = elems @@ -638,6 +644,11 @@ func setupSyncer(scheme string, peers ...*testPeer) *Syncer { func TestSync(t *testing.T) { t.Parallel() + testSync(t, rawdb.HashScheme) + testSync(t, rawdb.PathScheme) +} + +func testSync(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -647,7 +658,7 @@ func TestSync(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -659,7 +670,7 @@ func TestSync(t 
*testing.T) { if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This caused a @@ -667,6 +678,11 @@ func TestSync(t *testing.T) { func TestSyncTinyTriePanic(t *testing.T) { t.Parallel() + testSyncTinyTriePanic(t, rawdb.HashScheme) + testSyncTinyTriePanic(t, rawdb.PathScheme) +} + +func testSyncTinyTriePanic(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -676,7 +692,7 @@ func TestSyncTinyTriePanic(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(1, scheme) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -690,13 +706,18 @@ func TestSyncTinyTriePanic(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSync tests a basic sync with multiple peers func TestMultiSync(t *testing.T) { t.Parallel() + testMultiSync(t, rawdb.HashScheme) + testMultiSync(t, rawdb.PathScheme) +} + +func testMultiSync(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -706,7 +727,7 @@ func TestMultiSync(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -720,13 +741,18 @@ func TestMultiSync(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorage tests basic sync using accounts + storage + code func TestSyncWithStorage(t *testing.T) { t.Parallel() + testSyncWithStorage(t, rawdb.HashScheme) + testSyncWithStorage(t, rawdb.PathScheme) +} + +func testSyncWithStorage(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -736,7 +762,7 @@ func TestSyncWithStorage(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 3, 3000, true, false) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -752,13 +778,18 @@ func TestSyncWithStorage(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all func TestMultiSyncManyUseless(t *testing.T) { t.Parallel() + testMultiSyncManyUseless(t, rawdb.HashScheme) + testMultiSyncManyUseless(t, rawdb.PathScheme) +} + +func testMultiSyncManyUseless(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -768,7 +799,7 @@ func TestMultiSyncManyUseless(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := 
makeAccountTrieWithStorage(scheme, 100, 3000, true, false) mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { source := newTestPeer(name, t, term) @@ -801,11 +832,18 @@ func TestMultiSyncManyUseless(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { + t.Parallel() + + testMultiSyncManyUselessWithLowTimeout(t, rawdb.HashScheme) + testMultiSyncManyUselessWithLowTimeout(t, rawdb.PathScheme) +} + +func testMultiSyncManyUselessWithLowTimeout(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -815,7 +853,7 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false) mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { source := newTestPeer(name, t, term) @@ -853,11 +891,18 @@ func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all func TestMultiSyncManyUnresponsive(t *testing.T) { + t.Parallel() + + testMultiSyncManyUnresponsive(t, rawdb.HashScheme) + testMultiSyncManyUnresponsive(t, rawdb.PathScheme) +} + +func testMultiSyncManyUnresponsive(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -867,7 +912,7 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false) mkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer { source := newTestPeer(name, t, term) @@ -903,7 +948,7 @@ func TestMultiSyncManyUnresponsive(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } func checkStall(t *testing.T, term func()) chan struct{} { @@ -925,6 +970,11 @@ func checkStall(t *testing.T, term func()) chan struct{} { func TestSyncBoundaryAccountTrie(t *testing.T) { t.Parallel() + testSyncBoundaryAccountTrie(t, rawdb.HashScheme) + testSyncBoundaryAccountTrie(t, rawdb.PathScheme) +} + +func testSyncBoundaryAccountTrie(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -934,7 +984,7 @@ func TestSyncBoundaryAccountTrie(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(3000) + nodeScheme, sourceAccountTrie, elems := makeBoundaryAccountTrie(scheme, 3000) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -952,7 +1002,7 @@ func TestSyncBoundaryAccountTrie(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, 
sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is @@ -960,6 +1010,11 @@ func TestSyncBoundaryAccountTrie(t *testing.T) { func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { t.Parallel() + testSyncNoStorageAndOneCappedPeer(t, rawdb.HashScheme) + testSyncNoStorageAndOneCappedPeer(t, rawdb.PathScheme) +} + +func testSyncNoStorageAndOneCappedPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -969,7 +1024,7 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme) mkSource := func(name string, slow bool) *testPeer { source := newTestPeer(name, t, term) @@ -994,7 +1049,7 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver @@ -1002,6 +1057,11 @@ func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { t.Parallel() + testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.HashScheme) + testSyncNoStorageAndOneCodeCorruptPeer(t, rawdb.PathScheme) +} + +func testSyncNoStorageAndOneCodeCorruptPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1011,7 +1071,7 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme) mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { source := newTestPeer(name, t, term) @@ -1034,12 +1094,17 @@ func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { t.Parallel() + testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.HashScheme) + testSyncNoStorageAndOneAccountCorruptPeer(t, rawdb.PathScheme) +} + +func testSyncNoStorageAndOneAccountCorruptPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1049,7 +1114,7 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme) mkSource := func(name string, accFn accountHandlerFunc) *testPeer { source := newTestPeer(name, t, term) @@ -1072,7 +1137,7 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes @@ -1080,6 +1145,11 @@ func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { t.Parallel() + testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.HashScheme) + testSyncNoStorageAndOneCodeCappedPeer(t, rawdb.PathScheme) +} + +func testSyncNoStorageAndOneCodeCappedPeer(t *testing.T, scheme string) { var ( once sync.Once 
cancel = make(chan struct{}) @@ -1089,7 +1159,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(3000, scheme) mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { source := newTestPeer(name, t, term) @@ -1123,7 +1193,7 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { if threshold := 100; counter > threshold { t.Logf("Error, expected < %d invocations, got %d", threshold, counter) } - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the @@ -1131,6 +1201,11 @@ func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { func TestSyncBoundaryStorageTrie(t *testing.T) { t.Parallel() + testSyncBoundaryStorageTrie(t, rawdb.HashScheme) + testSyncBoundaryStorageTrie(t, rawdb.PathScheme) +} + +func testSyncBoundaryStorageTrie(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1140,7 +1215,7 @@ func TestSyncBoundaryStorageTrie(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 10, 1000, false, true) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -1160,7 +1235,7 @@ func TestSyncBoundaryStorageTrie(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is @@ -1168,6 +1243,11 @@ func TestSyncBoundaryStorageTrie(t *testing.T) { func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { t.Parallel() + testSyncWithStorageAndOneCappedPeer(t, rawdb.HashScheme) + testSyncWithStorageAndOneCappedPeer(t, rawdb.PathScheme) +} + +func testSyncWithStorageAndOneCappedPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1177,7 +1257,7 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 300, 1000, false, false) mkSource := func(name string, slow bool) *testPeer { source := newTestPeer(name, t, term) @@ -1202,7 +1282,7 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is @@ -1210,6 +1290,11 @@ func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { func TestSyncWithStorageAndCorruptPeer(t *testing.T) { t.Parallel() + testSyncWithStorageAndCorruptPeer(t, rawdb.HashScheme) + testSyncWithStorageAndCorruptPeer(t, rawdb.PathScheme) +} + +func testSyncWithStorageAndCorruptPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1219,7 +1304,7 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems 
:= makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { source := newTestPeer(name, t, term) @@ -1243,12 +1328,17 @@ func TestSyncWithStorageAndCorruptPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { t.Parallel() + testSyncWithStorageAndNonProvingPeer(t, rawdb.HashScheme) + testSyncWithStorageAndNonProvingPeer(t, rawdb.PathScheme) +} + +func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1258,7 +1348,7 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(scheme, 100, 3000, true, false) mkSource := func(name string, handler storageHandlerFunc) *testPeer { source := newTestPeer(name, t, term) @@ -1281,7 +1371,7 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { t.Fatalf("sync failed: %v", err) } close(done) - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } // TestSyncWithStorage tests basic sync using accounts + storage + code, against @@ -1290,6 +1380,12 @@ func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { // did not mark the account for healing. func TestSyncWithStorageMisbehavingProve(t *testing.T) { t.Parallel() + + testSyncWithStorageMisbehavingProve(t, rawdb.HashScheme) + testSyncWithStorageMisbehavingProve(t, rawdb.PathScheme) +} + +func testSyncWithStorageMisbehavingProve(t *testing.T, scheme string) { var ( once sync.Once cancel = make(chan struct{}) @@ -1299,7 +1395,7 @@ func TestSyncWithStorageMisbehavingProve(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false) + nodeScheme, sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(scheme, 10, 30, false) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -1314,7 +1410,7 @@ func TestSyncWithStorageMisbehavingProve(t *testing.T) { if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) } type kv struct { @@ -1364,9 +1460,9 @@ func getCodeByHash(hash common.Hash) []byte { } // makeAccountTrieNoStorage spits out a trie, along with the leafs -func makeAccountTrieNoStorage(n int) (string, *trie.Trie, []*kv) { +func makeAccountTrieNoStorage(n int, scheme string) (string, *trie.Trie, []*kv) { var ( - db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) accTrie = trie.NewEmpty(db) entries []*kv ) @@ -1396,12 +1492,12 @@ func makeAccountTrieNoStorage(n int) (string, *trie.Trie, []*kv) { // makeBoundaryAccountTrie constructs an account trie. Instead of filling // accounts normally, this function will fill a few accounts which have // boundary hash. 
-func makeBoundaryAccountTrie(n int) (string, *trie.Trie, []*kv) { +func makeBoundaryAccountTrie(scheme string, n int) (string, *trie.Trie, []*kv) { var ( entries []*kv boundaries []common.Hash - db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) accTrie = trie.NewEmpty(db) ) // Initialize boundaries @@ -1457,9 +1553,9 @@ func makeBoundaryAccountTrie(n int) (string, *trie.Trie, []*kv) { // makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each accounts // has a unique storage set. -func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) { +func makeAccountTrieWithStorageWithUniqueStorage(scheme string, accounts, slots int, code bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) { var ( - db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) accTrie = trie.NewEmpty(db) entries []*kv storageRoots = make(map[common.Hash]common.Hash) @@ -1512,9 +1608,9 @@ func makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) } // makeAccountTrieWithStorage spits out a trie, along with the leafs -func makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) { +func makeAccountTrieWithStorage(scheme string, accounts, slots int, code, boundary bool) (string, *trie.Trie, []*kv, map[common.Hash]*trie.Trie, map[common.Hash][]*kv) { var ( - db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + db = trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme)) accTrie = trie.NewEmpty(db) entries []*kv storageRoots = make(map[common.Hash]common.Hash) @@ -1656,9 +1752,9 @@ func makeBoundaryStorageTrie(owner common.Hash, n int, db *trie.Database) (commo return root, nodes, entries } -func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) { +func verifyTrie(scheme string, db ethdb.KeyValueStore, root common.Hash, t *testing.T) { t.Helper() - triedb := trie.NewDatabase(rawdb.NewDatabase(db)) + triedb := trie.NewDatabase(rawdb.NewDatabase(db), newDbConfig(scheme)) accTrie, err := trie.New(trie.StateTrieID(root), triedb) if err != nil { t.Fatal(err) @@ -1700,6 +1796,13 @@ func verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) { // TestSyncAccountPerformance tests how efficient the snap algo is at minimizing // state healing func TestSyncAccountPerformance(t *testing.T) { + t.Parallel() + + testSyncAccountPerformance(t, rawdb.HashScheme) + testSyncAccountPerformance(t, rawdb.PathScheme) +} + +func testSyncAccountPerformance(t *testing.T, scheme string) { // Set the account concurrency to 1. 
This _should_ result in the // range root to become correct, and there should be no healing needed defer func(old int) { accountConcurrency = old }(accountConcurrency) @@ -1714,7 +1817,7 @@ func TestSyncAccountPerformance(t *testing.T) { }) } ) - nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + nodeScheme, sourceAccountTrie, elems := makeAccountTrieNoStorage(100, scheme) mkSource := func(name string) *testPeer { source := newTestPeer(name, t, term) @@ -1727,7 +1830,7 @@ func TestSyncAccountPerformance(t *testing.T) { if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { t.Fatalf("sync failed: %v", err) } - verifyTrie(syncer.db, sourceAccountTrie.Hash(), t) + verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t) // The trie root will always be requested, since it is added when the snap // sync cycle starts. When popping the queue, we do not look it up again. // Doing so would bring this number down to zero in this artificial testcase, @@ -1787,3 +1890,10 @@ func TestSlotEstimation(t *testing.T) { } } } + +func newDbConfig(scheme string) *trie.Config { + if scheme == rawdb.HashScheme { + return &trie.Config{} + } + return &trie.Config{PathDB: pathdb.Defaults} +} diff --git a/eth/protocols/trust/handler_test.go b/eth/protocols/trust/handler_test.go index 55252c0d63e1..144f4e602aaf 100644 --- a/eth/protocols/trust/handler_test.go +++ b/eth/protocols/trust/handler_test.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -53,7 +54,7 @@ func newTestBackendWithGenerator(blocks int) *testBackend { BaseFee: big.NewInt(0), } copy(genspec.ExtraData[32:], testAddr[:]) - genesis := genspec.MustCommit(db) + genesis := genspec.MustCommit(db, trie.NewDatabase(db, nil)) chain, _ := core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil) generator := func(i int, block *core.BlockGen) { diff --git a/eth/protocols/trust/peer_test.go b/eth/protocols/trust/peer_test.go index ab229a1b3216..88831893034f 100644 --- a/eth/protocols/trust/peer_test.go +++ b/eth/protocols/trust/peer_test.go @@ -1,7 +1,7 @@ package trust import ( - "math/rand" + "crypto/rand" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" diff --git a/eth/state_accessor.go b/eth/state_accessor.go index c68aeb2ffaac..cf056a110857 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" @@ -39,31 +40,11 @@ import ( // for releasing state. var noopReleaser = tracers.StateReleaseFunc(func() {}) -// StateAtBlock retrieves the state database associated with a certain block. -// If no state is locally available for the given block, a number of blocks -// are attempted to be reexecuted to generate the desired state. The optional -// base layer statedb can be provided which is regarded as the statedb of the -// parent block. -// -// An additional release function will be returned if the requested state is -// available. Release is expected to be invoked when the returned state is no longer needed. -// Its purpose is to prevent resource leaking. Though it can be noop in some cases. 
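The hunks above thread a state-scheme parameter through the snap sync tests and add a newDbConfig helper that picks between the hash-based and path-based trie layouts. Below is a minimal sketch of that pattern, assuming the trie, rawdb and trie/triedb/pathdb packages referenced by this change; TestExampleSync and testExampleSync are hypothetical names standing in for the converted tests.

```go
// A minimal sketch only; not part of the patch above.
package snap

import (
	"testing"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)

// newDbConfig mirrors the helper added above: an empty config selects the
// hash scheme, otherwise the path-based scheme with its default settings.
func newDbConfig(scheme string) *trie.Config {
	if scheme == rawdb.HashScheme {
		return &trie.Config{}
	}
	return &trie.Config{PathDB: pathdb.Defaults}
}

// TestExampleSync (hypothetical) shows the conversion pattern: the exported
// test fans out to a scheme-parameterized body, once per supported scheme.
func TestExampleSync(t *testing.T) {
	t.Parallel()

	testExampleSync(t, rawdb.HashScheme)
	testExampleSync(t, rawdb.PathScheme)
}

func testExampleSync(t *testing.T, scheme string) {
	// Build the source trie against a database configured for the scheme
	// under test, as makeAccountTrieNoStorage does in the hunks above.
	db := trie.NewDatabase(rawdb.NewMemoryDatabase(), newDbConfig(scheme))
	accTrie := trie.NewEmpty(db)
	_ = accTrie // populate accounts and drive the syncer here
}
```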
-// -// Parameters: -// - block: The block for which we want the state(state = block.Root) -// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state -// - base: If the caller is tracing multiple blocks, the caller can provide the parent -// state continuously from the callsite. -// - readOnly: If true, then the live 'blockchain' state database is used. No mutation should -// be made from caller, e.g. perform Commit or other 'save-to-disk' changes. -// Otherwise, the trash generated by caller may be persisted permanently. -// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is -// provided, it would be preferable to start from a fresh state, if we have it -// on disk. -func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { +func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { var ( current *types.Block database state.Database + triedb *trie.Database report = true origin = block.NumberU64() ) @@ -74,9 +55,9 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe // on top to prevent garbage collection and return a release // function to deref it. if statedb, err = eth.blockchain.StateAt(block.Root()); err == nil { - statedb.Database().TrieDB().Reference(block.Root(), common.Hash{}) + eth.blockchain.TrieDB().Reference(block.Root(), common.Hash{}) return statedb, func() { - statedb.Database().TrieDB().Dereference(block.Root()) + eth.blockchain.TrieDB().Dereference(block.Root()) }, nil } } @@ -87,14 +68,16 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe if preferDisk { // Create an ephemeral trie.Database for isolating the live one. Otherwise // the internal junks created by tracing will be persisted into the disk. - database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) + // TODO(rjl493456442), clean cache is disabled to prevent memory leak, + // please re-enable it for better performance. + database = state.NewDatabaseWithConfig(eth.chainDb, trie.HashDefaults) if statedb, err = state.New(block.Root(), database, nil); err == nil { log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number()) return statedb, noopReleaser, nil } } // The optional base statedb is given, mark the start point as parent block - statedb, database, report = base, base.Database(), false + statedb, database, triedb, report = base, base.Database(), base.Database().TrieDB(), false current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) if current == nil { return nil, nil, fmt.Errorf("missing parent block %v %d", block.ParentHash(), block.NumberU64()-1) @@ -105,7 +88,10 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe // Create an ephemeral trie.Database for isolating the live one. Otherwise // the internal junks created by tracing will be persisted into the disk. - database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) + // TODO(rjl493456442), clean cache is disabled to prevent memory leak, + // please re-enable it for better performance. 
+ triedb = trie.NewDatabase(eth.chainDb, trie.HashDefaults) + database = state.NewDatabaseWithNodeDB(eth.chainDb, triedb) // If we didn't check the live database, do check state over ephemeral database, // otherwise we would rewind past a persisted block (specific corner case is @@ -183,17 +169,58 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe } // Hold the state reference and also drop the parent state // to prevent accumulating too many nodes in memory. - database.TrieDB().Reference(root, common.Hash{}) + triedb.Reference(root, common.Hash{}) if parent != (common.Hash{}) { - database.TrieDB().Dereference(parent) + triedb.Dereference(parent) } parent = root } if report { - nodes, imgs := database.TrieDB().Size() - log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) + diff, nodes, immutablenodes, imgs := triedb.Size() + log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "layer", diff, "nodes", nodes, "immutablenodes", immutablenodes, "preimages", imgs) + } + return statedb, func() { triedb.Dereference(block.Root()) }, nil +} + +func (eth *Ethereum) pathState(block *types.Block) (*state.StateDB, func(), error) { + // Check if the requested state is available in the live chain. + statedb, err := eth.blockchain.StateAt(block.Root()) + if err == nil { + return statedb, noopReleaser, nil + } + // TODO historic state is not supported in path-based scheme. + // Fully archive node in pbss will be implemented by relying + // on state history, but needs more work on top. + return nil, nil, errors.New("historical state not available in path scheme yet") +} + +// stateAtBlock retrieves the state database associated with a certain block. +// If no state is locally available for the given block, a number of blocks +// are attempted to be reexecuted to generate the desired state. The optional +// base layer statedb can be provided which is regarded as the statedb of the +// parent block. +// +// An additional release function will be returned if the requested state is +// available. Release is expected to be invoked when the returned state is no +// longer needed. Its purpose is to prevent resource leaking. Though it can be +// noop in some cases. +// +// Parameters: +// - block: The block for which we want the state(state = block.Root) +// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state +// - base: If the caller is tracing multiple blocks, the caller can provide the parent +// state continuously from the callsite. +// - readOnly: If true, then the live 'blockchain' state database is used. No mutation should +// be made from caller, e.g. perform Commit or other 'save-to-disk' changes. +// Otherwise, the trash generated by caller may be persisted permanently. +// - preferDisk: This arg can be used by the caller to signal that even though the 'base' is +// provided, it would be preferable to start from a fresh state, if we have it +// on disk. 
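hashState above keeps memory bounded while re-executing blocks by referencing each regenerated root and dereferencing its predecessor, then handing the caller a release function for the final root. Below is a minimal sketch of that chaining pattern, assuming the *trie.Database handle and the Reference/Dereference calls used in the hunk; chainRoots and the roots slice are illustrative stand-ins for the per-block re-execution loop.

```go
// A minimal sketch only; not part of the patch above.
package eth

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
)

// chainRoots pins each regenerated root and releases its predecessor so at
// most one intermediate state stays referenced in the in-memory trie
// database. The returned release func drops the last pinned root.
func chainRoots(triedb *trie.Database, roots []common.Hash) (release func()) {
	var parent common.Hash
	for _, root := range roots {
		triedb.Reference(root, common.Hash{})
		if parent != (common.Hash{}) {
			triedb.Dereference(parent)
		}
		parent = root
	}
	return func() {
		// Invoked by the caller once the final state is no longer needed.
		if parent != (common.Hash{}) {
			triedb.Dereference(parent)
		}
	}
}
```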
+func (eth *Ethereum) stateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { + if eth.blockchain.TrieDB().Scheme() == rawdb.HashScheme { + return eth.hashState(ctx, block, reexec, base, readOnly, preferDisk) } - return statedb, func() { database.TrieDB().Dereference(block.Root()) }, nil + return eth.pathState(block) } // stateAtTransaction returns the execution environment of a certain transaction. @@ -209,7 +236,7 @@ func (eth *Ethereum) stateAtTransaction(ctx context.Context, block *types.Block, } // Lookup the statedb of parent block from the live database, // otherwise regenerate it on the flight. - statedb, release, err := eth.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := eth.stateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, vm.BlockContext{}, nil, nil, err } diff --git a/eth/sync.go b/eth/sync.go index 0693be9efd44..91b21b9463e7 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -133,6 +133,8 @@ func (cs *chainSyncer) loop() { <-cs.doneCh } return + case <-cs.handler.stopCh: + return } } } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 1e263b23cc16..f53e2f46a1a1 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -373,8 +373,8 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed // if the relevant state is available in disk. var preferDisk bool if statedb != nil { - s1, s2 := statedb.Database().TrieDB().Size() - preferDisk = s1+s2 > defaultTracechainMemLimit + s1, s2, s3, s4 := statedb.Database().TrieDB().Size() + preferDisk = s1+s2+s3+s4 > defaultTracechainMemLimit } statedb, release, err = api.backend.StateAtBlock(ctx, block, reexec, statedb, false, preferDisk) if err != nil { diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index 41b3fd47a0a4..f6e2839c75dc 100644 --- a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -138,8 +138,10 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) { GasLimit: uint64(test.Context.GasLimit), BaseFee: test.Genesis.BaseFee, } - _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + triedb, _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme) ) + triedb.Close() + tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig) if err != nil { t.Fatalf("failed to create call tracer: %v", err) @@ -238,7 +240,8 @@ func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { Difficulty: (*big.Int)(test.Context.Difficulty), GasLimit: uint64(test.Context.GasLimit), } - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme) + defer triedb.Close() b.ReportAllocs() b.ResetTimer() @@ -364,7 +367,7 @@ func TestInternals(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), + triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), core.GenesisAlloc{ to: core.GenesisAccount{ Code: tc.code, @@ -372,7 +375,9 @@ func TestInternals(t *testing.T) { origin: core.GenesisAccount{ Balance: big.NewInt(500000000000000), }, - }, false) + }, 
false, rawdb.HashScheme) + defer triedb.Close() + evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Tracer: tc.tracer}, firehose.NoOpContext) msg := &core.Message{ To: &to, diff --git a/eth/tracers/internal/tracetest/flat_calltrace_test.go b/eth/tracers/internal/tracetest/flat_calltrace_test.go index 7db56b16c116..ed7c1921b0e8 100644 --- a/eth/tracers/internal/tracetest/flat_calltrace_test.go +++ b/eth/tracers/internal/tracetest/flat_calltrace_test.go @@ -101,7 +101,8 @@ func flatCallTracerTestRunner(tracerName string, filename string, dirPath string Difficulty: (*big.Int)(test.Context.Difficulty), GasLimit: uint64(test.Context.GasLimit), } - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme) + defer triedb.Close() // Create the tracer, the EVM environment and run it tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig) diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go index 230be1361015..c4989e2d2fbc 100644 --- a/eth/tracers/internal/tracetest/prestate_test.go +++ b/eth/tracers/internal/tracetest/prestate_test.go @@ -109,8 +109,10 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) { GasLimit: uint64(test.Context.GasLimit), BaseFee: test.Genesis.BaseFee, } - _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + triedb, _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false, rawdb.HashScheme) ) + defer triedb.Close() + tracer, err := tracers.DefaultDirectory.New(tracerName, new(tracers.Context), test.TracerConfig) if err != nil { t.Fatalf("failed to create call tracer: %v", err) diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index 5097f791119c..b2b58b405ad0 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -80,7 +80,9 @@ func BenchmarkTransactionTrace(b *testing.B) { Code: []byte{}, Balance: big.NewInt(500000000000000), } - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false) + triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false, rawdb.HashScheme) + defer triedb.Close() + // Create the tracer, the EVM environment and run it tracer := logger.NewStructLogger(&logger.Config{ Debug: false, diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go index 3ee8aa04bc6a..45e0e9f413e2 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -39,6 +39,7 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/trie" ) // Verify that Client implements the ethereum interfaces. 
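The test updates above reflect two signature changes: tests.MakePreState now takes a node scheme and returns the trie database as its first value (which callers must close), and Genesis.MustCommit now takes an explicit trie database. Below is a minimal sketch of the updated call pattern, assuming those signatures as they appear in the hunks; the package and function names here are illustrative only.

```go
// A minimal sketch only; not part of the patch above.
package example

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/tests"
	"github.com/ethereum/go-ethereum/trie"
)

func preStateSketch(genesis *core.Genesis, alloc core.GenesisAlloc) {
	// Tracer-style tests: keep the returned trie database and close it when done.
	triedb, _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false, rawdb.HashScheme)
	defer triedb.Close()
	_ = statedb

	// Chain-construction tests: commit the genesis against an explicit trie database.
	db := rawdb.NewMemoryDatabase()
	gblock := genesis.MustCommit(db, trie.NewDatabase(db, nil))
	_ = gblock
}
```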
@@ -316,7 +317,7 @@ func generateTestChain() []*types.Block { signer := types.HomesteadSigner{} // Create a database pre-initialize with a genesis block db := rawdb.NewMemoryDatabase() - genesis.MustCommit(db) + genesis.MustCommit(db, trie.NewDatabase(db, nil)) chain, _ := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil, core.EnablePersistDiff(860000)) generate := func(i int, block *core.BlockGen) { block.OffsetTime(5) @@ -352,7 +353,7 @@ func generateTestChain() []*types.Block { } } } - gblock := genesis.MustCommit(db) + gblock := genesis.MustCommit(db, trie.NewDatabase(db, nil)) engine := ethash.NewFaker() blocks, _ := core.GenerateChain(genesis.Config, gblock, engine, db, testBlockNum, generate) blocks = append([]*types.Block{gblock}, blocks...) diff --git a/go.mod b/go.mod index b6d9ebc7089f..d7ba83eb7bba 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/ethereum/go-ethereum -go 1.19 +go 1.20 require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 @@ -13,14 +13,14 @@ require ( github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cespare/cp v1.1.1 github.com/cloudflare/cloudflare-go v0.14.0 - github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 + github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 github.com/cometbft/cometbft v0.37.0 github.com/consensys/gnark-crypto v0.10.0 github.com/crate-crypto/go-kzg-4844 v0.3.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set/v2 v2.1.0 - github.com/docker/docker v20.10.19+incompatible - github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 + github.com/docker/docker v20.10.24+incompatible + github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 github.com/ethereum/c-kzg-4844 v0.3.1 github.com/fatih/color v1.13.0 github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e @@ -31,10 +31,10 @@ require ( github.com/gofrs/flock v0.8.1 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/golang-jwt/jwt/v4 v4.3.0 - github.com/golang/protobuf v1.5.2 + github.com/golang/protobuf v1.5.3 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/google/gofuzz v1.2.0 - github.com/google/pprof v0.0.0-20230207041349-798e818bf904 + github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b github.com/google/uuid v1.3.0 github.com/gorilla/websocket v1.5.0 github.com/graph-gophers/graphql-go v1.3.0 @@ -43,7 +43,7 @@ require ( github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 github.com/holiman/bloomfilter/v2 v2.0.3 github.com/holiman/uint256 v1.2.3 - github.com/huin/goupnp v1.0.3 + github.com/huin/goupnp v1.3.0 github.com/influxdata/influxdb-client-go/v2 v2.4.0 github.com/influxdata/influxdb1-client v0.0.0-20220302092344-a9ab5670611c github.com/jackpal/go-nat-pmp v1.0.2 @@ -53,7 +53,7 @@ require ( github.com/kylelemons/godebug v1.1.0 github.com/logrusorgru/aurora v2.0.3+incompatible github.com/mattn/go-colorable v0.1.13 - github.com/mattn/go-isatty v0.0.16 + github.com/mattn/go-isatty v0.0.18 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.5 github.com/panjf2000/ants/v2 v2.4.5 @@ -61,11 +61,11 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/tsdb v0.10.0 github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 - github.com/prysmaticlabs/prysm/v4 v4.0.2 + github.com/prysmaticlabs/prysm/v4 v4.0.8 github.com/rs/cors v1.8.2 github.com/shirou/gopsutil v3.21.11+incompatible github.com/status-im/keycard-go v0.2.0 - 
github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/supranational/blst v0.3.11 github.com/syndtr/goleveldb v1.0.1 github.com/tendermint/go-amino v0.14.1 @@ -73,14 +73,15 @@ require ( github.com/tendermint/tendermint v0.31.15 github.com/tidwall/wal v1.1.7 github.com/tyler-smith/go-bip39 v1.1.0 - github.com/urfave/cli/v2 v2.24.1 + github.com/urfave/cli/v2 v2.25.7 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3 github.com/willf/bitset v1.1.3 - golang.org/x/crypto v0.9.0 + go.uber.org/automaxprocs v1.5.2 + golang.org/x/crypto v0.12.0 golang.org/x/exp v0.0.0-20230810033253-352e893a4cad golang.org/x/sync v0.3.0 - golang.org/x/sys v0.9.0 - golang.org/x/text v0.9.0 + golang.org/x/sys v0.11.0 + golang.org/x/text v0.12.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.9.1 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce @@ -91,7 +92,6 @@ require ( github.com/fatih/structs v1.1.0 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 go.uber.org/atomic v1.10.0 - go.uber.org/automaxprocs v1.3.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 ) @@ -99,7 +99,7 @@ require ( contrib.go.opencensus.io/exporter/jaeger v0.2.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3 // indirect - github.com/BurntSushi/toml v1.2.1 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/aristanetworks/goarista v0.0.0-20200805130819-fd197cf57d96 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2 // indirect @@ -113,12 +113,12 @@ require ( github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/chzyer/readline v1.5.0 // indirect + github.com/chzyer/readline v1.5.1 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.3 // indirect github.com/consensys/bavard v0.1.13 // indirect - github.com/containerd/cgroups v1.0.4 // indirect + github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cosmos/gogoproto v1.4.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect @@ -142,11 +142,11 @@ require ( github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-playground/locales v0.14.0 // indirect - github.com/go-playground/universal-translator v0.18.0 // indirect - github.com/go-playground/validator/v10 v10.11.1 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.13.0 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect - github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -158,10 +158,10 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.0.1 // indirect github.com/gtank/merlin v0.1.1 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.1 // indirect + 
github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e // indirect github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect - github.com/ipfs/go-cid v0.3.2 // indirect + github.com/ipfs/go-cid v0.4.1 // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jmhodges/levigo v1.0.0 // indirect @@ -169,17 +169,17 @@ require ( github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a // indirect github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 // indirect github.com/kilic/bls12-381 v0.1.0 // indirect - github.com/klauspost/compress v1.15.15 // indirect - github.com/klauspost/cpuid/v2 v2.2.1 // indirect - github.com/koron/go-ssdp v0.0.3 // indirect + github.com/klauspost/compress v1.16.4 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/leodido/go-urn v1.2.1 // indirect + github.com/leodido/go-urn v1.2.3 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect - github.com/libp2p/go-libp2p v0.26.2 // indirect - github.com/libp2p/go-libp2p-asn-util v0.2.0 // indirect + github.com/libp2p/go-libp2p v0.27.8 // indirect + github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect github.com/libp2p/go-libp2p-pubsub v0.9.3 // indirect github.com/libp2p/go-mplex v0.7.0 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect @@ -192,7 +192,7 @@ require ( github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.50 // indirect + github.com/miekg/dns v1.1.53 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect @@ -208,16 +208,16 @@ require ( github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect - github.com/multiformats/go-multiaddr v0.8.0 // indirect + github.com/multiformats/go-multiaddr v0.9.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect - github.com/multiformats/go-multibase v0.1.1 // indirect - github.com/multiformats/go-multicodec v0.7.0 // indirect + github.com/multiformats/go-multibase v0.2.0 // indirect + github.com/multiformats/go-multicodec v0.8.1 // indirect github.com/multiformats/go-multihash v0.2.1 // indirect github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/onsi/ginkgo/v2 v2.5.1 // indirect + github.com/onsi/ginkgo/v2 v2.9.2 // indirect github.com/opencontainers/runtime-spec v1.0.2 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect @@ -226,24 +226,24 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect - 
github.com/prometheus/common v0.39.0 // indirect + github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect github.com/prometheus/prom2json v1.3.0 // indirect github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d // indirect - github.com/prysmaticlabs/fastssz v0.0.0-20220628121656-93dfe28febab // indirect + github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44 // indirect github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 // indirect github.com/prysmaticlabs/gohashtree v0.0.3-alpha // indirect github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c // indirect github.com/prysmaticlabs/prysm v0.0.0-20220124113610-e26cde5e091b // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-19 v0.2.1 // indirect - github.com/quic-go/qtls-go1-20 v0.1.1 // indirect + github.com/quic-go/qtls-go1-19 v0.3.3 // indirect + github.com/quic-go/qtls-go1-20 v0.2.3 // indirect github.com/quic-go/quic-go v0.33.0 // indirect github.com/quic-go/webtransport-go v0.5.2 // indirect - github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc // indirect + github.com/r3labs/sse/v2 v2.10.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/rivo/uniseg v0.4.3 // indirect + github.com/rivo/uniseg v0.4.4 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sasha-s/go-deadlock v0.3.1 // indirect @@ -255,9 +255,9 @@ require ( github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/tidwall/tinylru v1.1.0 // indirect - github.com/tklauser/go-sysconf v0.3.11 // indirect - github.com/tklauser/numcpus v0.6.0 // indirect - github.com/trailofbits/go-mutexasserts v0.0.0-20200708152505-19999e7d3cef // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/trailofbits/go-mutexasserts v0.0.0-20230328101604-8cdbc5f3d279 // indirect github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect github.com/wealdtech/go-bytesutil v1.1.1 // indirect github.com/wealdtech/go-eth2-types/v2 v2.5.2 // indirect @@ -266,31 +266,30 @@ require ( github.com/yusufpapurcu/wmi v1.2.2 // indirect go.etcd.io/bbolt v1.3.7 // indirect go.opencensus.io v0.24.0 // indirect - go.uber.org/dig v1.15.0 // indirect - go.uber.org/fx v1.18.2 // indirect - go.uber.org/multierr v1.8.0 // indirect + go.uber.org/dig v1.16.1 // indirect + go.uber.org/fx v1.19.2 // indirect + go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect golang.org/x/mod v0.11.0 // indirect golang.org/x/net v0.10.0 // indirect - golang.org/x/oauth2 v0.3.0 // indirect - golang.org/x/term v0.8.0 // indirect - google.golang.org/api v0.34.0 // indirect + golang.org/x/oauth2 v0.5.0 // indirect + golang.org/x/term v0.11.0 // indirect + google.golang.org/api v0.44.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect - google.golang.org/grpc v1.52.0 // indirect - google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 // indirect + google.golang.org/grpc v1.53.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/apimachinery v0.18.3 // 
indirect - k8s.io/client-go v0.18.3 // indirect - k8s.io/klog v1.0.0 // indirect + k8s.io/apimachinery v0.20.0 // indirect + k8s.io/client-go v0.20.0 // indirect k8s.io/klog/v2 v2.80.0 // indirect - k8s.io/utils v0.0.0-20200520001619-278ece378a50 // indirect + k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect lukechampine.com/blake3 v1.1.7 // indirect nhooyr.io/websocket v1.8.7 // indirect rsc.io/tmplfunc v0.0.3 // indirect - sigs.k8s.io/structured-merge-diff/v3 v3.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.0.2 // indirect sigs.k8s.io/yaml v1.2.0 // indirect ) diff --git a/go.sum b/go.sum index 7c8663fd976e..1aa1e0371002 100644 --- a/go.sum +++ b/go.sum @@ -18,6 +18,11 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -57,19 +62,28 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSu github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= 
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= @@ -83,7 +97,9 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= @@ -124,6 +140,7 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= @@ -208,32 +225,36 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex 
v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/logex v1.2.0 h1:+eqR0HfOetur4tgnC8ftU5imRnhi4te+BadWS95c5AM= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= +github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/readline v1.5.0 h1:lSwwFrbNviGePhkewF1az4oLmcwqCZijQ2/Wi3BGHAI= github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/chzyer/test v0.0.0-20210722231415-061457976a23 h1:dZ0/VyGgQdVGAss6Ju0dt5P0QltE0SFY5Woh6hbIfiQ= github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.14.0 h1:gFqGlGl/5f9UGXAaKapCGUfaTCgRKKnzu2VvzMZlOFA= github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk= -github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= +github.com/cockroachdb/pebble 
v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= +github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= @@ -246,8 +267,8 @@ github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1 github.com/consensys/gnark-crypto v0.10.0 h1:zRh22SR7o4K35SoNqouS9J/TKHTyU2QWaj5ldehyXtA= github.com/consensys/gnark-crypto v0.10.0/go.mod h1:Iq/P3HHl0ElSjsg2E1gsMwhAyxnxoKK5nVyZKd+/KhU= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= -github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -321,16 +342,17 @@ github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/ github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.19+incompatible h1:lzEmjivyNHFHMNAFLXORMBXyGIhw/UP4DvJwvyKYq64= -github.com/docker/docker v20.10.19+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= +github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= -github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE= -github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs 
v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -353,6 +375,7 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= @@ -363,6 +386,7 @@ github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/ github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= @@ -381,6 +405,7 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= @@ -451,22 +476,28 @@ github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference 
v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= -github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= -github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= -github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-playground/validator/v10 v10.13.0 h1:cFRQdfaSMCOSfGCCLB20MHvuoHb/s5G8L5pu2ppK5AQ= +github.com/go-playground/validator/v10 v10.13.0/go.mod h1:dwu7+CG8/CtBiJFZDz4e+5Upb6OLw04gtBYw0mcG/z4= github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -475,8 +506,9 @@ github.com/go-stack/stack v1.6.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 
h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= @@ -513,8 +545,8 @@ github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/gddo v0.0.0-20200528160355-8d077c1d8f4c/go.mod h1:sam69Hju0uq+5uvLJUMDlsKlQ21Vrs1Kd/1YFPNYdOU= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -531,6 +563,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -549,8 +582,10 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -591,6 +626,7 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod 
h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -599,8 +635,13 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20230207041349-798e818bf904 h1:4/hN5RUoecvl+RmJRE2YxKWtnnQls6rQjjW5oV7qg2U= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= +github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4= +github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -616,6 +657,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= @@ -674,8 +716,8 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= 
-github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4= -github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU= +github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v0.0.0-20170914154624-68e816d1c783/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -696,12 +738,13 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= -github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= -github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/cgosymbolizer v0.0.0-20200424224625-be1b05b0b279/go.mod h1:a5aratAVTWyz+nJMmDsN8O4XTfaLfdAsB1ysCmZX5Bw= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= @@ -729,8 +772,8 @@ github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUP github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.3.2 h1:OGgOd+JCFM+y1DjWPmVH+2/4POtpDzwcr7VgnB7mZXc= -github.com/ipfs/go-cid v0.3.2/go.mod h1:gQ8pKqT/sUxGY+tIwy1RPpAojYu7jAyCp5Tz1svoupw= +github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= +github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= @@ -832,15 +875,15 @@ github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.10.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= 
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= +github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.1 h1:U33DW0aiEj633gHYw3LoDNfkDiYnE5Q8M/TKJn2f2jI= -github.com/klauspost/cpuid/v2 v2.2.1/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -850,8 +893,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJuqPYs= -github.com/koron/go-ssdp v0.0.3 h1:JivLMY45N76b4p/vsWGOKewBQu6uf39y8l+AQ7sDKx8= -github.com/koron/go-ssdp v0.0.3/go.mod h1:b2MxI6yh02pKrsyNoQUsk4+YNikaGhe4894J+Q5lDvA= +github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= +github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -861,6 +904,7 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -872,8 +916,8 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod 
h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= -github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= +github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= @@ -893,11 +937,11 @@ github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= -github.com/libp2p/go-libp2p v0.26.2 h1:eHEoW/696FP7/6DxOvcrKfTD6Bi0DExxiMSZUJxswA0= -github.com/libp2p/go-libp2p v0.26.2/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8= +github.com/libp2p/go-libp2p v0.27.8 h1:IX5x/4yKwyPQeVS2AXHZ3J4YATM9oHBGH1gBc23jBAI= +github.com/libp2p/go-libp2p v0.27.8/go.mod h1:eCFFtd0s5i/EVKR7+5Ki8bM7qwkNW3TPTTSSW9sz8NE= github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= -github.com/libp2p/go-libp2p-asn-util v0.2.0 h1:rg3+Os8jbnO5DxkC7K/Utdi+DkY3q/d1/1q+8WeNAsw= -github.com/libp2p/go-libp2p-asn-util v0.2.0/go.mod h1:WoaWxbHKBymSN41hWSq/lGKJEca7TNm58+gGJi2WsLI= +github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= +github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= @@ -1055,8 +1099,9 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -1075,8 +1120,8 @@ github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/le github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod 
h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw= +github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -1152,8 +1197,8 @@ github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4 github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= -github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU= -github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= +github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ= +github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -1165,10 +1210,10 @@ github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysj github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= -github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= -github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= -github.com/multiformats/go-multicodec v0.7.0 h1:rTUjGOwjlhGHbEMbPoSUJowG1spZTVsITRANCjKTUAQ= -github.com/multiformats/go-multicodec v0.7.0/go.mod h1:GUC8upxSBE4oG+q3kWZRw/+6yC1BqO550bjhWsJbZlw= +github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= +github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= +github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8= +github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -1228,8 +1273,8 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= 
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.5.1 h1:auzK7OI497k6x4OvWq+TKAcpcSAlod0doAH72oIN0Jw= -github.com/onsi/ginkgo/v2 v2.5.1/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= +github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1237,7 +1282,7 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= +github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw= @@ -1295,6 +1340,7 @@ github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUI github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= @@ -1329,8 +1375,8 @@ github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1353,8 +1399,8 @@ github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 h1:cZC+ 
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY= github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d h1:1dN7YAqMN3oAJ0LceWcyv/U4jHLh+5urnSnr4br6zg4= github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d/go.mod h1:kOmQ/zdobQf7HUohDTifDNFEZfNaSCIY5fkONPL+dWU= -github.com/prysmaticlabs/fastssz v0.0.0-20220628121656-93dfe28febab h1:Y3PcvUrnneMWLuypZpwPz8P70/DQsz6KgV9JveKpyZs= -github.com/prysmaticlabs/fastssz v0.0.0-20220628121656-93dfe28febab/go.mod h1:MA5zShstUwCQaE9faGHgCGvEWUbG87p4SAXINhmCkvg= +github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44 h1:c3p3UzV4vFA7xaCDphnDWOjpxcadrQ26l5b+ypsvyxo= +github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44/go.mod h1:MA5zShstUwCQaE9faGHgCGvEWUbG87p4SAXINhmCkvg= github.com/prysmaticlabs/go-bitfield v0.0.0-20210108222456-8e92c3709aa0/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s= github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 h1:0tVE4tdWQK9ZpYygoV7+vS6QkDvQVySboMVEIxBJmXw= github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4= @@ -1368,20 +1414,21 @@ github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20211014160335-757fae4f38c6/g github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20230228205207-28762a7b9294 h1:q9wE0ZZRdTUAAeyFP/w0SwBEnCqlVy2+on6X2/e+eAU= github.com/prysmaticlabs/prysm v0.0.0-20220124113610-e26cde5e091b h1:XULhE6PdzCYSe5OEVFhuixNqL3mYVOq/3M+SUGnKr1Y= github.com/prysmaticlabs/prysm v0.0.0-20220124113610-e26cde5e091b/go.mod h1:bFzDfaj4xtisRey9RPkMJOhOJVwmtH3FChV7NPKV1Nk= -github.com/prysmaticlabs/prysm/v4 v4.0.2 h1:OU1Vw/eDp9BdWUBtO9z/mT5llIvBDLuNYKKTVyFpxrc= -github.com/prysmaticlabs/prysm/v4 v4.0.2/go.mod h1:e7Xf3nTpWEzmoOpcORzjQ56ogT4uTC7BN8DTrcFD7cQ= +github.com/prysmaticlabs/prysm/v4 v4.0.8 h1:F6Rt5gpaxbW50aP63jMmSXE16JW42HaEzUT55L9laaM= +github.com/prysmaticlabs/prysm/v4 v4.0.8/go.mod h1:m01QCZ2qwuTpUQRfYj5gMkvEP+j6mPcMydG8mNcnYDY= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-19 v0.2.1 h1:aJcKNMkH5ASEJB9FXNeZCyTEIHU1J7MmHyz1Q1TSG1A= -github.com/quic-go/qtls-go1-19 v0.2.1/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= -github.com/quic-go/qtls-go1-20 v0.1.1 h1:KbChDlg82d3IHqaj2bn6GfKRj84Per2VGf5XV3wSwQk= -github.com/quic-go/qtls-go1-20 v0.1.1/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= +github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE= +github.com/quic-go/qtls-go1-19 v0.3.3/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= +github.com/quic-go/qtls-go1-20 v0.2.3 h1:m575dovXn1y2ATOb1XrRFcrv0F+EQmlowTkoraNkDPI= +github.com/quic-go/qtls-go1-20 v0.2.3/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0= github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA= github.com/quic-go/webtransport-go v0.5.2 h1:GA6Bl6oZY+g/flt00Pnu0XtivSD8vukOu3lYhJjnGEk= github.com/quic-go/webtransport-go v0.5.2/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= -github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc h1:zAsgcP8MhzAbhMnB1QQ2O7ZhWYVGYSR2iVcjzQuPV+o= github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc/go.mod h1:S8xSOnV3CgpNrWd0GQ/OoQfMtlg2uPRSuTzcSGrzwK8= 
+github.com/r3labs/sse/v2 v2.10.0 h1:hFEkLLFY4LDifoHdiCN/LlGBAdVJYsANaLqNYa1l/v0= +github.com/r3labs/sse/v2 v2.10.0/go.mod h1:Igau6Whc+F17QUgML1fYe1VPZzTV6EMCnYktEmkNJ7I= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1390,14 +1437,13 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5X github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= -github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= @@ -1494,7 +1540,9 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -1505,8 +1553,9 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/supranational/blst v0.3.5/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= @@ -1537,15 +1586,16 @@ github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpY github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tjfoc/gmsm v1.3.0/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= -github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= -github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= -github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= -github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/trailofbits/go-mutexasserts v0.0.0-20200708152505-19999e7d3cef h1:8LRP+2JK8piIUU16ZDgWDXwjJcuJNTtCzadjTZj8Jf0= github.com/trailofbits/go-mutexasserts v0.0.0-20200708152505-19999e7d3cef/go.mod h1:+SV/613m53DNAmlXPTWGZhIyt4E/qDvn9g/lOPRiy0A= +github.com/trailofbits/go-mutexasserts v0.0.0-20230328101604-8cdbc5f3d279 h1:+LynomhWB+14Plp/bOONEAZCtvCZk4leRbTvNzNVkL0= +github.com/trailofbits/go-mutexasserts v0.0.0-20230328101604-8cdbc5f3d279/go.mod h1:GA3+Mq3kt3tYAfM0WZCu7ofy+GW9PuGysHfhr+6JX7s= github.com/twitchtv/twirp v7.1.0+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= @@ -1562,8 +1612,8 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= -github.com/urfave/cli/v2 v2.24.1 h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU= -github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod 
h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= @@ -1630,6 +1680,7 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= @@ -1643,12 +1694,13 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.3.0 h1:II28aZoGdaglS5vVNnspf28lnZpXScxtIozx1lAjdb0= go.uber.org/automaxprocs v1.3.0/go.mod h1:9CWT6lKIep8U41DDaPiH6eFscnTyjfTANNQNx6LrIcA= -go.uber.org/dig v1.15.0 h1:vq3YWr8zRj1eFGC7Gvf907hE0eRjPTZ1d3xHadD6liE= -go.uber.org/dig v1.15.0/go.mod h1:pKHs0wMynzL6brANhB2hLMro+zalv1osARTviTcqHLM= -go.uber.org/fx v1.18.2 h1:bUNI6oShr+OVFQeU8cDNbnN7VFsu+SsjHzUF51V/GAU= -go.uber.org/fx v1.18.2/go.mod h1:g0V1KMQ66zIRk8bLu3Ea5Jt2w/cHlOIp4wdRsgh0JaY= +go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= +go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/dig v1.16.1 h1:+alNIBsl0qfY0j6epRubp/9obgtrObRAc5aD+6jbWY8= +go.uber.org/dig v1.16.1/go.mod h1:557JTAUZT5bUK0SvCwikmLPPtdQhfvLYtO5tJgQSbnk= +go.uber.org/fx v1.19.2 h1:SyFgYQFr1Wl0AYstE8vyYIzP4bFz2URrScjwC4cwUvY= +go.uber.org/fx v1.19.2/go.mod h1:43G1VcqSzbIv77y00p1DRAsyZS8WdzuYdhZXmEUkMyQ= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= @@ -1658,8 +1710,8 @@ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+ go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -1697,6 +1749,7 @@ golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1704,9 +1757,8 @@ golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5 golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1748,6 +1800,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -1803,7 +1857,9 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -1813,7 +1869,6 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1831,10 +1886,16 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= -golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= +golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20170517211232-f52d1811a629/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1878,6 +1939,7 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1922,12 +1984,17 @@ golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1944,7 +2011,6 @@ golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1960,14 +2026,15 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1979,13 +2046,14 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20170424234030-8be79e1e0910/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= @@ -2011,6 +2079,7 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2052,13 +2121,16 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= @@ -2094,8 +2166,14 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.34.0 h1:k40adF3uR+6x/+hO5Dh4ZFUqFp67vxvbpafFiJxl10A= google.golang.org/api v0.34.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0 h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2149,7 +2227,16 @@ 
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210426193834-eac7f76ac494/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= @@ -2179,14 +2266,16 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0-dev.0.20201218190559-666aea1fb34c/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= -google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.0.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -2203,8 +2292,8 @@ google.golang.org/protobuf v1.25.1-0.20201208041424-160c7477e0e8/go.mod h1:hFxJC google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 h1:KR8+MyP7/qOlV+8Af01LtjL04bu7on42eVsxT4EyBQk= -google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= @@ -2274,25 +2363,31 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.18.3 h1:2AJaUQdgUZLoDZHrun21PW2Nx9+ll6cUzvn3IKhSIn0= k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= -k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk= +k8s.io/api v0.20.0 h1:WwrYoZNM1W1aQEbyl8HNG+oWGzLpZQBlcerS9BQw9yI= +k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/client-go v0.18.3 h1:QaJzz92tsN67oorwzmoB0a9r9ZVHuD5ryjbCKP0U22k= +k8s.io/apimachinery v0.20.0 h1:jjzbTJRXk0unNS71L7h3lxGDH/2HPxMPaQY+MjECKL8= +k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= +k8s.io/client-go v0.20.0 h1:Xlax8PKbZsjX4gFvNtt4F5MoJ1V5prDvCuoq9B7iax0= +k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.80.0 h1:lyJt0TWMPaGoODa8B8bUuxgHS3W/m/bNr2cca3brA/g= k8s.io/klog/v2 v2.80.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= 
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200520001619-278ece378a50 h1:ZtTUW5+ZWaoqjR3zOpRa7oFJ5d4aA22l4me/xArfOIc= k8s.io/utils v0.0.0-20200520001619-278ece378a50/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= @@ -2304,8 +2399,9 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index fd7d5c3fb086..b9b8449b3302 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -2069,10 +2069,7 @@ func (s *TransactionAPI) GetTransactionReceiptsByBlockNumber(ctx context.Context txReceipts := make([]map[string]interface{}, 0, len(txs)) for idx, receipt := range receipts { tx := txs[idx] - var signer types.Signer = types.FrontierSigner{} - if tx.Protected() { - signer = types.NewEIP155Signer(tx.ChainId()) - } + signer := types.MakeSigner(s.b.ChainConfig(), block.Number(), block.Time()) from, _ := types.Sender(signer, tx) fields := map[string]interface{}{ @@ -2087,6 +2084,8 @@ func (s *TransactionAPI) GetTransactionReceiptsByBlockNumber(ctx context.Context "contractAddress": nil, "logs": receipt.Logs, "logsBloom": receipt.Bloom, + "type": hexutil.Uint(tx.Type()), + "effectiveGasPrice": (*hexutil.Big)(receipt.EffectiveGasPrice), } // Assign receipt status or post state. @@ -2125,7 +2124,7 @@ func (s *TransactionAPI) GetTransactionDataAndReceipt(ctx context.Context, hash receipt := receipts[index] // Derive the sender. - header, err := s.b.HeaderByHash(ctx, hash) + header, err := s.b.HeaderByHash(ctx, blockHash) if err != nil { return nil, err } @@ -2163,6 +2162,8 @@ func (s *TransactionAPI) GetTransactionDataAndReceipt(ctx context.Context, hash "contractAddress": nil, "logs": receipt.Logs, "logsBloom": receipt.Bloom, + "type": hexutil.Uint(tx.Type()), + "effectiveGasPrice": (*hexutil.Big)(receipt.EffectiveGasPrice), } // Assign receipt status or post state. 
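The internal/ethapi/api.go hunks above replace the hand-rolled Frontier/EIP-155 signer selection in the receipt helper APIs with the fork-aware types.MakeSigner(config, number, time), which also handles typed (EIP-2930/EIP-1559) transactions, fix GetTransactionDataAndReceipt to look the header up by blockHash rather than the transaction hash, and add `type` and `effectiveGasPrice` to the returned receipt fields. A minimal sketch of the new sender derivation follows; the key, block height and timestamp are illustrative values, not part of the patch:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	key, _ := crypto.GenerateKey() // illustrative throwaway key

	// Fork-aware signer for the block the transaction was included in;
	// the number and timestamp here are arbitrary post-London values.
	signer := types.MakeSigner(params.MainnetChainConfig, big.NewInt(18_000_000), 1_690_000_000)

	tx := types.MustSignNewTx(key, signer, &types.DynamicFeeTx{
		ChainID:   params.MainnetChainConfig.ChainID,
		Nonce:     0,
		To:        &common.Address{},
		Value:     big.NewInt(1),
		Gas:       21000,
		GasTipCap: big.NewInt(1_000_000_000),
		GasFeeCap: big.NewInt(2_000_000_000),
	})

	// This is the call the receipt helpers now make for every transaction,
	// regardless of its type.
	from, err := types.Sender(signer, tx)
	fmt.Println(from == crypto.PubkeyToAddress(key.PublicKey), err) // true <nil>
}
```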
diff --git a/internal/flags/categories.go b/internal/flags/categories.go index 610180cdf298..7a6b8d374c16 100644 --- a/internal/flags/categories.go +++ b/internal/flags/categories.go @@ -22,6 +22,7 @@ const ( EthCategory = "ETHEREUM" LightCategory = "LIGHT CLIENT" DevCategory = "DEVELOPER CHAIN" + StateCategory = "STATE HISTORY MANAGEMENT" EthashCategory = "ETHASH" TxPoolCategory = "TRANSACTION POOL (EVM)" BlobPoolCategory = "TRANSACTION POOL (BLOB)" diff --git a/les/client.go b/les/client.go index d0f08beb4c20..d95c0ff1595c 100644 --- a/les/client.go +++ b/les/client.go @@ -98,7 +98,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { if config.OverrideVerkle != nil { overrides.OverrideVerkle = config.OverrideVerkle } - chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, trie.NewDatabase(chainDb), config.Genesis, &overrides) + triedb := trie.NewDatabase(chainDb, trie.HashDefaults) + chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, triedb, config.Genesis, &overrides) if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat { return nil, genesisErr } diff --git a/les/handler_test.go b/les/handler_test.go index 564d3d2b4b3e..3b7e39d996a0 100644 --- a/les/handler_test.go +++ b/les/handler_test.go @@ -406,7 +406,7 @@ func testGetProofs(t *testing.T, protocol int) { accounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}} for i := uint64(0); i <= bc.CurrentBlock().Number.Uint64(); i++ { header := bc.GetHeaderByNumber(i) - trie, _ := trie.New(trie.StateTrieID(header.Root), trie.NewDatabase(server.db)) + trie, _ := trie.New(trie.StateTrieID(header.Root), server.backend.Blockchain().TrieDB()) for _, acc := range accounts { req := ProofReq{ @@ -457,7 +457,7 @@ func testGetStaleProof(t *testing.T, protocol int) { var expected []rlp.RawValue if wantOK { proofsV2 := light.NewNodeSet() - t, _ := trie.New(trie.StateTrieID(header.Root), trie.NewDatabase(server.db)) + t, _ := trie.New(trie.StateTrieID(header.Root), server.backend.Blockchain().TrieDB()) t.Prove(account, proofsV2) expected = proofsV2.NodeList() } @@ -513,7 +513,7 @@ func testGetCHTProofs(t *testing.T, protocol int) { AuxData: [][]byte{rlp}, } root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash()) - trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, string(rawdb.ChtTablePrefix)))) + trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, string(rawdb.ChtTablePrefix)), trie.HashDefaults)) trie.Prove(key, &proofsV2.Proofs) // Assemble the requests for the different protocols requestsV2 := []HelperTrieReq{{ @@ -578,7 +578,7 @@ func testGetBloombitsProofs(t *testing.T, protocol int) { var proofs HelperTrieResps root := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash()) - trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, string(rawdb.BloomTrieTablePrefix)))) + trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(server.db, string(rawdb.BloomTrieTablePrefix)), trie.HashDefaults)) trie.Prove(key, &proofs.Proofs) // Send the proof request and verify the response diff --git a/les/odr_test.go b/les/odr_test.go index 2a122bdbee7a..f2bedfec982b 100644 --- a/les/odr_test.go +++ b/les/odr_test.go @@ -105,7 +105,7 @@ func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainCon for _, addr := range acc { if bc != nil 
{ header := bc.GetHeaderByHash(bhash) - st, err = state.New(header.Root, state.NewDatabase(db), nil) + st, err = state.New(header.Root, bc.StateCache(), nil) } else { header := lc.GetHeaderByHash(bhash) st = light.NewState(ctx, header, lc.Odr()) diff --git a/les/server_handler.go b/les/server_handler.go index ad18d3f4e47a..c31cc69cd56c 100644 --- a/les/server_handler.go +++ b/les/server_handler.go @@ -390,7 +390,8 @@ func (h *serverHandler) GetHelperTrie(typ uint, index uint64) *trie.Trie { if root == (common.Hash{}) { return nil } - trie, _ := trie.New(trie.TrieID(root), trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix))) + triedb := trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix), trie.HashDefaults) + trie, _ := trie.New(trie.TrieID(root), triedb) return trie } diff --git a/les/server_requests.go b/les/server_requests.go index 6641fab117b1..f2a747d2d2f6 100644 --- a/les/server_requests.go +++ b/les/server_requests.go @@ -303,9 +303,8 @@ func handleGetCode(msg Decoder) (serveRequestFn, uint64, uint64, error) { p.bumpInvalid() continue } - triedb := bc.StateCache().TrieDB() address := common.BytesToAddress(request.AccountAddress) - account, err := getAccount(triedb, header.Root, address) + account, err := getAccount(bc.TrieDB(), header.Root, address) if err != nil { p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", address, "err", err) p.bumpInvalid() @@ -424,7 +423,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) { default: // Account key specified, open a storage trie address := common.BytesToAddress(request.AccountAddress) - account, err := getAccount(statedb.TrieDB(), root, address) + account, err := getAccount(bc.TrieDB(), root, address) if err != nil { p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", address, "err", err) p.bumpInvalid() diff --git a/les/test_helper.go b/les/test_helper.go index e5d85024e127..63f1b47b9b8e 100644 --- a/les/test_helper.go +++ b/les/test_helper.go @@ -49,6 +49,7 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -188,7 +189,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index BaseFee: big.NewInt(params.InitialBaseFee), } ) - genesis := gspec.MustCommit(db) + genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) chain, _ := light.NewLightChain(odr, gspec.Config, engine) client := &LightEthereum{ @@ -226,7 +227,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da BaseFee: big.NewInt(params.InitialBaseFee), } ) - genesis := gspec.MustCommit(db) + genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) // create a simulation backend and pre-commit several customized block to the database. 
simulation := backends.NewSimulatedBackendWithDatabase(db, gspec.Alloc, 100000000) diff --git a/light/lightchain_test.go b/light/lightchain_test.go index e3d756f80111..5694ca72c266 100644 --- a/light/lightchain_test.go +++ b/light/lightchain_test.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) // So we can deterministically seed different blockchains @@ -55,7 +56,7 @@ func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) [ func newCanonical(n int) (ethdb.Database, *LightChain, error) { db := rawdb.NewMemoryDatabase() gspec := core.Genesis{Config: params.TestChainConfig} - genesis := gspec.MustCommit(db) + genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) blockchain, _ := NewLightChain(&dummyOdr{db: db, indexerConfig: TestClientIndexerConfig}, gspec.Config, ethash.NewFaker()) // Create and inject the requested chain @@ -75,7 +76,7 @@ func newTestLightChain() *LightChain { Difficulty: big.NewInt(1), Config: params.TestChainConfig, } - gspec.MustCommit(db) + gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) lc, err := NewLightChain(&dummyOdr{db: db}, gspec.Config, ethash.NewFullFaker()) if err != nil { panic(err) diff --git a/light/odr_test.go b/light/odr_test.go index e32d21c5fbb3..1d9ad2b2c86f 100644 --- a/light/odr_test.go +++ b/light/odr_test.go @@ -37,6 +37,7 @@ import ( "github.com/ethereum/go-ethereum/firehose" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" ) var ( @@ -283,7 +284,7 @@ func testChainOdr(t *testing.T, protocol int, fn odrTestFn) { t.Fatal(err) } - gspec.MustCommit(ldb) + gspec.MustCommit(ldb, trie.NewDatabase(ldb, trie.HashDefaults)) odr := &testOdr{sdb: sdb, ldb: ldb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig} lightchain, err := NewLightChain(odr, gspec.Config, ethash.NewFullFaker()) if err != nil { diff --git a/light/postprocess.go b/light/postprocess.go index 567814e2bfd7..13d75f8617ae 100644 --- a/light/postprocess.go +++ b/light/postprocess.go @@ -145,7 +145,7 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64, dis diskdb: db, odr: odr, trieTable: trieTable, - triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down + triedb: trie.NewDatabase(trieTable, trie.HashDefaults), sectionSize: size, disablePruning: disablePruning, } @@ -348,7 +348,7 @@ func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uin diskdb: db, odr: odr, trieTable: trieTable, - triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down + triedb: trie.NewDatabase(trieTable, trie.HashDefaults), parentSize: parentSize, size: size, disablePruning: disablePruning, diff --git a/light/trie.go b/light/trie.go index 306fe392a6e8..66472edf8525 100644 --- a/light/trie.go +++ b/light/trie.go @@ -100,11 +100,6 @@ func (db *odrDatabase) TrieDB() *trie.Database { return nil } -func (db *odrDatabase) CacheAccount(_ common.Hash, _ state.Trie) {} - -func (db *odrDatabase) CacheStorage(_ common.Hash, _ common.Hash, _ state.Trie) {} - -func (db *odrDatabase) Purge() {} func (db *odrDatabase) DiskDB() ethdb.KeyValueStore { panic("not implemented") } @@ -224,7 +219,8 @@ func (t *odrTrie) do(key []byte, fn func() error) error { } else { id = 
trie.StateTrieID(t.id.StateRoot) } - t.trie, err = trie.New(id, trie.NewDatabase(t.db.backend.Database())) + triedb := trie.NewDatabase(t.db.backend.Database(), trie.HashDefaults) + t.trie, err = trie.New(id, triedb) } if err == nil { err = fn() @@ -260,7 +256,8 @@ func newNodeIterator(t *odrTrie, startkey []byte) trie.NodeIterator { } else { id = trie.StateTrieID(t.id.StateRoot) } - t, err := trie.New(id, trie.NewDatabase(t.db.backend.Database())) + triedb := trie.NewDatabase(t.db.backend.Database(), trie.HashDefaults) + t, err := trie.New(id, triedb) if err == nil { it.t.trie = t } diff --git a/light/trie_test.go b/light/trie_test.go index ad7d769c84a4..fe724e9eea56 100644 --- a/light/trie_test.go +++ b/light/trie_test.go @@ -50,7 +50,7 @@ func TestNodeIterator(t *testing.T) { panic(err) } - gspec.MustCommit(lightdb) + gspec.MustCommit(lightdb, trie.NewDatabase(lightdb, trie.HashDefaults)) ctx := context.Background() odr := &testOdr{sdb: fulldb, ldb: lightdb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig} head := blockchain.CurrentHeader() diff --git a/light/txpool_test.go b/light/txpool_test.go index 1181e3889e5f..1eec7bc42779 100644 --- a/light/txpool_test.go +++ b/light/txpool_test.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" ) type testTxRelay struct { @@ -96,7 +97,7 @@ func TestTxPool(t *testing.T) { panic(err) } - gspec.MustCommit(ldb) + gspec.MustCommit(ldb, trie.NewDatabase(ldb, trie.HashDefaults)) odr := &testOdr{sdb: sdb, ldb: ldb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig} relay := &testTxRelay{ send: make(chan int, 1), diff --git a/miner/miner_test.go b/miner/miner_test.go index 21db1ce4a9e6..489bc46a911a 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -288,8 +288,9 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) { } // Create chainConfig chainDB := rawdb.NewMemoryDatabase() + triedb := trie.NewDatabase(chainDB, nil) genesis := minerTestGenesisBlock(15, 11_500_000, common.HexToAddress("12345")) - chainConfig, _, err := core.SetupGenesisBlock(chainDB, trie.NewDatabase(chainDB), genesis) + chainConfig, _, err := core.SetupGenesisBlock(chainDB, triedb, genesis) if err != nil { t.Fatalf("can't create new chain config: %v", err) } @@ -300,7 +301,7 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) { if err != nil { t.Fatalf("can't create new chain %v", err) } - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(chainDB), nil) + statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil) blockchain := &testBlockChain{chainConfig, statedb, 10000000, new(event.Feed)} pool := legacypool.New(testTxPoolConfig, blockchain) diff --git a/params/version.go b/params/version.go index 9ecbc562d178..546de6f73dbf 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 3 // Minor version component of the current release - VersionPatch = 0 // Patch version component of the current release + VersionPatch = 2 // Patch version component of the current release VersionMeta = "fh2.2" // Version metadata to append to the version string FirehoseVersionMajor = 2 diff --git a/tests/block_test.go b/tests/block_test.go index 2405da1cc733..ad522ad653a3 100644 --- a/tests/block_test.go 
+++ b/tests/block_test.go @@ -18,6 +18,8 @@ package tests import ( "testing" + + "github.com/ethereum/go-ethereum/core/rawdb" ) func TestBlockchain(t *testing.T) { @@ -48,11 +50,17 @@ func TestBlockchain(t *testing.T) { bt.skipLoad(`.*randomStatetest94.json.*`) bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { - if err := bt.checkFailure(t, test.Run(false, nil)); err != nil { - t.Errorf("test without snapshotter failed: %v", err) + if err := bt.checkFailure(t, test.Run(false, rawdb.HashScheme, nil)); err != nil { + t.Errorf("test in hash mode without snapshotter failed: %v", err) + } + if err := bt.checkFailure(t, test.Run(true, rawdb.HashScheme, nil)); err != nil { + t.Errorf("test in hash mode with snapshotter failed: %v", err) + } + if err := bt.checkFailure(t, test.Run(false, rawdb.PathScheme, nil)); err != nil { + t.Errorf("test in path mode without snapshotter failed: %v", err) } - if err := bt.checkFailure(t, test.Run(true, nil)); err != nil { - t.Errorf("test with snapshotter failed: %v", err) + if err := bt.checkFailure(t, test.Run(true, rawdb.PathScheme, nil)); err != nil { + t.Errorf("test in path mode with snapshotter failed: %v", err) } }) // There is also a LegacyTests folder, containing blockchain tests generated diff --git a/tests/block_test_util.go b/tests/block_test_util.go index d3e525a387e3..712174884b0b 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -38,6 +38,9 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" ) // A BlockTest checks handling of entire blocks. 
@@ -100,16 +103,30 @@ type btHeaderMarshaling struct { BaseFeePerGas *math.HexOrDecimal256 } -func (t *BlockTest) Run(snapshotter bool, tracer vm.EVMLogger) error { +func (t *BlockTest) Run(snapshotter bool, scheme string, tracer vm.EVMLogger) error { config, ok := Forks[t.json.Network] if !ok { return UnsupportedForkError{t.json.Network} } - // import pre accounts & construct test genesis block & state root - db := rawdb.NewMemoryDatabase() + var ( + db = rawdb.NewMemoryDatabase() + tconf = &trie.Config{} + ) + if scheme == rawdb.PathScheme { + tconf.PathDB = pathdb.Defaults + } else { + tconf.HashDB = hashdb.Defaults + } + // Commit genesis state gspec := t.genesis(config) - gblock := gspec.MustCommit(db) + triedb := trie.NewDatabase(db, tconf) + gblock, err := gspec.Commit(db, triedb) + if err != nil { + return err + } + triedb.Close() // close the db to prevent memory leak + if gblock.Hash() != t.json.Genesis.Hash { return fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6]) } @@ -119,7 +136,7 @@ func (t *BlockTest) Run(snapshotter bool, tracer vm.EVMLogger) error { // Wrap the original engine within the beacon-engine engine := beacon.New(ethash.NewFaker()) - cache := &core.CacheConfig{TrieCleanLimit: 0} + cache := &core.CacheConfig{TrieCleanLimit: 0, StateScheme: scheme} if snapshotter { cache.SnapshotLimit = 1 cache.SnapshotWait = true diff --git a/tests/fuzzers/les/les-fuzzer.go b/tests/fuzzers/les/les-fuzzer.go index c62253a72622..c29bb2ef1228 100644 --- a/tests/fuzzers/les/les-fuzzer.go +++ b/tests/fuzzers/les/les-fuzzer.go @@ -88,8 +88,8 @@ func makechain() (bc *core.BlockChain, addresses []common.Address, txHashes []co } func makeTries() (chtTrie *trie.Trie, bloomTrie *trie.Trie, chtKeys, bloomKeys [][]byte) { - chtTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())) - bloomTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())) + chtTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), trie.HashDefaults)) + bloomTrie = trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), trie.HashDefaults)) for i := 0; i < testChainLen; i++ { // The element in CHT is -> key := make([]byte, 8) diff --git a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go index aa81e5c9d9b1..ba490b761ff9 100644 --- a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go +++ b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go @@ -56,7 +56,7 @@ func (f *fuzzer) readInt() uint64 { } func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) { - trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase())) + trie := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := make(map[string]*kv) size := f.readInt() // Fill it with some fluff diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go index 391bdf300b72..3d6552409578 100644 --- a/tests/fuzzers/stacktrie/trie_fuzzer.go +++ b/tests/fuzzers/stacktrie/trie_fuzzer.go @@ -136,10 +136,10 @@ func (f *fuzzer) fuzz() int { // This spongeDb is used to check the sequence of disk-db-writes var ( spongeA = &spongeDb{sponge: sha3.NewLegacyKeccak256()} - dbA = trie.NewDatabase(rawdb.NewDatabase(spongeA)) + dbA = trie.NewDatabase(rawdb.NewDatabase(spongeA), nil) trieA = trie.NewEmpty(dbA) spongeB = &spongeDb{sponge: sha3.NewLegacyKeccak256()} - dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB)) + dbB = trie.NewDatabase(rawdb.NewDatabase(spongeB), nil) trieB = 
trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) { rawdb.WriteTrieNode(spongeB, owner, path, hash, blob, dbB.Scheme()) }) diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go index fe9bf3d0fd3d..687f5efb1cea 100644 --- a/tests/fuzzers/trie/trie-fuzzer.go +++ b/tests/fuzzers/trie/trie-fuzzer.go @@ -143,7 +143,7 @@ func Fuzz(input []byte) int { func runRandTest(rt randTest) error { var ( - triedb = trie.NewDatabase(rawdb.NewMemoryDatabase()) + triedb = trie.NewDatabase(rawdb.NewMemoryDatabase(), nil) tr = trie.NewEmpty(triedb) origin = types.EmptyRootHash values = make(map[string]string) // tracks content of the trie diff --git a/tests/state_test.go b/tests/state_test.go index 88c519322217..eba023ea175c 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -30,6 +30,8 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/logger" @@ -79,21 +81,52 @@ func TestState(t *testing.T) { subtest := subtest key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index) - t.Run(key+"/trie", func(t *testing.T) { + t.Run(key+"/hash/trie", func(t *testing.T) { withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { - _, _, err := test.Run(subtest, vmconfig, false) - return st.checkFailure(t, err) + var result error + test.Run(subtest, vmconfig, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) { + result = st.checkFailure(t, err) + }) + return result }) }) - t.Run(key+"/snap", func(t *testing.T) { + t.Run(key+"/hash/snap", func(t *testing.T) { withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { - snaps, statedb, err := test.Run(subtest, vmconfig, true) - if snaps != nil && statedb != nil { - if _, err := snaps.Journal(statedb.IntermediateRoot(false)); err != nil { - return err + var result error + test.Run(subtest, vmconfig, true, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) { + if snaps != nil && state != nil { + if _, err := snaps.Journal(state.IntermediateRoot(false)); err != nil { + result = err + return + } } - } - return st.checkFailure(t, err) + result = st.checkFailure(t, err) + }) + return result + }) + }) + t.Run(key+"/path/trie", func(t *testing.T) { + withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { + var result error + test.Run(subtest, vmconfig, false, rawdb.PathScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) { + result = st.checkFailure(t, err) + }) + return result + }) + }) + t.Run(key+"/path/snap", func(t *testing.T) { + withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error { + var result error + test.Run(subtest, vmconfig, true, rawdb.PathScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) { + if snaps != nil && state != nil { + if _, err := snaps.Journal(state.IntermediateRoot(false)); err != nil { + result = err + return + } + } + result = st.checkFailure(t, err) + }) + return result }) }) } @@ -191,7 +224,8 @@ func runBenchmark(b *testing.B, t *StateTest) { vmconfig.ExtraEips = eips block := t.genesis(config).ToBlock() - _, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, false) + triedb, _, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, false, 
rawdb.HashScheme) + defer triedb.Close() var baseFee *big.Int if rules.IsLondon { diff --git a/tests/state_test_util.go b/tests/state_test_util.go index ed902777a102..53d29566ed0f 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -40,6 +40,8 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/triedb/hashdb" + "github.com/ethereum/go-ethereum/trie/triedb/pathdb" "golang.org/x/crypto/sha3" ) @@ -188,43 +190,50 @@ func (t *StateTest) checkError(subtest StateSubtest, err error) error { } // Run executes a specific subtest and verifies the post-state and logs -func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, error) { - snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter) - if checkedErr := t.checkError(subtest, err); checkedErr != nil { - return snaps, statedb, checkedErr +func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string, postCheck func(err error, snaps *snapshot.Tree, state *state.StateDB)) (result error) { + triedb, snaps, statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter, scheme) + + // Invoke the callback at the end of function for further analysis. + defer func() { + postCheck(result, snaps, statedb) + + if triedb != nil { + triedb.Close() + } + }() + checkedErr := t.checkError(subtest, err) + if checkedErr != nil { + return checkedErr } // The error has been checked; if it was unexpected, it's already returned. if err != nil { // Here, an error exists but it was expected. // We do not check the post state or logs. - return snaps, statedb, nil + return nil } post := t.json.Post[subtest.Fork][subtest.Index] // N.B: We need to do this in a two-step process, because the first Commit takes care // of self-destructs, and we need to touch the coinbase _after_ it has potentially self-destructed. 
if root != common.Hash(post.Root) { - return snaps, statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root) + return fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root) } if logs := rlpHash(statedb.Logs()); logs != common.Hash(post.Logs) { - return snaps, statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs) - } - // Re-init the post-state instance for further operation - statedb, err = state.New(root, statedb.Database(), snaps) - if err != nil { - return nil, nil, err + return fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs) } - return snaps, statedb, nil + statedb, _ = state.New(root, statedb.Database(), snaps) + return nil } // RunNoVerify runs a specific subtest and returns the statedb and post-state root -func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*snapshot.Tree, *state.StateDB, common.Hash, error) { +func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB, common.Hash, error) { config, eips, err := GetChainConfig(subtest.Fork) if err != nil { - return nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork} + return nil, nil, nil, common.Hash{}, UnsupportedForkError{subtest.Fork} } vmconfig.ExtraEips = eips + block := t.genesis(config).ToBlock() - snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter) + triedb, snaps, statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter, scheme) var baseFee *big.Int if config.IsLondon(new(big.Int)) { @@ -238,7 +247,8 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh post := t.json.Post[subtest.Fork][subtest.Index] msg, err := t.json.Tx.toMessage(post, baseFee) if err != nil { - return nil, nil, common.Hash{}, err + triedb.Close() + return nil, nil, nil, common.Hash{}, err } // Try to recover tx with current signer @@ -246,11 +256,13 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh var ttx types.Transaction err := ttx.UnmarshalBinary(post.TxBytes) if err != nil { - return nil, nil, common.Hash{}, err + triedb.Close() + return nil, nil, nil, common.Hash{}, err } if _, err := types.Sender(types.LatestSigner(config), &ttx); err != nil { - return nil, nil, common.Hash{}, err + triedb.Close() + return nil, nil, nil, common.Hash{}, err } } @@ -269,6 +281,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh context.Difficulty = big.NewInt(0) } evm := vm.NewEVM(context, txContext, statedb, config, vmconfig, firehose.NoOpContext) + // Execute the message. 
snapshot := statedb.Snapshot() gaspool := new(core.GasPool) @@ -289,15 +302,22 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh root := statedb.IntermediateRoot(config.IsEIP158(block.Number())) statedb.SetExpectedStateRoot(root) root, _, err = statedb.Commit(block.NumberU64(), nil) - return snaps, statedb, root, err + return triedb, snaps, statedb, root, err } func (t *StateTest) gasLimit(subtest StateSubtest) uint64 { return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas] } -func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) { - sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) +func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool, scheme string) (*trie.Database, *snapshot.Tree, *state.StateDB) { + tconf := &trie.Config{Preimages: true} + if scheme == rawdb.HashScheme { + tconf.HashDB = hashdb.Defaults + } else { + tconf.PathDB = pathdb.Defaults + } + triedb := trie.NewDatabase(db, tconf) + sdb := state.NewDatabaseWithNodeDB(db, triedb) statedb, _ := state.New(types.EmptyRootHash, sdb, nil) for addr, a := range accounts { statedb.SetCode(addr, a.Code, firehose.NoOpContext) @@ -323,7 +343,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo snaps, _ = snapshot.New(snapconfig, db, sdb.TrieDB(), root, 128, false) } statedb, _ = state.New(root, sdb, snaps) - return snaps, statedb + return triedb, snaps, statedb } func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis { diff --git a/trie/database.go b/trie/database.go index ae1f7871a4f0..8581b8bcaacc 100644 --- a/trie/database.go +++ b/trie/database.go @@ -18,9 +18,12 @@ package trie import ( "errors" + "strings" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie/triedb/hashdb" "github.com/ethereum/go-ethereum/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/trie/trienode" @@ -30,14 +33,22 @@ import ( // Config defines all necessary options for database. type Config struct { NoTries bool - Cache int // Memory allowance (MB) to use for caching trie nodes in memory - Preimages bool // Flag whether the preimage of trie key is recorded - PathDB *pathdb.Config // Configs for experimental path-based scheme, not used yet. + Preimages bool // Flag whether the preimage of node key is recorded + Cache int + HashDB *hashdb.Config // Configs for hash-based scheme + PathDB *pathdb.Config // Configs for experimental path-based scheme // Testing hooks OnCommit func(states *triestate.Set) // Hook invoked when commit is performed } +// HashDefaults represents a config for using hash-based scheme with +// default settings. +var HashDefaults = &Config{ + Preimages: false, + HashDB: hashdb.Defaults, +} + // backend defines the methods needed to access/update trie nodes in different // state scheme. type backend interface { @@ -50,7 +61,7 @@ type backend interface { // Size returns the current storage size of the memory cache in front of the // persistent database layer. 
- Size() common.StorageSize + Size() (common.StorageSize, common.StorageSize, common.StorageSize) // Update performs a state transition by committing dirty nodes contained // in the given set in order to update state from the specified parent to @@ -92,22 +103,62 @@ func prepare(diskdb ethdb.Database, config *Config) *Database { } } -// NewDatabase initializes the trie database with default settings, namely +// NewDatabase initializes the trie database with default settings, note // the legacy hash-based scheme is used by default. -func NewDatabase(diskdb ethdb.Database) *Database { - return NewDatabaseWithConfig(diskdb, nil) -} - -// NewDatabaseWithConfig initializes the trie database with provided configs. -// The path-based scheme is not activated yet, always initialized with legacy -// hash-based scheme by default. -func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database { - var cleans int - if config != nil && config.Cache != 0 { - cleans = config.Cache * 1024 * 1024 +func NewDatabase(diskdb ethdb.Database, config *Config) *Database { + // Sanitize the config and use the default one if it's not specified. + dbScheme := rawdb.ReadStateScheme(diskdb) + if config == nil { + if dbScheme == rawdb.PathScheme { + config = &Config{ + PathDB: pathdb.Defaults, + } + } else { + config = HashDefaults + } + } + if config.PathDB == nil && config.HashDB == nil { + if dbScheme == rawdb.PathScheme { + config.PathDB = pathdb.Defaults + } else { + config.HashDB = hashdb.Defaults + } + } + var preimages *preimageStore + if config.Preimages { + preimages = newPreimageStore(diskdb) + } + db := &Database{ + config: config, + diskdb: diskdb, + preimages: preimages, + } + /* + * 1. First, initialize db according to the user config + * 2. Second, initialize the db according to the scheme already used by db + * 3. Last, use the default scheme, namely hash scheme + */ + if config.HashDB != nil { + if rawdb.ReadStateScheme(diskdb) == rawdb.PathScheme { + log.Warn("incompatible state scheme", "old", rawdb.PathScheme, "new", rawdb.HashScheme) + } + db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{}) + } else if config.PathDB != nil { + if rawdb.ReadStateScheme(diskdb) == rawdb.HashScheme { + log.Warn("incompatible state scheme", "old", rawdb.HashScheme, "new", rawdb.PathScheme) + } + db.backend = pathdb.New(diskdb, config.PathDB) + } else if strings.Compare(dbScheme, rawdb.PathScheme) == 0 { + if config.PathDB == nil { + config.PathDB = pathdb.Defaults + } + db.backend = pathdb.New(diskdb, config.PathDB) + } else { + if config.HashDB == nil { + config.HashDB = hashdb.Defaults + } + db.backend = hashdb.New(diskdb, config.HashDB, mptResolver{}) } - db := prepare(diskdb, config) - db.backend = hashdb.New(diskdb, cleans, mptResolver{}) return db } @@ -156,16 +207,16 @@ func (db *Database) Commit(root common.Hash, report bool) error { // Size returns the storage size of dirty trie nodes in front of the persistent // database and the size of cached preimages. 
-func (db *Database) Size() (common.StorageSize, common.StorageSize) { +func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize, common.StorageSize) { var ( - storages common.StorageSize - preimages common.StorageSize + diffs, nodes, immutablenodes common.StorageSize + preimages common.StorageSize ) - storages = db.backend.Size() + diffs, nodes, immutablenodes = db.backend.Size() if db.preimages != nil { preimages = db.preimages.size() } - return storages, preimages + return diffs, nodes, immutablenodes, preimages } // Initialized returns an indicator if the state data is already initialized @@ -245,3 +296,71 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) { } return hdb.Node(hash) } + +// Recover rollbacks the database to a specified historical point. The state is +// supported as the rollback destination only if it's canonical state and the +// corresponding trie histories are existent. It's only supported by path-based +// database and will return an error for others. +func (db *Database) Recover(target common.Hash) error { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + return errors.New("not supported") + } + return pdb.Recover(target, &trieLoader{db: db}) +} + +// Recoverable returns the indicator if the specified state is enabled to be +// recovered. It's only supported by path-based database and will return an +// error for others. +func (db *Database) Recoverable(root common.Hash) (bool, error) { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + return false, errors.New("not supported") + } + return pdb.Recoverable(root), nil +} + +// Reset wipes all available journal from the persistent database and discard +// all caches and diff layers. Using the given root to create a new disk layer. +// It's only supported by path-based database and will return an error for others. +func (db *Database) Reset(root common.Hash) error { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + return errors.New("not supported") + } + return pdb.Reset(root) +} + +// Journal commits an entire diff hierarchy to disk into a single journal entry. +// This is meant to be used during shutdown to persist the snapshot without +// flattening everything down (bad for reorgs). It's only supported by path-based +// database and will return an error for others. +func (db *Database) Journal(root common.Hash) error { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + return errors.New("not supported") + } + return pdb.Journal(root) +} + +// SetBufferSize sets the node buffer size to the provided value(in bytes). +// It's only supported by path-based database and will return an error for +// others. +func (db *Database) SetBufferSize(size int) error { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + return errors.New("not supported") + } + return pdb.SetBufferSize(size) +} + +// Head return the top non-fork difflayer/disklayer root hash for rewinding. +// It's only supported by path-based database and will return an error for +// others. 
+func (db *Database) Head() common.Hash { + pdb, ok := db.backend.(*pathdb.Database) + if !ok { + return common.Hash{} + } + return pdb.Head() +} diff --git a/trie/database_test.go b/trie/database_test.go index ed43a81e5976..d675a2191b74 100644 --- a/trie/database_test.go +++ b/trie/database_test.go @@ -27,7 +27,7 @@ import ( func newTestDatabase(diskdb ethdb.Database, scheme string) *Database { db := prepare(diskdb, nil) if scheme == rawdb.HashScheme { - db.backend = hashdb.New(diskdb, 0, mptResolver{}) + db.backend = hashdb.New(diskdb, &hashdb.Config{}, mptResolver{}) } else { db.backend = pathdb.New(diskdb, &pathdb.Config{}) // disable clean/dirty cache } diff --git a/trie/hbss2pbss.go b/trie/hbss2pbss.go new file mode 100644 index 000000000000..1faaf3478364 --- /dev/null +++ b/trie/hbss2pbss.go @@ -0,0 +1,243 @@ +package trie + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "sync" + "sync/atomic" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie/trienode" +) + +type Hbss2Pbss struct { + trie *Trie // traverse trie + db *Database + blocknum uint64 + root node // root of triedb + stateRootHash common.Hash + concurrentQueue chan struct{} + totalNum uint64 + wg sync.WaitGroup +} + +const ( + DEFAULT_TRIEDBCACHE_SIZE = 1024 * 1024 * 1024 +) + +// NewHbss2Pbss return a hash2Path obj +func NewHbss2Pbss(tr *Trie, db *Database, stateRootHash common.Hash, blocknum uint64, jobnum uint64) (*Hbss2Pbss, error) { + if tr == nil { + return nil, errors.New("trie is nil") + } + + if tr.root == nil { + return nil, errors.New("trie root is nil") + } + + ins := &Hbss2Pbss{ + trie: tr, + blocknum: blocknum, + db: db, + stateRootHash: stateRootHash, + root: tr.root, + concurrentQueue: make(chan struct{}, jobnum), + wg: sync.WaitGroup{}, + } + + return ins, nil +} + +func (t *Trie) resloveWithoutTrack(n node, prefix []byte) (node, error) { + if n, ok := n.(hashNode); ok { + blob, err := t.reader.node(prefix, common.BytesToHash(n)) + if err != nil { + return nil, err + } + return mustDecodeNode(n, blob), nil + } + return n, nil +} + +func (h2p *Hbss2Pbss) writeNode(pathKey []byte, n *trienode.Node, owner common.Hash) { + if owner == (common.Hash{}) { + rawdb.WriteAccountTrieNode(h2p.db.diskdb, pathKey, n.Blob) + log.Debug("WriteNodes account node, ", "path: ", common.Bytes2Hex(pathKey), "Hash: ", n.Hash, "BlobHash: ", crypto.Keccak256Hash(n.Blob)) + } else { + rawdb.WriteStorageTrieNode(h2p.db.diskdb, owner, pathKey, n.Blob) + log.Debug("WriteNodes storage node, ", "path: ", common.Bytes2Hex(pathKey), "owner: ", owner.String(), "Hash: ", n.Hash, "BlobHash: ", crypto.Keccak256Hash(n.Blob)) + } +} + +// Run statistics, external call +func (h2p *Hbss2Pbss) Run() { + log.Debug("Find Account Trie Tree, rootHash: ", h2p.trie.Hash().String(), "BlockNum: ", h2p.blocknum) + + h2p.ConcurrentTraversal(h2p.trie, h2p.root, []byte{}) + h2p.wg.Wait() + + log.Info("Total", "complete", h2p.totalNum, "go routines Num", runtime.NumGoroutine, "h2p concurrentQueue", len(h2p.concurrentQueue)) + + rawdb.WritePersistentStateID(h2p.db.diskdb, h2p.blocknum) + rawdb.WriteStateID(h2p.db.diskdb, h2p.stateRootHash, h2p.blocknum) +} + +func (h2p *Hbss2Pbss) SubConcurrentTraversal(theTrie *Trie, theNode node, path []byte) { + h2p.concurrentQueue <- struct{}{} + 
h2p.ConcurrentTraversal(theTrie, theNode, path) + <-h2p.concurrentQueue + h2p.wg.Done() +} + +func (h2p *Hbss2Pbss) ConcurrentTraversal(theTrie *Trie, theNode node, path []byte) { + total_num := uint64(0) + // nil node + if theNode == nil { + return + } + + switch current := (theNode).(type) { + case *shortNode: + collapsed := current.copy() + collapsed.Key = hexToCompact(current.Key) + var hash, _ = current.cache() + h2p.writeNode(path, trienode.New(common.BytesToHash(hash), nodeToBytes(collapsed)), theTrie.owner) + + h2p.ConcurrentTraversal(theTrie, current.Val, append(path, current.Key...)) + + case *fullNode: + // copy from trie/Committer (*committer).commit + collapsed := current.copy() + var hash, _ = collapsed.cache() + collapsed.Children = h2p.commitChildren(path, current) + + nodebytes := nodeToBytes(collapsed) + if common.BytesToHash(hash) != common.BytesToHash(crypto.Keccak256(nodebytes)) { + log.Error("Hash is inconsistent, hash: ", common.BytesToHash(hash), "node hash: ", common.BytesToHash(crypto.Keccak256(nodebytes)), "node: ", collapsed.fstring("")) + panic("hash inconsistent.") + } + + h2p.writeNode(path, trienode.New(common.BytesToHash(hash), nodeToBytes(collapsed)), theTrie.owner) + + for idx, child := range current.Children { + if child == nil { + continue + } + childPath := append(path, byte(idx)) + if len(h2p.concurrentQueue)*2 < cap(h2p.concurrentQueue) { + h2p.wg.Add(1) + dst := make([]byte, len(childPath)) + copy(dst, childPath) + go h2p.SubConcurrentTraversal(theTrie, child, dst) + } else { + h2p.ConcurrentTraversal(theTrie, child, childPath) + } + } + case hashNode: + n, err := theTrie.resloveWithoutTrack(current, path) + if err != nil { + log.Error("Resolve HashNode", "error", err, "TrieRoot", theTrie.Hash(), "Path", path) + return + } + h2p.ConcurrentTraversal(theTrie, n, path) + total_num = atomic.AddUint64(&h2p.totalNum, 1) + if total_num%100000 == 0 { + log.Info("Converting ", "Complete progress", total_num, "go routines Num", runtime.NumGoroutine(), "h2p concurrentQueue", len(h2p.concurrentQueue)) + } + return + case valueNode: + if !hasTerm(path) { + log.Info("ValueNode miss path term", "path", common.Bytes2Hex(path)) + break + } + var account types.StateAccount + if err := rlp.Decode(bytes.NewReader(current), &account); err != nil { + // log.Info("Rlp decode account failed.", "err", err) + break + } + if account.Root == (common.Hash{}) || account.Root == types.EmptyRootHash { + // log.Info("Not a storage trie.", "account", common.BytesToHash(path).String()) + break + } + + ownerAddress := common.BytesToHash(hexToCompact(path)) + tr, err := New(StorageTrieID(h2p.stateRootHash, ownerAddress, account.Root), h2p.db) + if err != nil { + log.Error("New Storage trie error", "err", err, "root", account.Root.String(), "owner", ownerAddress.String()) + break + } + log.Debug("Find Contract Trie Tree", "rootHash: ", tr.Hash().String(), "") + h2p.wg.Add(1) + go h2p.SubConcurrentTraversal(tr, tr.root, []byte{}) + default: + panic(errors.New("Invalid node type to traverse.")) + } +} + +// copy from trie/Commiter (*committer).commit +func (h2p *Hbss2Pbss) commitChildren(path []byte, n *fullNode) [17]node { + var children [17]node + for i := 0; i < 16; i++ { + child := n.Children[i] + if child == nil { + continue + } + // If it's the hashed child, save the hash value directly. + // Note: it's impossible that the child in range [0, 15] + // is a valueNode. 
+ if hn, ok := child.(hashNode); ok { + children[i] = hn + continue + } + + children[i] = h2p.commit(append(path, byte(i)), child) + } + // For the 17th child, it's possible the type is valuenode. + if n.Children[16] != nil { + children[16] = n.Children[16] + } + return children +} + +// commit collapses a node down into a hash node and returns it. +func (h2p *Hbss2Pbss) commit(path []byte, n node) node { + // if this path is clean, use available cached data + hash, dirty := n.cache() + if hash != nil && !dirty { + return hash + } + // Commit children, then parent, and remove the dirty flag. + switch cn := n.(type) { + case *shortNode: + // Commit child + collapsed := cn.copy() + + // If the child is fullNode, recursively commit, + // otherwise it can only be hashNode or valueNode. + if _, ok := cn.Val.(*fullNode); ok { + collapsed.Val = h2p.commit(append(path, cn.Key...), cn.Val) + } + // The key needs to be copied, since we're adding it to the + // modified nodeset. + collapsed.Key = hexToCompact(cn.Key) + return collapsed + case *fullNode: + hashedKids := h2p.commitChildren(path, cn) + collapsed := cn.copy() + collapsed.Children = hashedKids + + return collapsed + case hashNode: + return cn + default: + // nil, valuenode shouldn't be committed + panic(fmt.Sprintf("%T: invalid node: %v", n, n)) + } +} diff --git a/trie/iterator_test.go b/trie/iterator_test.go index e711ffb81583..57d1f06a160a 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -18,7 +18,6 @@ package trie import ( "bytes" - "encoding/binary" "fmt" "math/rand" "testing" @@ -27,13 +26,11 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/trie/trienode" ) func TestEmptyIterator(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) iter := trie.MustNodeIterator(nil) seen := make(map[string]struct{}) @@ -46,7 +43,7 @@ func TestEmptyIterator(t *testing.T) { } func TestIterator(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) + db := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(db) vals := []struct{ k, v string }{ {"do", "verb"}, @@ -89,7 +86,7 @@ func (k *kv) cmp(other *kv) int { } func TestIteratorLargeData(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := make(map[string]*kv) for i := byte(0); i < 255; i++ { @@ -208,7 +205,7 @@ var testdata2 = []kvs{ } func TestIteratorSeek(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for _, val := range testdata1 { trie.MustUpdate([]byte(val.k), []byte(val.v)) } @@ -249,7 +246,7 @@ func checkIteratorOrder(want []kvs, it *Iterator) error { } func TestDifferenceIterator(t *testing.T) { - dba := NewDatabase(rawdb.NewMemoryDatabase()) + dba := NewDatabase(rawdb.NewMemoryDatabase(), nil) triea := NewEmpty(dba) for _, val := range testdata1 { triea.MustUpdate([]byte(val.k), []byte(val.v)) @@ -258,7 +255,7 @@ func TestDifferenceIterator(t *testing.T) { dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) triea, _ = New(TrieID(rootA), dba) - dbb := NewDatabase(rawdb.NewMemoryDatabase()) + dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil) trieb 
:= NewEmpty(dbb) for _, val := range testdata2 { trieb.MustUpdate([]byte(val.k), []byte(val.v)) @@ -291,7 +288,7 @@ func TestDifferenceIterator(t *testing.T) { } func TestUnionIterator(t *testing.T) { - dba := NewDatabase(rawdb.NewMemoryDatabase()) + dba := NewDatabase(rawdb.NewMemoryDatabase(), nil) triea := NewEmpty(dba) for _, val := range testdata1 { triea.MustUpdate([]byte(val.k), []byte(val.v)) @@ -300,7 +297,7 @@ func TestUnionIterator(t *testing.T) { dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) triea, _ = New(TrieID(rootA), dba) - dbb := NewDatabase(rawdb.NewMemoryDatabase()) + dbb := NewDatabase(rawdb.NewMemoryDatabase(), nil) trieb := NewEmpty(dbb) for _, val := range testdata2 { trieb.MustUpdate([]byte(val.k), []byte(val.v)) @@ -344,7 +341,7 @@ func TestUnionIterator(t *testing.T) { } func TestIteratorNoDups(t *testing.T) { - tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + tr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for _, val := range testdata1 { tr.MustUpdate([]byte(val.k), []byte(val.v)) } @@ -537,96 +534,6 @@ func TestIteratorNodeBlob(t *testing.T) { testIteratorNodeBlob(t, rawdb.PathScheme) } -type loggingDb struct { - getCount uint64 - backend ethdb.KeyValueStore -} - -func (l *loggingDb) Has(key []byte) (bool, error) { - return l.backend.Has(key) -} - -func (l *loggingDb) Get(key []byte) ([]byte, error) { - l.getCount++ - return l.backend.Get(key) -} - -func (l *loggingDb) Put(key []byte, value []byte) error { - return l.backend.Put(key, value) -} - -func (l *loggingDb) Delete(key []byte) error { - return l.backend.Delete(key) -} - -func (l *loggingDb) NewBatch() ethdb.Batch { - return l.backend.NewBatch() -} - -func (l *loggingDb) NewBatchWithSize(size int) ethdb.Batch { - return l.backend.NewBatchWithSize(size) -} - -func (l *loggingDb) NewIterator(prefix []byte, start []byte) ethdb.Iterator { - return l.backend.NewIterator(prefix, start) -} - -func (l *loggingDb) NewSnapshot() (ethdb.Snapshot, error) { - return l.backend.NewSnapshot() -} - -func (l *loggingDb) Stat(property string) (string, error) { - return l.backend.Stat(property) -} - -func (l *loggingDb) Compact(start []byte, limit []byte) error { - return l.backend.Compact(start, limit) -} - -func (l *loggingDb) Close() error { - return l.backend.Close() -} - -// makeLargeTestTrie create a sample test trie -func makeLargeTestTrie() (*Database, *StateTrie, *loggingDb) { - // Create an empty trie - logDb := &loggingDb{0, memorydb.New()} - triedb := NewDatabase(rawdb.NewDatabase(logDb)) - trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb) - - // Fill it with some arbitrary data - for i := 0; i < 10000; i++ { - key := make([]byte, 32) - val := make([]byte, 32) - binary.BigEndian.PutUint64(key, uint64(i)) - binary.BigEndian.PutUint64(val, uint64(i)) - key = crypto.Keccak256(key) - val = crypto.Keccak256(val) - trie.MustUpdate(key, val) - } - root, nodes, _ := trie.Commit(false) - triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) - triedb.Commit(root, false) - - // Return the generated trie - trie, _ = NewStateTrie(TrieID(root), triedb) - return triedb, trie, logDb -} - -// Tests that the node iterator indeed walks over the entire database contents. 
-func TestNodeIteratorLargeTrie(t *testing.T) { - // Create some arbitrary test trie to iterate - db, trie, logDb := makeLargeTestTrie() - db.Cap(0) // flush everything - // Do a seek operation - trie.NodeIterator(common.FromHex("0x77667766776677766778855885885885")) - // master: 24 get operations - // this pr: 6 get operations - if have, want := logDb.getCount, uint64(6); have != want { - t.Fatalf("Too many lookups during seek, have %d want %d", have, want) - } -} - func testIteratorNodeBlob(t *testing.T, scheme string) { var ( db = rawdb.NewMemoryDatabase() @@ -700,7 +607,7 @@ func isTrieNode(scheme string, key, val []byte) (bool, []byte, common.Hash) { } hash = common.BytesToHash(key) } else { - ok, remain := rawdb.IsAccountTrieNode(key) + ok, remain := rawdb.ResolveAccountTrieNodeKey(key) if !ok { return false, nil, common.Hash{} } diff --git a/trie/node.go b/trie/node.go index 15bbf62f1c93..d78ed5c5691f 100644 --- a/trie/node.go +++ b/trie/node.go @@ -112,6 +112,11 @@ func (n rawNode) EncodeRLP(w io.Writer) error { return err } +func NodeString(hash, buf []byte) string { + node := mustDecodeNode(hash, buf) + return node.fstring("NodeString: ") +} + // mustDecodeNode is a wrapper of decodeNode and panic if any error is encountered. func mustDecodeNode(hash, buf []byte) node { n, err := decodeNode(hash, buf) diff --git a/trie/proof_test.go b/trie/proof_test.go index e8ea116c8803..fc2de626496e 100644 --- a/trie/proof_test.go +++ b/trie/proof_test.go @@ -94,7 +94,7 @@ func TestProof(t *testing.T) { } func TestOneElementProof(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) updateString(trie, "k", "v") for i, prover := range makeProvers(trie) { proof := prover([]byte("k")) @@ -145,7 +145,7 @@ func TestBadProof(t *testing.T) { // Tests that missing keys can also be proven. The test explicitly uses a single // entry trie and checks for missing keys both before and after the single entry. func TestMissingKeyProof(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) updateString(trie, "k", "v") for i, key := range []string{"a", "j", "l", "z"} { @@ -395,7 +395,7 @@ func TestOneElementRangeProof(t *testing.T) { } // Test the mini trie with only a single element. - tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + tinyTrie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) entry := &kv{randBytes(32), randBytes(20), false} tinyTrie.MustUpdate(entry.k, entry.v) @@ -467,7 +467,7 @@ func TestAllElementsProof(t *testing.T) { // TestSingleSideRangeProof tests the range starts from zero. func TestSingleSideRangeProof(t *testing.T) { for i := 0; i < 64; i++ { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) var entries []*kv for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} @@ -502,7 +502,7 @@ func TestSingleSideRangeProof(t *testing.T) { // TestReverseSingleSideRangeProof tests the range ends with 0xffff...fff. 
func TestReverseSingleSideRangeProof(t *testing.T) { for i := 0; i < 64; i++ { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) var entries []*kv for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} @@ -609,7 +609,7 @@ func TestBadRangeProof(t *testing.T) { // TestGappedRangeProof focuses on the small trie with embedded nodes. // If the gapped node is embedded in the trie, it should be detected too. func TestGappedRangeProof(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) var entries []*kv // Sorted entries for i := byte(0); i < 10; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} @@ -683,7 +683,7 @@ func TestSameSideProofs(t *testing.T) { } func TestHasRightElement(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) var entries []*kv for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} @@ -1036,7 +1036,7 @@ func benchmarkVerifyRangeNoProof(b *testing.B, size int) { } func randomTrie(n int) (*Trie, map[string]*kv) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := make(map[string]*kv) for i := byte(0); i < 100; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} @@ -1055,7 +1055,7 @@ func randomTrie(n int) (*Trie, map[string]*kv) { } func nonRandomTrie(n int) (*Trie, map[string]*kv) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := make(map[string]*kv) max := uint64(0xffffffffffffffff) for i := uint64(0); i < uint64(n); i++ { @@ -1080,7 +1080,7 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) { common.Hex2Bytes("02"), common.Hex2Bytes("03"), } - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for i, key := range keys { trie.MustUpdate(key, vals[i]) } diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go index 89bc9547d350..af67025006bf 100644 --- a/trie/secure_trie_test.go +++ b/trie/secure_trie_test.go @@ -32,14 +32,14 @@ import ( ) func newEmptySecure() *StateTrie { - trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), NewDatabase(rawdb.NewMemoryDatabase())) + trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), NewDatabase(rawdb.NewMemoryDatabase(), nil)) return trie } // makeTestStateTrie creates a large enough secure trie for testing. 
func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { // Create an empty trie - triedb := NewDatabase(rawdb.NewMemoryDatabase()) + triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie, _ := NewStateTrie(TrieID(types.EmptyRootHash), triedb) // Fill it with some arbitrary data diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go index ea3eef78820d..822dd31a9333 100644 --- a/trie/stacktrie_test.go +++ b/trie/stacktrie_test.go @@ -188,7 +188,7 @@ func TestStackTrieInsertAndHash(t *testing.T) { func TestSizeBug(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -203,7 +203,7 @@ func TestSizeBug(t *testing.T) { func TestEmptyBug(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -229,7 +229,7 @@ func TestEmptyBug(t *testing.T) { func TestValLength56(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) //leaf := common.FromHex("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563") //value := common.FromHex("94cf40d0d2b44f2b66e07cace1372ca42b73cf21a3") @@ -254,7 +254,7 @@ func TestValLength56(t *testing.T) { // which causes a lot of node-within-node. This case was found via fuzzing. func TestUpdateSmallNodes(t *testing.T) { st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) kvs := []struct { K string V string @@ -282,7 +282,7 @@ func TestUpdateSmallNodes(t *testing.T) { func TestUpdateVariableKeys(t *testing.T) { t.SkipNow() st := NewStackTrie(nil) - nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) kvs := []struct { K string V string @@ -351,7 +351,7 @@ func TestStacktrieNotModifyValues(t *testing.T) { func TestStacktrieSerialization(t *testing.T) { var ( st = NewStackTrie(nil) - nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + nt = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) keyB = big.NewInt(1) keyDelta = big.NewInt(1) vals [][]byte diff --git a/trie/sync.go b/trie/sync.go index 4f5584599179..9da07060759a 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" ) // ErrNotRequested is returned by the trie sync when it's requested to process a @@ -42,6 +43,16 @@ var ErrAlreadyProcessed = errors.New("already processed") // memory if the node was configured with a significant number of peers. const maxFetchesPerDepth = 16384 +var ( + // deletionGauge is the metric to track how many trie node deletions + // are performed in total during the sync process. + deletionGauge = metrics.NewRegisteredGauge("trie/sync/delete", nil) + + // lookupGauge is the metric to track how many trie node lookups are + // performed to determine if node needs to be deleted. 
+ lookupGauge = metrics.NewRegisteredGauge("trie/sync/lookup", nil) +) + // SyncPath is a path tuple identifying a particular trie node either in a single // trie (account) or a layered trie (account -> storage). // @@ -93,9 +104,10 @@ type LeafCallback func(keys [][]byte, path []byte, leaf []byte, parent common.Ha // nodeRequest represents a scheduled or already in-flight trie node retrieval request. type nodeRequest struct { - hash common.Hash // Hash of the trie node to retrieve - path []byte // Merkle path leading to this node for prioritization - data []byte // Data content of the node, cached until all subtrees complete + hash common.Hash // Hash of the trie node to retrieve + path []byte // Merkle path leading to this node for prioritization + data []byte // Data content of the node, cached until all subtrees complete + deletes [][]byte // List of internal path segments for trie nodes to delete parent *nodeRequest // Parent state node referencing this entry deps int // Number of dependencies before allowed to commit this node @@ -125,18 +137,20 @@ type CodeSyncResult struct { // syncMemBatch is an in-memory buffer of successfully downloaded but not yet // persisted data items. type syncMemBatch struct { - nodes map[string][]byte // In-memory membatch of recently completed nodes - hashes map[string]common.Hash // Hashes of recently completed nodes - codes map[common.Hash][]byte // In-memory membatch of recently completed codes - size uint64 // Estimated batch-size of in-memory data. + nodes map[string][]byte // In-memory membatch of recently completed nodes + hashes map[string]common.Hash // Hashes of recently completed nodes + deletes map[string]struct{} // List of paths for trie node to delete + codes map[common.Hash][]byte // In-memory membatch of recently completed codes + size uint64 // Estimated batch-size of in-memory data. } // newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes. func newSyncMemBatch() *syncMemBatch { return &syncMemBatch{ - nodes: make(map[string][]byte), - hashes: make(map[string]common.Hash), - codes: make(map[common.Hash][]byte), + nodes: make(map[string][]byte), + hashes: make(map[string]common.Hash), + deletes: make(map[string]struct{}), + codes: make(map[common.Hash][]byte), } } @@ -347,16 +361,23 @@ func (s *Sync) ProcessNode(result NodeSyncResult) error { // Commit flushes the data stored in the internal membatch out to persistent // storage, returning any occurred error. func (s *Sync) Commit(dbw ethdb.Batch) error { - // Dump the membatch into a database dbw + // Flush the pending node writes into database batch. for path, value := range s.membatch.nodes { owner, inner := ResolvePath([]byte(path)) rawdb.WriteTrieNode(dbw, owner, inner, s.membatch.hashes[path], value, s.scheme) } + // Flush the pending node deletes into the database batch. + // Please note that each written and deleted node has a + // unique path, ensuring no duplication occurs. + for path := range s.membatch.deletes { + owner, inner := ResolvePath([]byte(path)) + rawdb.DeleteTrieNode(dbw, owner, inner, common.Hash{} /* unused */, s.scheme) + } + // Flush the pending code writes into database batch. 
for hash, value := range s.membatch.codes { rawdb.WriteCode(dbw, hash, value) } - // Drop the membatch data and return - s.membatch = newSyncMemBatch() + s.membatch = newSyncMemBatch() // reset the batch return nil } @@ -425,6 +446,39 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) { node: node.Val, path: append(append([]byte(nil), req.path...), key...), }} + // Mark all internal nodes between shortNode and its **in disk** + // child as invalid. This is essential in the case of path mode + // scheme; otherwise, state healing might overwrite existing child + // nodes silently while leaving a dangling parent node within the + // range of this internal path on disk. This would break the + // guarantee for state healing. + // + // While it's possible for this shortNode to overwrite a previously + // existing full node, the other branches of the fullNode can be + // retained as they remain untouched and complete. + // + // This step is only necessary for path mode, as there is no deletion + // in hash mode at all. + if _, ok := node.Val.(hashNode); ok && s.scheme == rawdb.PathScheme { + owner, inner := ResolvePath(req.path) + for i := 1; i < len(key); i++ { + // While checking for a non-existent item in Pebble can be less efficient + // without a bloom filter, the relatively low frequency of lookups makes + // the performance impact negligible. + var exists bool + if owner == (common.Hash{}) { + exists = rawdb.ExistsAccountTrieNode(s.database, append(inner, key[:i]...)) + } else { + exists = rawdb.ExistsStorageTrieNode(s.database, owner, append(inner, key[:i]...)) + } + if exists { + req.deletes = append(req.deletes, key[:i]) + deletionGauge.Inc(1) + log.Debug("Detected dangling node", "owner", owner, "path", append(inner, key[:i]...)) + } + } + lookupGauge.Inc(int64(len(key) - 1)) + } case *fullNode: for i := 0; i < 17; i++ { if node.Children[i] != nil { @@ -509,10 +563,19 @@ func (s *Sync) commitNodeRequest(req *nodeRequest) error { // Write the node content to the membatch s.membatch.nodes[string(req.path)] = req.data s.membatch.hashes[string(req.path)] = req.hash + // The size tracking refers to the db-batch, not the in-memory data. - // Therefore, we ignore the req.path, and account only for the hash+data - // which eventually is written to db. - s.membatch.size += common.HashLength + uint64(len(req.data)) + if s.scheme == rawdb.PathScheme { + s.membatch.size += uint64(len(req.path) + len(req.data)) + } else { + s.membatch.size += common.HashLength + uint64(len(req.data)) + } + // Delete the internal nodes which are marked as invalid + for _, segment := range req.deletes { + path := append(req.path, segment...) + s.membatch.deletes[string(path)] = struct{}{} + s.membatch.size += uint64(len(path)) + } delete(s.nodeReqs, string(req.path)) s.fetches[len(req.path)]-- diff --git a/trie/sync_test.go b/trie/sync_test.go index b6fe8d84a6df..3b7986ef6792 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -70,31 +70,53 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str // checkTrieContents cross references a reconstructed trie with an expected data // content map. 
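// Illustrative sketch only (not part of this patch): the healing step added
// above walks every strictly-internal path between a shortNode and its
// on-disk child and schedules any node still present there for deletion,
// which only matters in path mode. The helper below mirrors that loop in
// isolation; danglingPrefixes and the exists callback are hypothetical
// stand-ins for rawdb.ExistsAccountTrieNode / rawdb.ExistsStorageTrieNode.
package main

import "fmt"

// danglingPrefixes returns the sub-paths key[:1] .. key[:len(key)-1] (relative
// to parentPath) for which a trie node is still stored on disk and therefore
// has to be deleted before the new shortNode can be written in safely.
func danglingPrefixes(parentPath, key []byte, exists func(path []byte) bool) [][]byte {
	var stale [][]byte
	for i := 1; i < len(key); i++ {
		full := append(append([]byte{}, parentPath...), key[:i]...)
		if exists(full) {
			stale = append(stale, key[:i])
		}
	}
	return stale
}

func main() {
	// Pretend a node is still stored at path 0x01 0x02 under the parent.
	onDisk := map[string]bool{string([]byte{0x01, 0x02}): true}
	stale := danglingPrefixes([]byte{0x01}, []byte{0x02, 0x03, 0x04}, func(p []byte) bool {
		return onDisk[string(p)]
	})
	fmt.Printf("stale internal paths: %x\n", stale) // prints: stale internal paths: [02]
}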
-func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte) { +func checkTrieContents(t *testing.T, db ethdb.Database, scheme string, root []byte, content map[string][]byte, rawTrie bool) { // Check root availability and trie contents ndb := newTestDatabase(db, scheme) - trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb) - if err != nil { - t.Fatalf("failed to create trie at %x: %v", root, err) - } - if err := checkTrieConsistency(db, scheme, common.BytesToHash(root)); err != nil { + if err := checkTrieConsistency(db, scheme, common.BytesToHash(root), rawTrie); err != nil { t.Fatalf("inconsistent trie at %x: %v", root, err) } + type reader interface { + MustGet(key []byte) []byte + } + var r reader + if rawTrie { + trie, err := New(TrieID(common.BytesToHash(root)), ndb) + if err != nil { + t.Fatalf("failed to create trie at %x: %v", root, err) + } + r = trie + } else { + trie, err := NewStateTrie(TrieID(common.BytesToHash(root)), ndb) + if err != nil { + t.Fatalf("failed to create trie at %x: %v", root, err) + } + r = trie + } for key, val := range content { - if have := trie.MustGet([]byte(key)); !bytes.Equal(have, val) { + if have := r.MustGet([]byte(key)); !bytes.Equal(have, val) { t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val) } } } // checkTrieConsistency checks that all nodes in a trie are indeed present. -func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash) error { +func checkTrieConsistency(db ethdb.Database, scheme string, root common.Hash, rawTrie bool) error { ndb := newTestDatabase(db, scheme) - trie, err := NewStateTrie(TrieID(root), ndb) - if err != nil { - return nil // Consider a non existent state consistent + var it NodeIterator + if rawTrie { + trie, err := New(TrieID(root), ndb) + if err != nil { + return nil // Consider a non existent state consistent + } + it = trie.MustNodeIterator(nil) + } else { + trie, err := NewStateTrie(TrieID(root), ndb) + if err != nil { + return nil // Consider a non existent state consistent + } + it = trie.MustNodeIterator(nil) } - it := trie.MustNodeIterator(nil) for it.Next(true) { } return it.Error() @@ -109,8 +131,8 @@ type trieElement struct { // Tests that an empty trie is not scheduled for syncing. 
func TestEmptySync(t *testing.T) { - dbA := NewDatabase(rawdb.NewMemoryDatabase()) - dbB := NewDatabase(rawdb.NewMemoryDatabase()) + dbA := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme) + dbB := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.HashScheme) dbC := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) dbD := newTestDatabase(rawdb.NewMemoryDatabase(), rawdb.PathScheme) @@ -205,7 +227,7 @@ func testIterativeSync(t *testing.T, count int, bypath bool, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) } // Tests that the trie scheduler can correctly reconstruct the state even if only @@ -271,7 +293,7 @@ func testIterativeDelayedSync(t *testing.T, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) } // Tests that given a root hash, a trie can sync iteratively on a single thread, @@ -341,7 +363,7 @@ func testIterativeRandomSync(t *testing.T, count int, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) } // Tests that the trie scheduler can correctly reconstruct the state even if only @@ -413,7 +435,7 @@ func testIterativeRandomDelayedSync(t *testing.T, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) } // Tests that a trie sync will not request nodes multiple times, even if they @@ -484,7 +506,7 @@ func testDuplicateAvoidanceSync(t *testing.T, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) } // Tests that at any point in time during a sync, only complete sub-tries are in @@ -569,7 +591,7 @@ func testIncompleteSync(t *testing.T, scheme string) { nodeHash := addedHashes[i] value := rawdb.ReadTrieNode(diskdb, owner, inner, nodeHash, scheme) rawdb.DeleteTrieNode(diskdb, owner, inner, nodeHash, scheme) - if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root); err == nil { + if err := checkTrieConsistency(diskdb, srcDb.Scheme(), root, false); err == nil { t.Fatalf("trie inconsistency not caught, missing: %x", path) } rawdb.WriteTrieNode(diskdb, owner, inner, nodeHash, value, scheme) @@ -643,7 +665,7 @@ func testSyncOrdering(t *testing.T, scheme string) { } } // Cross check that the two tries are in sync - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) // Check that the trie nodes have been requested path-ordered for i := 0; i < len(reqs)-1; i++ { @@ -664,7 +686,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database // The code requests are ignored here since there is no code // at the testing trie. 
- paths, nodes, _ := sched.Missing(1) + paths, nodes, _ := sched.Missing(0) var elements []trieElement for i := 0; i < len(paths); i++ { elements = append(elements, trieElement{ @@ -698,7 +720,7 @@ func syncWith(t *testing.T, root common.Hash, db ethdb.Database, srcDb *Database } batch.Write() - paths, nodes, _ = sched.Missing(1) + paths, nodes, _ = sched.Missing(0) elements = elements[:0] for i := 0; i < len(paths); i++ { elements = append(elements, trieElement{ @@ -724,7 +746,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) { // Create a destination trie and sync with the scheduler diskdb := rawdb.NewMemoryDatabase() syncWith(t, srcTrie.Hash(), diskdb, srcDb) - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), srcData, false) // Push more modifications into the src trie, to see if dest trie can still // sync with it(overwrite stale states) @@ -748,7 +770,7 @@ func testSyncMovingTarget(t *testing.T, scheme string) { srcTrie, _ = NewStateTrie(TrieID(root), srcDb) syncWith(t, srcTrie.Hash(), diskdb, srcDb) - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), diff, false) // Revert added modifications from the src trie, to see if dest trie can still // sync with it(overwrite reverted states) @@ -772,5 +794,98 @@ func testSyncMovingTarget(t *testing.T, scheme string) { srcTrie, _ = NewStateTrie(TrieID(root), srcDb) syncWith(t, srcTrie.Hash(), diskdb, srcDb) - checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted) + checkTrieContents(t, diskdb, srcDb.Scheme(), srcTrie.Hash().Bytes(), reverted, false) +} + +// Tests if state syncer can correctly catch up the pivot move. 
+func TestPivotMove(t *testing.T) { + testPivotMove(t, rawdb.HashScheme, true) + testPivotMove(t, rawdb.HashScheme, false) + testPivotMove(t, rawdb.PathScheme, true) + testPivotMove(t, rawdb.PathScheme, false) +} + +func testPivotMove(t *testing.T, scheme string, tiny bool) { + var ( + srcDisk = rawdb.NewMemoryDatabase() + srcTrieDB = newTestDatabase(srcDisk, scheme) + srcTrie, _ = New(TrieID(types.EmptyRootHash), srcTrieDB) + + deleteFn = func(key []byte, tr *Trie, states map[string][]byte) { + tr.Delete(key) + delete(states, string(key)) + } + writeFn = func(key []byte, val []byte, tr *Trie, states map[string][]byte) { + if val == nil { + if tiny { + val = randBytes(4) + } else { + val = randBytes(32) + } + } + tr.Update(key, val) + states[string(key)] = common.CopyBytes(val) + } + copyStates = func(states map[string][]byte) map[string][]byte { + cpy := make(map[string][]byte) + for k, v := range states { + cpy[k] = v + } + return cpy + } + ) + stateA := make(map[string][]byte) + writeFn([]byte{0x01, 0x23}, nil, srcTrie, stateA) + writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateA) + writeFn([]byte{0x12, 0x33}, nil, srcTrie, stateA) + writeFn([]byte{0x12, 0x34}, nil, srcTrie, stateA) + writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateA) + writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateA) + + rootA, nodesA, _ := srcTrie.Commit(false) + if err := srcTrieDB.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil); err != nil { + panic(err) + } + if err := srcTrieDB.Commit(rootA, false); err != nil { + panic(err) + } + // Create a destination trie and sync with the scheduler + destDisk := rawdb.NewMemoryDatabase() + syncWith(t, rootA, destDisk, srcTrieDB) + checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateA, true) + + // Delete element to collapse trie + stateB := copyStates(stateA) + srcTrie, _ = New(TrieID(rootA), srcTrieDB) + deleteFn([]byte{0x02, 0x34}, srcTrie, stateB) + deleteFn([]byte{0x13, 0x44}, srcTrie, stateB) + writeFn([]byte{0x01, 0x24}, nil, srcTrie, stateB) + + rootB, nodesB, _ := srcTrie.Commit(false) + if err := srcTrieDB.Update(rootB, rootA, 0, trienode.NewWithNodeSet(nodesB), nil); err != nil { + panic(err) + } + if err := srcTrieDB.Commit(rootB, false); err != nil { + panic(err) + } + syncWith(t, rootB, destDisk, srcTrieDB) + checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateB, true) + + // Add elements to expand trie + stateC := copyStates(stateB) + srcTrie, _ = New(TrieID(rootB), srcTrieDB) + + writeFn([]byte{0x01, 0x24}, stateA[string([]byte{0x01, 0x24})], srcTrie, stateC) + writeFn([]byte{0x02, 0x34}, nil, srcTrie, stateC) + writeFn([]byte{0x13, 0x44}, nil, srcTrie, stateC) + + rootC, nodesC, _ := srcTrie.Commit(false) + if err := srcTrieDB.Update(rootC, rootB, 0, trienode.NewWithNodeSet(nodesC), nil); err != nil { + panic(err) + } + if err := srcTrieDB.Commit(rootC, false); err != nil { + panic(err) + } + syncWith(t, rootC, destDisk, srcTrieDB) + checkTrieContents(t, destDisk, scheme, srcTrie.Hash().Bytes(), stateC, true) } diff --git a/trie/tracer_test.go b/trie/tracer_test.go index 86daec6d2077..acb8c2f6bf4f 100644 --- a/trie/tracer_test.go +++ b/trie/tracer_test.go @@ -61,7 +61,7 @@ func TestTrieTracer(t *testing.T) { // Tests if the trie diffs are tracked correctly. Tracer should capture // all non-leaf dirty nodes, no matter the node is embedded or not. 
func testTrieTracer(t *testing.T, vals []struct{ k, v string }) { - db := NewDatabase(rawdb.NewMemoryDatabase()) + db := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(db) // Determine all new nodes are tracked @@ -104,7 +104,7 @@ func TestTrieTracerNoop(t *testing.T) { } func testTrieTracerNoop(t *testing.T, vals []struct{ k, v string }) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for _, val := range vals { trie.MustUpdate([]byte(val.k), []byte(val.v)) } @@ -128,7 +128,7 @@ func TestAccessList(t *testing.T) { func testAccessList(t *testing.T, vals []struct{ k, v string }) { var ( - db = NewDatabase(rawdb.NewMemoryDatabase()) + db = NewDatabase(rawdb.NewMemoryDatabase(), nil) trie = NewEmpty(db) orig = trie.Copy() ) @@ -211,7 +211,7 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { // Tests origin values won't be tracked in Iterator or Prover func TestAccessListLeak(t *testing.T) { var ( - db = NewDatabase(rawdb.NewMemoryDatabase()) + db = NewDatabase(rawdb.NewMemoryDatabase(), nil) trie = NewEmpty(db) ) // Create trie from scratch @@ -262,7 +262,7 @@ func TestAccessListLeak(t *testing.T) { // in its parent due to the smaller size of the original tree node. func TestTinyTree(t *testing.T) { var ( - db = NewDatabase(rawdb.NewMemoryDatabase()) + db = NewDatabase(rawdb.NewMemoryDatabase(), nil) trie = NewEmpty(db) ) for _, val := range tiny { diff --git a/trie/trie_reader.go b/trie/trie_reader.go index 1c63ff4544fd..42159645590f 100644 --- a/trie/trie_reader.go +++ b/trie/trie_reader.go @@ -20,6 +20,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie/triestate" ) // Reader wraps the Node method of a backing trie store. @@ -83,3 +84,18 @@ func (r *trieReader) node(path []byte, hash common.Hash) ([]byte, error) { } return blob, nil } + +// trieLoader implements triestate.TrieLoader for constructing tries. +type trieLoader struct { + db *Database +} + +// OpenTrie opens the main account trie. +func (l *trieLoader) OpenTrie(root common.Hash) (triestate.Trie, error) { + return New(TrieID(root), l.db) +} + +// OpenStorageTrie opens the storage trie of an account. 
+func (l *trieLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) { + return New(StorageTrieID(stateRoot, addrHash, root), l.db) +} diff --git a/trie/trie_test.go b/trie/trie_test.go index 3cb21c1956b3..35ccc772010c 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -45,7 +45,7 @@ func init() { } func TestEmptyTrie(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) res := trie.Hash() exp := types.EmptyRootHash if res != exp { @@ -54,7 +54,7 @@ func TestEmptyTrie(t *testing.T) { } func TestNull(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) key := make([]byte, 32) value := []byte("test") trie.MustUpdate(key, value) @@ -64,8 +64,13 @@ func TestNull(t *testing.T) { } func TestMissingRoot(t *testing.T) { + testMissingRoot(t, rawdb.HashScheme) + testMissingRoot(t, rawdb.PathScheme) +} + +func testMissingRoot(t *testing.T, scheme string) { root := common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33") - trie, err := New(TrieID(root), NewDatabase(rawdb.NewMemoryDatabase())) + trie, err := New(TrieID(root), newTestDatabase(rawdb.NewMemoryDatabase(), scheme)) if trie != nil { t.Error("New returned non-nil trie for invalid root") } @@ -161,7 +166,7 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) { } func TestInsert(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) updateString(trie, "doe", "reindeer") updateString(trie, "dog", "puppy") @@ -173,7 +178,7 @@ func TestInsert(t *testing.T) { t.Errorf("case 1: exp %x got %x", exp, root) } - trie = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie = NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab") @@ -184,7 +189,7 @@ func TestInsert(t *testing.T) { } func TestGet(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) + db := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(db) updateString(trie, "doe", "reindeer") updateString(trie, "dog", "puppy") @@ -209,7 +214,7 @@ func TestGet(t *testing.T) { } func TestDelete(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := []struct{ k, v string }{ {"do", "verb"}, {"ether", "wookiedoo"}, @@ -236,7 +241,7 @@ func TestDelete(t *testing.T) { } func TestEmptyValues(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) vals := []struct{ k, v string }{ {"do", "verb"}, @@ -260,7 +265,7 @@ func TestEmptyValues(t *testing.T) { } func TestReplication(t *testing.T) { - db := NewDatabase(rawdb.NewMemoryDatabase()) + db := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(db) vals := []struct{ k, v string }{ {"do", "verb"}, @@ -321,7 +326,7 @@ func TestReplication(t *testing.T) { } func TestLargeValue(t *testing.T) { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie.MustUpdate([]byte("key1"), []byte{99, 99, 99, 99}) trie.MustUpdate([]byte("key2"), bytes.Repeat([]byte{1}, 32)) trie.Hash() @@ -604,7 +609,7 @@ func 
BenchmarkUpdateLE(b *testing.B) { benchUpdate(b, binary.LittleEndian) } const benchElemCount = 20000 func benchGet(b *testing.B) { - triedb := NewDatabase(rawdb.NewMemoryDatabase()) + triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(triedb) k := make([]byte, 32) for i := 0; i < benchElemCount; i++ { @@ -621,7 +626,7 @@ func benchGet(b *testing.B) { } func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie { - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) k := make([]byte, 32) b.ReportAllocs() for i := 0; i < b.N; i++ { @@ -651,7 +656,7 @@ func BenchmarkHash(b *testing.B) { // entries, then adding N more. addresses, accounts := makeAccounts(2 * b.N) // Insert the accounts into the trie and hash it - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) i := 0 for ; i < len(addresses)/2; i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) @@ -682,7 +687,7 @@ func BenchmarkCommitAfterHash(b *testing.B) { func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) { // Make the random benchmark deterministic addresses, accounts := makeAccounts(b.N) - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for i := 0; i < len(addresses); i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } @@ -696,7 +701,7 @@ func benchmarkCommitAfterHash(b *testing.B, collectLeaf bool) { func TestTinyTrie(t *testing.T) { // Create a realistic account trie to hash _, accounts := makeAccounts(5) - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) trie.MustUpdate(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3]) if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root { t.Errorf("1: got %x, exp %x", root, exp) @@ -709,7 +714,7 @@ func TestTinyTrie(t *testing.T) { if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root { t.Errorf("3: got %x, exp %x", root, exp) } - checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + checktr := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) it := NewIterator(trie.MustNodeIterator(nil)) for it.Next() { checktr.MustUpdate(it.Key, it.Value) @@ -722,7 +727,7 @@ func TestTinyTrie(t *testing.T) { func TestCommitAfterHash(t *testing.T) { // Create a realistic account trie to hash addresses, accounts := makeAccounts(1000) - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for i := 0; i < len(addresses); i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } @@ -788,11 +793,17 @@ func (s *spongeDb) Stat(property string) (string, error) { panic("implement func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") } func (s *spongeDb) Close() error { return nil } func (s *spongeDb) Put(key []byte, value []byte) error { - valbrief := value + var ( + keybrief = key + valbrief = value + ) + if len(keybrief) > 8 { + keybrief = keybrief[:8] + } if len(valbrief) > 8 { valbrief = valbrief[:8] } - s.journal = append(s.journal, fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, key[:8], len(value), valbrief)) + s.journal = append(s.journal, 
fmt.Sprintf("%v: PUT([%x...], [%d bytes] %x...)\n", s.id, keybrief, len(value), valbrief)) s.sponge.Write(key) s.sponge.Write(value) return nil @@ -830,7 +841,7 @@ func TestCommitSequence(t *testing.T) { addresses, accounts := makeAccounts(tc.count) // This spongeDb is used to check the sequence of disk-db-writes s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} - db := NewDatabase(rawdb.NewDatabase(s)) + db := NewDatabase(rawdb.NewDatabase(s), nil) trie := NewEmpty(db) // Fill the trie with elements for i := 0; i < tc.count; i++ { @@ -861,7 +872,7 @@ func TestCommitSequenceRandomBlobs(t *testing.T) { prng := rand.New(rand.NewSource(int64(i))) // This spongeDb is used to check the sequence of disk-db-writes s := &spongeDb{sponge: sha3.NewLegacyKeccak256()} - db := NewDatabase(rawdb.NewDatabase(s)) + db := NewDatabase(rawdb.NewDatabase(s), nil) trie := NewEmpty(db) // Fill the trie with elements for i := 0; i < tc.count; i++ { @@ -893,7 +904,7 @@ func TestCommitSequenceStackTrie(t *testing.T) { prng := rand.New(rand.NewSource(int64(count))) // This spongeDb is used to check the sequence of disk-db-writes s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"} - db := NewDatabase(rawdb.NewDatabase(s)) + db := NewDatabase(rawdb.NewDatabase(s), nil) trie := NewEmpty(db) // Another sponge is used for the stacktrie commits stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"} @@ -952,7 +963,7 @@ func TestCommitSequenceStackTrie(t *testing.T) { // not fit into 32 bytes, rlp-encoded. However, it's still the correct thing to do. func TestCommitSequenceSmallRoot(t *testing.T) { s := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "a"} - db := NewDatabase(rawdb.NewDatabase(s)) + db := NewDatabase(rawdb.NewDatabase(s), nil) trie := NewEmpty(db) // Another sponge is used for the stacktrie commits stackTrieSponge := &spongeDb{sponge: sha3.NewLegacyKeccak256(), id: "b"} @@ -1029,7 +1040,7 @@ func BenchmarkHashFixedSize(b *testing.B) { func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) { b.ReportAllocs() - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for i := 0; i < len(addresses); i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } @@ -1080,7 +1091,7 @@ func BenchmarkCommitAfterHashFixedSize(b *testing.B) { func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) { b.ReportAllocs() - trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) + trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase(), nil)) for i := 0; i < len(addresses); i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } @@ -1132,7 +1143,7 @@ func BenchmarkDerefRootFixedSize(b *testing.B) { func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) { b.ReportAllocs() - triedb := NewDatabase(rawdb.NewMemoryDatabase()) + triedb := NewDatabase(rawdb.NewMemoryDatabase(), nil) trie := NewEmpty(triedb) for i := 0; i < len(addresses); i++ { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) diff --git a/trie/triedb/hashdb/database.go b/trie/triedb/hashdb/database.go index d4a344452de0..caf79ef93659 100644 --- a/trie/triedb/hashdb/database.go +++ b/trie/triedb/hashdb/database.go @@ -65,6 +65,20 @@ type ChildResolver interface { ForEach(node []byte, onChild func(common.Hash)) } +// Config contains the settings for database. 
+type Config struct { + CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes +} + +// Defaults is the default setting for database if it's not specified. +// Notably, clean cache is disabled explicitly, +var Defaults = &Config{ + // Explicitly set clean cache size to 0 to avoid creating fastcache, + // otherwise database must be closed when it's no longer needed to + // prevent memory leak. + CleanCacheSize: 0, +} + // Database is an intermediate write layer between the trie data structures and // the disk database. The aim is to accumulate trie writes in-memory and only // periodically flush a couple tries to disk, garbage collecting the remainder. @@ -122,12 +136,13 @@ func (n *cachedNode) forChildren(resolver ChildResolver, onChild func(hash commo } // New initializes the hash-based node database. -func New(diskdb ethdb.Database, size int, resolver ChildResolver) *Database { - // Initialize the clean cache if the specified cache allowance - // is non-zero. Note, the size is in bytes. +func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Database { + if config == nil { + config = Defaults + } var cleans *fastcache.Cache - if size > 0 { - cleans = fastcache.New(size) + if config.CleanCacheSize > 0 { + cleans = fastcache.New(config.CleanCacheSize) } return &Database{ diskdb: diskdb, @@ -626,7 +641,7 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n // Size returns the current storage size of the memory cache in front of the // persistent database layer. -func (db *Database) Size() common.StorageSize { +func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize) { db.lock.RLock() defer db.lock.RUnlock() @@ -634,11 +649,17 @@ func (db *Database) Size() common.StorageSize { // the total memory consumption, the maintenance metadata is also needed to be // counted. var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize) - return db.dirtiesSize + db.childrenSize + metadataSize + return 0, db.dirtiesSize + db.childrenSize + metadataSize, 0 } // Close closes the trie database and releases all held resources. -func (db *Database) Close() error { return nil } +func (db *Database) Close() error { + if db.cleans != nil { + db.cleans.Reset() + db.cleans = nil + } + return nil +} // Scheme returns the node scheme used in the database. func (db *Database) Scheme() string { diff --git a/trie/triedb/pathdb/asyncnodebuffer.go b/trie/triedb/pathdb/asyncnodebuffer.go new file mode 100644 index 000000000000..c8c3921177c7 --- /dev/null +++ b/trie/triedb/pathdb/asyncnodebuffer.go @@ -0,0 +1,448 @@ +package pathdb + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/VictoriaMetrics/fastcache" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie/trienode" +) + +var _ trienodebuffer = &asyncnodebuffer{} + +// asyncnodebuffer implement trienodebuffer interface, and aysnc the nodecache +// to disk. +type asyncnodebuffer struct { + mux sync.RWMutex + current *nodecache + background *nodecache +} + +// newAsyncNodeBuffer initializes the async node buffer with the provided nodes. 
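// Illustrative sketch only (not part of this patch): asyncnodebuffer keeps two
// nodecaches, a mutable "current" one taking writes and a sealed "background"
// one being flushed. Once current exceeds its limit and no flush is running,
// the two are swapped and the sealed cache is persisted on a goroutine. The
// reduced double-buffer below shows the same idea; all names (doubleBuffer,
// flushFn, ...) are hypothetical.
package main

import (
	"fmt"
	"sync"
)

type buffer struct {
	data map[string][]byte
	size int
}

func newBuffer() *buffer { return &buffer{data: make(map[string][]byte)} }

type doubleBuffer struct {
	mu         sync.Mutex
	current    *buffer // accepts new writes
	background *buffer // sealed copy being flushed
	limit      int
	flushing   bool
	flushFn    func(map[string][]byte) // persistence hook, e.g. a db batch write
}

func (d *doubleBuffer) put(key string, val []byte) {
	d.mu.Lock()
	defer d.mu.Unlock()

	d.current.data[key] = val
	d.current.size += len(key) + len(val)
	if d.current.size < d.limit || d.flushing {
		return // keep buffering, or let the running flush finish first
	}
	// Seal the full buffer, swap in the idle one and flush asynchronously.
	d.current, d.background = d.background, d.current
	d.flushing = true
	go func(sealed *buffer) {
		d.flushFn(sealed.data)
		d.mu.Lock()
		d.background = newBuffer() // recycle the sealed buffer
		d.flushing = false
		d.mu.Unlock()
	}(d.background)
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	db := &doubleBuffer{
		current:    newBuffer(),
		background: newBuffer(),
		limit:      8,
		flushFn: func(nodes map[string][]byte) {
			defer wg.Done()
			fmt.Println("flushed", len(nodes), "entries")
		},
	}
	db.put("a", []byte("0123"))
	db.put("b", []byte("4567")) // crosses the limit, triggers the async flush
	wg.Wait()
}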
+func newAsyncNodeBuffer(limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *asyncnodebuffer { + if nodes == nil { + nodes = make(map[common.Hash]map[string]*trienode.Node) + } + var size uint64 + for _, subset := range nodes { + for path, n := range subset { + size += uint64(len(n.Blob) + len(path)) + } + } + + return &asyncnodebuffer{ + current: newNodeCache(uint64(limit), size, nodes, layers), + background: newNodeCache(uint64(limit), 0, make(map[common.Hash]map[string]*trienode.Node), 0), + } +} + +// node retrieves the trie node with given node info. +func (a *asyncnodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*trienode.Node, error) { + a.mux.RLock() + defer a.mux.RUnlock() + + node, err := a.current.node(owner, path, hash) + if err != nil { + return nil, err + } + if node == nil { + return a.background.node(owner, path, hash) + } + return node, nil +} + +// commit merges the dirty nodes into the nodebuffer. This operation won't take +// the ownership of the nodes map which belongs to the bottom-most diff layer. +// It will just hold the node references from the given map which are safe to +// copy. +func (a *asyncnodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) trienodebuffer { + a.mux.Lock() + defer a.mux.Unlock() + + err := a.current.commit(nodes) + if err != nil { + log.Crit("[BUG] failed to commit nodes to asyncnodebuffer", "error", err) + } + return a +} + +// revert is the reverse operation of commit. It also merges the provided nodes +// into the nodebuffer, the difference is that the provided node set should +// revert the changes made by the last state transition. +func (a *asyncnodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error { + a.mux.Lock() + defer a.mux.Unlock() + + var err error + a.current, err = a.current.merge(a.background) + if err != nil { + log.Crit("[BUG] failed to merge node cache under revert async node buffer", "error", err) + } + a.background.reset() + return a.current.revert(db, nodes) +} + +// setSize is unsupported in asyncnodebuffer, due to the double buffer, blocking will occur. +func (a *asyncnodebuffer) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error { + return errors.New("not supported") +} + +// reset cleans up the disk cache. +func (a *asyncnodebuffer) reset() { + a.mux.Lock() + defer a.mux.Unlock() + + a.current.reset() + a.background.reset() +} + +// empty returns an indicator if nodebuffer contains any state transition inside. +func (a *asyncnodebuffer) empty() bool { + a.mux.RLock() + defer a.mux.RUnlock() + + return a.current.empty() && a.background.empty() +} + +// setSize sets the buffer size to the provided number, and invokes a flush +// operation if the current memory usage exceeds the new limit. +//func (b *nodebuffer) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error { +// b.limit = uint64(size) +// return b.flush(db, clean, id, false) +//} + +// flush persists the in-memory dirty trie node into the disk if the configured +// memory threshold is reached. Note, all data must be written atomically. 
+func (a *asyncnodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error { + a.mux.Lock() + defer a.mux.Unlock() + + if force { + for { + if atomic.LoadUint64(&a.background.immutable) == 1 { + time.Sleep(time.Duration(DefaultBackgroundFlushInterval) * time.Second) + log.Info("waiting background memory table flush to disk for force flush node buffer") + continue + } + atomic.StoreUint64(&a.current.immutable, 1) + return a.current.flush(db, clean, id) + } + } + + if a.current.size < a.current.limit { + return nil + } + + // background flush doing + if atomic.LoadUint64(&a.background.immutable) == 1 { + return nil + } + + atomic.StoreUint64(&a.current.immutable, 1) + a.current, a.background = a.background, a.current + + go func(persistId uint64) { + for { + err := a.background.flush(db, clean, persistId) + if err == nil { + log.Debug("succeed to flush background nodecahce to disk", "state_id", persistId) + return + } + log.Error("failed to flush background nodecahce to disk", "state_id", persistId, "error", err) + } + }(id) + return nil +} + +func (a *asyncnodebuffer) getAllNodes() map[common.Hash]map[string]*trienode.Node { + a.mux.Lock() + defer a.mux.Unlock() + + cached, err := a.current.merge(a.background) + if err != nil { + log.Crit("[BUG] failed to merge nodecache under revert asyncnodebuffer", "error", err) + } + return cached.nodes +} + +func (a *asyncnodebuffer) getLayers() uint64 { + a.mux.RLock() + defer a.mux.RUnlock() + + return a.current.layers + a.background.layers +} + +func (a *asyncnodebuffer) getSize() (uint64, uint64) { + a.mux.RLock() + defer a.mux.RUnlock() + + return a.current.size, a.background.size +} + +type nodecache struct { + layers uint64 // The number of diff layers aggregated inside + size uint64 // The size of aggregated writes + limit uint64 // The maximum memory allowance in bytes + nodes map[common.Hash]map[string]*trienode.Node // The dirty node set, mapped by owner and path + immutable uint64 // The flag equal 1, flush nodes to disk background +} + +func newNodeCache(limit, size uint64, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *nodecache { + return &nodecache{ + layers: layers, + size: size, + limit: limit, + nodes: nodes, + immutable: 0, + } +} + +func (nc *nodecache) node(owner common.Hash, path []byte, hash common.Hash) (*trienode.Node, error) { + subset, ok := nc.nodes[owner] + if !ok { + return nil, nil + } + n, ok := subset[string(path)] + if !ok { + return nil, nil + } + if n.Hash != hash { + dirtyFalseMeter.Mark(1) + log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash) + return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path, n.Blob) + } + return n, nil +} + +func (nc *nodecache) commit(nodes map[common.Hash]map[string]*trienode.Node) error { + if atomic.LoadUint64(&nc.immutable) == 1 { + return errWriteImmutable + } + var ( + delta int64 + overwrite int64 + overwriteSize int64 + ) + for owner, subset := range nodes { + current, exist := nc.nodes[owner] + if !exist { + // Allocate a new map for the subset instead of claiming it directly + // from the passed map to avoid potential concurrent map read/write. + // The nodes belong to original diff layer are still accessible even + // after merging, thus the ownership of nodes map should still belong + // to original layer and any mutation on it should be prevented. 
+ current = make(map[string]*trienode.Node) + for path, n := range subset { + current[path] = n + delta += int64(len(n.Blob) + len(path)) + } + nc.nodes[owner] = current + continue + } + for path, n := range subset { + if orig, exist := current[path]; !exist { + delta += int64(len(n.Blob) + len(path)) + } else { + delta += int64(len(n.Blob) - len(orig.Blob)) + overwrite++ + overwriteSize += int64(len(orig.Blob) + len(path)) + } + current[path] = n + } + nc.nodes[owner] = current + } + nc.updateSize(delta) + nc.layers++ + gcNodesMeter.Mark(overwrite) + gcBytesMeter.Mark(overwriteSize) + return nil +} + +func (nc *nodecache) updateSize(delta int64) { + size := int64(nc.size) + delta + if size >= 0 { + nc.size = uint64(size) + return + } + s := nc.size + nc.size = 0 + log.Error("Invalid pathdb buffer size", "prev", common.StorageSize(s), "delta", common.StorageSize(delta)) +} + +func (nc *nodecache) reset() { + atomic.StoreUint64(&nc.immutable, 0) + nc.layers = 0 + nc.size = 0 + nc.nodes = make(map[common.Hash]map[string]*trienode.Node) +} + +func (nc *nodecache) empty() bool { + return nc.layers == 0 +} + +func (nc *nodecache) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error { + if atomic.LoadUint64(&nc.immutable) != 1 { + return errFlushMutable + } + + // Ensure the target state id is aligned with the internal counter. + head := rawdb.ReadPersistentStateID(db) + if head+nc.layers != id { + return fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)", nc.layers, head, id) + } + var ( + start = time.Now() + batch = db.NewBatchWithSize(int(float64(nc.size) * DefaultBatchRedundancyRate)) + ) + nodes := writeNodes(batch, nc.nodes, clean) + rawdb.WritePersistentStateID(batch, id) + + // Flush all mutations in a single batch + size := batch.ValueSize() + if err := batch.Write(); err != nil { + return err + } + commitBytesMeter.Mark(int64(size)) + commitNodesMeter.Mark(int64(nodes)) + commitTimeTimer.UpdateSince(start) + log.Debug("Persisted pathdb nodes", "nodes", len(nc.nodes), "bytes", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start))) + nc.reset() + return nil +} + +func (nc *nodecache) merge(nc1 *nodecache) (*nodecache, error) { + if nc == nil && nc1 == nil { + return nil, nil + } + if nc == nil || nc.empty() { + res := copyNodeCache(nc1) + atomic.StoreUint64(&res.immutable, 0) + return nc1, nil + } + if nc1 == nil || nc1.empty() { + res := copyNodeCache(nc) + atomic.StoreUint64(&res.immutable, 0) + return nc, nil + } + if atomic.LoadUint64(&nc.immutable) == atomic.LoadUint64(&nc1.immutable) { + return nil, errIncompatibleMerge + } + + var ( + immutable *nodecache + mutable *nodecache + res = &nodecache{} + ) + if atomic.LoadUint64(&nc.immutable) == 1 { + immutable = nc + mutable = nc1 + } else { + immutable = nc1 + mutable = nc + } + res.size = immutable.size + mutable.size + res.layers = immutable.layers + mutable.layers + res.limit = immutable.size + res.nodes = make(map[common.Hash]map[string]*trienode.Node) + for acc, subTree := range immutable.nodes { + if _, ok := res.nodes[acc]; !ok { + res.nodes[acc] = make(map[string]*trienode.Node) + } + for path, node := range subTree { + res.nodes[acc][path] = node + } + } + + for acc, subTree := range mutable.nodes { + if _, ok := res.nodes[acc]; !ok { + res.nodes[acc] = make(map[string]*trienode.Node) + } + for path, node := range subTree { + res.nodes[acc][path] = node + } + } + return res, nil +} + +func (nc *nodecache) revert(db 
ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error { + if atomic.LoadUint64(&nc.immutable) == 1 { + return errRevertImmutable + } + + // Short circuit if no embedded state transition to revert. + if nc.layers == 0 { + return errStateUnrecoverable + } + nc.layers-- + + // Reset the entire buffer if only a single transition left. + if nc.layers == 0 { + nc.reset() + return nil + } + var delta int64 + for owner, subset := range nodes { + current, ok := nc.nodes[owner] + if !ok { + panic(fmt.Sprintf("non-existent subset (%x)", owner)) + } + for path, n := range subset { + orig, ok := current[path] + if !ok { + // There is a special case in MPT that one child is removed from + // a fullNode which only has two children, and then a new child + // with different position is immediately inserted into the fullNode. + // In this case, the clean child of the fullNode will also be + // marked as dirty because of node collapse and expansion. + // + // In case of database rollback, don't panic if this "clean" + // node occurs which is not present in buffer. + var nhash common.Hash + if owner == (common.Hash{}) { + _, nhash = rawdb.ReadAccountTrieNode(db, []byte(path)) + } else { + _, nhash = rawdb.ReadStorageTrieNode(db, owner, []byte(path)) + } + // Ignore the clean node in the case described above. + if nhash == n.Hash { + continue + } + panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex())) + } + current[path] = n + delta += int64(len(n.Blob)) - int64(len(orig.Blob)) + } + } + nc.updateSize(delta) + return nil +} + +func copyNodeCache(n *nodecache) *nodecache { + if n == nil { + return nil + } + nc := &nodecache{ + layers: n.layers, + size: n.size, + limit: n.limit, + immutable: atomic.LoadUint64(&n.immutable), + nodes: make(map[common.Hash]map[string]*trienode.Node), + } + for acc, subTree := range n.nodes { + if _, ok := nc.nodes[acc]; !ok { + nc.nodes[acc] = make(map[string]*trienode.Node) + } + for path, node := range subTree { + nc.nodes[acc][path] = node + } + } + return nc +} diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go index b1a14589a2f7..167db6643d56 100644 --- a/trie/triedb/pathdb/database.go +++ b/trie/triedb/pathdb/database.go @@ -33,8 +33,34 @@ import ( "github.com/ethereum/go-ethereum/trie/triestate" ) -// maxDiffLayers is the maximum diff layers allowed in the layer tree. -const maxDiffLayers = 128 +const ( + // maxDiffLayers is the maximum diff layers allowed in the layer tree. + maxDiffLayers = 128 + + // defaultCleanSize is the default memory allowance of clean cache. + defaultCleanSize = 16 * 1024 * 1024 + + // MaxDirtyBufferSize is the maximum memory allowance of node buffer. + // Too large nodebuffer will cause the system to pause for a long + // time when write happens. Also, the largest batch that pebble can + // support is 4GB, node will panic if batch size exceeds this limit. + MaxDirtyBufferSize = 256 * 1024 * 1024 + + // DefaultDirtyBufferSize is the default memory allowance of node buffer + // that aggregates the writes from above until it's flushed into the + // disk. It's meant to be used once the initial sync is finished. + // Do not increase the buffer size arbitrarily, otherwise the system + // pause time will increase when the database writes happen. + DefaultDirtyBufferSize = 64 * 1024 * 1024 + + // DefaultBackgroundFlushInterval defines the default the wait interval + // that background node cache flush disk. 
+ DefaultBackgroundFlushInterval = 3 + + // DefaultBatchRedundancyRate defines the extra headroom applied to the + // pre-allocated batch size, since the tracked write size can be slightly inaccurate. + DefaultBatchRedundancyRate = 1.1 +) // layer is the interface implemented by all state layers which includes some // public methods and some additional methods for internal usage. @@ -68,30 +94,34 @@ type layer interface { // Config contains the settings for database. type Config struct { - StateLimit uint64 // Number of recent blocks to maintain state history for - CleanSize int // Maximum memory allowance (in bytes) for caching clean nodes - DirtySize int // Maximum memory allowance (in bytes) for caching dirty nodes - ReadOnly bool // Flag whether the database is opened in read only mode. + SyncFlush bool // Whether the trienodebuffer flushes its cache to disk synchronously + StateHistory uint64 // Number of recent blocks to maintain state history for + CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes + DirtyCacheSize int // Maximum memory allowance (in bytes) for caching dirty nodes + ReadOnly bool // Flag whether the database is opened in read only mode. } -var ( - // defaultCleanSize is the default memory allowance of clean cache. - defaultCleanSize = 16 * 1024 * 1024 - - // defaultBufferSize is the default memory allowance of node buffer - // that aggregates the writes from above until it's flushed into the - // disk. Do not increase the buffer size arbitrarily, otherwise the - // system pause time will increase when the database writes happen. - defaultBufferSize = 128 * 1024 * 1024 -) +// sanitize checks the provided user configurations and changes anything that's +// unreasonable or unworkable. +func (c *Config) sanitize() *Config { + conf := *c + if conf.DirtyCacheSize > MaxDirtyBufferSize { + log.Warn("Sanitizing invalid node buffer size", "provided", common.StorageSize(conf.DirtyCacheSize), "updated", common.StorageSize(MaxDirtyBufferSize)) + conf.DirtyCacheSize = MaxDirtyBufferSize + } + return &conf +} // Defaults contains default settings for Ethereum mainnet. var Defaults = &Config{ - StateLimit: params.FullImmutabilityThreshold, - CleanSize: defaultCleanSize, - DirtySize: defaultBufferSize, + StateHistory: params.FullImmutabilityThreshold, + CleanCacheSize: defaultCleanSize, + DirtyCacheSize: DefaultDirtyBufferSize, } +// ReadOnly is the config used to open the database in read-only mode. +var ReadOnly = &Config{ReadOnly: true} + // Database is a multiple-layered structure for maintaining in-memory trie nodes. // It consists of one persistent base layer backed by a key-value store, on top // of which arbitrarily many in-memory diff layers are stacked. The memory diffs @@ -123,9 +153,11 @@ func New(diskdb ethdb.Database, config *Config) *Database { if config == nil { config = Defaults } + config = config.sanitize() + db := &Database{ readOnly: config.ReadOnly, - bufferSize: config.DirtySize, + bufferSize: config.DirtyCacheSize, config: config, diskdb: diskdb, } @@ -141,7 +173,7 @@ func New(diskdb ethdb.Database, config *Config) *Database { // is opened at the same time to prevent accidental mutation. 
if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly { offset := uint64(0) //TODO(Nathan): just for passing compilation - freezer, err := rawdb.NewStateHistoryFreezer(ancient, false, offset) + freezer, err := rawdb.NewStateFreezer(ancient, false, offset) if err != nil { log.Crit("Failed to open state history freezer", "err", err) } @@ -157,7 +189,7 @@ func New(diskdb ethdb.Database, config *Config) *Database { log.Warn("Truncated extra state histories", "number", pruned) } } - log.Warn("Path-based state scheme is an experimental feature") + log.Warn("Path-based state scheme is an experimental feature", "sync", db.config.SyncFlush) return db } @@ -260,7 +292,7 @@ func (db *Database) Reset(root common.Hash) error { } // Re-construct a new disk layer backed by persistent state // with **empty clean cache and node buffer**. - dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0)) + dl := newDiskLayer(root, 0, db, nil, NewTrieNodeBuffer(db.config.SyncFlush, db.bufferSize, nil, 0)) db.tree.reset(dl) log.Info("Rebuilt trie database", "root", root) return nil @@ -345,7 +377,14 @@ func (db *Database) Close() error { db.lock.Lock() defer db.lock.Unlock() + // Set the database to read-only mode to prevent all + // following mutations. db.readOnly = true + + // Release the memory held by clean cache. + db.tree.bottom().resetCache() + + // Close the attached state history freezer. if db.freezer == nil { return nil } @@ -354,16 +393,16 @@ func (db *Database) Close() error { // Size returns the current storage size of the memory cache in front of the // persistent database layer. -func (db *Database) Size() (size common.StorageSize) { +func (db *Database) Size() (diffs common.StorageSize, nodes common.StorageSize, immutableNodes common.StorageSize) { db.tree.forEach(func(layer layer) { if diff, ok := layer.(*diffLayer); ok { - size += common.StorageSize(diff.memory) + diffs += common.StorageSize(diff.memory) } if disk, ok := layer.(*diskLayer); ok { - size += disk.size() + nodes, immutableNodes = disk.size() } }) - return size + return diffs, nodes, immutableNodes } // Initialized returns an indicator if the state data is already @@ -383,6 +422,10 @@ func (db *Database) SetBufferSize(size int) error { db.lock.Lock() defer db.lock.Unlock() + if size > MaxDirtyBufferSize { + log.Info("Capped node buffer size", "provided", common.StorageSize(size), "adjusted", common.StorageSize(MaxDirtyBufferSize)) + size = MaxDirtyBufferSize + } db.bufferSize = size return db.tree.bottom().setBufferSize(db.bufferSize) } @@ -391,3 +434,10 @@ func (db *Database) SetBufferSize(size int) error { func (db *Database) Scheme() string { return rawdb.PathScheme } + +// Head return the top non-fork difflayer/disklayer root hash for rewinding. 
+func (db *Database) Head() common.Hash { + db.lock.Lock() + defer db.lock.Unlock() + return db.tree.front() +} diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go index 83a299b02097..42bb014e737e 100644 --- a/trie/triedb/pathdb/database_test.go +++ b/trie/triedb/pathdb/database_test.go @@ -46,7 +46,8 @@ func updateTrie(addrHash common.Hash, root common.Hash, dirties, cleans map[comm h.Update(key.Bytes(), val) } } - return h.Commit(false) + root, nodes, _ := h.Commit(false) + return root, nodes } func generateAccount(storageRoot common.Hash) types.StateAccount { @@ -95,11 +96,15 @@ type tester struct { snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte } -func newTester(t *testing.T) *tester { +func newTester(t *testing.T, historyLimit uint64) *tester { var ( disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false) - db = New(disk, &Config{CleanSize: 256 * 1024, DirtySize: 256 * 1024}) - obj = &tester{ + db = New(disk, &Config{ + StateHistory: historyLimit, + CleanCacheSize: 256 * 1024, + DirtyCacheSize: 256 * 1024, + }) + obj = &tester{ db: db, preimages: make(map[common.Hash]common.Address), accounts: make(map[common.Hash][]byte), @@ -375,7 +380,7 @@ func (t *tester) bottomIndex() int { func TestDatabaseRollback(t *testing.T) { // Verify state histories - tester := newTester(t) + tester := newTester(t, 0) defer tester.release() if err := tester.verifyHistory(); err != nil { @@ -401,7 +406,7 @@ func TestDatabaseRollback(t *testing.T) { func TestDatabaseRecoverable(t *testing.T) { var ( - tester = newTester(t) + tester = newTester(t, 0) index = tester.bottomIndex() ) defer tester.release() @@ -440,7 +445,7 @@ func TestDatabaseRecoverable(t *testing.T) { func TestReset(t *testing.T) { var ( - tester = newTester(t) + tester = newTester(t, 0) index = tester.bottomIndex() ) defer tester.release() @@ -474,7 +479,7 @@ func TestReset(t *testing.T) { } func TestCommit(t *testing.T) { - tester := newTester(t) + tester := newTester(t, 0) defer tester.release() if err := tester.db.Commit(tester.lastHash(), false); err != nil { @@ -498,7 +503,7 @@ func TestCommit(t *testing.T) { } func TestJournal(t *testing.T) { - tester := newTester(t) + tester := newTester(t, 0) defer tester.release() if err := tester.db.Journal(tester.lastHash()); err != nil { @@ -522,7 +527,7 @@ func TestJournal(t *testing.T) { } func TestCorruptedJournal(t *testing.T) { - tester := newTester(t) + tester := newTester(t, 0) defer tester.release() if err := tester.db.Journal(tester.lastHash()); err != nil { @@ -551,6 +556,35 @@ func TestCorruptedJournal(t *testing.T) { } } +// TestTailTruncateHistory function is designed to test a specific edge case where, +// when history objects are removed from the end, it should trigger a state flush +// if the ID of the new tail object is even higher than the persisted state ID. +// +// For example, let's say the ID of the persistent state is 10, and the current +// history objects range from ID(5) to ID(15). As we accumulate six more objects, +// the history will expand to cover ID(11) to ID(21). ID(11) then becomes the +// oldest history object, and its ID is even higher than the stored state. +// +// In this scenario, it is mandatory to update the persistent state before +// truncating the tail histories. This ensures that the ID of the persistent state +// always falls within the range of [oldest-history-id, latest-history-id]. 
+func TestTailTruncateHistory(t *testing.T) { + tester := newTester(t, 10) + defer tester.release() + + tester.db.Close() + tester.db = New(tester.db.diskdb, &Config{StateHistory: 10}) + + head, err := tester.db.freezer.Ancients() + if err != nil { + t.Fatalf("Failed to obtain freezer head") + } + stored := rawdb.ReadPersistentStateID(tester.db.diskdb) + if head != stored { + t.Fatalf("Failed to truncate excess history object above, stored: %d, head: %d", stored, head) + } +} + // copyAccounts returns a deep-copied account set of the provided one. func copyAccounts(set map[common.Hash][]byte) map[common.Hash][]byte { copied := make(map[common.Hash][]byte, len(set)) diff --git a/trie/triedb/pathdb/difflayer.go b/trie/triedb/pathdb/difflayer.go index d25ac1c601d7..ccd8d36764cc 100644 --- a/trie/triedb/pathdb/difflayer.go +++ b/trie/triedb/pathdb/difflayer.go @@ -71,7 +71,7 @@ func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes dirtyWriteMeter.Mark(size) diffLayerNodesMeter.Mark(int64(count)) diffLayerBytesMeter.Mark(int64(dl.memory)) - log.Debug("Created new diff layer", "id", id, "block", block, "nodes", count, "size", common.StorageSize(dl.memory)) + log.Debug("Created new diff layer", "id", id, "block", block, "nodes", count, "size", common.StorageSize(dl.memory), "root", dl.root) return dl } @@ -114,7 +114,7 @@ func (dl *diffLayer) node(owner common.Hash, path []byte, hash common.Hash, dept if n.Hash != hash { dirtyFalseMeter.Mark(1) log.Error("Unexpected trie node in diff layer", "owner", owner, "path", path, "expect", hash, "got", n.Hash) - return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path) + return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path, n.Blob) } dirtyHitMeter.Mark(1) dirtyNodeHitDepthHist.Update(int64(depth)) diff --git a/trie/triedb/pathdb/difflayer_test.go b/trie/triedb/pathdb/difflayer_test.go index 77c4cd5722da..8598a05dce9c 100644 --- a/trie/triedb/pathdb/difflayer_test.go +++ b/trie/triedb/pathdb/difflayer_test.go @@ -29,7 +29,7 @@ import ( func emptyLayer() *diskLayer { return &diskLayer{ db: New(rawdb.NewMemoryDatabase(), nil), - buffer: newNodeBuffer(defaultBufferSize, nil, 0), + buffer: newNodeBuffer(DefaultDirtyBufferSize, nil, 0), } } diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go index b526b3b7dd9d..59be04c7448b 100644 --- a/trie/triedb/pathdb/disklayer.go +++ b/trie/triedb/pathdb/disklayer.go @@ -25,30 +25,82 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" "golang.org/x/crypto/sha3" ) +// trienodebuffer is a collection of modified trie nodes to aggregate the disk +// write. The content of the trienodebuffer must be checked before diving into +// disk (since it basically is not-yet-written data). +type trienodebuffer interface { + // node retrieves the trie node with given node info. + node(owner common.Hash, path []byte, hash common.Hash) (*trienode.Node, error) + + // commit merges the dirty nodes into the trienodebuffer. This operation won't take + // the ownership of the nodes map which belongs to the bottom-most diff layer. + // It will just hold the node references from the given map which are safe to + // copy. 
+ commit(nodes map[common.Hash]map[string]*trienode.Node) trienodebuffer + + // revert is the reverse operation of commit. It also merges the provided nodes + // into the trienodebuffer, the difference is that the provided node set should + // revert the changes made by the last state transition. + revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error + + // flush persists the in-memory dirty trie node into the disk if the configured + // memory threshold is reached. Note, all data must be written atomically. + flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error + + // setSize sets the buffer size to the provided number, and invokes a flush + // operation if the current memory usage exceeds the new limit. + setSize(size int, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error + + // reset cleans up the disk cache. + reset() + + // empty returns an indicator if trienodebuffer contains any state transition inside. + empty() bool + + // getSize return the trienodebuffer used size. + getSize() (uint64, uint64) + + // getAllNodes return all the trie nodes are cached in trienodebuffer. + getAllNodes() map[common.Hash]map[string]*trienode.Node + + // getLayers return the size of cached difflayers. + getLayers() uint64 +} + +func NewTrieNodeBuffer(sync bool, limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) trienodebuffer { + if sync { + log.Info("new sync node buffer", "limit", common.StorageSize(limit), "layers", layers) + return newNodeBuffer(limit, nodes, layers) + } + log.Info("new async node buffer", "limit", common.StorageSize(limit), "layers", layers) + return newAsyncNodeBuffer(limit, nodes, layers) +} + // diskLayer is a low level persistent layer built on top of a key-value store. type diskLayer struct { root common.Hash // Immutable, root hash to which this layer was made for id uint64 // Immutable, corresponding state id db *Database // Path-based trie database cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs - buffer *nodebuffer // Node buffer to aggregate writes + buffer trienodebuffer // Node buffer to aggregate writes stale bool // Signals that the layer became stale (state progressed) lock sync.RWMutex // Lock used to protect stale flag } // newDiskLayer creates a new disk layer based on the passing arguments. -func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer *nodebuffer) *diskLayer { +func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer trienodebuffer) *diskLayer { // Initialize a clean cache if the memory allowance is not zero // or reuse the provided cache if it is not nil (inherited from // the original disk layer). 
- if cleans == nil && db.config.CleanSize != 0 { - cleans = fastcache.New(db.config.CleanSize) + if cleans == nil && db.config.CleanCacheSize != 0 { + cleans = fastcache.New(db.config.CleanCacheSize) } return &diskLayer{ root: root, @@ -150,7 +202,7 @@ func (dl *diskLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]b if nHash != hash { diskFalseMeter.Mark(1) log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash) - return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path) + return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path, nBlob) } if dl.cleans != nil && len(nBlob) > 0 { dl.cleans.Set(key, nBlob) @@ -172,37 +224,65 @@ func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) { dl.lock.Lock() defer dl.lock.Unlock() - // Construct and store the state history first. If crash happens - // after storing the state history but without flushing the - // corresponding states(journal), the stored state history will - // be truncated in the next restart. + // Construct and store the state history first. If crash happens after storing + // the state history but without flushing the corresponding states(journal), + // the stored state history will be truncated from head in the next restart. + var ( + overflow bool + oldest uint64 + ) if dl.db.freezer != nil { - err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateLimit) + err := writeHistory(dl.db.freezer, bottom) + if err != nil { + return nil, err + } + // Determine if the persisted history object has exceeded the configured + // limitation, set the overflow as true if so. + tail, err := dl.db.freezer.Tail() if err != nil { return nil, err } + limit := dl.db.config.StateHistory + if limit != 0 && bottom.stateID()-tail > limit { + overflow = true + oldest = bottom.stateID() - limit + 1 // track the id of history **after truncation** + } } // Mark the diskLayer as stale before applying any mutations on top. dl.stale = true - // Store the root->id lookup afterwards. All stored lookups are - // identified by the **unique** state root. It's impossible that - // in the same chain blocks are not adjacent but have the same - // root. + // Store the root->id lookup afterwards. All stored lookups are identified + // by the **unique** state root. It's impossible that in the same chain + // blocks are not adjacent but have the same root. if dl.id == 0 { rawdb.WriteStateID(dl.db.diskdb, dl.root, 0) } rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID()) - // Construct a new disk layer by merging the nodes from the provided - // diff layer, and flush the content in disk layer if there are too - // many nodes cached. The clean cache is inherited from the original - // disk layer for reusing. + // Construct a new disk layer by merging the nodes from the provided diff + // layer, and flush the content in disk layer if there are too many nodes + // cached. The clean cache is inherited from the original disk layer. ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes)) - err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force) - if err != nil { + + // In a unique scenario where the ID of the oldest history object (after tail + // truncation) surpasses the persisted state ID, we take the necessary action + // of forcibly committing the cached dirty nodes to ensure that the persisted + // state ID remains higher. 
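+ // For illustration (hypothetical numbers): if the persisted state id is 10 and, after tail truncation, the oldest retained history id becomes 11, the buffered dirty nodes are flushed right away so that the persisted id catches up and stays within [oldest-history-id, latest-history-id]; otherwise the disk state could no longer be recovered from the remaining histories.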
+ if !force && rawdb.ReadPersistentStateID(dl.db.diskdb) < oldest { + force = true + } + if err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force); err != nil { return nil, err } + // To remove outdated history objects from the end, we set the 'tail' parameter + // to 'oldest-1' due to the offset between the freezer index and the history ID. + if overflow { + pruned, err := truncateFromTail(ndl.db.diskdb, ndl.db.freezer, oldest-1) + if err != nil { + return nil, err + } + log.Debug("Pruned state history", "items", pruned, "tailid", oldest) + } return ndl, nil } @@ -254,7 +334,7 @@ func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.cleans, dl.buffer), nil } -// setBufferSize sets the node buffer size to the provided value. +// setBufferSize sets the trie node buffer size to the provided value. func (dl *diskLayer) setBufferSize(size int) error { dl.lock.RLock() defer dl.lock.RUnlock() @@ -266,14 +346,29 @@ func (dl *diskLayer) setBufferSize(size int) error { } // size returns the approximate size of cached nodes in the disk layer. -func (dl *diskLayer) size() common.StorageSize { +func (dl *diskLayer) size() (common.StorageSize, common.StorageSize) { dl.lock.RLock() defer dl.lock.RUnlock() if dl.stale { - return 0 + return 0, 0 + } + dirtyNodes, dirtyimmutableNodes := dl.buffer.getSize() + return common.StorageSize(dirtyNodes), common.StorageSize(dirtyimmutableNodes) +} + +// resetCache releases the memory held by clean cache to prevent memory leak. +func (dl *diskLayer) resetCache() { + dl.lock.RLock() + defer dl.lock.RUnlock() + + // Stale disk layer loses the ownership of clean cache. + if dl.stale { + return + } + if dl.cleans != nil { + dl.cleans.Reset() } - return common.StorageSize(dl.buffer.size) } // hasher is used to compute the sha256 hash of the provided data. diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go index f503a9c49d2e..03114f9d6576 100644 --- a/trie/triedb/pathdb/errors.go +++ b/trie/triedb/pathdb/errors.go @@ -21,6 +21,7 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" ) var ( @@ -44,8 +45,26 @@ var ( // errUnexpectedNode is returned if the requested node with specified path is // not hash matched with expectation. errUnexpectedNode = errors.New("unexpected node") + + // errWriteImmutable is returned if write to background immutable nodecache + // under asyncnodebuffer + errWriteImmutable = errors.New("write immutable nodecache") + + // errFlushMutable is returned if flush the background mutable nodecache + // to disk, under asyncnodebuffer + errFlushMutable = errors.New("flush mutable nodecache") + + // errIncompatibleMerge is returned when merge node cache occurs error. 
+ errIncompatibleMerge = errors.New("incompatible nodecache merge") + + // errRevertImmutable is returned if revert the background immutable nodecache + errRevertImmutable = errors.New("revert immutable nodecache") ) -func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte) error { - return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x", errUnexpectedNode, loc, owner, path, expHash, gotHash) +func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte, blob []byte) error { + blobHex := "nil" + if len(blob) > 0 { + blobHex = hexutil.Encode(blob) + } + return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x, blob: %s", errUnexpectedNode, loc, owner, path, expHash, gotHash, blobHex) } diff --git a/trie/triedb/pathdb/history.go b/trie/triedb/pathdb/history.go index ce82532507c7..10eb557e635e 100644 --- a/trie/triedb/pathdb/history.go +++ b/trie/triedb/pathdb/history.go @@ -512,38 +512,28 @@ func readHistory(freezer *rawdb.ResettableFreezer, id uint64) (*history, error) return &dec, nil } -// writeHistory writes the state history with provided state set. After -// storing the corresponding state history, it will also prune the stale -// histories from the disk with the given threshold. -func writeHistory(db ethdb.KeyValueStore, freezer *rawdb.ResettableFreezer, dl *diffLayer, limit uint64) error { +// writeHistory persists the state history with the provided state set. +func writeHistory(freezer *rawdb.ResettableFreezer, dl *diffLayer) error { // Short circuit if state set is not available. if dl.states == nil { return errors.New("state change set is not available") } var ( - err error - n int - start = time.Now() - h = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states) + start = time.Now() + history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states) ) - accountData, storageData, accountIndex, storageIndex := h.encode() + accountData, storageData, accountIndex, storageIndex := history.encode() dataSize := common.StorageSize(len(accountData) + len(storageData)) indexSize := common.StorageSize(len(accountIndex) + len(storageIndex)) // Write history data into five freezer table respectively. - rawdb.WriteStateHistory(freezer, dl.stateID(), h.meta.encode(), accountIndex, storageIndex, accountData, storageData) + rawdb.WriteStateHistory(freezer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData) - // Prune stale state histories based on the config. 
- if limit != 0 && dl.stateID() > limit { - n, err = truncateFromTail(db, freezer, dl.stateID()-limit) - if err != nil { - return err - } - } historyDataBytesMeter.Mark(int64(dataSize)) historyIndexBytesMeter.Mark(int64(indexSize)) historyBuildTimeMeter.UpdateSince(start) - log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "pruned", n, "elapsed", common.PrettyDuration(time.Since(start))) + log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "elapsed", common.PrettyDuration(time.Since(start))) + return nil } diff --git a/trie/triedb/pathdb/history_test.go b/trie/triedb/pathdb/history_test.go index 76b37d9f79ce..82801cf83ae8 100644 --- a/trie/triedb/pathdb/history_test.go +++ b/trie/triedb/pathdb/history_test.go @@ -226,7 +226,7 @@ func TestTruncateTailHistories(t *testing.T) { // openFreezer initializes the freezer instance for storing state histories. func openFreezer(datadir string, readOnly bool) (*rawdb.ResettableFreezer, error) { - return rawdb.NewStateHistoryFreezer(datadir, readOnly, 0) + return rawdb.NewStateFreezer(datadir, readOnly, 0) } func compareSet[k comparable](a, b map[k][]byte) bool { diff --git a/trie/triedb/pathdb/journal.go b/trie/triedb/pathdb/journal.go index d8c7d39fb9bd..3c7884cdf47e 100644 --- a/trie/triedb/pathdb/journal.go +++ b/trie/triedb/pathdb/journal.go @@ -21,6 +21,7 @@ import ( "errors" "fmt" "io" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -129,7 +130,7 @@ func (db *Database) loadLayers() layer { log.Info("Failed to load journal, discard it", "err", err) } // Return single layer with persistent state. - return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newNodeBuffer(db.bufferSize, nil, 0)) + return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, NewTrieNodeBuffer(db.config.SyncFlush, db.bufferSize, nil, 0)) } // loadDiskLayer reads the binary blob from the layer journal, reconstructing @@ -169,7 +170,7 @@ func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) { nodes[entry.Owner] = subset } // Calculate the internal state transitions by id difference. 
- base := newDiskLayer(root, id, db, nil, newNodeBuffer(db.bufferSize, nodes, id-stored)) + base := newDiskLayer(root, id, db, nil, NewTrieNodeBuffer(db.config.SyncFlush, db.bufferSize, nodes, id-stored)) return base, nil } @@ -259,8 +260,9 @@ func (dl *diskLayer) journal(w io.Writer) error { return err } // Step three, write all unwritten nodes into the journal - nodes := make([]journalNodes, 0, len(dl.buffer.nodes)) - for owner, subset := range dl.buffer.nodes { + bufferNodes := dl.buffer.getAllNodes() + nodes := make([]journalNodes, 0, len(bufferNodes)) + for owner, subset := range bufferNodes { entry := journalNodes{Owner: owner} for path, node := range subset { entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob}) @@ -270,7 +272,7 @@ func (dl *diskLayer) journal(w io.Writer) error { if err := rlp.Encode(w, nodes); err != nil { return err } - log.Debug("Journaled pathdb disk layer", "root", dl.root, "nodes", len(dl.buffer.nodes)) + log.Debug("Journaled pathdb disk layer", "root", dl.root, "nodes", len(bufferNodes)) return nil } @@ -341,6 +343,14 @@ func (db *Database) Journal(root common.Hash) error { if l == nil { return fmt.Errorf("triedb layer [%#x] missing", root) } + disk := db.tree.bottom() + if l, ok := l.(*diffLayer); ok { + log.Info("Persisting dirty state to disk", "head", l.block, "root", root, "layers", l.id-disk.id+disk.buffer.getLayers()) + } else { // disk layer only on noop runs (likely) or deep reorgs (unlikely) + log.Info("Persisting dirty state to disk", "root", root, "layers", disk.buffer.getLayers()) + } + start := time.Now() + // Run the journaling db.lock.Lock() defer db.lock.Unlock() @@ -373,6 +383,6 @@ func (db *Database) Journal(root common.Hash) error { // Set the db in read only mode to reject all following mutations db.readOnly = true - log.Info("Stored journal in triedb", "disk", diskroot, "size", common.StorageSize(journal.Len())) + log.Info("Persisted dirty state to disk", "size", common.StorageSize(journal.Len()), "elapsed", common.PrettyDuration(time.Since(start))) return nil } diff --git a/trie/triedb/pathdb/layertree.go b/trie/triedb/pathdb/layertree.go index d314779910e9..ed94d2e19eb6 100644 --- a/trie/triedb/pathdb/layertree.go +++ b/trie/triedb/pathdb/layertree.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/triestate" ) @@ -212,3 +213,46 @@ func (tree *layerTree) bottom() *diskLayer { } return current.(*diskLayer) } + +// front return the top non-fork difflayer/disklayer root hash for rewinding. 
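+// It builds a parent-to-children index of all layers, starts from the disk layer root and follows the chain while each layer has exactly one child, stopping at the first fork or at the chain tip.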
+func (tree *layerTree) front() common.Hash { + tree.lock.RLock() + defer tree.lock.RUnlock() + + chain := make(map[common.Hash][]common.Hash) + var base common.Hash + for _, layer := range tree.layers { + switch dl := layer.(type) { + case *diskLayer: + if dl.stale { + log.Info("pathdb top disklayer is stale") + return base + } + base = dl.rootHash() + case *diffLayer: + if _, ok := chain[dl.parentLayer().rootHash()]; !ok { + chain[dl.parentLayer().rootHash()] = make([]common.Hash, 0) + } + chain[dl.parentLayer().rootHash()] = append(chain[dl.parentLayer().rootHash()], dl.rootHash()) + default: + log.Crit("unsupported layer type") + } + } + if (base == common.Hash{}) { + log.Info("pathdb top difflayer is empty") + return base + } + parent := base + for { + children, ok := chain[parent] + if !ok { + log.Info("pathdb top difflayer", "root", parent) + return parent + } + if len(children) != 1 { + log.Info("pathdb top difflayer is forked", "common ancestor root", parent) + return parent + } + parent = children[0] + } +} diff --git a/trie/triedb/pathdb/nodebuffer.go b/trie/triedb/pathdb/nodebuffer.go index 67de225b0495..9d79df37ffd0 100644 --- a/trie/triedb/pathdb/nodebuffer.go +++ b/trie/triedb/pathdb/nodebuffer.go @@ -29,6 +29,8 @@ import ( "github.com/ethereum/go-ethereum/trie/trienode" ) +var _ trienodebuffer = &nodebuffer{} + // nodebuffer is a collection of modified trie nodes to aggregate the disk // write. The content of the nodebuffer must be checked before diving into // disk (since it basically is not-yet-written data). @@ -71,7 +73,7 @@ func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*tr if n.Hash != hash { dirtyFalseMeter.Mark(1) log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash) - return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path) + return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path, n.Blob) } return n, nil } @@ -80,7 +82,7 @@ func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*tr // the ownership of the nodes map which belongs to the bottom-most diff layer. // It will just hold the node references from the given map which are safe to // copy. -func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *nodebuffer { +func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) trienodebuffer { var ( delta int64 overwrite int64 @@ -97,14 +99,14 @@ func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *no current = make(map[string]*trienode.Node) for path, n := range subset { current[path] = n - delta += int64(len(n.Blob) + len(path)) + delta += int64(len(n.Blob) + len(path) + len(owner)) } b.nodes[owner] = current continue } for path, n := range subset { if orig, exist := current[path]; !exist { - delta += int64(len(n.Blob) + len(path)) + delta += int64(len(n.Blob) + len(path) + len(owner)) } else { delta += int64(len(n.Blob) - len(orig.Blob)) overwrite++ @@ -217,7 +219,11 @@ func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id ui } var ( start = time.Now() - batch = db.NewBatchWithSize(int(b.size)) + // Although the calculation of b.size has been as accurate as possible, + // some omissions were still found during testing and code review, but + // we are still not sure it is completely accurate. For better protection, + // some redundancy is added here. 
+ batch = db.NewBatchWithSize(int(float64(b.size) * DefaultBatchRedundancyRate)) ) nodes := writeNodes(batch, b.nodes, clean) rawdb.WritePersistentStateID(batch, id) @@ -273,3 +279,18 @@ func cacheKey(owner common.Hash, path []byte) []byte { } return append(owner.Bytes(), path...) } + +// getSize return the nodebuffer used size. +func (b *nodebuffer) getSize() (uint64, uint64) { + return b.size, 0 +} + +// getAllNodes return all the trie nodes are cached in nodebuffer. +func (b *nodebuffer) getAllNodes() map[common.Hash]map[string]*trienode.Node { + return b.nodes +} + +// getLayers return the size of cached difflayers. +func (b *nodebuffer) getLayers() uint64 { + return b.layers +} diff --git a/trie/triedb/pathdb/testutils.go b/trie/triedb/pathdb/testutils.go index 4406dbc52124..d6fdacb4213e 100644 --- a/trie/triedb/pathdb/testutils.go +++ b/trie/triedb/pathdb/testutils.go @@ -80,7 +80,7 @@ func (h *testHasher) Delete(key []byte) error { // Commit computes the new hash of the states and returns the set with all // state changes. -func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { +func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { var ( nodes = make(map[common.Hash][]byte) set = trienode.NewNodeSet(h.owner) @@ -108,7 +108,7 @@ func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { if root == types.EmptyRootHash && h.root != types.EmptyRootHash { set.AddNode(nil, trienode.NewDeleted()) } - return root, set + return root, set, nil } // hash performs the hash computation upon the provided states. diff --git a/trie/triestate/state.go b/trie/triestate/state.go index cb3611baf9cd..4c47e9c39734 100644 --- a/trie/triestate/state.go +++ b/trie/triestate/state.go @@ -43,7 +43,7 @@ type Trie interface { // Commit the trie and returns a set of dirty nodes generated along with // the new root hash. - Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) + Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) } // TrieLoader wraps functions to load tries. @@ -129,7 +129,10 @@ func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Addre return nil, fmt.Errorf("failed to revert state, err: %w", err) } } - root, result := tr.Commit(false) + root, result, err := tr.Commit(false) + if err != nil { + return nil, err + } if root != prevRoot { return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root) } @@ -181,7 +184,10 @@ func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error { return err } } - root, result := st.Commit(false) + root, result, err := st.Commit(false) + if err != nil { + return err + } if root != prev.Root { return errors.New("failed to reset storage trie") } @@ -232,7 +238,10 @@ func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error { return err } } - root, result := st.Commit(false) + root, result, err := st.Commit(false) + if err != nil { + return err + } if root != types.EmptyRootHash { return errors.New("failed to clear storage trie") }
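For reference, a minimal sketch of how a caller might open a path-based trie database with the renamed configuration knobs introduced above and read the new three-part Size report. The wiring (package main, in-memory backing store) and the concrete values are illustrative assumptions, not part of this change:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
)

func main() {
	// In-memory backing store, purely for illustration.
	diskdb := rawdb.NewMemoryDatabase()

	// SyncFlush=false picks the new asynchronous node buffer with its
	// background immutable cache; true keeps the original synchronous buffer.
	db := pathdb.New(diskdb, &pathdb.Config{
		SyncFlush:      false,
		StateHistory:   90000,            // recent blocks to keep state history for
		CleanCacheSize: 16 * 1024 * 1024, // clean node cache, in bytes
		DirtyCacheSize: 64 * 1024 * 1024, // dirty node buffer, capped at MaxDirtyBufferSize
	})
	defer db.Close()

	// Size now reports diff-layer memory, the mutable node buffer and the
	// immutable (background) node buffer separately.
	diffs, nodes, immutable := db.Size()
	fmt.Println(diffs, nodes, immutable)
}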