From 111d252d75a4839341ff461d4e0cf152ca2cc13d Mon Sep 17 00:00:00 2001
From: Thane Thomson
Date: Wed, 5 Apr 2023 06:55:56 -0400
Subject: [PATCH] Fix lints (#625)

This touches a large number of files, but I believe it's necessary as part of our tech debt cleanup. There are so many functions littering the codebase where:

1. We have unused parameters that should have been refactored away, but weren't.
2. We have unused parameters that should be named according to proper Go conventions (e.g. in interface implementations where a particular function signature is required, the variable names should either be left out or replaced with `_` when unused).
3. We have redundant code (a whole bunch of redundant `if` statements and error checks, for example).
4. We use bad naming conventions for variables, like `copy` or `len`, which shadow Go builtins.

I'm also tired of having the linter fail locally. And if we don't do this, it will all just rot even more.

This PR targets `main`, but should be applied across all the backport branches (which will, of course, require some conflict resolution, but I'm fine with doing that).

I've left TODOs in some places where changes need to be made in follow-up PRs. Commits are organized by package so that it's hopefully easier to review. If you pick up on formatting changes, please see #604.

---

#### PR checklist

- [ ] Tests written/updated
- [ ] Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog)
- [ ] Updated relevant documentation (`docs/` or `spec/`) and code comments

---
.github/workflows/lint.yml | 2 +- Makefile | 2 +- abci/client/grpc_client.go | 4 +- abci/client/grpc_client_test.go | 2 +- abci/client/socket_client.go | 4 +- abci/client/socket_client_test.go | 5 +- abci/cmd/abci-cli/abci-cli.go | 13 +- abci/example/kvstore/helpers.go | 4 +- abci/example/kvstore/kvstore.go | 5 +- abci/example/kvstore/kvstore_test.go | 8 +- abci/server/grpc_server.go | 3 +- abci/tests/server/client.go | 2 +- abci/types/application.go | 24 +- blocksync/pool.go | 3 +- blocksync/reactor.go | 10 +- cmd/cometbft/commands/debug/kill.go | 2 +- cmd/cometbft/commands/gen_node_key.go | 2 +- cmd/cometbft/commands/gen_validator.go | 2 +- cmd/cometbft/commands/init.go | 2 +- cmd/cometbft/commands/inspect.go | 9 +- cmd/cometbft/commands/light.go | 2 +- cmd/cometbft/commands/probe_upnp.go | 2 +- cmd/cometbft/commands/reset.go | 8 +- cmd/cometbft/commands/root.go | 5 +- cmd/cometbft/commands/root_test.go | 22 +- cmd/cometbft/commands/show_node_id.go | 2 +- cmd/cometbft/commands/show_validator.go | 2 +- cmd/cometbft/commands/testnet.go | 4 +- consensus/byzantine_test.go | 1 - consensus/invalid_test.go | 13 +- consensus/reactor.go | 14 +- consensus/reactor_test.go | 53 ++--- consensus/replay_stubs.go | 20 +- consensus/replay_test.go | 43 ++-- consensus/state.go | 19 +- consensus/state_test.go | 18 +- consensus/types/height_vote_set_test.go | 13 +- consensus/wal.go | 15 +- consensus/wal_generator.go | 4 +- evidence/verify.go | 13 +- inspect/inspect.go | 15 +- inspect/inspect_test.go | 37 ++- internal/test/validator.go | 2 +- libs/autofile/cmd/logjack.go | 13 +- libs/bits/bit_array_test.go | 9 +- libs/cli/setup.go | 10 +- libs/clist/clist.go | 18 +- libs/json/helpers_test.go | 10 +- libs/protoio/io_test.go | 5 +- libs/rand/random.go | 11 +- libs/rand/random_test.go | 2 +- libs/strings/string.go | 4 +- light/provider/mock/deadmock.go | 4 +- mempool/clist_mempool.go | 6 +- mempool/clist_mempool_test.go | 14 +- mempool/reactor.go | 2 +- node/node.go | 3 +-
node/setup.go | 17 +- p2p/base_reactor.go | 10 +- p2p/conn/connection.go | 13 +- p2p/conn/secret_connection_test.go | 28 +-- p2p/mock/peer.go | 5 +- p2p/mock/reactor.go | 8 +- p2p/peer_set_test.go | 4 +- p2p/pex/pex_reactor.go | 5 +- p2p/pex/pex_reactor_test.go | 27 +-- p2p/switch_test.go | 50 ++-- p2p/test_util.go | 15 +- p2p/upnp/upnp.go | 6 +- privval/signer_client_test.go | 3 +- rpc/client/http/http.go | 15 +- rpc/client/local/local.go | 65 +++--- rpc/client/mock/client.go | 45 ++-- rpc/client/mock/status.go | 2 +- rpc/core/abci.go | 4 +- rpc/core/blocks.go | 22 +- rpc/core/consensus.go | 25 +- rpc/core/dev.go | 2 +- rpc/core/evidence.go | 6 +- rpc/core/health.go | 2 +- rpc/core/mempool.go | 16 +- rpc/core/net.go | 14 +- rpc/core/net_test.go | 4 +- rpc/core/status.go | 2 +- rpc/core/tx.go | 3 +- rpc/grpc/api.go | 4 +- rpc/grpc/client_server.go | 2 +- rpc/jsonrpc/jsonrpc_test.go | 12 +- rpc/jsonrpc/server/http_json_handler_test.go | 19 +- rpc/jsonrpc/test/main.go | 2 +- scripts/metricsgen/metricsgen.go | 7 +- state/helpers_test.go | 4 - state/indexer/block/kv/util.go | 9 +- state/indexer/block/null/null.go | 4 +- state/indexer/sink/psql/backport.go | 2 +- state/indexer/sink/psql/psql.go | 9 +- state/services.go | 6 +- state/state_test.go | 6 +- state/txindex/kv/kv_test.go | 8 +- state/txindex/kv/utils.go | 8 +- state/txindex/null/null.go | 8 +- statesync/reactor.go | 4 +- statesync/reactor_test.go | 10 +- statesync/syncer_test.go | 31 +-- store/store_test.go | 15 +- test/e2e/app/app.go | 16 +- test/e2e/generator/generate.go | 8 +- test/e2e/runner/evidence.go | 3 +- test/e2e/runner/main.go | 6 +- types/event_bus.go | 44 ++-- types/priv_validator.go | 4 +- types/protobuf_test.go | 16 +- types/validator_set.go | 15 +- types/validator_set_test.go | 231 ++++++++++++------- 114 files changed, 739 insertions(+), 738 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index b3271d53895..17e9b603d73 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -31,7 +31,7 @@ jobs: go.sum - uses: golangci/golangci-lint-action@v3 with: - version: v1.51 + version: latest args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/Makefile b/Makefile index 0d63f4a8dc4..7d23666e73a 100644 --- a/Makefile +++ b/Makefile @@ -231,7 +231,7 @@ format: lint: @echo "--> Running linter" - @go run github.com/golangci/golangci-lint/cmd/golangci-lint run + @go run github.com/golangci/golangci-lint/cmd/golangci-lint@latest run .PHONY: lint vulncheck: diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index cd8f61d3f95..926e679d687 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -49,7 +49,7 @@ func NewGRPCClient(addr string, mustConnect bool) Client { return cli } -func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { +func dialerFunc(_ context.Context, addr string) (net.Conn, error) { return cmtnet.Connect(addr) } @@ -202,7 +202,7 @@ func (cli *grpcClient) Query(ctx context.Context, req *types.RequestQuery) (*typ return cli.client.Query(ctx, types.ToRequestQuery(req).GetQuery(), grpc.WaitForReady(true)) } -func (cli *grpcClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) { +func (cli *grpcClient) Commit(ctx context.Context, _ *types.RequestCommit) (*types.ResponseCommit, error) { return cli.client.Commit(ctx, types.ToRequestCommit().GetCommit(), grpc.WaitForReady(true)) } diff --git a/abci/client/grpc_client_test.go 
b/abci/client/grpc_client_test.go index ac866d39b64..ff57d782bf8 100644 --- a/abci/client/grpc_client_test.go +++ b/abci/client/grpc_client_test.go @@ -75,6 +75,6 @@ func TestGRPC(t *testing.T) { } } -func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { +func dialerFunc(_ context.Context, addr string) (net.Conn, error) { return cmtnet.Connect(addr) } diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index ccac4bbab19..47382e31292 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -169,7 +169,7 @@ func (cli *socketClient) recvResponseRoutine(conn io.Reader) { return } - var res = &types.Response{} + res := &types.Response{} err := types.ReadMessage(r, res) if err != nil { cli.stopForError(fmt.Errorf("read message: %w", err)) @@ -291,7 +291,7 @@ func (cli *socketClient) Query(ctx context.Context, req *types.RequestQuery) (*t return reqRes.Response.GetQuery(), cli.Error() } -func (cli *socketClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) { +func (cli *socketClient) Commit(ctx context.Context, _ *types.RequestCommit) (*types.ResponseCommit, error) { reqRes, err := cli.queueRequest(ctx, types.ToRequestCommit()) if err != nil { return nil, err diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index ab37588e0df..f4bade22934 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -122,7 +122,8 @@ func TestBulk(t *testing.T) { } func setupClientServer(t *testing.T, app types.Application) ( - service.Service, abcicli.Client) { + service.Service, abcicli.Client, +) { t.Helper() // some port between 20k and 30k @@ -156,7 +157,7 @@ type slowApp struct { types.BaseApplication } -func (slowApp) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (slowApp) CheckTx(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error) { time.Sleep(time.Second) return &types.ResponseCheckTx{}, nil } diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 290474993e6..17d9230105d 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -51,7 +51,6 @@ var RootCmd = &cobra.Command{ Short: "the ABCI CLI tool wraps an ABCI client", Long: "the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers", PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - switch cmd.Use { case "kvstore", "version", "help [command]": return nil @@ -196,6 +195,7 @@ var echoCmd = &cobra.Command{ Args: cobra.ExactArgs(1), RunE: cmdEcho, } + var infoCmd = &cobra.Command{ Use: "info", Short: "get some info about the application", @@ -281,7 +281,6 @@ var testCmd = &cobra.Command{ // Generates new Args array based off of previous call args to maintain flag persistence func persistentArgs(line []byte) []string { - // generate the arguments to run from original os.Args // to maintain flag arguments args := os.Args @@ -308,7 +307,7 @@ func compose(fs []func() error) error { return err } -func cmdTest(cmd *cobra.Command, args []string) error { +func cmdTest(cmd *cobra.Command, _ []string) error { ctx := cmd.Context() return compose( []func() error{ @@ -361,7 +360,7 @@ func cmdTest(cmd *cobra.Command, args []string) error { }) } -func cmdBatch(cmd *cobra.Command, args []string) error { +func cmdBatch(cmd *cobra.Command, _ []string) error { bufReader := bufio.NewReader(os.Stdin) LOOP: for { @@ -387,7 +386,7 @@ LOOP: return nil } -func 
cmdConsole(cmd *cobra.Command, args []string) error { +func cmdConsole(cmd *cobra.Command, _ []string) error { for { fmt.Printf("> ") bufReader := bufio.NewReader(os.Stdin) @@ -695,7 +694,7 @@ func cmdProcessProposal(cmd *cobra.Command, args []string) error { return nil } -func cmdKVStore(cmd *cobra.Command, args []string) error { +func cmdKVStore(*cobra.Command, []string) error { logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) // Create the application - in memory or persisted to disk @@ -734,7 +733,6 @@ func cmdKVStore(cmd *cobra.Command, args []string) error { //-------------------------------------------------------------------------------- func printResponse(cmd *cobra.Command, args []string, rsps ...response) { - if flagVerbose { fmt.Println(">", cmd.Use, strings.Join(args, " ")) } @@ -745,7 +743,6 @@ func printResponse(cmd *cobra.Command, args []string, rsps ...response) { fmt.Printf("-> code: OK\n") } else { fmt.Printf("-> code: %d\n", rsp.Code) - } if len(rsp.Data) != 0 { diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go index 094f3ae1908..6dc818aeda9 100644 --- a/abci/example/kvstore/helpers.go +++ b/abci/example/kvstore/helpers.go @@ -14,7 +14,7 @@ import ( // RandVal creates one random validator, with a key derived // from the input value -func RandVal(i int) types.ValidatorUpdate { +func RandVal() types.ValidatorUpdate { pubkey := cmtrand.Bytes(32) power := cmtrand.Uint16() + 1 v := types.UpdateValidator(pubkey, int64(power), "") @@ -28,7 +28,7 @@ func RandVal(i int) types.ValidatorUpdate { func RandVals(cnt int) []types.ValidatorUpdate { res := make([]types.ValidatorUpdate, cnt) for i := 0; i < cnt; i++ { - res[i] = RandVal(i) + res[i] = RandVal() } return res } diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 0f8794893b3..1909c2906fd 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -84,7 +84,7 @@ func (app *Application) SetGenBlockEvents() { // begins and let's the application know what Tendermint versions it's interacting with. Based from this information, // Tendermint will ensure it is in sync with the application by potentially replaying the blocks it has. If the // Application returns a 0 appBlockHeight, Tendermint will call InitChain to initialize the application with consensus related data -func (app *Application) Info(_ context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { +func (app *Application) Info(context.Context, *types.RequestInfo) (*types.ResponseInfo, error) { // Tendermint expects the application to persist validators, on start-up we need to reload them to memory if they exist if len(app.valAddrToPubKeyMap) == 0 && app.state.Height > 0 { validators := app.getValidators() @@ -324,8 +324,7 @@ func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinal // Commit is called after FinalizeBlock and after Tendermint state which includes the updates to // AppHash, ConsensusParams and ValidatorSet has occurred. 
// The KVStore persists the validator updates and the new key values -func (app *Application) Commit(_ context.Context, _ *types.RequestCommit) (*types.ResponseCommit, error) { - +func (app *Application) Commit(context.Context, *types.RequestCommit) (*types.ResponseCommit, error) { // apply the validator updates to state (note this is really the validator set at h + 2) for _, valUpdate := range app.valUpdates { app.updateValidator(valUpdate) diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 9aa4e6f2d3b..60ef73fe1b8 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -133,7 +133,6 @@ func TestPersistentKVStoreInfo(t *testing.T) { resInfo, err = kvstore.Info(ctx, &types.RequestInfo{}) require.NoError(t, err) require.Equal(t, height, resInfo.LastBlockHeight) - } // add a validator, remove a validator, update a validator @@ -200,7 +199,6 @@ func TestValUpdates(t *testing.T) { vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...) vals2 = kvstore.getValidators() valsEqual(t, vals1, vals2) - } func TestCheckTx(t *testing.T) { @@ -208,7 +206,7 @@ func TestCheckTx(t *testing.T) { defer cancel() kvstore := NewInMemoryApplication() - val := RandVal(1) + val := RandVal() testCases := []struct { expCode uint32 @@ -255,7 +253,8 @@ func makeApplyBlock( kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, - txs ...[]byte) { + txs ...[]byte, +) { // make and apply block height := int64(heightInt) hash := []byte("foo") @@ -270,7 +269,6 @@ func makeApplyBlock( require.NoError(t, err) valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates) - } // order doesn't matter diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index 6c0344cf0bb..e0eaefa648e 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -37,7 +37,6 @@ func NewGRPCServer(protoAddr string, app types.Application) service.Service { // OnStart starts the gRPC service. 
func (s *GRPCServer) OnStart() error { - ln, err := net.Listen(s.proto, s.addr) if err != nil { return err @@ -72,6 +71,6 @@ func (app *gRPCApplication) Echo(_ context.Context, req *types.RequestEcho) (*ty return &types.ResponseEcho{Message: req.Message}, nil } -func (app *gRPCApplication) Flush(_ context.Context, req *types.RequestFlush) (*types.ResponseFlush, error) { +func (app *gRPCApplication) Flush(context.Context, *types.RequestFlush) (*types.ResponseFlush, error) { return &types.ResponseFlush{}, nil } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index fc745211d4d..9c33c8eb7d2 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -68,7 +68,7 @@ func FinalizeBlock(ctx context.Context, client abcicli.Client, txBytes [][]byte, return nil } -func PrepareProposal(ctx context.Context, client abcicli.Client, txBytes [][]byte, txExpected [][]byte, dataExp []byte) error { +func PrepareProposal(ctx context.Context, client abcicli.Client, txBytes [][]byte, txExpected [][]byte, _ []byte) error { res, _ := client.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: txBytes}) for i, tx := range res.Txs { if !bytes.Equal(tx, txExpected[i]) { diff --git a/abci/types/application.go b/abci/types/application.go index 3d3a75b55a6..4ccfd229ebc 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -45,39 +45,39 @@ func NewBaseApplication() *BaseApplication { return &BaseApplication{} } -func (BaseApplication) Info(_ context.Context, req *RequestInfo) (*ResponseInfo, error) { +func (BaseApplication) Info(context.Context, *RequestInfo) (*ResponseInfo, error) { return &ResponseInfo{}, nil } -func (BaseApplication) CheckTx(_ context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { +func (BaseApplication) CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) { return &ResponseCheckTx{Code: CodeTypeOK}, nil } -func (BaseApplication) Commit(_ context.Context, req *RequestCommit) (*ResponseCommit, error) { +func (BaseApplication) Commit(context.Context, *RequestCommit) (*ResponseCommit, error) { return &ResponseCommit{}, nil } -func (BaseApplication) Query(_ context.Context, req *RequestQuery) (*ResponseQuery, error) { +func (BaseApplication) Query(context.Context, *RequestQuery) (*ResponseQuery, error) { return &ResponseQuery{Code: CodeTypeOK}, nil } -func (BaseApplication) InitChain(_ context.Context, req *RequestInitChain) (*ResponseInitChain, error) { +func (BaseApplication) InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) { return &ResponseInitChain{}, nil } -func (BaseApplication) ListSnapshots(_ context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { +func (BaseApplication) ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) { return &ResponseListSnapshots{}, nil } -func (BaseApplication) OfferSnapshot(_ context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { +func (BaseApplication) OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { return &ResponseOfferSnapshot{}, nil } -func (BaseApplication) LoadSnapshotChunk(_ context.Context, _ *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { +func (BaseApplication) LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { return &ResponseLoadSnapshotChunk{}, nil } -func (BaseApplication) ApplySnapshotChunk(_ context.Context, req *RequestApplySnapshotChunk) 
(*ResponseApplySnapshotChunk, error) { +func (BaseApplication) ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { return &ResponseApplySnapshotChunk{}, nil } @@ -94,15 +94,15 @@ func (BaseApplication) PrepareProposal(_ context.Context, req *RequestPreparePro return &ResponsePrepareProposal{Txs: txs}, nil } -func (BaseApplication) ProcessProposal(_ context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) { +func (BaseApplication) ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error) { return &ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}, nil } -func (BaseApplication) ExtendVote(_ context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) { +func (BaseApplication) ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error) { return &ResponseExtendVote{}, nil } -func (BaseApplication) VerifyVoteExtension(_ context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { +func (BaseApplication) VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { return &ResponseVerifyVoteExtension{ Status: ResponseVerifyVoteExtension_ACCEPT, }, nil diff --git a/blocksync/pool.go b/blocksync/pool.go index 522c3e68178..919586693d9 100644 --- a/blocksync/pool.go +++ b/blocksync/pool.go @@ -664,9 +664,8 @@ OUTER_LOOP: if peerID == bpr.peerID { bpr.reset() continue OUTER_LOOP - } else { - continue WAIT_LOOP } + continue WAIT_LOOP case <-bpr.gotBlockCh: // We got a block! // Continue the for-loop and wait til Quit. diff --git a/blocksync/reactor.go b/blocksync/reactor.go index b67c2d844c4..eb4e52ee9c2 100644 --- a/blocksync/reactor.go +++ b/blocksync/reactor.go @@ -66,8 +66,8 @@ type Reactor struct { // NewReactor returns new reactor instance. func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, - blockSync bool, metrics *Metrics) *Reactor { - + blockSync bool, metrics *Metrics, +) *Reactor { if state.LastBlockHeight != store.Height() { panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())) @@ -169,15 +169,13 @@ func (bcR *Reactor) AddPeer(peer p2p.Peer) { } // RemovePeer implements Reactor by removing peer from the pool. -func (bcR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { +func (bcR *Reactor) RemovePeer(peer p2p.Peer, _ interface{}) { bcR.pool.RemovePeer(peer.ID()) } // respondToPeer loads a block and sends it to the requesting peer, // if we have it. Otherwise, we'll respond saying we don't have it. 
-func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest, - src p2p.Peer) (queued bool) { - +func (bcR *Reactor) respondToPeer(msg *bcproto.BlockRequest, src p2p.Peer) (queued bool) { block := bcR.store.LoadBlock(msg.Height) if block == nil { bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height) diff --git a/cmd/cometbft/commands/debug/kill.go b/cmd/cometbft/commands/debug/kill.go index 3a1c993bcdc..0fc1cefc22f 100644 --- a/cmd/cometbft/commands/debug/kill.go +++ b/cmd/cometbft/commands/debug/kill.go @@ -32,7 +32,7 @@ $ cometbft debug 34255 /path/to/cmt-debug.zip`, RunE: killCmdHandler, } -func killCmdHandler(cmd *cobra.Command, args []string) error { +func killCmdHandler(_ *cobra.Command, args []string) error { pid, err := strconv.ParseUint(args[0], 10, 64) if err != nil { return err diff --git a/cmd/cometbft/commands/gen_node_key.go b/cmd/cometbft/commands/gen_node_key.go index 952e20fe610..c073ee04995 100644 --- a/cmd/cometbft/commands/gen_node_key.go +++ b/cmd/cometbft/commands/gen_node_key.go @@ -19,7 +19,7 @@ var GenNodeKeyCmd = &cobra.Command{ RunE: genNodeKey, } -func genNodeKey(cmd *cobra.Command, args []string) error { +func genNodeKey(*cobra.Command, []string) error { nodeKeyFile := config.NodeKeyFile() if cmtos.FileExists(nodeKeyFile) { return fmt.Errorf("node key at %s already exists", nodeKeyFile) diff --git a/cmd/cometbft/commands/gen_validator.go b/cmd/cometbft/commands/gen_validator.go index d0792306ca2..5556818bf92 100644 --- a/cmd/cometbft/commands/gen_validator.go +++ b/cmd/cometbft/commands/gen_validator.go @@ -19,7 +19,7 @@ var GenValidatorCmd = &cobra.Command{ Run: genValidator, } -func genValidator(cmd *cobra.Command, args []string) { +func genValidator(*cobra.Command, []string) { pv := privval.GenFilePV("", "") jsbz, err := cmtjson.Marshal(pv) if err != nil { diff --git a/cmd/cometbft/commands/init.go b/cmd/cometbft/commands/init.go index af7f60e6638..8bb572d3303 100644 --- a/cmd/cometbft/commands/init.go +++ b/cmd/cometbft/commands/init.go @@ -21,7 +21,7 @@ var InitFilesCmd = &cobra.Command{ RunE: initFiles, } -func initFiles(cmd *cobra.Command, args []string) error { +func initFiles(*cobra.Command, []string) error { return initFilesWithConfig(config) } diff --git a/cmd/cometbft/commands/inspect.go b/cmd/cometbft/commands/inspect.go index d8ccecf04ef..2d4c5948094 100644 --- a/cmd/cometbft/commands/inspect.go +++ b/cmd/cometbft/commands/inspect.go @@ -44,7 +44,7 @@ func init() { String("db-dir", config.DBPath, "database directory") } -func runInspect(cmd *cobra.Command, args []string) error { +func runInspect(cmd *cobra.Command, _ []string) error { ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() @@ -77,11 +77,8 @@ func runInspect(cmd *cobra.Command, args []string) error { if err != nil { return err } - ins := inspect.New(config.RPC, blockStore, stateStore, txIndexer, blockIndexer, logger) + ins := inspect.New(config.RPC, blockStore, stateStore, txIndexer, blockIndexer) logger.Info("starting inspect server") - if err := ins.Run(ctx); err != nil { - return err - } - return nil + return ins.Run(ctx) } diff --git a/cmd/cometbft/commands/light.go b/cmd/cometbft/commands/light.go index 073dbc6ff10..490075f486b 100644 --- a/cmd/cometbft/commands/light.go +++ b/cmd/cometbft/commands/light.go @@ -100,7 +100,7 @@ func init() { ) } -func runProxy(cmd *cobra.Command, args []string) error { +func runProxy(_ *cobra.Command, args []string) error { // Initialize logger. 
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) var option log.Option diff --git a/cmd/cometbft/commands/probe_upnp.go b/cmd/cometbft/commands/probe_upnp.go index cf22bb5ae2b..5cc50e7a0fa 100644 --- a/cmd/cometbft/commands/probe_upnp.go +++ b/cmd/cometbft/commands/probe_upnp.go @@ -18,7 +18,7 @@ var ProbeUpnpCmd = &cobra.Command{ PreRun: deprecateSnakeCase, } -func probeUpnp(cmd *cobra.Command, args []string) error { +func probeUpnp(*cobra.Command, []string) error { capabilities, err := upnp.Probe(logger) if err != nil { fmt.Println("Probe failed: ", err) diff --git a/cmd/cometbft/commands/reset.go b/cmd/cometbft/commands/reset.go index 4eadbad91a7..0b5a4867632 100644 --- a/cmd/cometbft/commands/reset.go +++ b/cmd/cometbft/commands/reset.go @@ -53,7 +53,7 @@ var ResetPrivValidatorCmd = &cobra.Command{ // XXX: this is totally unsafe. // it's only suitable for testnets. -func resetAllCmd(cmd *cobra.Command, args []string) (err error) { +func resetAllCmd(cmd *cobra.Command, _ []string) (err error) { config, err = ParseConfig(cmd) if err != nil { return err @@ -70,7 +70,7 @@ func resetAllCmd(cmd *cobra.Command, args []string) (err error) { // XXX: this is totally unsafe. // it's only suitable for testnets. -func resetPrivValidator(cmd *cobra.Command, args []string) (err error) { +func resetPrivValidator(cmd *cobra.Command, _ []string) (err error) { config, err = ParseConfig(cmd) if err != nil { return err @@ -94,7 +94,7 @@ func resetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logg logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err) } - if err := cmtos.EnsureDir(dbDir, 0700); err != nil { + if err := cmtos.EnsureDir(dbDir, 0o700); err != nil { logger.Error("unable to recreate dbDir", "err", err) } @@ -151,7 +151,7 @@ func resetState(dbDir string, logger log.Logger) error { } } - if err := cmtos.EnsureDir(dbDir, 0700); err != nil { + if err := cmtos.EnsureDir(dbDir, 0o700); err != nil { logger.Error("unable to recreate dbDir", "err", err) } return nil diff --git a/cmd/cometbft/commands/root.go b/cmd/cometbft/commands/root.go index 1d5fce3f3ae..bfadd59cf8e 100644 --- a/cmd/cometbft/commands/root.go +++ b/cmd/cometbft/commands/root.go @@ -40,7 +40,7 @@ func ParseConfig(cmd *cobra.Command) (*cfg.Config, error) { if os.Getenv("CMTHOME") != "" { home = os.Getenv("CMTHOME") } else if os.Getenv("TMHOME") != "" { - //XXX: Deprecated. + // XXX: Deprecated. home = os.Getenv("TMHOME") logger.Error("Deprecated environment variable TMHOME identified. CMTHOME should be used instead.") } else { @@ -98,7 +98,8 @@ var RootCmd = &cobra.Command{ } // deprecateSnakeCase is a util function for 0.34.1. Should be removed in 0.35 -func deprecateSnakeCase(cmd *cobra.Command, args []string) { +// TODO(thane): Remove this across all releases +func deprecateSnakeCase(cmd *cobra.Command, _ []string) { if strings.Contains(cmd.CalledAs(), "_") { fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release") } diff --git a/cmd/cometbft/commands/root_test.go b/cmd/cometbft/commands/root_test.go index 40909d86eb4..4d3475af5cc 100644 --- a/cmd/cometbft/commands/root_test.go +++ b/cmd/cometbft/commands/root_test.go @@ -17,9 +17,7 @@ import ( cmtos "github.com/cometbft/cometbft/libs/os" ) -var ( - defaultRoot = os.ExpandEnv("$HOME/.some/test/dir") -) +var defaultRoot = os.ExpandEnv("$HOME/.some/test/dir") // clearConfig clears env vars, the given root dir, and resets viper. 
func clearConfig(dir string) { @@ -30,11 +28,11 @@ func clearConfig(dir string) { panic(err) } if err := os.Unsetenv("TMHOME"); err != nil { - //XXX: Deprecated. + // XXX: Deprecated. panic(err) } if err := os.Unsetenv("TM_HOME"); err != nil { - //XXX: Deprecated. + // XXX: Deprecated. panic(err) } @@ -58,7 +56,7 @@ func testRootCmd() *cobra.Command { return rootCmd } -func testSetup(rootDir string, args []string, env map[string]string) error { +func testSetup(args []string, env map[string]string) error { clearConfig(defaultRoot) rootCmd := testRootCmd() @@ -78,14 +76,14 @@ func TestRootHome(t *testing.T) { }{ {nil, nil, defaultRoot}, {[]string{"--home", newRoot}, nil, newRoot}, - {nil, map[string]string{"TMHOME": newRoot}, newRoot}, //XXX: Deprecated. + {nil, map[string]string{"TMHOME": newRoot}, newRoot}, // XXX: Deprecated. {nil, map[string]string{"CMTHOME": newRoot}, newRoot}, } for i, tc := range cases { idxString := strconv.Itoa(i) - err := testSetup(defaultRoot, tc.args, tc.env) + err := testSetup(tc.args, tc.env) require.Nil(t, err, idxString) assert.Equal(t, tc.root, config.RootDir, idxString) @@ -96,7 +94,6 @@ func TestRootHome(t *testing.T) { } func TestRootFlagsEnv(t *testing.T) { - // defaults defaults := cfg.DefaultConfig() defaultLogLvl := defaults.LogLevel @@ -119,7 +116,7 @@ func TestRootFlagsEnv(t *testing.T) { for i, tc := range cases { idxString := strconv.Itoa(i) - err := testSetup(defaultRoot, tc.args, tc.env) + err := testSetup(tc.args, tc.env) require.Nil(t, err, idxString) assert.Equal(t, tc.logLevel, config.LogLevel, idxString) @@ -127,7 +124,6 @@ func TestRootFlagsEnv(t *testing.T) { } func TestRootConfig(t *testing.T) { - // write non-default config nonDefaultLogLvl := "abc:debug" cvals := map[string]string{ @@ -152,7 +148,7 @@ func TestRootConfig(t *testing.T) { // XXX: path must match cfg.defaultConfigPath configFilePath := filepath.Join(defaultRoot, "config") - err := cmtos.EnsureDir(configFilePath, 0700) + err := cmtos.EnsureDir(configFilePath, 0o700) require.Nil(t, err) // write the non-defaults to a different path @@ -180,5 +176,5 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return os.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0o600) } diff --git a/cmd/cometbft/commands/show_node_id.go b/cmd/cometbft/commands/show_node_id.go index 07a1937f613..460dcb617d9 100644 --- a/cmd/cometbft/commands/show_node_id.go +++ b/cmd/cometbft/commands/show_node_id.go @@ -17,7 +17,7 @@ var ShowNodeIDCmd = &cobra.Command{ PreRun: deprecateSnakeCase, } -func showNodeID(cmd *cobra.Command, args []string) error { +func showNodeID(*cobra.Command, []string) error { nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) if err != nil { return err diff --git a/cmd/cometbft/commands/show_validator.go b/cmd/cometbft/commands/show_validator.go index 80d970d2015..c819a18fe78 100644 --- a/cmd/cometbft/commands/show_validator.go +++ b/cmd/cometbft/commands/show_validator.go @@ -19,7 +19,7 @@ var ShowValidatorCmd = &cobra.Command{ PreRun: deprecateSnakeCase, } -func showValidator(cmd *cobra.Command, args []string) error { +func showValidator(*cobra.Command, []string) error { keyFilePath := config.PrivValidatorKeyFile() if !cmtos.FileExists(keyFilePath) { return fmt.Errorf("private validator file %s does not exist", keyFilePath) diff --git a/cmd/cometbft/commands/testnet.go b/cmd/cometbft/commands/testnet.go index 4fc6501b5a8..6870876d101 100644 
--- a/cmd/cometbft/commands/testnet.go +++ b/cmd/cometbft/commands/testnet.go @@ -37,7 +37,7 @@ var ( ) const ( - nodeDirPerm = 0755 + nodeDirPerm = 0o755 ) func init() { @@ -94,7 +94,7 @@ Example: RunE: testnetFiles, } -func testnetFiles(cmd *cobra.Command, args []string) error { +func testnetFiles(*cobra.Command, []string) error { if len(hostnames) > 0 && len(hostnames) != (nValidators+nNonValidators) { return fmt.Errorf( "testnet needs precisely %d hostnames (number of validators plus non-validators) if --hostname parameter is used", diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 6365e48911a..d20078caed7 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -319,7 +319,6 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { switches[i] = p2p.MakeSwitch( config.P2P, i, - "foo", "1.0.0", func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) diff --git a/consensus/invalid_test.go b/consensus/invalid_test.go index dd5f8f2ceca..d6bcfdfda30 100644 --- a/consensus/invalid_test.go +++ b/consensus/invalid_test.go @@ -48,21 +48,21 @@ func TestReactorInvalidPrecommit(t *testing.T) { // and otherwise disable the priv validator byzVal.mtx.Lock() pv := byzVal.privValidator - byzVal.doPrevote = func(height int64, round int32) { - invalidDoPrevoteFunc(t, height, round, byzVal, byzR.Switch, pv) + byzVal.doPrevote = func(int64, int32) { + invalidDoPrevoteFunc(t, byzVal, byzR.Switch, pv) } byzVal.mtx.Unlock() // wait for a bunch of blocks // TODO: make this tighter by ensuring the halt happens by block 2 for i := 0; i < 10; i++ { - timeoutWaitGroup(t, N, func(j int) { + timeoutWaitGroup(N, func(j int) { <-blocksSubs[j].Out() - }, css) + }) } } -func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch, pv types.PrivValidator) { +func invalidDoPrevoteFunc(t *testing.T, cs *State, sw *p2p.Switch, pv types.PrivValidator) { // routine to: // - precommit for a random block // - send precommit to all peers @@ -89,7 +89,8 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw Type: cmtproto.PrecommitType, BlockID: types.BlockID{ Hash: blockHash, - PartSetHeader: types.PartSetHeader{Total: 1, Hash: cmtrand.Bytes(32)}}, + PartSetHeader: types.PartSetHeader{Total: 1, Hash: cmtrand.Bytes(32)}, + }, } p := precommit.ToProto() err = cs.privValidator.SignVote(cs.state.ChainID, p) diff --git a/consensus/reactor.go b/consensus/reactor.go index 1d7655ae26b..e6da916d367 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -205,7 +205,7 @@ func (conR *Reactor) AddPeer(peer p2p.Peer) { } // RemovePeer is a noop. 
-func (conR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { +func (conR *Reactor) RemovePeer(p2p.Peer, interface{}) { if !conR.IsRunning() { return } @@ -425,7 +425,6 @@ func (conR *Reactor) subscribeToBroadcastEvents() { }); err != nil { conR.Logger.Error("Error adding listener for events", "err", err) } - } func (conR *Reactor) unsubscribeFromBroadcastEvents() { @@ -640,8 +639,8 @@ OUTER_LOOP: } func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState, - prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) { - + prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer, +) { if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok { // Ensure that the peer's PartSetHeader is correct blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) @@ -695,7 +694,7 @@ func (conR *Reactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) { logger := conR.Logger.With("peer", peer) // Simple hack to throttle logs upon sleep. - var sleeping = 0 + sleeping := 0 OUTER_LOOP: for { @@ -776,7 +775,6 @@ func (conR *Reactor) gossipVotesForHeight( prs *cstypes.PeerRoundState, ps *PeerState, ) bool { - // If there are lastCommits to send... if prs.Step == cstypes.RoundStepNewHeight { if ps.PickSendVote(rs.LastCommit) { @@ -832,7 +830,6 @@ func (conR *Reactor) gossipVotesForHeight( // NOTE: `queryMaj23Routine` has a simple crude design since it only comes // into play for liveness when there's a signature DDoS attack happening. func (conR *Reactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) { - OUTER_LOOP: for { // Manage disconnects from self or peer. @@ -1159,8 +1156,7 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote return nil, false } - height, round, votesType, size := - votes.GetHeight(), votes.GetRound(), cmtproto.SignedMsgType(votes.Type()), votes.Size() + height, round, votesType, size := votes.GetHeight(), votes.GetRound(), cmtproto.SignedMsgType(votes.Type()), votes.Size() // Lazily set data using 'votes'. if votes.IsCommit() { diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index bd68f4a6b5e..fc1137ebb66 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -114,9 +114,9 @@ func TestReactorBasic(t *testing.T) { reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N) defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // wait till everyone makes the first new block - timeoutWaitGroup(t, N, func(j int) { + timeoutWaitGroup(N, func(j int) { <-blocksSubs[j].Out() - }, css) + }) } // Ensure we can process blocks with evidence @@ -209,11 +209,11 @@ func TestReactorWithEvidence(t *testing.T) { // we expect for each validator that is the proposer to propose one piece of evidence. 
for i := 0; i < nValidators; i++ { - timeoutWaitGroup(t, nValidators, func(j int) { + timeoutWaitGroup(nValidators, func(j int) { msg := <-blocksSubs[j].Out() block := msg.Data().(types.EventDataNewBlock).Block assert.Len(t, block.Evidence.Evidence, 1) - }, css) + }) } } @@ -238,9 +238,9 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { } // wait till everyone makes the first new block - timeoutWaitGroup(t, N, func(j int) { + timeoutWaitGroup(N, func(j int) { <-blocksSubs[j].Out() - }, css) + }) } func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) { @@ -421,9 +421,9 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // wait till everyone makes the first new block - timeoutWaitGroup(t, N, func(j int) { + timeoutWaitGroup(N, func(j int) { <-blocksSubs[j].Out() - }, css) + }) // Get peer peer := reactors[1].Switch.Peers().List()[0] @@ -460,9 +460,9 @@ func TestReactorVotingPowerChange(t *testing.T) { } // wait till everyone makes block 1 - timeoutWaitGroup(t, nVals, func(j int) { + timeoutWaitGroup(nVals, func(j int) { <-blocksSubs[j].Out() - }, css) + }) //--------------------------------------------------------------------------- logger.Debug("---------------------------- Testing changing the voting power of one validator a few times") @@ -544,9 +544,9 @@ func TestReactorValidatorSetChanges(t *testing.T) { } // wait till everyone makes block 1 - timeoutWaitGroup(t, nPeers, func(j int) { + timeoutWaitGroup(nPeers, func(j int) { <-blocksSubs[j].Out() - }, css) + }) t.Run("Testing adding one validator", func(t *testing.T) { newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() @@ -610,7 +610,6 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) t.Run("Testing adding two validators at once", func(t *testing.T) { - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3) waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3) waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css) @@ -630,7 +629,6 @@ func TestReactorValidatorSetChanges(t *testing.T) { delete(activeVals, string(newValidatorPubKey3.Address())) waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css) }) - } // Check we can make blocks with skip_timeout_commit=false @@ -647,9 +645,9 @@ func TestReactorWithTimeoutCommit(t *testing.T) { defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) // wait till everyone makes the first new block - timeoutWaitGroup(t, N-1, func(j int) { + timeoutWaitGroup(N-1, func(j int) { <-blocksSubs[j].Out() - }, css) + }) } func waitForAndValidateBlock( @@ -660,7 +658,7 @@ func waitForAndValidateBlock( css []*State, txs ...[]byte, ) { - timeoutWaitGroup(t, n, func(j int) { + timeoutWaitGroup(n, func(j int) { css[j].Logger.Debug("waitForAndValidateBlock") msg := <-blocksSubs[j].Out() newBlock := msg.Data().(types.EventDataNewBlock).Block @@ -676,7 +674,7 @@ func waitForAndValidateBlock( }, mempl.TxInfo{}) require.NoError(t, err) } - }, css) + }) } func waitForAndValidateBlockWithTx( @@ -687,7 +685,7 @@ func waitForAndValidateBlockWithTx( css []*State, txs ...[]byte, ) { - timeoutWaitGroup(t, n, func(j int) { + timeoutWaitGroup(n, func(j int) { ntxs := 0 BLOCK_TX_LOOP: for { @@ -710,7 +708,7 @@ func waitForAndValidateBlockWithTx( break BLOCK_TX_LOOP } } - }, css) + }) } func 
waitForBlockWithUpdatedValsAndValidateIt( @@ -720,7 +718,7 @@ func waitForBlockWithUpdatedValsAndValidateIt( blocksSubs []types.Subscription, css []*State, ) { - timeoutWaitGroup(t, n, func(j int) { + timeoutWaitGroup(n, func(j int) { var newBlock *types.Block LOOP: for { @@ -730,17 +728,16 @@ func waitForBlockWithUpdatedValsAndValidateIt( if newBlock.LastCommit.Size() == len(updatedVals) { css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height) break LOOP - } else { - css[j].Logger.Debug( - "waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping", - "height", newBlock.Height, "last_commit", newBlock.LastCommit.Size(), "updated_vals", len(updatedVals), - ) } + css[j].Logger.Debug( + "waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping", + "height", newBlock.Height, "last_commit", newBlock.LastCommit.Size(), "updated_vals", len(updatedVals), + ) } err := validateBlock(newBlock, updatedVals) assert.Nil(t, err) - }, css) + }) } // expects high synchrony! @@ -760,7 +757,7 @@ func validateBlock(block *types.Block, activeVals map[string]struct{}) error { return nil } -func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) { +func timeoutWaitGroup(n int, f func(int)) { wg := new(sync.WaitGroup) wg.Add(n) for i := 0; i < n; i++ { diff --git a/consensus/replay_stubs.go b/consensus/replay_stubs.go index f6d6a8eb5b8..0c55552f036 100644 --- a/consensus/replay_stubs.go +++ b/consensus/replay_stubs.go @@ -20,22 +20,22 @@ func (emptyMempool) Lock() {} func (emptyMempool) Unlock() {} func (emptyMempool) Size() int { return 0 } func (emptyMempool) SizeBytes() int64 { return 0 } -func (emptyMempool) CheckTx(_ types.Tx, _ func(*abci.ResponseCheckTx), _ mempl.TxInfo) error { +func (emptyMempool) CheckTx(types.Tx, func(*abci.ResponseCheckTx), mempl.TxInfo) error { return nil } -func (txmp emptyMempool) RemoveTxByKey(txKey types.TxKey) error { +func (txmp emptyMempool) RemoveTxByKey(types.TxKey) error { return nil } -func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } -func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } +func (emptyMempool) ReapMaxBytesMaxGas(int64, int64) types.Txs { return types.Txs{} } +func (emptyMempool) ReapMaxTxs(int) types.Txs { return types.Txs{} } func (emptyMempool) Update( - _ int64, - _ types.Txs, - _ []*abci.ExecTxResult, - _ mempl.PreCheckFunc, - _ mempl.PostCheckFunc, + int64, + types.Txs, + []*abci.ExecTxResult, + mempl.PreCheckFunc, + mempl.PostCheckFunc, ) error { return nil } @@ -74,6 +74,6 @@ type mockProxyApp struct { finalizeBlockResponse *abci.ResponseFinalizeBlock } -func (mock *mockProxyApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { +func (mock *mockProxyApp) FinalizeBlock(context.Context, *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { return mock.finalizeBlockResponse, nil } diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 0d0c9f0dcc1..bcf74387f36 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -23,7 +23,6 @@ import ( abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/abci/types/mocks" cfg "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/crypto" cryptoenc "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/internal/test" "github.com/cometbft/cometbft/libs/log" @@ -67,8 +66,11 @@ func TestMain(m 
*testing.M) { // and which ones we need the wal for - then we'd also be able to only flush the // wal writer when we need to, instead of with every message. -func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config, - lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store, +func startNewStateAndWaitForBlock( + t *testing.T, + consensusReplayConfig *cfg.Config, + blockDB dbm.DB, + stateStore sm.Store, ) { logger := log.TestingLogger() state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile()) @@ -215,7 +217,7 @@ LOOP: t.Logf("WAL panicked: %v", err) // make sure we can make blocks after a crash - startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateStore) + startNewStateAndWaitForBlock(t, consensusReplayConfig, blockDB, stateStore) // stop consensus state and transactions sender (initFn) cs.Stop() //nolint:errcheck // Logging this error causes failure @@ -648,8 +650,6 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin walFile := tempWALWithData(walBody) testConfig.Consensus.SetWalFile(walFile) - privVal := privval.LoadFilePV(testConfig.PrivValidatorKeyFile(), testConfig.PrivValidatorStateFile()) - wal, err := NewWAL(walFile) require.NoError(t, err) wal.SetLogger(log.TestingLogger()) @@ -662,9 +662,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin }) chain, extCommits, err = makeBlockchainFromWAL(wal) require.NoError(t, err) - pubKey, err := privVal.GetPubKey() - require.NoError(t, err) - stateDB, genesisState, store = stateAndStore(t, testConfig, pubKey, kvstore.AppVersion) + stateDB, genesisState, store = stateAndStore(t, testConfig, kvstore.AppVersion) } stateStore := sm.NewStore(stateDB, sm.StoreOptions{ @@ -925,9 +923,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { defer os.RemoveAll(config.RootDir) privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) const appVersion = 0x0 - pubKey, err := privVal.GetPubKey() - require.NoError(t, err) - stateDB, state, store := stateAndStore(t, config, pubKey, appVersion) + stateDB, state, store := stateAndStore(t, config, appVersion) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -996,7 +992,7 @@ type badApp struct { onlyLastHashIsWrong bool } -func (app *badApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { +func (app *badApp) FinalizeBlock(context.Context, *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { app.height++ if app.onlyLastHashIsWrong { if app.height == app.numBlocks { @@ -1145,7 +1141,6 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { func stateAndStore( t *testing.T, config *cfg.Config, - pubKey crypto.PubKey, appVersion uint64, ) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() @@ -1189,10 +1184,10 @@ func (bs *mockBlockStore) Base() int64 { return bs.base func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 } func (bs *mockBlockStore) LoadBaseMeta() *types.BlockMeta { return bs.LoadBlockMeta(bs.base) } func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } -func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { +func (bs *mockBlockStore) LoadBlockByHash([]byte) *types.Block { return bs.chain[int64(len(bs.chain))-1] } -func (bs *mockBlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { return nil } +func 
(bs *mockBlockStore) LoadBlockMetaByHash([]byte) *types.BlockMeta { return nil } func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { block := bs.chain[height-1] bps, err := block.MakePartSet(types.BlockPartSizeBytes) @@ -1202,10 +1197,11 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { Header: block.Header, } } -func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } -func (bs *mockBlockStore) SaveBlockWithExtendedCommit(block *types.Block, blockParts *types.PartSet, seenCommit *types.ExtendedCommit) { +func (bs *mockBlockStore) LoadBlockPart(int64, int) *types.Part { return nil } +func (bs *mockBlockStore) SaveBlockWithExtendedCommit(*types.Block, *types.PartSet, *types.ExtendedCommit) { } -func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { + +func (bs *mockBlockStore) SaveBlock(*types.Block, *types.PartSet, *types.Commit) { } func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { @@ -1215,11 +1211,12 @@ func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return bs.extCommits[height-1].ToCommit() } + func (bs *mockBlockStore) LoadBlockExtendedCommit(height int64) *types.ExtendedCommit { return bs.extCommits[height-1] } -func (bs *mockBlockStore) PruneBlocks(height int64, state sm.State) (uint64, int64, error) { +func (bs *mockBlockStore) PruneBlocks(height int64, _ sm.State) (uint64, int64, error) { evidencePoint := height pruned := uint64(0) for i := int64(0); i < height-1; i++ { @@ -1251,10 +1248,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) { config := ResetConfig("handshake_test_") defer os.RemoveAll(config.RootDir) - privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) - pubKey, err := privVal.GetPubKey() - require.NoError(t, err) - stateDB, state, store := stateAndStore(t, config, pubKey, 0x0) + stateDB, state, store := stateAndStore(t, config, 0x0) stateStore := sm.NewStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: false, }) @@ -1276,6 +1270,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) { if err := handshaker.Handshake(proxyApp); err != nil { t.Fatalf("Error on abci handshake: %v", err) } + var err error // reload the state, check the validator set was updated state, err = stateStore.Load() require.NoError(t, err) diff --git a/consensus/state.go b/consensus/state.go index 47a4c733fcb..3c376d1c3ff 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -469,7 +469,6 @@ func (cs *State) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error // SetProposal inputs a proposal. func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { - if peerID == "" { cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""} } else { @@ -482,7 +481,6 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.ID) error { // AddProposalBlockPart inputs a part of the proposal block. func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.ID) error { - if peerID == "" { cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""} } else { @@ -496,11 +494,11 @@ func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Par // SetProposalAndBlock inputs the proposal and all block parts. 
func (cs *State) SetProposalAndBlock( proposal *types.Proposal, - block *types.Block, + block *types.Block, //nolint:revive parts *types.PartSet, peerID p2p.ID, ) error { - + // TODO: Since the block parameter is not used, we should instead expose just a SetProposal method. if err := cs.SetProposal(proposal, peerID); err != nil { return err } @@ -987,7 +985,6 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { default: panic(fmt.Sprintf("invalid timeout step: %v", ti.Step)) } - } func (cs *State) handleTxsAvailable() { @@ -1056,11 +1053,9 @@ func (cs *State) enterNewRound(height int64, round int32) { // but we fire an event, so update the round step first cs.updateRoundStep(round, cstypes.RoundStepNewRound) cs.Validators = validators - if round == 0 { - // We've already reset these upon new height, - // and meanwhile we might have received a proposal - // for round 0. - } else { + // If round == 0, we've already reset these upon new height, and meanwhile + // we might have received a proposal for round 0. + if round != 0 { logger.Debug("resetting proposal info") cs.Proposal = nil cs.ProposalBlock = nil @@ -1240,7 +1235,6 @@ func (cs *State) isProposalComplete() bool { } // if this is false the proposer is lying or we haven't received the POL yet return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority() - } // Create the next block to propose and return it. Returns nil block upon error. @@ -1958,7 +1952,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (add return added, err } - var pbb = new(cmtproto.Block) + pbb := new(cmtproto.Block) err = proto.Unmarshal(bz, pbb) if err != nil { return added, err @@ -2019,7 +2013,6 @@ func (cs *State) handleCompleteProposal(blockHeight int64) { // Attempt to add the vote. if its a duplicate signature, dupeout the validator func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { added, err := cs.addVote(vote, peerID) - if err != nil { // If the vote height is off, we'll just ignore it, // But if it's a conflicting sig, add it to the cs.evpool. diff --git a/consensus/state_test.go b/consensus/state_test.go index a2e64972fff..89a8ae61e60 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -134,7 +134,6 @@ func TestStateProposerSelection2(t *testing.T) { ensureNewRound(newRoundCh, height, i+round+1) // wait for the new round event each round incrementRound(vss[1:]...) } - } // a non-validator should timeout into the prevote round @@ -695,7 +694,7 @@ func TestStateLockPOLRelock(t *testing.T) { ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round++ // moving to the next round - //XXX: this isnt guaranteed to get there before the timeoutPropose ... + // XXX: this isnt guaranteed to get there before the timeoutPropose ... if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -1017,7 +1016,7 @@ func TestStateLockPOLSafety1(t *testing.T) { round++ // moving to the next round ensureNewRound(newRoundCh, height, round) - //XXX: this isnt guaranteed to get there before the timeoutPropose ... + // XXX: this isnt guaranteed to get there before the timeoutPropose ... 
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -1180,7 +1179,6 @@ func TestStateLockPOLSafety2(t *testing.T) { ensureNoNewUnlock(unlockCh) ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], propBlockHash1) - } // 4 vals. @@ -1543,7 +1541,6 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { } }) } - } // TestVerifyVoteExtensionNotCalledOnAbsentPrecommit tests that the VerifyVoteExtension @@ -1606,7 +1603,6 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { Height: height, VoteExtension: []byte("extension"), }) - } // TestPrepareProposalReceivesVoteExtensions tests that the PrepareProposal method @@ -1696,7 +1692,7 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { require.NotZero(t, len(vote.ExtensionSignature)) cve := cmtproto.CanonicalVoteExtension{ Extension: vote.VoteExtension, - Height: height - 1, //the vote extension was signed in the previous height + Height: height - 1, // the vote extension was signed in the previous height Round: int64(rpp.LocalLastCommit.Round), ChainId: test.DefaultTestChainID, } @@ -1905,7 +1901,7 @@ func TestVoteExtensionEnableHeight(t *testing.T) { // 4 vals, 3 Nil Precommits at P0 // What we want: // P0 waits for timeoutPrecommit before starting next round -func TestWaitingTimeoutOnNilPolka(t *testing.T) { +func TestWaitingTimeoutOnNilPolka(*testing.T) { cs1, vss := randState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round @@ -2057,7 +2053,6 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { assert.True(t, rs.Step == cstypes.RoundStepCommit) assert.True(t, rs.ProposalBlock == nil) assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) - } // What we want: @@ -2435,7 +2430,6 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { t.Errorf("should not output stats message after receiving the known block part!") case <-time.After(50 * time.Millisecond): } - } func TestStateOutputVoteStats(t *testing.T) { @@ -2468,7 +2462,6 @@ func TestStateOutputVoteStats(t *testing.T) { t.Errorf("should not output stats message after receiving the known vote or vote from bigger height") case <-time.After(50 * time.Millisecond): } - } func TestSignSameVoteTwice(t *testing.T) { @@ -2517,7 +2510,8 @@ func signAddPrecommitWithExtension( hash []byte, header types.PartSetHeader, extension []byte, - stub *validatorStub) { + stub *validatorStub, +) { v, err := stub.signVote(cmtproto.PrecommitType, hash, header, extension, true) require.NoError(t, err, "failed to sign vote") addVotes(cs, v) diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 5cac9e5d827..9827a6a48c1 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -28,19 +28,19 @@ func TestPeerCatchupRounds(t *testing.T) { hvs := NewExtendedHeightVoteSet(test.DefaultTestChainID, 1, valSet) - vote999_0 := makeVoteHR(t, 1, 0, 999, privVals) + vote999_0 := makeVoteHR(1, 0, 999, privVals) added, err := hvs.AddVote(vote999_0, "peer1", true) if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1000_0 := makeVoteHR(t, 1, 0, 1000, privVals) + vote1000_0 := makeVoteHR(1, 0, 1000, privVals) added, err = hvs.AddVote(vote1000_0, "peer1", true) if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1001_0 := makeVoteHR(t, 1, 0, 1001, privVals) + 
vote1001_0 := makeVoteHR(1, 0, 1001, privVals) added, err = hvs.AddVote(vote1001_0, "peer1", true) if err != ErrGotVoteFromUnwantedRound { t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err) @@ -54,26 +54,25 @@ func TestPeerCatchupRounds(t *testing.T) { t.Error("Expected to successfully add vote from another peer") } } + func TestInconsistentExtensionData(t *testing.T) { valSet, privVals := types.RandValidatorSet(10, 1) hvsE := NewExtendedHeightVoteSet(test.DefaultTestChainID, 1, valSet) - voteNoExt := makeVoteHR(t, 1, 0, 20, privVals) + voteNoExt := makeVoteHR(1, 0, 20, privVals) voteNoExt.Extension, voteNoExt.ExtensionSignature = nil, nil require.Panics(t, func() { _, _ = hvsE.AddVote(voteNoExt, "peer1", false) }) hvsNoE := NewHeightVoteSet(test.DefaultTestChainID, 1, valSet) - voteExt := makeVoteHR(t, 1, 0, 20, privVals) + voteExt := makeVoteHR(1, 0, 20, privVals) require.Panics(t, func() { _, _ = hvsNoE.AddVote(voteExt, "peer1", true) }) - } func makeVoteHR( - t *testing.T, height int64, valIndex, round int32, diff --git a/consensus/wal.go b/consensus/wal.go index 82ab330d8b1..2a17da1d441 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -89,7 +89,7 @@ var _ WAL = &BaseWAL{} // NewWAL returns a new write-ahead logger based on `baseWAL`, which implements // WAL. It's flushed and synced to disk every 2s and once when stopped. func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) { - err := cmtos.EnsureDir(filepath.Dir(walFile), 0700) + err := cmtos.EnsureDir(filepath.Dir(walFile), 0o700) if err != nil { return nil, fmt.Errorf("failed to ensure WAL directory is in place: %w", err) } @@ -230,7 +230,8 @@ type WALSearchOptions struct { // CONTRACT: caller must close group reader. func (wal *BaseWAL) SearchForEndHeight( height int64, - options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { + options *WALSearchOptions, +) (rd io.ReadCloser, found bool, err error) { var ( msg *TimedWALMessage gr *auto.GroupReader @@ -400,7 +401,7 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { return nil, DataCorruptionError{fmt.Errorf("checksums do not match: read: %v, actual: %v", crc, actualCRC)} } - var res = new(cmtcons.TimedWALMessage) + res := new(cmtcons.TimedWALMessage) err = proto.Unmarshal(data, res) if err != nil { return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)} @@ -422,10 +423,10 @@ type nilWAL struct{} var _ WAL = nilWAL{} -func (nilWAL) Write(m WALMessage) error { return nil } -func (nilWAL) WriteSync(m WALMessage) error { return nil } -func (nilWAL) FlushAndSync() error { return nil } -func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { +func (nilWAL) Write(WALMessage) error { return nil } +func (nilWAL) WriteSync(WALMessage) error { return nil } +func (nilWAL) FlushAndSync() error { return nil } +func (nilWAL) SearchForEndHeight(int64, *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { return nil, false, nil } func (nilWAL) Start() error { return nil } diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 56abdfb359b..c44e9f68ad0 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -217,8 +217,8 @@ func (w *byteBufferWAL) WriteSync(m WALMessage) error { func (w *byteBufferWAL) FlushAndSync() error { return nil } func (w *byteBufferWAL) SearchForEndHeight( - height int64, - options *WALSearchOptions, + int64, + *WALSearchOptions, ) (rd io.ReadCloser, found bool, err 
error) { return nil, false, nil } diff --git a/evidence/verify.go b/evidence/verify.go index 3ccdd0f4292..313a5b91a8e 100644 --- a/evidence/verify.go +++ b/evidence/verify.go @@ -95,7 +95,6 @@ func (evpool *Pool) verify(evidence types.Evidence) error { default: return fmt.Errorf("unrecognized evidence type: %T", evidence) } - } // VerifyLightClientAttack verifies LightClientAttackEvidence against the state of the full node. This involves @@ -108,8 +107,16 @@ func (evpool *Pool) verify(evidence types.Evidence) error { // CONTRACT: must run ValidateBasic() on the evidence before verifying // // must check that the evidence has not expired (i.e. is outside the maximum age threshold) -func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader, - commonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error { +func VerifyLightClientAttack( + e *types.LightClientAttackEvidence, + commonHeader, trustedHeader *types.SignedHeader, + commonVals *types.ValidatorSet, + now time.Time, //nolint:revive + trustPeriod time.Duration, //nolint:revive +) error { + // TODO: Should the current time and trust period be used in this method? + // If not, why were the parameters present? + // In the case of lunatic attack there will be a different commonHeader height. Therefore the node perform a single // verification jump between the common header and the conflicting one if commonHeader.Height != e.ConflictingBlock.Height { diff --git a/inspect/inspect.go b/inspect/inspect.go index b09faa23eed..ad87551b900 100644 --- a/inspect/inspect.go +++ b/inspect/inspect.go @@ -21,9 +21,7 @@ import ( "golang.org/x/sync/errgroup" ) -var ( - logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) -) +var logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) // Inspector manages an RPC service that exports methods to debug a failed node. // After a node shuts down due to a consensus failure, it will no longer start @@ -50,7 +48,13 @@ type Inspector struct { // The caller is responsible for starting and stopping the Inspector service. // //nolint:lll -func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, txidx txindex.TxIndexer, blkidx indexer.BlockIndexer, lg log.Logger) *Inspector { +func New( + cfg *config.RPCConfig, + bs state.BlockStore, + ss state.Store, + txidx txindex.TxIndexer, + blkidx indexer.BlockIndexer, +) *Inspector { routes := rpc.Routes(*cfg, ss, bs, txidx, blkidx, logger) eb := types.NewEventBus() eb.SetLogger(logger.With("module", "events")) @@ -82,9 +86,8 @@ func NewFromConfig(cfg *config.Config) (*Inspector, error) { if err != nil { return nil, err } - lg := logger.With("module", "inspect") ss := state.NewStore(sDB, state.StoreOptions{}) - return New(cfg.RPC, bs, ss, txidx, blkidx, lg), nil + return New(cfg.RPC, bs, ss, txidx, blkidx), nil } // Run starts the Inspector servers and blocks until the servers shut down. 
The passed diff --git a/inspect/inspect_test.go b/inspect/inspect_test.go index 69aa9f7a082..38245ac0c62 100644 --- a/inspect/inspect_test.go +++ b/inspect/inspect_test.go @@ -14,7 +14,6 @@ import ( "github.com/cometbft/cometbft/config" "github.com/cometbft/cometbft/inspect" "github.com/cometbft/cometbft/internal/test" - "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/pubsub/query" httpclient "github.com/cometbft/cometbft/rpc/client/http" indexermocks "github.com/cometbft/cometbft/state/indexer/mocks" @@ -35,7 +34,6 @@ func TestInspectConstructor(t *testing.T) { require.NoError(t, err) require.NotNil(t, d) }) - } func TestInspectRun(t *testing.T) { @@ -55,7 +53,6 @@ func TestInspectRun(t *testing.T) { cancel() stoppedWG.Wait() }) - } func TestBlock(t *testing.T) { @@ -77,8 +74,7 @@ func TestBlock(t *testing.T) { blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} wg.Add(1) @@ -130,8 +126,7 @@ func TestTxSearch(t *testing.T) { Return([]*abcitypes.TxResult{testTxResult}, nil) rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} wg.Add(1) @@ -150,7 +145,7 @@ func TestTxSearch(t *testing.T) { cli, err := httpclient.New(rpcConfig.ListenAddress, "/websocket") require.NoError(t, err) - var page = 1 + page := 1 resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "") require.NoError(t, err) require.Len(t, resultTxSearch.Txs, 1) @@ -163,6 +158,7 @@ func TestTxSearch(t *testing.T) { stateStoreMock.AssertExpectations(t) blockStoreMock.AssertExpectations(t) } + func TestTx(t *testing.T) { testHash := []byte("test") testTx := []byte("tx") @@ -178,8 +174,7 @@ func TestTx(t *testing.T) { }, nil) rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} wg.Add(1) @@ -209,6 +204,7 @@ func TestTx(t *testing.T) { stateStoreMock.AssertExpectations(t) blockStoreMock.AssertExpectations(t) } + func TestConsensusParams(t *testing.T) { testHeight := int64(1) testMaxGas := int64(55) @@ -226,8 +222,7 @@ func TestConsensusParams(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -277,8 +272,7 @@ func TestBlockResults(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, 
txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -325,8 +319,7 @@ func TestCommit(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -379,8 +372,7 @@ func TestBlockByHash(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -432,8 +424,7 @@ func TestBlockchain(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -485,8 +476,7 @@ func TestValidators(t *testing.T) { txIndexerMock := &txindexmocks.TxIndexer{} blkIdxMock := &indexermocks.BlockIndexer{} rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -546,8 +536,7 @@ func TestBlockSearch(t *testing.T) { mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })). 
Return([]int64{testHeight}, nil) rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() - d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock, l) + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, txIndexerMock, blkIdxMock) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} diff --git a/internal/test/validator.go b/internal/test/validator.go index 73733a018a9..ddc471ee8e6 100644 --- a/internal/test/validator.go +++ b/internal/test/validator.go @@ -10,7 +10,7 @@ import ( "github.com/cometbft/cometbft/types" ) -func Validator(ctx context.Context, votingPower int64) (*types.Validator, types.PrivValidator, error) { +func Validator(_ context.Context, votingPower int64) (*types.Validator, types.PrivValidator, error) { privVal := types.NewMockPV() pubKey, err := privVal.GetPubKey() if err != nil { diff --git a/libs/autofile/cmd/logjack.go b/libs/autofile/cmd/logjack.go index f6be50332de..92386e50023 100644 --- a/libs/autofile/cmd/logjack.go +++ b/libs/autofile/cmd/logjack.go @@ -12,12 +12,14 @@ import ( cmtos "github.com/cometbft/cometbft/libs/os" ) -const Version = "0.0.1" -const readBufferSize = 1024 // 1KB at a time +const ( + Version = "0.0.1" + readBufferSize = 1024 // 1KB at a time +) // Parse command-line options func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool) { - var flagSet = flag.NewFlagSet(os.Args[0], flag.ExitOnError) + flagSet := flag.NewFlagSet(os.Args[0], flag.ExitOnError) var chopSizeStr, limitSizeStr string flagSet.StringVar(&headPath, "head", "logjack.out", "Destination (head) file.") flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this") @@ -78,10 +80,9 @@ func main() { } if err == io.EOF { os.Exit(0) - } else { - fmt.Println("logjack errored") - os.Exit(1) } + fmt.Println("logjack errored") + os.Exit(1) } _, err = group.Write(buf[:n]) if err != nil { diff --git a/libs/bits/bit_array_test.go b/libs/bits/bit_array_test.go index 4694da9a919..c9bfbb3c21d 100644 --- a/libs/bits/bit_array_test.go +++ b/libs/bits/bit_array_test.go @@ -28,7 +28,6 @@ func randBitArray(bits int) (*BitArray, []byte) { } func TestAnd(t *testing.T) { - bA1, _ := randBitArray(51) bA2, _ := randBitArray(31) bA3 := bA1.And(bA2) @@ -53,7 +52,6 @@ func TestAnd(t *testing.T) { } func TestOr(t *testing.T) { - bA1, _ := randBitArray(51) bA2, _ := randBitArray(31) bA3 := bA1.Or(bA2) @@ -143,7 +141,7 @@ func TestPickRandom(t *testing.T) { } } -func TestBytes(t *testing.T) { +func TestBytes(_ *testing.T) { bA := NewBitArray(4) bA.SetIndex(0, true) check := func(bA *BitArray, bz []byte) { @@ -188,7 +186,7 @@ func TestEmptyFull(t *testing.T) { } } -func TestUpdateNeverPanics(t *testing.T) { +func TestUpdateNeverPanics(_ *testing.T) { newRandBitArray := func(n int) *BitArray { ba, _ := randBitArray(n) return ba @@ -210,7 +208,7 @@ func TestUpdateNeverPanics(t *testing.T) { } } -func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { +func TestNewBitArrayNeverCrashesOnNegatives(_ *testing.T) { bitList := []int{-127, -128, -1 << 31} for _, bits := range bitList { _ = NewBitArray(bits) @@ -218,7 +216,6 @@ func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { } func TestJSONMarshalUnmarshal(t *testing.T) { - bA1 := NewBitArray(0) bA2 := NewBitArray(1) diff --git a/libs/cli/setup.go b/libs/cli/setup.go index 521695bdbd4..9154fa9860c 100644 --- a/libs/cli/setup.go +++ b/libs/cli/setup.go @@ -125,7 +125,7 @@ func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { } // 
Bind all flags and read the config into viper -func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { +func bindFlagsLoadViper(cmd *cobra.Command, _ []string) error { // cmd.Flags() includes flags from this command and all persistent flags from the parent if err := viper.BindPFlags(cmd.Flags()); err != nil { return err @@ -138,17 +138,15 @@ func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { viper.AddConfigPath(filepath.Join(homeDir, "config")) // search root directory /config // If a config file is found, read it in. - if err := viper.ReadInConfig(); err == nil { - // stderr, so if we redirect output to json file, this doesn't appear - // fmt.Fprintln(os.Stderr, "Using config file:", viper.ConfigFileUsed()) - } else if _, ok := err.(viper.ConfigFileNotFoundError); !ok { + err := viper.ReadInConfig() + if _, ok := err.(viper.ConfigFileNotFoundError); !ok { // ignore not found error, return other errors return err } return nil } -func validateOutput(cmd *cobra.Command, args []string) error { +func validateOutput(_ *cobra.Command, _ []string) error { // validate output format output := viper.GetString(OutputFlag) switch output { diff --git a/libs/clist/clist.go b/libs/clist/clist.go index b18306490f9..5eb48f00a12 100644 --- a/libs/clist/clist.go +++ b/libs/clist/clist.go @@ -223,7 +223,7 @@ type CList struct { waitCh chan struct{} head *CElement // first element tail *CElement // last element - len int // list length + curLen int // list length maxLen int // max list length } @@ -234,7 +234,7 @@ func (l *CList) Init() *CList { l.waitCh = make(chan struct{}) l.head = nil l.tail = nil - l.len = 0 + l.curLen = 0 l.mtx.Unlock() return l } @@ -252,9 +252,9 @@ func newWithMax(maxLength int) *CList { func (l *CList) Len() int { l.mtx.RLock() - len := l.len + curLen := l.curLen l.mtx.RUnlock() - return len + return curLen } func (l *CList) Front() *CElement { @@ -329,14 +329,14 @@ func (l *CList) PushBack(v interface{}) *CElement { } // Release waiters on FrontWait/BackWait maybe - if l.len == 0 { + if l.curLen == 0 { l.wg.Done() close(l.waitCh) } - if l.len >= l.maxLen { + if l.curLen >= l.maxLen { panic(fmt.Sprintf("clist: maximum length list reached %d", l.maxLen)) } - l.len++ + l.curLen++ // Modify the tail if l.tail == nil { @@ -373,13 +373,13 @@ func (l *CList) Remove(e *CElement) interface{} { } // If we're removing the only item, make CList FrontWait/BackWait wait. - if l.len == 1 { + if l.curLen == 1 { l.wg = waitGroup1() // WaitGroups are difficult to re-use. l.waitCh = make(chan struct{}) } // Update l.len - l.len-- + l.curLen-- // Connect next/prev and set head/tail if prev == nil { diff --git a/libs/json/helpers_test.go b/libs/json/helpers_test.go index 46d5b471b45..1776c9723cb 100644 --- a/libs/json/helpers_test.go +++ b/libs/json/helpers_test.go @@ -33,8 +33,10 @@ type Boat struct { func (b Boat) Drive() error { return nil } // These are public and private encryption keys. -type PublicKey [8]byte -type PrivateKey [8]byte +type ( + PublicKey [8]byte + PrivateKey [8]byte +) // Custom has custom marshalers and unmarshalers, taking pointer receivers. 
type CustomPtr struct { @@ -45,7 +47,7 @@ func (c *CustomPtr) MarshalJSON() ([]byte, error) { return []byte("\"custom\""), nil } -func (c *CustomPtr) UnmarshalJSON(bz []byte) error { +func (c *CustomPtr) UnmarshalJSON(_ []byte) error { c.Value = "custom" return nil } @@ -60,7 +62,7 @@ func (c CustomValue) MarshalJSON() ([]byte, error) { return []byte("\"custom\""), nil } -func (c CustomValue) UnmarshalJSON(bz []byte) error { +func (c CustomValue) UnmarshalJSON(_ []byte) error { return nil } diff --git a/libs/protoio/io_test.go b/libs/protoio/io_test.go index c6d3c10654f..b95c187df0f 100644 --- a/libs/protoio/io_test.go +++ b/libs/protoio/io_test.go @@ -97,10 +97,7 @@ func iotest(writer protoio.WriteCloser, reader protoio.ReadCloser) error { if i != size { panic("not enough messages read") } - if err := reader.Close(); err != nil { - return err - } - return nil + return reader.Close() } type buffer struct { diff --git a/libs/rand/random.go b/libs/rand/random.go index 73f56b76224..053e03e15e8 100644 --- a/libs/rand/random.go +++ b/libs/rand/random.go @@ -164,13 +164,12 @@ MAIN_LOOP: if v >= 62 { // only 62 characters in strChars val >>= 6 continue - } else { - chars = append(chars, strChars[v]) - if len(chars) == length { - break MAIN_LOOP - } - val >>= 6 } + chars = append(chars, strChars[v]) + if len(chars) == length { + break MAIN_LOOP + } + val >>= 6 } } diff --git a/libs/rand/random_test.go b/libs/rand/random_test.go index 10bb601b5e7..ec4aa327185 100644 --- a/libs/rand/random_test.go +++ b/libs/rand/random_test.go @@ -68,7 +68,7 @@ func testThemAll() string { return out.String() } -func TestRngConcurrencySafety(t *testing.T) { +func TestRngConcurrencySafety(_ *testing.T) { var wg sync.WaitGroup for i := 0; i < 100; i++ { wg.Add(1) diff --git a/libs/strings/string.go b/libs/strings/string.go index 37026dcc208..f012d761b0e 100644 --- a/libs/strings/string.go +++ b/libs/strings/string.go @@ -59,9 +59,7 @@ func IsASCIIText(s string) bool { return false } for _, b := range []byte(s) { - if 32 <= b && b <= 126 { - // good - } else { + if b < 32 || b > 126 { return false } } diff --git a/light/provider/mock/deadmock.go b/light/provider/mock/deadmock.go index 8e388107380..789cc255fd9 100644 --- a/light/provider/mock/deadmock.go +++ b/light/provider/mock/deadmock.go @@ -20,10 +20,10 @@ func (p *deadMock) ChainID() string { return p.chainID } func (p *deadMock) String() string { return "deadMock" } -func (p *deadMock) LightBlock(_ context.Context, height int64) (*types.LightBlock, error) { +func (p *deadMock) LightBlock(context.Context, int64) (*types.LightBlock, error) { return nil, provider.ErrNoResponse } -func (p *deadMock) ReportEvidence(_ context.Context, ev types.Evidence) error { +func (p *deadMock) ReportEvidence(context.Context, types.Evidence) error { return provider.ErrNoResponse } diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index e0501cebb2a..649e785fbf9 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -74,7 +74,6 @@ func NewCListMempool( height int64, options ...CListMempoolOption, ) *CListMempool { - mp := &CListMempool{ config: cfg, proxyAppConn: proxyAppConn, @@ -205,7 +204,6 @@ func (mem *CListMempool) CheckTx( cb func(*abci.ResponseCheckTx), txInfo TxInfo, ) error { - mem.updateMtx.RLock() // use defer to unlock mutex because application (*local client*) might panic defer mem.updateMtx.RUnlock() @@ -470,9 +468,7 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { postCheckErr = mem.postCheck(tx, 
r.CheckTx) } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Good, nothing to do. - } else { + if (r.CheckTx.Code != abci.CodeTypeOK) || postCheckErr != nil { // Tx became invalidated due to newly committed block. mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr) mem.removeTx(tx, mem.recheckCursor) diff --git a/mempool/clist_mempool_test.go b/mempool/clist_mempool_test.go index 061975ecf91..1e4062c657f 100644 --- a/mempool/clist_mempool_test.go +++ b/mempool/clist_mempool_test.go @@ -33,16 +33,17 @@ import ( // test. type cleanupFunc func() -func newMempoolWithAppMock(cc proxy.ClientCreator, client abciclient.Client) (*CListMempool, cleanupFunc, error) { +func newMempoolWithAppMock(client abciclient.Client) (*CListMempool, cleanupFunc, error) { conf := test.ResetTestRoot("mempool_test") - mp, cu := newMempoolWithAppAndConfigMock(cc, conf, client) + mp, cu := newMempoolWithAppAndConfigMock(conf, client) return mp, cu, nil } -func newMempoolWithAppAndConfigMock(cc proxy.ClientCreator, +func newMempoolWithAppAndConfigMock( cfg *config.Config, - client abciclient.Client) (*CListMempool, cleanupFunc) { + client abciclient.Client, +) (*CListMempool, cleanupFunc) { appConnMem := client appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) err := appConnMem.Start() @@ -250,9 +251,7 @@ func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) { mockClient.On("Error").Return(nil).Times(4) mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true })) - app := kvstore.NewInMemoryApplication() - cc := proxy.NewLocalClientCreator(app) - mp, cleanup, err := newMempoolWithAppMock(cc, mockClient) + mp, cleanup, err := newMempoolWithAppMock(mockClient) require.NoError(t, err) defer cleanup() @@ -631,7 +630,6 @@ func TestMempoolTxsBytes(t *testing.T) { assert.EqualValues(t, 20, mp.SizeBytes()) assert.NoError(t, mp.RemoveTxByKey(types.Tx(tx1).Key())) assert.EqualValues(t, 10, mp.SizeBytes()) - } // This will non-deterministically catch some concurrency failures like diff --git a/mempool/reactor.go b/mempool/reactor.go index afb463c0537..de323f863a3 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -83,7 +83,7 @@ func (memR *Reactor) AddPeer(peer p2p.Peer) { } // RemovePeer implements Reactor. 
-func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { +func (memR *Reactor) RemovePeer(peer p2p.Peer, _ interface{}) { memR.ids.Reclaim(peer) // broadcast routine checks if peer is gone and returns } diff --git a/node/node.go b/node/node.go index 7810b0c0997..44fc9fa3dba 100644 --- a/node/node.go +++ b/node/node.go @@ -266,7 +266,6 @@ func NewNode(config *cfg.Config, *config.StateSync, proxyApp.Snapshot(), proxyApp.Query(), - config.StateSync.TempDir, ssMetrics, ) stateSyncReactor.SetLogger(logger.With("module", "statesync")) @@ -425,7 +424,7 @@ func (n *Node) OnStart() error { if !ok { return fmt.Errorf("this blocksync reactor does not support switching from state sync") } - err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider, + err := startStateSync(n.stateSyncReactor, bcR, n.stateSyncProvider, n.config.StateSync, n.stateStore, n.blockStore, n.stateSyncGenesis) if err != nil { return fmt.Errorf("failed to start state sync: %w", err) diff --git a/node/setup.go b/node/setup.go index c118d724bcb..ce965a1e9ea 100644 --- a/node/setup.go +++ b/node/setup.go @@ -469,9 +469,14 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, } // startStateSync starts an asynchronous state sync process, then switches to block sync mode. -func startStateSync(ssR *statesync.Reactor, bcR blockSyncReactor, conR *cs.Reactor, - stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, - stateStore sm.Store, blockStore *store.BlockStore, state sm.State, +func startStateSync( + ssR *statesync.Reactor, + bcR blockSyncReactor, + stateProvider statesync.StateProvider, + config *cfg.StateSyncConfig, + stateStore sm.Store, + blockStore *store.BlockStore, + state sm.State, ) error { ssR.Logger.Info("Starting state sync") @@ -580,11 +585,7 @@ func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) error { if err != nil { return fmt.Errorf("failed to save genesis doc due to marshaling error: %w", err) } - if err := db.SetSync(genesisDocKey, b); err != nil { - return err - } - - return nil + return db.SetSync(genesisDocKey, b) } func createAndStartPrivValidatorSocketClient( diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index aaee128f99d..bfac2340847 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -60,8 +60,8 @@ func NewBaseReactor(name string, impl Reactor) *BaseReactor { func (br *BaseReactor) SetSwitch(sw *Switch) { br.Switch = sw } -func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } -func (*BaseReactor) AddPeer(peer Peer) {} -func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {} -func (*BaseReactor) Receive(e Envelope) {} -func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } +func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } +func (*BaseReactor) AddPeer(Peer) {} +func (*BaseReactor) RemovePeer(Peer, interface{}) {} +func (*BaseReactor) Receive(Envelope) {} +func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index cc2dcaf98d2..dbcb5bf107f 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -47,8 +47,10 @@ const ( defaultPongTimeout = 45 * time.Second ) -type receiveCbFunc func(chID byte, msgBytes []byte) -type errorCbFunc func(interface{}) +type ( + receiveCbFunc func(chID byte, msgBytes []byte) + errorCbFunc func(interface{}) +) /* Each peer has one `MConnection` (multiplex connection) instance. 
@@ -190,8 +192,8 @@ func NewMConnectionWithConfig( } // Create channels - var channelsIdx = map[byte]*Channel{} - var channels = []*Channel{} + channelsIdx := map[byte]*Channel{} + channels := []*Channel{} for _, desc := range chDescs { channel := newChannel(mconn, *desc) @@ -657,6 +659,7 @@ FOR_LOOP: // Cleanup close(c.pong) + //nolint:revive for range c.pong { // Drain } @@ -856,7 +859,7 @@ func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) { // Not goroutine-safe func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet) - var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data) + recvCap, recvReceived := ch.desc.RecvMessageCapacity, len(ch.recving)+len(packet.Data) if recvCap < recvReceived { return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived) } diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go index 516302c35b9..5b5655e3d5f 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/conn/secret_connection_test.go @@ -129,7 +129,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { return nil, true, err } // In parallel, handle some reads and writes. - var trs, ok = async.Parallel( + trs, ok := async.Parallel( func(_ int) (interface{}, bool, error) { // Node writes: for _, nodeWrite := range nodeWrites { @@ -182,7 +182,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { } // Run foo & bar in parallel - var trs, ok = async.Parallel( + trs, ok := async.Parallel( genNodeRunner("foo", fooConn, fooWrites, &fooReads), genNodeRunner("bar", barConn, barWrites, &barReads), ) @@ -194,9 +194,9 @@ func TestSecretConnectionReadWrite(t *testing.T) { compareWritesReads := func(writes []string, reads []string) { for { // Pop next write & corresponding reads - var read = "" - var write = writes[0] - var readCount = 0 + read := "" + write := writes[0] + readCount := 0 for _, readChunk := range reads { read += readChunk readCount++ @@ -229,7 +229,7 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { if *update { t.Logf("Updating golden test vector file %s", goldenFilepath) data := createGoldenTestVectors(t) - err := cmtos.WriteFile(goldenFilepath, []byte(data), 0644) + err := cmtos.WriteFile(goldenFilepath, []byte(data), 0o644) require.NoError(t, err) } f, err := os.Open(goldenFilepath) @@ -259,11 +259,11 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { } func TestNilPubkey(t *testing.T) { - var fooConn, barConn = makeKVStoreConnPair() + fooConn, barConn := makeKVStoreConnPair() defer fooConn.Close() defer barConn.Close() - var fooPrvKey = ed25519.GenPrivKey() - var barPrvKey = privKeyWithNilPubKey{ed25519.GenPrivKey()} + fooPrvKey := ed25519.GenPrivKey() + barPrvKey := privKeyWithNilPubKey{ed25519.GenPrivKey()} go MakeSecretConnection(fooConn, fooPrvKey) //nolint:errcheck // ignore for tests @@ -273,11 +273,11 @@ func TestNilPubkey(t *testing.T) { } func TestNonEd25519Pubkey(t *testing.T) { - var fooConn, barConn = makeKVStoreConnPair() + fooConn, barConn := makeKVStoreConnPair() defer fooConn.Close() defer barConn.Close() - var fooPrvKey = ed25519.GenPrivKey() - var barPrvKey = sr25519.GenPrivKey() + fooPrvKey := ed25519.GenPrivKey() + barPrvKey := sr25519.GenPrivKey() go MakeSecretConnection(fooConn, fooPrvKey) //nolint:errcheck // ignore for tests @@ -309,7 +309,7 @@ func readLots(t *testing.T, wg *sync.WaitGroup, conn io.Reader, n int) { // Creates the data 
for a test vector file. // The file format is: // Hex(diffie_hellman_secret), loc_is_least, Hex(recvSecret), Hex(sendSecret), Hex(challenge) -func createGoldenTestVectors(t *testing.T) string { +func createGoldenTestVectors(*testing.T) string { data := "" for i := 0; i < 32; i++ { randSecretVector := cmtrand.Bytes(32) @@ -342,7 +342,7 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection ) // Make connections from both sides in parallel. - var trs, ok = async.Parallel( + trs, ok := async.Parallel( func(_ int) (val interface{}, abort bool, err error) { fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey) if err != nil { diff --git a/p2p/mock/peer.go b/p2p/mock/peer.go index 3e137af89f8..b4111004c81 100644 --- a/p2p/mock/peer.go +++ b/p2p/mock/peer.go @@ -43,8 +43,8 @@ func NewPeer(ip net.IP) *Peer { } func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error -func (mp *Peer) TrySend(e p2p.Envelope) bool { return true } -func (mp *Peer) Send(e p2p.Envelope) bool { return true } +func (mp *Peer) TrySend(_ p2p.Envelope) bool { return true } +func (mp *Peer) Send(_ p2p.Envelope) bool { return true } func (mp *Peer) NodeInfo() p2p.NodeInfo { return p2p.DefaultNodeInfo{ DefaultNodeID: mp.addr.ID, @@ -61,6 +61,7 @@ func (mp *Peer) Get(key string) interface{} { } return nil } + func (mp *Peer) Set(key string, value interface{}) { mp.kv[key] = value } diff --git a/p2p/mock/reactor.go b/p2p/mock/reactor.go index adc0b2113ed..64d93a97358 100644 --- a/p2p/mock/reactor.go +++ b/p2p/mock/reactor.go @@ -19,7 +19,7 @@ func NewReactor() *Reactor { return r } -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return r.Channels } -func (r *Reactor) AddPeer(peer p2p.Peer) {} -func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {} -func (r *Reactor) Receive(e p2p.Envelope) {} +func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return r.Channels } +func (r *Reactor) AddPeer(_ p2p.Peer) {} +func (r *Reactor) RemovePeer(_ p2p.Peer, _ interface{}) {} +func (r *Reactor) Receive(_ p2p.Envelope) {} diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 9d08e437c74..64911ecebff 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -19,8 +19,8 @@ type mockPeer struct { } func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error -func (mp *mockPeer) TrySend(e Envelope) bool { return true } -func (mp *mockPeer) Send(e Envelope) bool { return true } +func (mp *mockPeer) TrySend(Envelope) bool { return true } +func (mp *mockPeer) Send(Envelope) bool { return true } func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} } func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } func (mp *mockPeer) ID() ID { return mp.id } diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 780c2bad582..0457df2c626 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -52,8 +52,7 @@ const ( defaultBanTime = 24 * time.Hour ) -type errMaxAttemptsToDial struct { -} +type errMaxAttemptsToDial struct{} func (e errMaxAttemptsToDial) Error() string { return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial) @@ -216,7 +215,7 @@ func (r *Reactor) AddPeer(p Peer) { } // RemovePeer implements Reactor by resetting peer's requests info. 
-func (r *Reactor) RemovePeer(p Peer, reason interface{}) { +func (r *Reactor) RemovePeer(p Peer, _ interface{}) { id := string(p.ID()) r.requestsSent.Delete(id) r.lastReceivedRequests.Delete(id) diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 5c2bebf6649..03134995954 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -19,9 +19,7 @@ import ( tmp2p "github.com/cometbft/cometbft/proto/tendermint/p2p" ) -var ( - cfg *config.P2PConfig -) +var cfg *config.P2PConfig func init() { cfg = config.DefaultP2PConfig() @@ -81,7 +79,7 @@ func TestPEXReactorRunning(t *testing.T) { // create switches for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { + switches[i] = p2p.MakeSwitch(cfg, i, func(i int, sw *p2p.Switch) *p2p.Switch { books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) books[i].SetLogger(logger.With("pex", i)) sw.SetAddrBook(books[i]) @@ -224,8 +222,10 @@ func TestCheckSeeds(t *testing.T) { // 4. test create peer with all seeds having unresolvable DNS fails badPeerConfig := &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"}, + Seeds: []string{ + "ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", + "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", + }, } peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) require.Error(t, peerSwitch.Start()) @@ -233,9 +233,11 @@ func TestCheckSeeds(t *testing.T) { // 5. test create peer with one good seed address succeeds badPeerConfig = &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", + Seeds: []string{ + "ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", - seed.NetAddress().String()}, + seed.NetAddress().String(), + }, } peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) require.Nil(t, peerSwitch.Start()) @@ -415,7 +417,7 @@ func TestPEXReactorSeedModeFlushStop(t *testing.T) { // create switches for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { + switches[i] = p2p.MakeSwitch(cfg, i, func(i int, sw *p2p.Switch) *p2p.Switch { books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) books[i].SetLogger(logger.With("pex", i)) sw.SetAddrBook(books[i]) @@ -582,8 +584,6 @@ func testCreatePeerWithConfig(dir string, id int, config *ReactorConfig) *p2p.Sw peer := p2p.MakeSwitch( cfg, id, - "127.0.0.1", - "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { book := NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", id)), false) book.SetLogger(log.TestingLogger()) @@ -614,8 +614,6 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) seed := p2p.MakeSwitch( cfg, id, - "127.0.0.1", - "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false) book.SetLogger(log.TestingLogger()) @@ -668,7 +666,7 @@ func teardownReactor(book AddrBook) { } func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) + sw := p2p.MakeSwitch(cfg, 0, func(i int, sw 
*p2p.Switch) *p2p.Switch { return sw }) sw.SetLogger(log.TestingLogger()) for _, r := range reactors { sw.AddReactor(r.String(), r) @@ -678,7 +676,6 @@ func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { } func TestPexVectors(t *testing.T) { - addr := tmp2p.NetAddress{ ID: "1", IP: "127.0.0.1", diff --git a/p2p/switch_test.go b/p2p/switch_test.go index 4dce6fa105d..ad4040760fa 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -27,9 +27,7 @@ import ( p2pproto "github.com/cometbft/cometbft/proto/tendermint/p2p" ) -var ( - cfg *config.P2PConfig -) +var cfg *config.P2PConfig func init() { cfg = config.DefaultP2PConfig() @@ -67,9 +65,9 @@ func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor { return tr.channels } -func (tr *TestReactor) AddPeer(peer Peer) {} +func (tr *TestReactor) AddPeer(Peer) {} -func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} +func (tr *TestReactor) RemovePeer(Peer, interface{}) {} func (tr *TestReactor) Receive(e Envelope) { if tr.logMessages { @@ -91,16 +89,17 @@ func (tr *TestReactor) getMsgs(chID byte) []PeerMessage { // convenience method for creating two switches connected to each other. // XXX: note this uses net.Pipe and not a proper TCP conn -func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { +func MakeSwitchPair(initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { // Create two switches that will be interconnected. switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches) return switches[0], switches[1] } -func initSwitchFunc(i int, sw *Switch) *Switch { +func initSwitchFunc(_ int, sw *Switch) *Switch { sw.SetAddrBook(&AddrBookMock{ Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{})}) + OurAddrs: make(map[string]struct{}), + }) // Make two reactors of two channels each sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ @@ -116,7 +115,7 @@ func initSwitchFunc(i int, sw *Switch) *Switch { } func TestSwitches(t *testing.T) { - s1, s2 := MakeSwitchPair(t, initSwitchFunc) + s1, s2 := MakeSwitchPair(initSwitchFunc) t.Cleanup(func() { if err := s1.Stop(); err != nil { t.Error(err) @@ -205,7 +204,7 @@ func assertMsgReceivedWithTimeout( } func TestSwitchFiltersOutItself(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc) + s1 := MakeSwitch(cfg, 1, initSwitchFunc) // simulate s1 having a public IP by creating a remote peer with the same ID rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg} @@ -241,8 +240,6 @@ func TestSwitchPeerFilter(t *testing.T) { sw = MakeSwitch( cfg, 1, - "testing", - "123.123.123", initSwitchFunc, SwitchPeerFilters(filters...), ) @@ -291,8 +288,6 @@ func TestSwitchPeerFilterTimeout(t *testing.T) { sw = MakeSwitch( cfg, 1, - "testing", - "123.123.123", initSwitchFunc, SwitchFilterTimeout(5*time.Millisecond), SwitchPeerFilters(filters...), @@ -328,7 +323,7 @@ func TestSwitchPeerFilterTimeout(t *testing.T) { } func TestSwitchPeerFilterDuplicate(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := sw.Start() require.NoError(t, err) t.Cleanup(func() { @@ -376,7 +371,7 @@ func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { assert, require := assert.New(t), require.New(t) - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := 
sw.Start() if err != nil { t.Error(err) @@ -436,7 +431,7 @@ func TestSwitchStopPeerForError(t *testing.T) { p2pMetrics := PrometheusMetrics(namespace) // make two connected switches - sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { + sw1, sw2 := MakeSwitchPair(func(i int, sw *Switch) *Switch { // set metrics on sw1 if i == 0 { opt := WithMetrics(p2pMetrics) @@ -471,7 +466,7 @@ func TestSwitchStopPeerForError(t *testing.T) { } func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := sw.Start() require.NoError(t, err) t.Cleanup(func() { @@ -521,7 +516,7 @@ func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { } func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := sw.Start() require.NoError(t, err) t.Cleanup(func() { @@ -554,7 +549,7 @@ func TestSwitchDialPeersAsync(t *testing.T) { return } - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := sw.Start() require.NoError(t, err) t.Cleanup(func() { @@ -620,7 +615,7 @@ func TestSwitchAcceptRoutine(t *testing.T) { } // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + sw := MakeSwitch(cfg, 1, initSwitchFunc) err := sw.AddUnconditionalPeerIDs(unconditionalPeerIDs) require.NoError(t, err) err = sw.Start() @@ -702,12 +697,14 @@ func (et errorTransport) NetAddress() NetAddress { panic("not implemented") } -func (et errorTransport) Accept(c peerConfig) (Peer, error) { +func (et errorTransport) Accept(peerConfig) (Peer, error) { return nil, et.acceptErr } + func (errorTransport) Dial(NetAddress, peerConfig) (Peer, error) { panic("not implemented") } + func (errorTransport) Cleanup(Peer) { panic("not implemented") } @@ -749,7 +746,7 @@ type mockReactor struct { initCalledBeforeRemoveFinished uint32 } -func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) { +func (r *mockReactor) RemovePeer(Peer, interface{}) { atomic.StoreUint32(&r.removePeerInProgress, 1) defer atomic.StoreUint32(&r.removePeerInProgress, 0) time.Sleep(100 * time.Millisecond) @@ -774,7 +771,7 @@ func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { reactor.BaseReactor = NewBaseReactor("mockReactor", reactor) // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch { + sw := MakeSwitch(cfg, 1, func(i int, sw *Switch) *Switch { sw.AddReactor("mock", reactor) return sw }) @@ -813,7 +810,7 @@ func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { } func BenchmarkSwitchBroadcast(b *testing.B) { - s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch { + s1, s2 := MakeSwitchPair(func(i int, sw *Switch) *Switch { // Make bar reactors of bar channels each sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ {ID: byte(0x00), Priority: 10}, @@ -862,8 +859,7 @@ func BenchmarkSwitchBroadcast(b *testing.B) { } func TestSwitchRemovalErr(t *testing.T) { - - sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { + sw1, sw2 := MakeSwitchPair(func(i int, sw *Switch) *Switch { return initSwitchFunc(i, sw) }) assert.Equal(t, len(sw1.Peers().List()), 1) diff --git a/p2p/test_util.go b/p2p/test_util.go index 2941c102d7c..3fbb68bb655 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -23,10 +23,10 @@ type mockNodeInfo struct 
{ addr *NetAddress } -func (ni mockNodeInfo) ID() ID { return ni.addr.ID } -func (ni mockNodeInfo) NetAddress() (*NetAddress, error) { return ni.addr, nil } -func (ni mockNodeInfo) Validate() error { return nil } -func (ni mockNodeInfo) CompatibleWith(other NodeInfo) error { return nil } +func (ni mockNodeInfo) ID() ID { return ni.addr.ID } +func (ni mockNodeInfo) NetAddress() (*NetAddress, error) { return ni.addr, nil } +func (ni mockNodeInfo) Validate() error { return nil } +func (ni mockNodeInfo) CompatibleWith(NodeInfo) error { return nil } func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) { sw.peers.Add(peer) //nolint:errcheck // ignore error @@ -83,7 +83,7 @@ func MakeConnectedSwitches(cfg *config.P2PConfig, ) []*Switch { switches := make([]*Switch, n) for i := 0; i < n; i++ { - switches[i] = MakeSwitch(cfg, i, TestHost, "123.123.123", initSwitch) + switches[i] = MakeSwitch(cfg, i, initSwitch) } if err := StartSwitches(switches); err != nil { @@ -178,11 +178,9 @@ func StartSwitches(switches []*Switch) error { func MakeSwitch( cfg *config.P2PConfig, i int, - network, version string, initSwitch func(int, *Switch) *Switch, opts ...SwitchOption, ) *Switch { - nodeKey := NodeKey{ PrivKey: ed25519.GenPrivKey(), } @@ -291,7 +289,7 @@ type AddrBookMock struct { var _ AddrBook = (*AddrBookMock)(nil) -func (book *AddrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error { +func (book *AddrBookMock) AddAddress(addr *NetAddress, _ *NetAddress) error { book.Addrs[addr.String()] = struct{}{} return nil } @@ -305,6 +303,7 @@ func (book *AddrBookMock) HasAddress(addr *NetAddress) bool { _, ok := book.Addrs[addr.String()] return ok } + func (book *AddrBookMock) RemoveAddress(addr *NetAddress) { delete(book.Addrs, addr.String()) } diff --git a/p2p/upnp/upnp.go b/p2p/upnp/upnp.go index 45da9d33cb8..9941143f219 100644 --- a/p2p/upnp/upnp.go +++ b/p2p/upnp/upnp.go @@ -299,7 +299,6 @@ type statusInfo struct { } func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { - message := "\r\n" + "" @@ -350,7 +349,8 @@ func (n *upnpNAT) AddPortMapping( externalPort, internalPort int, description string, - timeout int) (mappedExternalPort int, err error) { + timeout int, +) (mappedExternalPort int, err error) { // A single concatenation would break ARM compilation. 
message := "\r\n" + "" + strconv.Itoa(externalPort) @@ -381,8 +381,8 @@ func (n *upnpNAT) AddPortMapping( return mappedExternalPort, err } +//nolint:revive func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) { - message := "\r\n" + "" + strconv.Itoa(externalPort) + "" + protocol + "" + diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index a7a4fbd2a03..81e69a5002f 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -389,8 +389,7 @@ func TestSignerSignVoteErrors(t *testing.T) { } } -func brokenHandler(privVal types.PrivValidator, request privvalproto.Message, - chainID string) (privvalproto.Message, error) { +func brokenHandler(_ types.PrivValidator, request privvalproto.Message, _ string) (privvalproto.Message, error) { var res privvalproto.Message var err error diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 30a31f77b6f..f9ccaeb5a4c 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -235,7 +235,8 @@ func (c *baseRPCClient) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions, +) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) _, err := c.caller.Call(ctx, "abci_query", map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, @@ -505,7 +506,6 @@ func (c *baseRPCClient) TxSearch( perPage *int, orderBy string, ) (*ctypes.ResultTxSearch, error) { - result := new(ctypes.ResultTxSearch) params := map[string]interface{}{ "query": query, @@ -534,7 +534,6 @@ func (c *baseRPCClient) BlockSearch( page, perPage *int, orderBy string, ) (*ctypes.ResultBlockSearch, error) { - result := new(ctypes.ResultBlockSearch) params := map[string]interface{}{ "query": query, @@ -654,9 +653,9 @@ func (w *WSEvents) OnStop() { // Channel is never closed to prevent clients from seeing an erroneous event. // // It returns an error if WSEvents is not running. -func (w *WSEvents) Subscribe(ctx context.Context, subscriber, query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { - +func (w *WSEvents) Subscribe(ctx context.Context, _, query string, + outCapacity ...int, +) (out <-chan ctypes.ResultEvent, err error) { if !w.IsRunning() { return nil, errNotRunning } @@ -684,7 +683,7 @@ func (w *WSEvents) Subscribe(ctx context.Context, subscriber, query string, // subscriber from query. // // It returns an error if WSEvents is not running. -func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber, query string) error { +func (w *WSEvents) Unsubscribe(ctx context.Context, _, query string) error { if !w.IsRunning() { return errNotRunning } @@ -707,7 +706,7 @@ func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber, query string) er // given subscriber from all the queries. // // It returns an error if WSEvents is not running. 
-func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { +func (w *WSEvents) UnsubscribeAll(ctx context.Context, _ string) error { if !w.IsRunning() { return errNotRunning } diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 39f98b6f060..7115af1deb8 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -65,11 +65,11 @@ func (c *Local) SetLogger(l log.Logger) { c.Logger = l } -func (c *Local) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (c *Local) Status(context.Context) (*ctypes.ResultStatus, error) { return c.env.Status(c.ctx) } -func (c *Local) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (c *Local) ABCIInfo(context.Context) (*ctypes.ResultABCIInfo, error) { return c.env.ABCIInfo(c.ctx) } @@ -78,63 +78,64 @@ func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) } func (c *Local) ABCIQueryWithOptions( - ctx context.Context, + _ context.Context, path string, data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions, +) (*ctypes.ResultABCIQuery, error) { return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) } -func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *Local) BroadcastTxCommit(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return c.env.BroadcastTxCommit(c.ctx, tx) } -func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxAsync(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(c.ctx, tx) } -func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxSync(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxSync(c.ctx, tx) } -func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Local) UnconfirmedTxs(_ context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { return c.env.UnconfirmedTxs(c.ctx, limit) } -func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Local) NumUnconfirmedTxs(context.Context) (*ctypes.ResultUnconfirmedTxs, error) { return c.env.NumUnconfirmedTxs(c.ctx) } -func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c *Local) CheckTx(_ context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { return c.env.CheckTx(c.ctx, tx) } -func (c *Local) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { +func (c *Local) NetInfo(context.Context) (*ctypes.ResultNetInfo, error) { return c.env.NetInfo(c.ctx) } -func (c *Local) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { +func (c *Local) DumpConsensusState(context.Context) (*ctypes.ResultDumpConsensusState, error) { return c.env.DumpConsensusState(c.ctx) } -func (c *Local) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { +func (c *Local) ConsensusState(context.Context) (*ctypes.ResultConsensusState, error) { return c.env.GetConsensusState(c.ctx) } -func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { +func (c *Local) ConsensusParams(_ context.Context, height *int64) 
(*ctypes.ResultConsensusParams, error) { return c.env.ConsensusParams(c.ctx, height) } -func (c *Local) Health(ctx context.Context) (*ctypes.ResultHealth, error) { +func (c *Local) Health(context.Context) (*ctypes.ResultHealth, error) { return c.env.Health(c.ctx) } -func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { +func (c *Local) DialSeeds(_ context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { return c.env.UnsafeDialSeeds(c.ctx, seeds) } func (c *Local) DialPeers( - ctx context.Context, + _ context.Context, peers []string, persistent, unconditional, @@ -143,47 +144,47 @@ func (c *Local) DialPeers( return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private) } -func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c *Local) BlockchainInfo(_ context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight) } -func (c *Local) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { +func (c *Local) Genesis(context.Context) (*ctypes.ResultGenesis, error) { return c.env.Genesis(c.ctx) } -func (c *Local) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { +func (c *Local) GenesisChunked(_ context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { return c.env.GenesisChunked(c.ctx, id) } -func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { +func (c *Local) Block(_ context.Context, height *int64) (*ctypes.ResultBlock, error) { return c.env.Block(c.ctx, height) } -func (c *Local) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { +func (c *Local) BlockByHash(_ context.Context, hash []byte) (*ctypes.ResultBlock, error) { return c.env.BlockByHash(c.ctx, hash) } -func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { +func (c *Local) BlockResults(_ context.Context, height *int64) (*ctypes.ResultBlockResults, error) { return c.env.BlockResults(c.ctx, height) } -func (c *Local) Header(ctx context.Context, height *int64) (*ctypes.ResultHeader, error) { +func (c *Local) Header(_ context.Context, height *int64) (*ctypes.ResultHeader, error) { return c.env.Header(c.ctx, height) } -func (c *Local) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { +func (c *Local) HeaderByHash(_ context.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { return c.env.HeaderByHash(c.ctx, hash) } -func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { +func (c *Local) Commit(_ context.Context, height *int64) (*ctypes.ResultCommit, error) { return c.env.Commit(c.ctx, height) } -func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { +func (c *Local) Validators(_ context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { return c.env.Validators(c.ctx, height, page, perPage) } -func (c *Local) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (c *Local) Tx(_ context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { return c.env.Tx(c.ctx, hash, prove) } @@ -207,7 +208,7 @@ func (c *Local) BlockSearch( return c.env.BlockSearch(c.ctx, query, page, perPage, orderBy) } -func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) 
(*ctypes.ResultBroadcastEvidence, error) { +func (c *Local) BroadcastEvidence(_ context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return c.env.BroadcastEvidence(c.ctx, ev) } @@ -215,7 +216,8 @@ func (c *Local) Subscribe( ctx context.Context, subscriber, query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + outCapacity ...int, +) (out <-chan ctypes.ResultEvent, err error) { q, err := cmtquery.New(query) if err != nil { return nil, fmt.Errorf("failed to parse query: %w", err) @@ -246,7 +248,8 @@ func (c *Local) eventsRoutine( sub types.Subscription, subscriber string, q cmtpubsub.Query, - outc chan<- ctypes.ResultEvent) { + outc chan<- ctypes.ResultEvent, +) { for { select { case msg := <-sub.Out(): diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 447ae9c694e..0607954251f 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -83,11 +83,11 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) { return nil, c.Error } -func (c Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (c Client) Status(context.Context) (*ctypes.ResultStatus, error) { return c.env.Status(&rpctypes.Context{}) } -func (c Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (c Client) ABCIInfo(context.Context) (*ctypes.ResultABCIInfo, error) { return c.env.ABCIInfo(&rpctypes.Context{}) } @@ -96,55 +96,56 @@ func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) } func (c Client) ABCIQueryWithOptions( - ctx context.Context, + _ context.Context, path string, data bytes.HexBytes, - opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts client.ABCIQueryOptions, +) (*ctypes.ResultABCIQuery, error) { return c.env.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) } -func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c Client) BroadcastTxCommit(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return c.env.BroadcastTxCommit(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxAsync(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxSync(_ context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxSync(&rpctypes.Context{}, tx) } -func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c Client) CheckTx(_ context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { return c.env.CheckTx(&rpctypes.Context{}, tx) } -func (c Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { +func (c Client) NetInfo(_ context.Context) (*ctypes.ResultNetInfo, error) { return c.env.NetInfo(&rpctypes.Context{}) } -func (c Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { +func (c Client) ConsensusState(_ context.Context) (*ctypes.ResultConsensusState, error) { return c.env.GetConsensusState(&rpctypes.Context{}) } -func (c Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { +func (c Client) DumpConsensusState(_ context.Context) (*ctypes.ResultDumpConsensusState, error) { 
return c.env.DumpConsensusState(&rpctypes.Context{}) } -func (c Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { +func (c Client) ConsensusParams(_ context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { return c.env.ConsensusParams(&rpctypes.Context{}, height) } -func (c Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { +func (c Client) Health(_ context.Context) (*ctypes.ResultHealth, error) { return c.env.Health(&rpctypes.Context{}) } -func (c Client) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { +func (c Client) DialSeeds(_ context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds) } func (c Client) DialPeers( - ctx context.Context, + _ context.Context, peers []string, persistent, unconditional, @@ -153,30 +154,30 @@ func (c Client) DialPeers( return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private) } -func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c Client) BlockchainInfo(_ context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) } -func (c Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { +func (c Client) Genesis(context.Context) (*ctypes.ResultGenesis, error) { return c.env.Genesis(&rpctypes.Context{}) } -func (c Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { +func (c Client) Block(_ context.Context, height *int64) (*ctypes.ResultBlock, error) { return c.env.Block(&rpctypes.Context{}, height) } -func (c Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { +func (c Client) BlockByHash(_ context.Context, hash []byte) (*ctypes.ResultBlock, error) { return c.env.BlockByHash(&rpctypes.Context{}, hash) } -func (c Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { +func (c Client) Commit(_ context.Context, height *int64) (*ctypes.ResultCommit, error) { return c.env.Commit(&rpctypes.Context{}, height) } -func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { +func (c Client) Validators(_ context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { return c.env.Validators(&rpctypes.Context{}, height, page, perPage) } -func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c Client) BroadcastEvidence(_ context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return c.env.BroadcastEvidence(&rpctypes.Context{}, ev) } diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go index a68bcf0d7aa..69b60674778 100644 --- a/rpc/client/mock/status.go +++ b/rpc/client/mock/status.go @@ -17,7 +17,7 @@ var ( _ client.StatusClient = (*StatusRecorder)(nil) ) -func (m *StatusMock) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (m *StatusMock) Status(context.Context) (*ctypes.ResultStatus, error) { res, err := m.GetResponse(nil) if err != nil { return nil, err diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 3152c080df7..cdf7fad22c1 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -13,7 +13,7 @@ import ( // ABCIQuery queries the application for some information. 
// More: https://docs.cometbft.com/main/rpc/#/ABCI/abci_query func (env *Environment) ABCIQuery( - ctx *rpctypes.Context, + _ *rpctypes.Context, path string, data bytes.HexBytes, height int64, @@ -34,7 +34,7 @@ func (env *Environment) ABCIQuery( // ABCIInfo gets some info about the application. // More: https://docs.cometbft.com/main/rpc/#/ABCI/abci_info -func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { +func (env *Environment) ABCIInfo(_ *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { resInfo, err := env.ProxyAppQuery.Info(context.TODO(), proxy.RequestInfo) if err != nil { return nil, err diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index ce8b1871b5b..a29680b798b 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -25,9 +25,9 @@ import ( // // More: https://docs.cometbft.com/main/rpc/#/Info/blockchain func (env *Environment) BlockchainInfo( - ctx *rpctypes.Context, - minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - + _ *rpctypes.Context, + minHeight, maxHeight int64, +) (*ctypes.ResultBlockchainInfo, error) { const limit int64 = 20 var err error minHeight, maxHeight, err = filterMinMax( @@ -49,7 +49,8 @@ func (env *Environment) BlockchainInfo( return &ctypes.ResultBlockchainInfo{ LastHeight: env.BlockStore.Height(), - BlockMetas: blockMetas}, nil + BlockMetas: blockMetas, + }, nil } // error if either min or max are negative or min > max @@ -88,7 +89,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // Header gets block header at a given height. // If no height is provided, it will fetch the latest header. // More: https://docs.cometbft.com/main/rpc/#/Info/header -func (env *Environment) Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) { +func (env *Environment) Header(_ *rpctypes.Context, heightPtr *int64) (*ctypes.ResultHeader, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -104,7 +105,7 @@ func (env *Environment) Header(ctx *rpctypes.Context, heightPtr *int64) (*ctypes // HeaderByHash gets header by hash. // More: https://docs.cometbft.com/main/rpc/#/Info/header_by_hash -func (env *Environment) HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { +func (env *Environment) HeaderByHash(_ *rpctypes.Context, hash bytes.HexBytes) (*ctypes.ResultHeader, error) { // N.B. The hash parameter is HexBytes so that the reflective parameter // decoding logic in the HTTP service will correctly translate from JSON. // See https://github.com/tendermint/tendermint/issues/6802 for context. @@ -120,7 +121,7 @@ func (env *Environment) HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) // Block gets block at a given height. // If no height is provided, it will fetch the latest block. // More: https://docs.cometbft.com/main/rpc/#/Info/block -func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { +func (env *Environment) Block(_ *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -136,7 +137,7 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes. // BlockByHash gets block by hash. 
// More: https://docs.cometbft.com/main/rpc/#/Info/block_by_hash -func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { +func (env *Environment) BlockByHash(_ *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { block := env.BlockStore.LoadBlockByHash(hash) if block == nil { return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil @@ -149,7 +150,7 @@ func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes // Commit gets block commit at a given height. // If no height is provided, it will fetch the commit for the latest block. // More: https://docs.cometbft.com/main/rpc/#/Info/commit -func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { +func (env *Environment) Commit(_ *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -180,7 +181,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes // Thus response.results.deliver_tx[5] is the results of executing // getBlock(h).Txs[5] // More: https://docs.cometbft.com/main/rpc/#/Info/block_results -func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { +func (env *Environment) BlockResults(_ *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -208,7 +209,6 @@ func (env *Environment) BlockSearch( pagePtr, perPagePtr *int, orderBy string, ) (*ctypes.ResultBlockSearch, error) { - // skip if block indexing is disabled if _, ok := env.BlockIndexer.(*blockidxnull.BlockerIndexer); ok { return nil, errors.New("block indexing is disabled") diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index b39c090ff3e..f2e3df0d24c 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -16,10 +16,10 @@ import ( // // More: https://docs.cometbft.com/main/rpc/#/Info/validators func (env *Environment) Validators( - ctx *rpctypes.Context, + _ *rpctypes.Context, heightPtr *int64, - pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) { - + pagePtr, perPagePtr *int, +) (*ctypes.ResultValidators, error) { // The latest validator that we know is the NextValidator of the last block. height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr) if err != nil { @@ -46,13 +46,14 @@ func (env *Environment) Validators( BlockHeight: height, Validators: v, Count: len(v), - Total: totalCount}, nil + Total: totalCount, + }, nil } // DumpConsensusState dumps consensus state. // UNSTABLE // More: https://docs.cometbft.com/main/rpc/#/Info/dump_consensus_state -func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { +func (env *Environment) DumpConsensusState(*rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { // Get Peer consensus states. peers := env.P2PPeers.Peers().List() peerStates := make([]ctypes.PeerStateInfo, len(peers)) @@ -79,13 +80,14 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul } return &ctypes.ResultDumpConsensusState{ RoundState: roundState, - Peers: peerStates}, nil + Peers: peerStates, + }, nil } // ConsensusState returns a concise summary of the consensus state. 
// UNSTABLE // More: https://docs.cometbft.com/main/rpc/#/Info/consensus_state -func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { +func (env *Environment) GetConsensusState(*rpctypes.Context) (*ctypes.ResultConsensusState, error) { // Get self round state. bz, err := env.ConsensusState.GetRoundStateSimpleJSON() return &ctypes.ResultConsensusState{RoundState: bz}, err @@ -95,9 +97,9 @@ func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.Result // If no height is provided, it will fetch the latest consensus params. // More: https://docs.cometbft.com/main/rpc/#/Info/consensus_params func (env *Environment) ConsensusParams( - ctx *rpctypes.Context, - heightPtr *int64) (*ctypes.ResultConsensusParams, error) { - + _ *rpctypes.Context, + heightPtr *int64, +) (*ctypes.ResultConsensusParams, error) { // The latest consensus params that we know is the consensus params after the // last block. height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr) @@ -111,5 +113,6 @@ func (env *Environment) ConsensusParams( } return &ctypes.ResultConsensusParams{ BlockHeight: height, - ConsensusParams: consensusParams}, nil + ConsensusParams: consensusParams, + }, nil } diff --git a/rpc/core/dev.go b/rpc/core/dev.go index 90f035531f8..389c96ee03c 100644 --- a/rpc/core/dev.go +++ b/rpc/core/dev.go @@ -6,7 +6,7 @@ import ( ) // UnsafeFlushMempool removes all transactions from the mempool. -func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { +func (env *Environment) UnsafeFlushMempool(*rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { env.Mempool.Flush() return &ctypes.ResultUnsafeFlushMempool{}, nil } diff --git a/rpc/core/evidence.go b/rpc/core/evidence.go index 38bb862562e..a71a1f9b1e3 100644 --- a/rpc/core/evidence.go +++ b/rpc/core/evidence.go @@ -12,9 +12,9 @@ import ( // BroadcastEvidence broadcasts evidence of the misbehavior. // More: https://docs.cometbft.com/main/rpc/#/Evidence/broadcast_evidence func (env *Environment) BroadcastEvidence( - ctx *rpctypes.Context, - ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { - + _ *rpctypes.Context, + ev types.Evidence, +) (*ctypes.ResultBroadcastEvidence, error) { if ev == nil { return nil, errors.New("no evidence was provided") } diff --git a/rpc/core/health.go b/rpc/core/health.go index 322e6af7aa9..c2918970ce0 100644 --- a/rpc/core/health.go +++ b/rpc/core/health.go @@ -8,6 +8,6 @@ import ( // Health gets node health. Returns empty result (200 OK) on success, no // response - in case of an error. // More: https://docs.cometbft.com/main/rpc/#/Info/health -func (env *Environment) Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { +func (env *Environment) Health(*rpctypes.Context) (*ctypes.ResultHealth, error) { return &ctypes.ResultHealth{}, nil } diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 17e2c323910..aa05f33ec40 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -19,9 +19,8 @@ import ( // BroadcastTxAsync returns right away, with no response. Does not wait for // CheckTx nor transaction results. 
// More: https://docs.cometbft.com/main/rpc/#/Tx/broadcast_tx_async -func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (env *Environment) BroadcastTxAsync(_ *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { err := env.Mempool.CheckTx(tx, nil, mempl.TxInfo{}) - if err != nil { return nil, err } @@ -38,7 +37,6 @@ func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ct case <-ctx.Context().Done(): case resCh <- res: } - }, mempl.TxInfo{}) if err != nil { return nil, err @@ -148,7 +146,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* // UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) // including their number. // More: https://docs.cometbft.com/main/rpc/#/Info/unconfirmed_txs -func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { +func (env *Environment) UnconfirmedTxs(_ *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { // reuse per_page validator limit := env.validatePerPage(limitPtr) @@ -157,22 +155,24 @@ func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*c Count: len(txs), Total: env.Mempool.Size(), TotalBytes: env.Mempool.SizeBytes(), - Txs: txs}, nil + Txs: txs, + }, nil } // NumUnconfirmedTxs gets number of unconfirmed transactions. // More: https://docs.cometbft.com/main/rpc/#/Info/num_unconfirmed_txs -func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { +func (env *Environment) NumUnconfirmedTxs(*rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { return &ctypes.ResultUnconfirmedTxs{ Count: env.Mempool.Size(), Total: env.Mempool.Size(), - TotalBytes: env.Mempool.SizeBytes()}, nil + TotalBytes: env.Mempool.SizeBytes(), + }, nil } // CheckTx checks the transaction without executing it. The transaction won't // be added to the mempool either. // More: https://docs.cometbft.com/main/rpc/#/Tx/check_tx -func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (env *Environment) CheckTx(_ *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { res, err := env.ProxyAppMempool.CheckTx(context.TODO(), &abci.RequestCheckTx{Tx: tx}) if err != nil { return nil, err diff --git a/rpc/core/net.go b/rpc/core/net.go index 0a619910e61..12ed51a769d 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -12,7 +12,7 @@ import ( // NetInfo returns network info. // More: https://docs.cometbft.com/main/rpc/#/Info/net_info -func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { +func (env *Environment) NetInfo(*rpctypes.Context) (*ctypes.ResultNetInfo, error) { peersList := env.P2PPeers.Peers().List() peers := make([]ctypes.Peer, 0, len(peersList)) for _, peer := range peersList { @@ -39,7 +39,7 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, e } // UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT). 
-func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { +func (env *Environment) UnsafeDialSeeds(_ *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { if len(seeds) == 0 { return &ctypes.ResultDialSeeds{}, errors.New("no seeds provided") } @@ -53,10 +53,10 @@ func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) ( // UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT), // optionally making them persistent. func (env *Environment) UnsafeDialPeers( - ctx *rpctypes.Context, + _ *rpctypes.Context, peers []string, - persistent, unconditional, private bool) (*ctypes.ResultDialPeers, error) { - + persistent, unconditional, private bool, +) (*ctypes.ResultDialPeers, error) { if len(peers) == 0 { return &ctypes.ResultDialPeers{}, errors.New("no peers provided") } @@ -96,7 +96,7 @@ func (env *Environment) UnsafeDialPeers( // Genesis returns genesis file. // More: https://docs.cometbft.com/main/rpc/#/Info/genesis -func (env *Environment) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { +func (env *Environment) Genesis(*rpctypes.Context) (*ctypes.ResultGenesis, error) { if len(env.genChunks) > 1 { return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") } @@ -104,7 +104,7 @@ func (env *Environment) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, e return &ctypes.ResultGenesis{Genesis: env.GenDoc}, nil } -func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { +func (env *Environment) GenesisChunked(_ *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { if env.genChunks == nil { return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") } diff --git a/rpc/core/net_test.go b/rpc/core/net_test.go index 29feccecb7a..5791621129e 100644 --- a/rpc/core/net_test.go +++ b/rpc/core/net_test.go @@ -13,7 +13,7 @@ import ( ) func TestUnsafeDialSeeds(t *testing.T) { - sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", + sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) err := sw.Start() require.NoError(t, err) @@ -48,7 +48,7 @@ func TestUnsafeDialSeeds(t *testing.T) { } func TestUnsafeDialPeers(t *testing.T) { - sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", + sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, func(n int, sw *p2p.Switch) *p2p.Switch { return sw }) sw.SetAddrBook(&p2p.AddrBookMock{ Addrs: make(map[string]struct{}), diff --git a/rpc/core/status.go b/rpc/core/status.go index 29cba7cbeed..6f56d1a9cdc 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -13,7 +13,7 @@ import ( // Status returns CometBFT status including node info, pubkey, latest block // hash, app hash, block height and time. // More: https://docs.cometbft.com/main/rpc/#/Info/status -func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { +func (env *Environment) Status(*rpctypes.Context) (*ctypes.ResultStatus, error) { var ( earliestBlockHeight int64 earliestBlockHash cmtbytes.HexBytes diff --git a/rpc/core/tx.go b/rpc/core/tx.go index d84ed3a955a..710cbb46441 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -17,7 +17,7 @@ import ( // transaction is in the mempool, invalidated, or was not sent in the first // place. 
// More: https://docs.cometbft.com/main/rpc/#/Info/tx -func (env *Environment) Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (env *Environment) Tx(_ *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { // if index is disabled, return error if _, ok := env.TxIndexer.(*null.TxIndex); ok { return nil, fmt.Errorf("transaction indexing is disabled") @@ -58,7 +58,6 @@ func (env *Environment) TxSearch( pagePtr, perPagePtr *int, orderBy string, ) (*ctypes.ResultTxSearch, error) { - // if index is disabled, return error if _, ok := env.TxIndexer.(*null.TxIndex); ok { return nil, errors.New("transaction indexing is disabled") diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go index 08031991dc4..d237953b1a6 100644 --- a/rpc/grpc/api.go +++ b/rpc/grpc/api.go @@ -12,12 +12,12 @@ type broadcastAPI struct { env *core.Environment } -func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { +func (bapi *broadcastAPI) Ping(context.Context, *RequestPing) (*ResponsePing, error) { // kvstore so we can check if the server is up return &ResponsePing{}, nil } -func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { +func (bapi *broadcastAPI) BroadcastTx(_ context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { // NOTE: there's no way to get client's remote address // see https://stackoverflow.com/questions/33684570/session-and-remote-ip-address-in-grpc-go res, err := bapi.env.BroadcastTxCommit(&rpctypes.Context{}, req.Tx) diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go index 3856d3ecca1..3d89db88ffa 100644 --- a/rpc/grpc/client_server.go +++ b/rpc/grpc/client_server.go @@ -35,6 +35,6 @@ func StartGRPCClient(protoAddr string) BroadcastAPIClient { return NewBroadcastAPIClient(conn) } -func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { +func dialerFunc(_ context.Context, addr string) (net.Conn, error) { return cmtnet.Connect(addr) } diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index 093b0491fef..1f12c817a0d 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -71,27 +71,27 @@ var Routes = map[string]*server.RPCFunc{ "echo_default": server.NewRPCFunc(EchoWithDefault, "arg", server.Cacheable("arg")), } -func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) { +func EchoResult(_ *types.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoWSResult(ctx *types.Context, v string) (*ResultEcho, error) { +func EchoWSResult(_ *types.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoIntResult(ctx *types.Context, v int) (*ResultEchoInt, error) { +func EchoIntResult(_ *types.Context, v int) (*ResultEchoInt, error) { return &ResultEchoInt{v}, nil } -func EchoBytesResult(ctx *types.Context, v []byte) (*ResultEchoBytes, error) { +func EchoBytesResult(_ *types.Context, v []byte) (*ResultEchoBytes, error) { return &ResultEchoBytes{v}, nil } -func EchoDataBytesResult(ctx *types.Context, v cmtbytes.HexBytes) (*ResultEchoDataBytes, error) { +func EchoDataBytesResult(_ *types.Context, v cmtbytes.HexBytes) (*ResultEchoDataBytes, error) { return &ResultEchoDataBytes{v}, nil } -func EchoWithDefault(ctx *types.Context, v *int) (*ResultEchoWithDefault, error) { +func EchoWithDefault(_ *types.Context, v *int) (*ResultEchoWithDefault, error) { val := -1 if v != nil { val = *v diff --git 
a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index 2d29cb4bbcc..9bb7948b5dd 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -195,17 +195,16 @@ func TestRPCNotificationInBatch(t *testing.T) { if tt.expectCount > 1 { t.Errorf("#%d: expected an array, couldn't unmarshal it\nblob: %s", i, blob) continue - } else { - // we were expecting an error here, so let's unmarshal a single response - var response types.RPCResponse - err = json.Unmarshal(blob, &response) - if err != nil { - t.Errorf("#%d: expected successful parsing of an RPCResponse\nblob: %s", i, blob) - continue - } - // have a single-element result - responses = []types.RPCResponse{response} } + // we were expecting an error here, so let's unmarshal a single response + var response types.RPCResponse + err = json.Unmarshal(blob, &response) + if err != nil { + t.Errorf("#%d: expected successful parsing of an RPCResponse\nblob: %s", i, blob) + continue + } + // have a single-element result + responses = []types.RPCResponse{response} } if tt.expectCount != len(responses) { t.Errorf("#%d: expected %d response(s), but got %d\nblob: %s", i, tt.expectCount, len(responses), blob) diff --git a/rpc/jsonrpc/test/main.go b/rpc/jsonrpc/test/main.go index b3cb9cf03c5..74ed3495185 100644 --- a/rpc/jsonrpc/test/main.go +++ b/rpc/jsonrpc/test/main.go @@ -15,7 +15,7 @@ var routes = map[string]*rpcserver.RPCFunc{ "hello_world": rpcserver.NewRPCFunc(HelloWorld, "name,num"), } -func HelloWorld(ctx *rpctypes.Context, name string, num int) (Result, error) { +func HelloWorld(_ *rpctypes.Context, name string, num int) (Result, error) { return Result{fmt.Sprintf("hi %s %d", name, num)}, nil } diff --git a/scripts/metricsgen/metricsgen.go b/scripts/metricsgen/metricsgen.go index 1da45cb83f7..eb1163ca78c 100644 --- a/scripts/metricsgen/metricsgen.go +++ b/scripts/metricsgen/metricsgen.go @@ -145,6 +145,7 @@ func main() { log.Fatalf("Generating code: %v", err) } } + func ignoreTestFiles(f fs.FileInfo) bool { return !strings.Contains(f.Name(), "_test.go") } @@ -168,6 +169,8 @@ func ParseMetricsDir(dir string, structName string) (TemplateData, error) { // Grab the package name. var pkgName string var pkg *ast.Package + // TODO(thane): Figure out a more readable way of implementing this. 
+ //nolint:revive for pkgName, pkg = range d { } td := TemplateData{ @@ -210,9 +213,7 @@ func GenerateMetricsFile(w io.Writer, td TemplateData) error { } func findMetricsStruct(files map[string]*ast.File, structName string) (*ast.StructType, string, error) { - var ( - st *ast.StructType - ) + var st *ast.StructType for _, file := range files { mPkgName, err := extractMetricsPackageName(file.Imports) if err != nil { diff --git a/state/helpers_test.go b/state/helpers_test.go index f0de48bbadc..57cecd87962 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "testing" "time" dbm "github.com/cometbft/cometbft-db" @@ -169,7 +168,6 @@ func genValSet(size int) *types.ValidatorSet { } func makeHeaderPartsResponsesValPubKeyChange( - t *testing.T, state sm.State, pubkey crypto.PubKey, ) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { @@ -188,7 +186,6 @@ func makeHeaderPartsResponsesValPubKeyChange( } func makeHeaderPartsResponsesValPowerChange( - t *testing.T, state sm.State, power int64, ) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { @@ -207,7 +204,6 @@ func makeHeaderPartsResponsesValPowerChange( } func makeHeaderPartsResponsesParams( - t *testing.T, state sm.State, params cmtproto.ConsensusParams, ) (types.Header, types.BlockID, *abci.ResponseFinalizeBlock) { diff --git a/state/indexer/block/kv/util.go b/state/indexer/block/kv/util.go index 9ccb84720fa..a39821eb9f2 100644 --- a/state/indexer/block/kv/util.go +++ b/state/indexer/block/kv/util.go @@ -147,12 +147,11 @@ func dedupHeight(conditions []syntax.Condition) (dedupConditions []syntax.Condit if c.Op == syntax.TEq { if found || heightRangeExists { continue - } else { - heightCondition = append(heightCondition, c) - heightInfo.height = int64(c.Arg.Number()) - - found = true } + heightCondition = append(heightCondition, c) + heightInfo.height = int64(c.Arg.Number()) + + found = true } else { heightInfo.onlyHeightEq = false heightRangeExists = true diff --git a/state/indexer/block/null/null.go b/state/indexer/block/null/null.go index 2af842c74a8..a8d63cd76ea 100644 --- a/state/indexer/block/null/null.go +++ b/state/indexer/block/null/null.go @@ -14,7 +14,7 @@ var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) // TxIndex implements a no-op block indexer. type BlockerIndexer struct{} -func (idx *BlockerIndexer) Has(height int64) (bool, error) { +func (idx *BlockerIndexer) Has(int64) (bool, error) { return false, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } @@ -22,6 +22,6 @@ func (idx *BlockerIndexer) Index(types.EventDataNewBlockEvents) error { return nil } -func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { +func (idx *BlockerIndexer) Search(context.Context, *query.Query) ([]int64, error) { return []int64{}, nil } diff --git a/state/indexer/sink/psql/backport.go b/state/indexer/sink/psql/backport.go index 429cf03d310..01d7e1bc994 100644 --- a/state/indexer/sink/psql/backport.go +++ b/state/indexer/sink/psql/backport.go @@ -70,7 +70,7 @@ type BackportBlockIndexer struct{ psql *EventSink } // Has is implemented to satisfy the BlockIndexer interface, but it is not // supported by the psql event sink and reports an error for all inputs. 
-func (BackportBlockIndexer) Has(height int64) (bool, error) { +func (BackportBlockIndexer) Has(_ int64) (bool, error) { return false, errors.New("the BlockIndexer.Has method is not supported") } diff --git a/state/indexer/sink/psql/psql.go b/state/indexer/sink/psql/psql.go index 79c641b0790..a4845ce3048 100644 --- a/state/indexer/sink/psql/psql.go +++ b/state/indexer/sink/psql/psql.go @@ -219,7 +219,6 @@ INSERT INTO `+tableTxResults+` (block_id, index, created_at, tx_hash, tx_result) return fmt.Errorf("indexing transaction events: %w", err) } return nil - }); err != nil { return err } @@ -228,22 +227,22 @@ INSERT INTO `+tableTxResults+` (block_id, index, created_at, tx_hash, tx_result) } // SearchBlockEvents is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error) { +func (es *EventSink) SearchBlockEvents(_ context.Context, _ *query.Query) ([]int64, error) { return nil, errors.New("block search is not supported via the postgres event sink") } // SearchTxEvents is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { +func (es *EventSink) SearchTxEvents(_ context.Context, _ *query.Query) ([]*abci.TxResult, error) { return nil, errors.New("tx search is not supported via the postgres event sink") } // GetTxByHash is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) GetTxByHash(hash []byte) (*abci.TxResult, error) { +func (es *EventSink) GetTxByHash(_ []byte) (*abci.TxResult, error) { return nil, errors.New("getTxByHash is not supported via the postgres event sink") } // HasBlock is not implemented by this sink, and reports an error for all queries. 
-func (es *EventSink) HasBlock(h int64) (bool, error) { +func (es *EventSink) HasBlock(_ int64) (bool, error) { return false, errors.New("hasBlock is not supported via the postgres event sink") } diff --git a/state/services.go b/state/services.go index b1506e9efb4..280a945668f 100644 --- a/state/services.go +++ b/state/services.go @@ -59,10 +59,10 @@ type EvidencePool interface { // to the consensus evidence pool interface type EmptyEvidencePool struct{} -func (EmptyEvidencePool) PendingEvidence(maxBytes int64) (ev []types.Evidence, size int64) { +func (EmptyEvidencePool) PendingEvidence(int64) (ev []types.Evidence, size int64) { return nil, 0 } func (EmptyEvidencePool) AddEvidence(types.Evidence) error { return nil } func (EmptyEvidencePool) Update(State, types.EvidenceList) {} -func (EmptyEvidencePool) CheckEvidence(evList types.EvidenceList) error { return nil } -func (EmptyEvidencePool) ReportConflictingVotes(voteA, voteB *types.Vote) {} +func (EmptyEvidencePool) CheckEvidence(types.EvidenceList) error { return nil } +func (EmptyEvidencePool) ReportConflictingVotes(*types.Vote, *types.Vote) {} diff --git a/state/state_test.go b/state/state_test.go index 2e6be6ea915..69e5f33ae3c 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -277,7 +277,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { changeIndex++ power++ } - header, blockID, responses := makeHeaderPartsResponsesValPowerChange(t, state, power) + header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, power) validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) @@ -953,7 +953,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { pubkey := ed25519.GenPrivKey().PubKey() // Swap the first validator with a new one (validator set size stays the same). - header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(t, state, pubkey) + header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, pubkey) // Save state etc. 
var validatorUpdates []*types.Validator @@ -1036,7 +1036,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { changeIndex++ cp = params[changeIndex] } - header, blockID, responses := makeHeaderPartsResponsesParams(t, state, cp.ToProto()) + header, blockID, responses := makeHeaderPartsResponsesParams(state, cp.ToProto()) validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index 899a099892e..ec694ea8f4a 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -155,7 +155,6 @@ func TestTxSearch(t *testing.T) { } func TestTxSearchEventMatch(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ @@ -251,6 +250,7 @@ func TestTxSearchEventMatch(t *testing.T) { }) } } + func TestTxSearchWithCancelation(t *testing.T) { indexer := NewTxIndex(db.NewMemDB()) @@ -421,11 +421,11 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { for _, tc := range testCases { results, err := indexer.Search(ctx, query.MustCompile(tc.q)) assert.NoError(t, err) - len := 0 + n := 0 if tc.found { - len = 1 + n = 1 } - assert.Len(t, results, len) + assert.Len(t, results, n) assert.True(t, !tc.found || proto.Equal(txResult, results[0])) } diff --git a/state/txindex/kv/utils.go b/state/txindex/kv/utils.go index 3f00d342be4..40e2b5d4c42 100644 --- a/state/txindex/kv/utils.go +++ b/state/txindex/kv/utils.go @@ -45,6 +45,7 @@ func ParseEventSeqFromEventKey(key []byte) (int64, error) { return eventSeq, nil } + func dedupHeight(conditions []cmtsyntax.Condition) (dedupConditions []cmtsyntax.Condition, heightInfo HeightInfo) { heightInfo.heightEqIdx = -1 heightRangeExists := false @@ -57,11 +58,10 @@ func dedupHeight(conditions []cmtsyntax.Condition) (dedupConditions []cmtsyntax. if c.Op == cmtsyntax.TEq { if heightRangeExists || found { continue - } else { - found = true - heightCondition = append(heightCondition, c) - heightInfo.height = int64(c.Arg.Number()) } + found = true + heightCondition = append(heightCondition, c) + heightInfo.height = int64(c.Arg.Number()) } else { heightInfo.onlyHeightEq = false heightRangeExists = true diff --git a/state/txindex/null/null.go b/state/txindex/null/null.go index 3e881e826fa..c44a39ebea7 100644 --- a/state/txindex/null/null.go +++ b/state/txindex/null/null.go @@ -15,20 +15,20 @@ var _ txindex.TxIndexer = (*TxIndex)(nil) type TxIndex struct{} // Get on a TxIndex is disabled and panics when invoked. -func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { +func (txi *TxIndex) Get(_ []byte) (*abci.TxResult, error) { return nil, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } // AddBatch is a noop and always returns nil. -func (txi *TxIndex) AddBatch(batch *txindex.Batch) error { +func (txi *TxIndex) AddBatch(_ *txindex.Batch) error { return nil } // Index is a noop and always returns nil. 
-func (txi *TxIndex) Index(result *abci.TxResult) error { +func (txi *TxIndex) Index(_ *abci.TxResult) error { return nil } -func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { +func (txi *TxIndex) Search(_ context.Context, _ *query.Query) ([]*abci.TxResult, error) { return []*abci.TxResult{}, nil } diff --git a/statesync/reactor.go b/statesync/reactor.go index dfc911be722..a7374a29182 100644 --- a/statesync/reactor.go +++ b/statesync/reactor.go @@ -47,10 +47,8 @@ func NewReactor( cfg config.StateSyncConfig, conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, - tempDir string, metrics *Metrics, ) *Reactor { - r := &Reactor{ cfg: cfg, conn: conn, @@ -97,7 +95,7 @@ func (r *Reactor) AddPeer(peer p2p.Peer) { } // RemovePeer implements p2p.Reactor. -func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) { +func (r *Reactor) RemovePeer(peer p2p.Peer, _ interface{}) { r.mtx.RLock() defer r.mtx.RUnlock() if r.syncer != nil { diff --git a/statesync/reactor_test.go b/statesync/reactor_test.go index e5678111d61..a057cb69781 100644 --- a/statesync/reactor_test.go +++ b/statesync/reactor_test.go @@ -26,11 +26,13 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) { "chunk is returned": { &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, []byte{1, 2, 3}, - &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 2, 3}}}, + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 2, 3}}, + }, "empty chunk is returned, as nil": { &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, []byte{}, - &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: nil}}, + &ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: nil}, + }, "nil (missing) chunk is returned as missing": { &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, nil, @@ -71,7 +73,7 @@ func TestReactor_Receive_ChunkRequest(t *testing.T) { // Start a reactor and send a ssproto.ChunkRequest, then wait for and check response cfg := config.DefaultStateSyncConfig() - r := NewReactor(*cfg, conn, nil, "", NopMetrics()) + r := NewReactor(*cfg, conn, nil, NopMetrics()) err := r.Start() require.NoError(t, err) t.Cleanup(func() { @@ -161,7 +163,7 @@ func TestReactor_Receive_SnapshotsRequest(t *testing.T) { // Start a reactor and send a SnapshotsRequestMessage, then wait for and check responses cfg := config.DefaultStateSyncConfig() - r := NewReactor(*cfg, conn, nil, "", NopMetrics()) + r := NewReactor(*cfg, conn, nil, NopMetrics()) err := r.Start() require.NoError(t, err) t.Cleanup(func() { diff --git a/statesync/syncer_test.go b/statesync/syncer_test.go index 7abef23fd36..4fbb47a2e13 100644 --- a/statesync/syncer_test.go +++ b/statesync/syncer_test.go @@ -29,7 +29,7 @@ import ( const testAppVersion = 9 // Sets up a basic syncer that can be used to test OfferSnapshot requests -func setupOfferSyncer(t *testing.T) (*syncer, *proxymocks.AppConnSnapshot) { +func setupOfferSyncer() (*syncer, *proxymocks.AppConnSnapshot) { connQuery := &proxymocks.AppConnQuery{} connSnapshot := &proxymocks.AppConnSnapshot{} stateProvider := &mocks.StateProvider{} @@ -124,17 +124,17 @@ func TestSyncer_SyncAny(t *testing.T) { // Both peers report back with snapshots. One of them also returns a snapshot we don't want, in // format 2, which will be rejected by the ABCI application. 
- new, err := syncer.AddSnapshot(peerA, s) + isNew, err := syncer.AddSnapshot(peerA, s) require.NoError(t, err) - assert.True(t, new) + assert.True(t, isNew) - new, err = syncer.AddSnapshot(peerB, s) + isNew, err = syncer.AddSnapshot(peerB, s) require.NoError(t, err) - assert.False(t, new) + assert.False(t, isNew) - new, err = syncer.AddSnapshot(peerB, &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1}}) + isNew, err = syncer.AddSnapshot(peerB, &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1}}) require.NoError(t, err) - assert.True(t, new) + assert.True(t, isNew) // We start a sync, with peers sending back chunks when requested. We first reject the snapshot // with height 2 format 2, and accept the snapshot at height 1. @@ -232,13 +232,13 @@ func TestSyncer_SyncAny(t *testing.T) { } func TestSyncer_SyncAny_noSnapshots(t *testing.T) { - syncer, _ := setupOfferSyncer(t) + syncer, _ := setupOfferSyncer() _, _, err := syncer.SyncAny(0, func() {}) assert.Equal(t, errNoSnapshots, err) } func TestSyncer_SyncAny_abort(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} _, err := syncer.AddSnapshot(simplePeer("id"), s) @@ -253,7 +253,7 @@ func TestSyncer_SyncAny_abort(t *testing.T) { } func TestSyncer_SyncAny_reject(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() // s22 is tried first, then s12, then s11, then errNoSnapshots s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -284,7 +284,7 @@ func TestSyncer_SyncAny_reject(t *testing.T) { } func TestSyncer_SyncAny_reject_format(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() // s22 is tried first, which reject s22 and s12, then s11 will abort. 
s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -311,7 +311,7 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { } func TestSyncer_SyncAny_reject_sender(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() peerA := simplePeer("a") peerB := simplePeer("b") @@ -349,7 +349,7 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { } func TestSyncer_SyncAny_abciError(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() errBoom := errors.New("boom") s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -385,7 +385,7 @@ func TestSyncer_offerSnapshot(t *testing.T) { for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - syncer, connSnapshot := setupOfferSyncer(t) + syncer, connSnapshot := setupOfferSyncer() s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} connSnapshot.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(s), @@ -447,7 +447,8 @@ func TestSyncer_applyChunks_Results(t *testing.T) { connSnapshot.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + Result: abci.ResponseApplySnapshotChunk_ACCEPT, + }, nil) } err = syncer.applyChunks(chunks) diff --git a/store/store_test.go b/store/store_test.go index ca56a7bd036..ae49361ddfe 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -1,7 +1,6 @@ package store import ( - "bytes" "fmt" "os" "runtime/debug" @@ -17,7 +16,6 @@ import ( "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/internal/test" - "github.com/cometbft/cometbft/libs/log" cmtrand "github.com/cometbft/cometbft/libs/rand" cmtstore "github.com/cometbft/cometbft/proto/tendermint/store" cmtversion "github.com/cometbft/cometbft/proto/tendermint/version" @@ -53,7 +51,7 @@ func makeTestExtCommit(height int64, timestamp time.Time) *types.ExtendedCommit } } -func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) { +func makeStateAndBlockStore() (sm.State, *BlockStore, cleanupFunc) { config := test.ResetTestRoot("blockchain_reactor_test") // blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB()) // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB()) @@ -138,7 +136,7 @@ func newInMemoryBlockStore() (*BlockStore, dbm.DB) { // TODO: This test should be simplified ... 
func TestBlockStoreSaveLoadBlock(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + state, bs, cleanup := makeStateAndBlockStore() defer cleanup() require.Equal(t, bs.Base(), int64(0), "initially the base should be zero") require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") @@ -209,7 +207,8 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { Height: 5, ChainID: "block_test", Time: cmttime.Now(), - ProposerAddress: cmtrand.Bytes(crypto.AddressSize)}, + ProposerAddress: cmtrand.Bytes(crypto.AddressSize), + }, makeTestExtCommit(5, cmttime.Now()).ToCommit(), ), parts: validPartSet, @@ -389,7 +388,7 @@ func TestSaveBlockWithExtendedCommitPanicOnAbsentExtension(t *testing.T) { }, } { t.Run(testCase.name, func(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + state, bs, cleanup := makeStateAndBlockStore() defer cleanup() h := bs.Height() + 1 block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) @@ -430,7 +429,7 @@ func TestLoadBlockExtendedCommit(t *testing.T) { }, } { t.Run(testCase.name, func(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + state, bs, cleanup := makeStateAndBlockStore() defer cleanup() h := bs.Height() + 1 block := state.MakeBlock(h, test.MakeNTxs(h, 10), new(types.Commit), nil, state.Validators.GetProposer().Address) @@ -689,7 +688,7 @@ func TestLoadBlockMetaByHash(t *testing.T) { } func TestBlockFetchAtHeight(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) + state, bs, cleanup := makeStateAndBlockStore() defer cleanup() require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") block := state.MakeBlock(bs.Height()+1, nil, new(types.Commit), nil, state.Validators.GetProposer().Address) diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 8992695f67c..025da74aa5f 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -125,7 +125,7 @@ func NewApplication(cfg *Config) (*Application, error) { } // Info implements ABCI. -func (app *Application) Info(_ context.Context, req *abci.RequestInfo) (*abci.ResponseInfo, error) { +func (app *Application) Info(_ context.Context, _ *abci.RequestInfo) (*abci.ResponseInfo, error) { return &abci.ResponseInfo{ Version: version.ABCIVersion, AppVersion: appVersion, @@ -150,7 +150,7 @@ func (app *Application) InitChain(_ context.Context, req *abci.RequestInitChain) app.state.Set(prefixReservedKey+suffixVoteExtHeight, strconv.FormatInt(req.ConsensusParams.Abci.VoteExtensionsEnableHeight, 10)) app.logger.Info("setting initial height in app_state", "initial_height", req.InitialHeight) app.state.Set(prefixReservedKey+suffixInitialHeight, strconv.FormatInt(req.InitialHeight, 10)) - //Get validators from genesis + // Get validators from genesis if req.Validators != nil { for _, val := range req.Validators { val := val @@ -187,7 +187,7 @@ func (app *Application) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*a // FinalizeBlock implements ABCI. func (app *Application) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - var txs = make([]*abci.ExecTxResult, len(req.Txs)) + txs := make([]*abci.ExecTxResult, len(req.Txs)) for i, tx := range req.Txs { key, value, err := parseTx(tx) @@ -269,7 +269,7 @@ func (app *Application) Query(_ context.Context, req *abci.RequestQuery) (*abci. 
} // ListSnapshots implements ABCI. -func (app *Application) ListSnapshots(_ context.Context, req *abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) { +func (app *Application) ListSnapshots(_ context.Context, _ *abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) { snapshots, err := app.snapshots.List() if err != nil { panic(err) @@ -336,8 +336,8 @@ func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestA // The special vote extension-generated transaction must fit within an empty block // and takes precedence over all other transactions coming from the mempool. func (app *Application) PrepareProposal( - _ context.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { - + _ context.Context, req *abci.RequestPrepareProposal, +) (*abci.ResponsePrepareProposal, error) { _, areExtensionsEnabled := app.checkHeightAndExtensions(true, req.Height, "PrepareProposal") txs := make([][]byte, 0, len(req.Txs)+1) @@ -638,7 +638,7 @@ func (app *Application) verifyAndSum( } cve := cmtproto.CanonicalVoteExtension{ Extension: vote.VoteExtension, - Height: currentHeight - 1, //the vote extension was signed in the previous height + Height: currentHeight - 1, // the vote extension was signed in the previous height Round: int64(extCommit.Round), ChainId: chainID, } @@ -728,7 +728,7 @@ func (app *Application) verifyExtensionTx(height int64, payload string) error { return fmt.Errorf("failed to sum and verify in process proposal: %w", err) } - //Final check that the proposer behaved correctly + // Final check that the proposer behaved correctly if int64(expSum) != sum { return fmt.Errorf("sum is not consistent with vote extension payload: %d!=%d", expSum, sum) } diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 7733e06c5a3..c8d27866327 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -171,7 +171,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st // First we generate seed nodes, starting at the initial height. for i := 1; i <= numSeeds; i++ { manifest.Nodes[fmt.Sprintf("seed%02d", i)] = generateNode( - r, e2e.ModeSeed, 0, manifest.InitialHeight, false) + r, e2e.ModeSeed, 0, false) } // Next, we generate validators. We make sure a BFT quorum of validators start @@ -187,7 +187,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st } name := fmt.Sprintf("validator%02d", i) manifest.Nodes[name] = generateNode( - r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2) + r, e2e.ModeValidator, startAt, i <= 2) if startAt == 0 { (*manifest.Validators)[name] = int64(30 + r.Intn(71)) @@ -216,7 +216,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st nextStartAt += 5 } manifest.Nodes[fmt.Sprintf("full%02d", i)] = generateNode( - r, e2e.ModeFull, startAt, manifest.InitialHeight, false) + r, e2e.ModeFull, startAt, false) } // We now set up peer discovery for nodes. Seed nodes are fully meshed with @@ -279,7 +279,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}, upgradeVersion st // here, since we need to know the overall network topology and startup // sequencing. 
func generateNode( - r *rand.Rand, mode e2e.Mode, startAt int64, initialHeight int64, forceArchive bool, + r *rand.Rand, mode e2e.Mode, startAt int64, forceArchive bool, ) *e2e.ManifestNode { node := e2e.ManifestNode{ Version: nodeVersions.Choose(r).(string), diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index 42646adfb14..ce778bb53ae 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -96,7 +96,7 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo } else { var dve *types.DuplicateVoteEvidence dve, err = generateDuplicateVoteEvidence( - ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, + privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, ) if dve.VoteA.Height < testnet.VoteExtensionsEnableHeight { dve.VoteA.Extension = nil @@ -201,7 +201,6 @@ func generateLightClientAttackEvidence( // generateDuplicateVoteEvidence picks a random validator from the val set and // returns duplicate vote evidence against the validator func generateDuplicateVoteEvidence( - ctx context.Context, privVals []types.MockPV, height int64, vals *types.ValidatorSet, diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index 901b6d850e7..9b9fc790044 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -327,11 +327,7 @@ Does not run any perturbations. return err } - if err := Cleanup(cli.testnet); err != nil { - return err - } - - return nil + return Cleanup(cli.testnet) }, }) diff --git a/types/event_bus.go b/types/event_bus.go index 8ed23ab8005..0abb87aa51f 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -232,82 +232,82 @@ func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpd type NopEventBus struct{} func (NopEventBus) Subscribe( - ctx context.Context, - subscriber string, - query cmtpubsub.Query, - out chan<- interface{}, + context.Context, + string, + cmtpubsub.Query, + chan<- interface{}, ) error { return nil } -func (NopEventBus) Unsubscribe(ctx context.Context, subscriber string, query cmtpubsub.Query) error { +func (NopEventBus) Unsubscribe(context.Context, string, cmtpubsub.Query) error { return nil } -func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { +func (NopEventBus) UnsubscribeAll(context.Context, string) error { return nil } -func (NopEventBus) PublishEventNewBlock(data EventDataNewBlock) error { +func (NopEventBus) PublishEventNewBlock(EventDataNewBlock) error { return nil } -func (NopEventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { +func (NopEventBus) PublishEventNewBlockHeader(EventDataNewBlockHeader) error { return nil } -func (NopEventBus) PublishEventNewBlockEvents(data EventDataNewBlockEvents) error { +func (NopEventBus) PublishEventNewBlockEvents(EventDataNewBlockEvents) error { return nil } -func (NopEventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error { +func (NopEventBus) PublishEventNewEvidence(EventDataNewEvidence) error { return nil } -func (NopEventBus) PublishEventVote(data EventDataVote) error { +func (NopEventBus) PublishEventVote(EventDataVote) error { return nil } -func (NopEventBus) PublishEventTx(data EventDataTx) error { +func (NopEventBus) PublishEventTx(EventDataTx) error { return nil } -func (NopEventBus) PublishEventNewRoundStep(data EventDataRoundState) error { +func (NopEventBus) PublishEventNewRoundStep(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventTimeoutPropose(data 
EventDataRoundState) error { +func (NopEventBus) PublishEventTimeoutPropose(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventTimeoutWait(data EventDataRoundState) error { +func (NopEventBus) PublishEventTimeoutWait(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventNewRound(data EventDataRoundState) error { +func (NopEventBus) PublishEventNewRound(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventCompleteProposal(data EventDataRoundState) error { +func (NopEventBus) PublishEventCompleteProposal(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventPolka(data EventDataRoundState) error { +func (NopEventBus) PublishEventPolka(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventUnlock(data EventDataRoundState) error { +func (NopEventBus) PublishEventUnlock(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventRelock(data EventDataRoundState) error { +func (NopEventBus) PublishEventRelock(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventLock(data EventDataRoundState) error { +func (NopEventBus) PublishEventLock(EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error { +func (NopEventBus) PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates) error { return nil } diff --git a/types/priv_validator.go b/types/priv_validator.go index b12dd6e6765..340794d00c5 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -142,12 +142,12 @@ type ErroringMockPV struct { var ErroringMockPVErr = errors.New("erroringMockPV always returns an error") // Implements PrivValidator. -func (pv *ErroringMockPV) SignVote(chainID string, vote *cmtproto.Vote) error { +func (pv *ErroringMockPV) SignVote(string, *cmtproto.Vote) error { return ErroringMockPVErr } // Implements PrivValidator. 
-func (pv *ErroringMockPV) SignProposal(chainID string, proposal *cmtproto.Proposal) error { +func (pv *ErroringMockPV) SignProposal(string, *cmtproto.Proposal) error { return ErroringMockPVErr } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 375b304d282..73a2c02a16b 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -14,11 +14,11 @@ import ( func TestABCIPubKey(t *testing.T) { pkEd := ed25519.GenPrivKey().PubKey() - err := testABCIPubKey(t, pkEd, ABCIPubKeyTypeEd25519) + err := testABCIPubKey(t, pkEd) assert.NoError(t, err) } -func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) error { +func testABCIPubKey(t *testing.T, pk crypto.PubKey) error { abciPubKey, err := cryptoenc.PubKeyToProto(pk) require.NoError(t, err) pk2, err := cryptoenc.PubKeyFromProto(abciPubKey) @@ -54,12 +54,12 @@ func TestABCIValidators(t *testing.T) { type pubKeyEddie struct{} -func (pubKeyEddie) Address() Address { return []byte{} } -func (pubKeyEddie) Bytes() []byte { return []byte{} } -func (pubKeyEddie) VerifySignature(msg []byte, sig []byte) bool { return false } -func (pubKeyEddie) Equals(crypto.PubKey) bool { return false } -func (pubKeyEddie) String() string { return "" } -func (pubKeyEddie) Type() string { return "pubKeyEddie" } +func (pubKeyEddie) Address() Address { return []byte{} } +func (pubKeyEddie) Bytes() []byte { return []byte{} } +func (pubKeyEddie) VerifySignature([]byte, []byte) bool { return false } +func (pubKeyEddie) Equals(crypto.PubKey) bool { return false } +func (pubKeyEddie) String() string { return "" } +func (pubKeyEddie) Type() string { return "pubKeyEddie" } func TestABCIValidatorFromPubKeyAndPower(t *testing.T) { pubkey := ed25519.GenPrivKey().PubKey() diff --git a/types/validator_set.go b/types/validator_set.go index 4b509d605a1..330d540baf4 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -105,9 +105,9 @@ func (vals *ValidatorSet) IsNilOrEmpty() bool { // CopyIncrementProposerPriority increments ProposerPriority and updates the // proposer on a copy, and returns it. func (vals *ValidatorSet) CopyIncrementProposerPriority(times int32) *ValidatorSet { - copy := vals.Copy() - copy.IncrementProposerPriority(times) - return copy + cp := vals.Copy() + cp.IncrementProposerPriority(times) + return cp } // IncrementProposerPriority increments ProposerPriority of each validator and @@ -429,7 +429,6 @@ func verifyUpdates( vals *ValidatorSet, removedPower int64, ) (tvpAfterUpdatesBeforeRemovals int64, err error) { - delta := func(update *Validator, vals *ValidatorSet) int64 { _, val := vals.GetByAddress(update.Address) if val != nil { @@ -493,7 +492,6 @@ func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotal valUpdate.ProposerPriority = val.ProposerPriority } } - } // Merges the vals' validator list with the updates list. @@ -660,7 +658,8 @@ func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error { // VerifyCommit verifies +2/3 of the set had signed the given commit and all // other signatures are valid func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, - height int64, commit *Commit) error { + height int64, commit *Commit, +) error { return VerifyCommit(chainID, vals, blockID, height, commit) } @@ -668,7 +667,8 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, // VerifyCommitLight verifies +2/3 of the set had signed the given commit. 
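The `copy` → `cp` rename below exists because `copy` is a Go builtin; a local variable with that name shadows the builtin for the rest of the scope, so a later `copy(dst, src)` call would not compile there. A small illustrative sketch with a stand-in type (not the real `ValidatorSet`):

```go
package main

import "fmt"

// ValidatorSet is a stand-in type; only the shadowing pattern matters here.
type ValidatorSet struct {
	Members []string
}

func (vals *ValidatorSet) Copy() *ValidatorSet {
	// Naming this local "copy", as the old code did, would shadow the
	// builtin copy function for the rest of this scope. "cp" keeps the
	// builtin available.
	cp := &ValidatorSet{Members: make([]string, len(vals.Members))}
	copy(cp.Members, vals.Members) // builtin copy still usable
	return cp
}

func main() {
	vs := &ValidatorSet{Members: []string{"v1", "v2"}}
	fmt.Println(vs.Copy().Members)
}
```

The same reasoning applies to locals named `len`, which shadow the builtin length function.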
func (vals *ValidatorSet) VerifyCommitLight(chainID string, blockID BlockID, - height int64, commit *Commit) error { + height int64, commit *Commit, +) error { return VerifyCommitLight(chainID, vals, blockID, height, commit) } @@ -745,7 +745,6 @@ func (vals *ValidatorSet) StringIndented(indent string) string { indent, indent, strings.Join(valStrings, "\n"+indent+" "), indent) - } //------------------------------------- diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 11f1ab24834..04cc26da669 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -45,9 +45,11 @@ func TestValidatorSetBasic(t *testing.T) { assert.Zero(t, vset.Size()) assert.Equal(t, int64(0), vset.TotalVotingPower()) assert.Nil(t, vset.GetProposer()) - assert.Equal(t, []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, + assert.Equal(t, []byte{ + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, - 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, vset.Hash()) + 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, + }, vset.Hash()) // add val = randValidator(vset.TotalVotingPower()) assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) @@ -74,7 +76,6 @@ func TestValidatorSetBasic(t *testing.T) { assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) _, val = vset.GetByAddress(val.Address) assert.Equal(t, proposerPriority, val.ProposerPriority) - } func TestValidatorSetValidateBasic(t *testing.T) { @@ -132,7 +133,6 @@ func TestValidatorSetValidateBasic(t *testing.T) { assert.NoError(t, err) } } - } func TestCopy(t *testing.T) { @@ -478,28 +478,39 @@ func TestAveragingInIncrementProposerPriority(t *testing.T) { times int32 avg int64 }{ - 0: {ValidatorSet{ - Validators: []*Validator{ - {Address: []byte("a"), ProposerPriority: 1}, - {Address: []byte("b"), ProposerPriority: 2}, - {Address: []byte("c"), ProposerPriority: 3}}}, - 1, 2}, - 1: {ValidatorSet{ - Validators: []*Validator{ - {Address: []byte("a"), ProposerPriority: 10}, - {Address: []byte("b"), ProposerPriority: -10}, - {Address: []byte("c"), ProposerPriority: 1}}}, + 0: { + ValidatorSet{ + Validators: []*Validator{ + {Address: []byte("a"), ProposerPriority: 1}, + {Address: []byte("b"), ProposerPriority: 2}, + {Address: []byte("c"), ProposerPriority: 3}, + }, + }, + 1, 2, + }, + 1: { + ValidatorSet{ + Validators: []*Validator{ + {Address: []byte("a"), ProposerPriority: 10}, + {Address: []byte("b"), ProposerPriority: -10}, + {Address: []byte("c"), ProposerPriority: 1}, + }, + }, // this should average twice but the average should be 0 after the first iteration // (voting power is 0 -> no changes) 11, 0, // 1 / 3 }, - 2: {ValidatorSet{ - Validators: []*Validator{ - {Address: []byte("a"), ProposerPriority: 100}, - {Address: []byte("b"), ProposerPriority: -10}, - {Address: []byte("c"), ProposerPriority: 1}}}, - 1, 91 / 3}, + 2: { + ValidatorSet{ + Validators: []*Validator{ + {Address: []byte("a"), ProposerPriority: 100}, + {Address: []byte("b"), ProposerPriority: -10}, + {Address: []byte("c"), ProposerPriority: 1}, + }, + }, + 1, 91 / 3, + }, } for i, tc := range tcs { // work on copy to have the old ProposerPriorities: @@ -523,103 +534,125 @@ func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) { vals := ValidatorSet{Validators: []*Validator{ {Address: []byte{0}, ProposerPriority: 0, VotingPower: vp0}, {Address: []byte{1}, ProposerPriority: 0, VotingPower: vp1}, - {Address: []byte{2}, 
ProposerPriority: 0, VotingPower: vp2}}} + {Address: []byte{2}, ProposerPriority: 0, VotingPower: vp2}, + }} tcs := []struct { vals *ValidatorSet wantProposerPrioritys []int64 times int32 wantProposer *Validator }{ - 0: { vals.Copy(), []int64{ // Acumm+VotingPower-Avg: 0 + vp0 - total - avg, // mostest will be subtracted by total voting power (12) 0 + vp1, - 0 + vp2}, + 0 + vp2, + }, 1, - vals.Validators[0]}, + vals.Validators[0], + }, 1: { vals.Copy(), []int64{ (0 + vp0 - total) + vp0 - total - avg, // this will be mostest on 2nd iter, too (0 + vp1) + vp1, - (0 + vp2) + vp2}, + (0 + vp2) + vp2, + }, 2, - vals.Validators[0]}, // increment twice -> expect average to be subtracted twice + vals.Validators[0], + }, // increment twice -> expect average to be subtracted twice 2: { vals.Copy(), []int64{ 0 + 3*(vp0-total) - avg, // still mostest 0 + 3*vp1, - 0 + 3*vp2}, + 0 + 3*vp2, + }, 3, - vals.Validators[0]}, + vals.Validators[0], + }, 3: { vals.Copy(), []int64{ 0 + 4*(vp0-total), // still mostest 0 + 4*vp1, - 0 + 4*vp2}, + 0 + 4*vp2, + }, 4, - vals.Validators[0]}, + vals.Validators[0], + }, 4: { vals.Copy(), []int64{ 0 + 4*(vp0-total) + vp0, // 4 iters was mostest 0 + 5*vp1 - total, // now this val is mostest for the 1st time (hence -12==totalVotingPower) - 0 + 5*vp2}, + 0 + 5*vp2, + }, 5, - vals.Validators[1]}, + vals.Validators[1], + }, 5: { vals.Copy(), []int64{ 0 + 6*vp0 - 5*total, // mostest again 0 + 6*vp1 - total, // mostest once up to here - 0 + 6*vp2}, + 0 + 6*vp2, + }, 6, - vals.Validators[0]}, + vals.Validators[0], + }, 6: { vals.Copy(), []int64{ 0 + 7*vp0 - 6*total, // in 7 iters this val is mostest 6 times 0 + 7*vp1 - total, // in 7 iters this val is mostest 1 time - 0 + 7*vp2}, + 0 + 7*vp2, + }, 7, - vals.Validators[0]}, + vals.Validators[0], + }, 7: { vals.Copy(), []int64{ 0 + 8*vp0 - 7*total, // mostest again 0 + 8*vp1 - total, - 0 + 8*vp2}, + 0 + 8*vp2, + }, 8, - vals.Validators[0]}, + vals.Validators[0], + }, 8: { vals.Copy(), []int64{ 0 + 9*vp0 - 7*total, 0 + 9*vp1 - total, - 0 + 9*vp2 - total}, // mostest + 0 + 9*vp2 - total, + }, // mostest 9, - vals.Validators[2]}, + vals.Validators[2], + }, 9: { vals.Copy(), []int64{ 0 + 10*vp0 - 8*total, // after 10 iters this is mostest again 0 + 10*vp1 - total, // after 6 iters this val is "mostest" once and not in between - 0 + 10*vp2 - total}, // in between 10 iters this val is "mostest" once + 0 + 10*vp2 - total, + }, // in between 10 iters this val is "mostest" once 10, - vals.Validators[0]}, + vals.Validators[0], + }, 10: { vals.Copy(), []int64{ 0 + 11*vp0 - 9*total, - 0 + 11*vp1 - total, // after 6 iters this val is "mostest" once and not in between - 0 + 11*vp2 - total}, // after 10 iters this val is "mostest" once + 0 + 11*vp1 - total, // after 6 iters this val is "mostest" once and not in between + 0 + 11*vp2 - total, + }, // after 10 iters this val is "mostest" once 11, - vals.Validators[0]}, + vals.Validators[0], + }, } for i, tc := range tcs { tc.vals.IncrementProposerPriority(tc.times) @@ -665,7 +698,6 @@ func TestSafeSubClip(t *testing.T) { //------------------------------------------------------------------- func TestEmptySet(t *testing.T) { - var valList []*Validator valSet := NewValidatorSet(valList) assert.Panics(t, func() { valSet.IncrementProposerPriority(1) }) @@ -689,11 +721,9 @@ func TestEmptySet(t *testing.T) { // Attempt delete from empty set assert.Error(t, valSet.UpdateWithChangeSet(delList)) - } func TestUpdatesForNewValidatorSet(t *testing.T) { - v1 := newValidator([]byte("v1"), 100) v2 := 
newValidator([]byte("v2"), 100) valList := []*Validator{v1, v2} @@ -720,7 +750,6 @@ func TestUpdatesForNewValidatorSet(t *testing.T) { v3 = newValidator([]byte("v3"), 30) valList = []*Validator{v1, v2, v3} assert.Panics(t, func() { NewValidatorSet(valList) }) - } type testVal struct { @@ -1010,19 +1039,23 @@ func TestValSetUpdatesOrderIndependenceTestsExecute(t *testing.T) { }{ 0: { // order of changes should not matter, the final validator sets should be the same []testVal{{"v4", 40}, {"v3", 30}, {"v2", 10}, {"v1", 10}}, - []testVal{{"v4", 44}, {"v3", 33}, {"v2", 22}, {"v1", 11}}}, + []testVal{{"v4", 44}, {"v3", 33}, {"v2", 22}, {"v1", 11}}, + }, 1: { // order of additions should not matter []testVal{{"v2", 20}, {"v1", 10}}, - []testVal{{"v3", 30}, {"v4", 40}, {"v5", 50}, {"v6", 60}}}, + []testVal{{"v3", 30}, {"v4", 40}, {"v5", 50}, {"v6", 60}}, + }, 2: { // order of removals should not matter []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}}, - []testVal{{"v1", 0}, {"v3", 0}, {"v4", 0}}}, + []testVal{{"v1", 0}, {"v3", 0}, {"v4", 0}}, + }, 3: { // order of mixed operations should not matter []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}}, - []testVal{{"v1", 0}, {"v3", 0}, {"v2", 22}, {"v5", 50}, {"v4", 44}}}, + []testVal{{"v1", 0}, {"v3", 0}, {"v2", 22}, {"v5", 50}, {"v4", 44}}, + }, } for i, tt := range valSetUpdatesOrderTests { @@ -1067,41 +1100,50 @@ func TestValSetApplyUpdatesTestsExecute(t *testing.T) { 0: { // prepend []testVal{{"v4", 44}, {"v5", 55}}, []testVal{{"v1", 11}}, - []testVal{{"v1", 11}, {"v4", 44}, {"v5", 55}}}, + []testVal{{"v1", 11}, {"v4", 44}, {"v5", 55}}, + }, 1: { // append []testVal{{"v4", 44}, {"v5", 55}}, []testVal{{"v6", 66}}, - []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}}, + []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}, + }, 2: { // insert []testVal{{"v4", 44}, {"v6", 66}}, []testVal{{"v5", 55}}, - []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}}, + []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}, + }, 3: { // insert multi []testVal{{"v4", 44}, {"v6", 66}, {"v9", 99}}, []testVal{{"v5", 55}, {"v7", 77}, {"v8", 88}}, - []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}, {"v7", 77}, {"v8", 88}, {"v9", 99}}}, + []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}, {"v7", 77}, {"v8", 88}, {"v9", 99}}, + }, // changes 4: { // head []testVal{{"v1", 111}, {"v2", 22}}, []testVal{{"v1", 11}}, - []testVal{{"v1", 11}, {"v2", 22}}}, + []testVal{{"v1", 11}, {"v2", 22}}, + }, 5: { // tail []testVal{{"v1", 11}, {"v2", 222}}, []testVal{{"v2", 22}}, - []testVal{{"v1", 11}, {"v2", 22}}}, + []testVal{{"v1", 11}, {"v2", 22}}, + }, 6: { // middle []testVal{{"v1", 11}, {"v2", 222}, {"v3", 33}}, []testVal{{"v2", 22}}, - []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}}, + []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}, + }, 7: { // multi []testVal{{"v1", 111}, {"v2", 222}, {"v3", 333}}, []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}, - []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}}, + []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}, + }, // additions and changes 8: { []testVal{{"v1", 111}, {"v2", 22}}, []testVal{{"v1", 11}, {"v3", 33}, {"v4", 44}}, - []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}}, + []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}, + }, } for i, tt := range valSetUpdatesBasicTests { @@ -1127,7 +1169,7 @@ type testVSetCfg struct { expErr error } -func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg { +func randTestVSetCfg(nBase, nAddMax int) testVSetCfg { if nBase <= 0 || nAddMax < 0 { panic(fmt.Sprintf("bad parameters %v %v", nBase, 
nAddMax)) } @@ -1179,7 +1221,6 @@ func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg { sort.Sort(testValsByVotingPower(cfg.expectedVals)) return cfg - } func applyChangesToValSet(t *testing.T, expErr error, valSet *ValidatorSet, valsLists ...[]testVal) { @@ -1225,15 +1266,15 @@ func TestValSetUpdatePriorityOrderTests(t *testing.T) { // generate a configuration with 100 validators, // randomly select validators for updates and deletes, and // generate 10 new validators to be added - 3: randTestVSetCfg(t, 100, 10), + 3: randTestVSetCfg(100, 10), - 4: randTestVSetCfg(t, 1000, 100), + 4: randTestVSetCfg(1000, 100), - 5: randTestVSetCfg(t, 10, 100), + 5: randTestVSetCfg(10, 100), - 6: randTestVSetCfg(t, 100, 1000), + 6: randTestVSetCfg(100, 1000), - 7: randTestVSetCfg(t, 1000, 1000), + 7: randTestVSetCfg(1000, 1000), } for _, cfg := range testCases { @@ -1325,28 +1366,56 @@ func TestValSetUpdateOverflowRelated(t *testing.T) { { name: "4 no false overflow error messages for adds, updates and deletes", startVals: []testVal{ - {"v1", MaxTotalVotingPower / 4}, {"v2", MaxTotalVotingPower / 4}, - {"v3", MaxTotalVotingPower / 4}, {"v4", MaxTotalVotingPower / 4}}, + {"v1", MaxTotalVotingPower / 4}, + {"v2", MaxTotalVotingPower / 4}, + {"v3", MaxTotalVotingPower / 4}, + {"v4", MaxTotalVotingPower / 4}, + }, deletedVals: []testVal{{"v2", 0}}, updatedVals: []testVal{ - {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}}, + {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}, + }, addedVals: []testVal{{"v5", 3}}, expectedVals: []testVal{ - {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v5", 3}, {"v4", 2}}, + {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v5", 3}, {"v4", 2}, + }, expErr: nil, }, { name: "5 check panic on overflow is prevented: update 8 validators with power int64(math.MaxInt64)/8", startVals: []testVal{ - {"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1}, - {"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}}, + {"v1", 1}, + {"v2", 1}, + {"v3", 1}, + {"v4", 1}, + {"v5", 1}, + {"v6", 1}, + {"v7", 1}, + {"v8", 1}, + {"v9", 1}, + }, updatedVals: []testVal{ - {"v1", MaxTotalVotingPower}, {"v2", MaxTotalVotingPower}, {"v3", MaxTotalVotingPower}, - {"v4", MaxTotalVotingPower}, {"v5", MaxTotalVotingPower}, {"v6", MaxTotalVotingPower}, - {"v7", MaxTotalVotingPower}, {"v8", MaxTotalVotingPower}, {"v9", 8}}, + {"v1", MaxTotalVotingPower}, + {"v2", MaxTotalVotingPower}, + {"v3", MaxTotalVotingPower}, + {"v4", MaxTotalVotingPower}, + {"v5", MaxTotalVotingPower}, + {"v6", MaxTotalVotingPower}, + {"v7", MaxTotalVotingPower}, + {"v8", MaxTotalVotingPower}, + {"v9", 8}, + }, expectedVals: []testVal{ - {"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1}, - {"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}}, + {"v1", 1}, + {"v2", 1}, + {"v3", 1}, + {"v4", 1}, + {"v5", 1}, + {"v6", 1}, + {"v7", 1}, + {"v8", 1}, + {"v9", 1}, + }, expErr: ErrTotalVotingPowerOverflow, }, }
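One pattern that recurs throughout the diff (for example the `return Cleanup(cli.testnet)` change in `test/e2e/runner/main.go`) is collapsing an `if err := f(); err != nil { return err }` followed by `return nil` into a direct `return f()`. A hedged before/after sketch with hypothetical helper names:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotReady = errors.New("testnet not ready")

// cleanup is a hypothetical stand-in for the kind of call being simplified.
func cleanup(name string) error {
	if name == "" {
		return errNotReady
	}
	return nil
}

// Before: the redundant form the linter flags. The if/return-nil pair adds
// nothing, since the error is returned unchanged either way.
func teardownVerbose(name string) error {
	if err := cleanup(name); err != nil {
		return err
	}
	return nil
}

// After: return the call directly.
func teardown(name string) error {
	return cleanup(name)
}

func main() {
	fmt.Println(teardownVerbose("testnet-a"), teardown(""))
}
```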