diff --git a/cmd/sonictool/app/chain.go b/cmd/sonictool/app/chain.go index 9896502ce..83ab18499 100644 --- a/cmd/sonictool/app/chain.go +++ b/cmd/sonictool/app/chain.go @@ -41,21 +41,21 @@ func exportEvents(ctx *cli.Context) error { defer writer.(*gzip.Writer).Close() } - from := idx.Epoch(1) + from := idx.EpochID(1) if len(ctx.Args()) > 1 { n, err := strconv.ParseUint(ctx.Args().Get(1), 10, 32) if err != nil { return err } - from = idx.Epoch(n) + from = idx.EpochID(n) } - to := idx.Epoch(0) + to := idx.EpochID(0) if len(ctx.Args()) > 2 { n, err := strconv.ParseUint(ctx.Args().Get(2), 10, 32) if err != nil { return err } - to = idx.Epoch(n) + to = idx.EpochID(n) } gdbParams := db.GossipDbParameters{ diff --git a/cmd/sonictool/app/heal.go b/cmd/sonictool/app/heal.go index 5702bf3f1..08f3748e0 100644 --- a/cmd/sonictool/app/heal.go +++ b/cmd/sonictool/app/heal.go @@ -74,7 +74,7 @@ func heal(ctx *cli.Context) error { cancelCtx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) defer stop() - recoveredBlock, err := db.HealChaindata(chaindataDir, cacheRatio, cfg, idx.Block(archiveCheckpointBlock)) + recoveredBlock, err := db.HealChaindata(chaindataDir, cacheRatio, cfg, idx.BlockID(archiveCheckpointBlock)) if err != nil { return err } @@ -93,7 +93,7 @@ func heal(ctx *cli.Context) error { return nil } -func healLiveFromArchive(ctx context.Context, carmenLiveDir, carmenArchiveDir string, recoveredBlock idx.Block) error { +func healLiveFromArchive(ctx context.Context, carmenLiveDir, carmenArchiveDir string, recoveredBlock idx.BlockID) error { if err := os.RemoveAll(carmenLiveDir); err != nil { return fmt.Errorf("failed to remove broken live state: %w", err) } diff --git a/cmd/sonictool/chain/export_events.go b/cmd/sonictool/chain/export_events.go index 492e404c7..9e0e123ee 100644 --- a/cmd/sonictool/chain/export_events.go +++ b/cmd/sonictool/chain/export_events.go @@ -24,7 +24,7 @@ var ( // always print out progress. This avoids the user wondering what's going on. 
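Reviewer note: the epoch-range parsing above maps straight onto uint32 values (ParseUint with bitSize 32), which is why the cast target becomes idx.EpochID. A minimal stand-alone sketch of that handling, with EpochID as a local stand-in for idx.EpochID and parseEpochRange as an illustrative helper that is not part of the patch:

package main

import (
	"fmt"
	"strconv"
)

type EpochID uint32 // stand-in for idx.EpochID (assumed uint32-based)

// parseEpochRange mirrors the CLI handling in exportEvents: a missing "from"
// defaults to 1, a missing "to" defaults to 0 (meaning "up to the latest epoch").
func parseEpochRange(args []string) (EpochID, EpochID, error) {
	from, to := EpochID(1), EpochID(0)
	if len(args) > 1 {
		n, err := strconv.ParseUint(args[1], 10, 32)
		if err != nil {
			return 0, 0, err
		}
		from = EpochID(n)
	}
	if len(args) > 2 {
		n, err := strconv.ParseUint(args[2], 10, 32)
		if err != nil {
			return 0, 0, err
		}
		to = EpochID(n)
	}
	return from, to, nil
}

func main() {
	from, to, _ := parseEpochRange([]string{"events.gz", "10", "250"})
	fmt.Println(from, to) // 10 250
}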
const statsReportLimit = 8 * time.Second -func ExportEvents(gdbParams db.GossipDbParameters, w io.Writer, from, to idx.Epoch) (err error) { +func ExportEvents(gdbParams db.GossipDbParameters, w io.Writer, from, to idx.EpochID) (err error) { chaindataDir := filepath.Join(gdbParams.DataDir, "chaindata") dbs, err := db.MakeDbProducer(chaindataDir, cachescale.Identity) if err != nil { diff --git a/cmd/sonictool/chain/import_events.go b/cmd/sonictool/chain/import_events.go index f08806630..4f57e3914 100644 --- a/cmd/sonictool/chain/import_events.go +++ b/cmd/sonictool/chain/import_events.go @@ -120,7 +120,7 @@ func importEventsFile(srv *gossip.Service, fn string) error { batch := make(inter.EventPayloads, 0, 8*1024) batchSize := 0 maxBatchSize := 8 * 1024 * 1024 - epoch := idx.Epoch(0) + epoch := idx.EpochID(0) txs := 0 events := 0 diff --git a/cmd/sonictool/check/archive.go b/cmd/sonictool/check/archive.go index 8ea8e82e4..4d06b4d14 100644 --- a/cmd/sonictool/check/archive.go +++ b/cmd/sonictool/check/archive.go @@ -43,7 +43,7 @@ func checkArchiveBlockRoots(dataDir string, cacheRatio cachescale.Func) error { invalidBlocks := 0 lastBlockIdx := gdb.GetLatestBlockIndex() - for i := idx.Block(1); i <= lastBlockIdx; i++ { + for i := idx.BlockID(1); i <= lastBlockIdx; i++ { block := gdb.GetBlock(i) if block == nil { return fmt.Errorf("verification failed - unable to get block %d from gdb", i) diff --git a/cmd/sonictool/db/heal.go b/cmd/sonictool/db/heal.go index 42455fa51..bbd95bf7a 100644 --- a/cmd/sonictool/db/heal.go +++ b/cmd/sonictool/db/heal.go @@ -20,7 +20,7 @@ import ( "time" ) -func HealChaindata(chaindataDir string, cacheRatio cachescale.Func, cfg *config.Config, lastCarmenBlock idx.Block) (idx.Block, error) { +func HealChaindata(chaindataDir string, cacheRatio cachescale.Func, cfg *config.Config, lastCarmenBlock idx.BlockID) (idx.BlockID, error) { producer := &DummyScopedProducer{integration.GetRawDbProducer(chaindataDir, integration.DBCacheConfig{ Cache: cacheRatio.U64(480 * opt.MiB), Fdlimit: makeDatabaseHandles(), @@ -43,7 +43,7 @@ func HealChaindata(chaindataDir string, cacheRatio cachescale.Func, cfg *config. if err != nil { return 0, fmt.Errorf("failed to open 'lachesis' database: %w", err) } - cGetEpochDB := func(epoch idx.Epoch) kvdb.Store { + cGetEpochDB := func(epoch idx.EpochID) kvdb.Store { name := fmt.Sprintf("lachesis-%d", epoch) cEpochDB, err := producer.OpenDB(name) if err != nil { @@ -71,8 +71,8 @@ func HealChaindata(chaindataDir string, cacheRatio cachescale.Func, cfg *config. 
} // healGossipDb reverts the gossip database into state, into which can be reverted carmen -func healGossipDb(producer kvdb.FlushableDBProducer, cfg gossip.StoreConfig, lastCarmenBlock idx.Block) ( - epochState *iblockproc.EpochState, lastBlock idx.Block, err error) { +func healGossipDb(producer kvdb.FlushableDBProducer, cfg gossip.StoreConfig, lastCarmenBlock idx.BlockID) ( + epochState *iblockproc.EpochState, lastBlock idx.BlockID, err error) { gdb, err := gossip.NewStore(producer, cfg) // requires FlushIDKey present (not clean) in all dbs if err != nil { @@ -106,10 +106,10 @@ func healGossipDb(producer kvdb.FlushableDBProducer, cfg gossip.StoreConfig, las } // getLastEpochWithState finds the last closed epoch with the state available -func getLastEpochWithState(gdb *gossip.Store, lastCarmenBlock idx.Block) (epochIdx idx.Epoch, blockState *iblockproc.BlockState, epochState *iblockproc.EpochState) { +func getLastEpochWithState(gdb *gossip.Store, lastCarmenBlock idx.BlockID) (epochIdx idx.EpochID, blockState *iblockproc.BlockState, epochState *iblockproc.EpochState) { currentEpoch := gdb.GetEpoch() - epochsToTry := idx.Epoch(10000) - endEpoch := idx.Epoch(1) + epochsToTry := idx.EpochID(10000) + endEpoch := idx.EpochID(1) if currentEpoch > epochsToTry { endEpoch = currentEpoch - epochsToTry } diff --git a/cmd/sonictool/genesis/export.go b/cmd/sonictool/genesis/export.go index 52d8fe43e..c8bc58ac7 100644 --- a/cmd/sonictool/genesis/export.go +++ b/cmd/sonictool/genesis/export.go @@ -82,7 +82,7 @@ func ExportGenesis(ctx context.Context, gdb *gossip.Store, includeArchive bool, return nil } -func exportEpochsSection(ctx context.Context, gdb *gossip.Store, writer *unitWriter, from, to idx.Epoch) error { +func exportEpochsSection(ctx context.Context, gdb *gossip.Store, writer *unitWriter, from, to idx.EpochID) error { log.Info("Exporting epochs", "from", from, "to", to) for i := to; i >= from; i-- { er := gdb.GetFullEpochRecord(i) @@ -111,7 +111,7 @@ func exportEpochsSection(ctx context.Context, gdb *gossip.Store, writer *unitWri return nil } -func exportBlocksSection(ctx context.Context, gdb *gossip.Store, writer *unitWriter, to idx.Block, maxBlocks int64) error { +func exportBlocksSection(ctx context.Context, gdb *gossip.Store, writer *unitWriter, to idx.BlockID, maxBlocks int64) error { toBlock := int64(to) fromBlock := int64(0) if maxBlocks != 0 && toBlock > 1+maxBlocks { @@ -119,7 +119,7 @@ func exportBlocksSection(ctx context.Context, gdb *gossip.Store, writer *unitWri } log.Info("Exporting blocks", "from", fromBlock, "to", toBlock) for i := toBlock; i >= fromBlock; i-- { - i := idx.Block(i) + i := idx.BlockID(i) br := gdb.GetFullBlockRecord(i) if br == nil { return fmt.Errorf("the block record for block %d is missing in gdb", i) @@ -177,7 +177,7 @@ func exportFwaSection(ctx context.Context, gdb *gossip.Store, writer *unitWriter return nil } -func getEpochBlock(epoch idx.Epoch, store *gossip.Store) idx.Block { +func getEpochBlock(epoch idx.EpochID, store *gossip.Store) idx.BlockID { bs, _ := store.GetHistoryBlockEpochState(epoch) if bs == nil { return 0 diff --git a/cmd/sonictool/genesis/import.go b/cmd/sonictool/genesis/import.go index 88886cf93..d15849c13 100644 --- a/cmd/sonictool/genesis/import.go +++ b/cmd/sonictool/genesis/import.go @@ -60,7 +60,7 @@ func ImportGenesisStore(params ImportParams) error { if err != nil { return err } - cGetEpochDB := func(epoch idx.Epoch) kvdb.Store { + cGetEpochDB := func(epoch idx.EpochID) kvdb.Store { db, err := dbs.OpenDB(fmt.Sprintf("lachesis-%d", 
epoch)) if err != nil { panic(fmt.Errorf("failed to open epoch db: %w", err)) diff --git a/config/make_node.go b/config/make_node.go index 470e7a9b6..294d6db8e 100644 --- a/config/make_node.go +++ b/config/make_node.go @@ -118,9 +118,9 @@ func MakeNode(ctx *cli.Context, cfg *Config) (*node.Node, *gossip.Service, func( } return evmcore.NewTxPool(cfg.TxPool, reader.Config(), reader) } - haltCheck := func(oldEpoch, newEpoch idx.Epoch, age time.Time) bool { + haltCheck := func(oldEpoch, newEpoch idx.EpochID, age time.Time) bool { stop := ctx.GlobalIsSet(flags.ExitWhenAgeFlag.Name) && ctx.GlobalDuration(flags.ExitWhenAgeFlag.Name) >= time.Since(age) - stop = stop || ctx.GlobalIsSet(flags.ExitWhenEpochFlag.Name) && idx.Epoch(ctx.GlobalUint64(flags.ExitWhenEpochFlag.Name)) <= newEpoch + stop = stop || ctx.GlobalIsSet(flags.ExitWhenEpochFlag.Name) && idx.EpochID(ctx.GlobalUint64(flags.ExitWhenEpochFlag.Name)) <= newEpoch if stop { go func() { // do it in a separate thread to avoid deadlock diff --git a/ethapi/api.go b/ethapi/api.go index 6730c5af0..dbb81ce55 100644 --- a/ethapi/api.go +++ b/ethapi/api.go @@ -133,8 +133,8 @@ func (s *PublicEthereumAPI) FeeHistory(ctx context.Context, blockCount math.HexO return nil, err } oldest := last - if oldest > idx.Block(blockCount) { - oldest -= idx.Block(blockCount - 1) + if oldest > idx.BlockID(blockCount) { + oldest -= idx.BlockID(blockCount - 1) } else { oldest = 0 } diff --git a/ethapi/backend.go b/ethapi/backend.go index 31e6a156c..9db186dcc 100644 --- a/ethapi/backend.go +++ b/ethapi/backend.go @@ -41,12 +41,12 @@ import ( // PeerProgress is synchronization status of a peer type PeerProgress struct { - CurrentEpoch idx.Epoch - CurrentBlock idx.Block + CurrentEpoch idx.EpochID + CurrentBlock idx.BlockID CurrentBlockHash hash.Event CurrentBlockTime inter.Timestamp - HighestBlock idx.Block - HighestEpoch idx.Epoch + HighestBlock idx.BlockID + HighestEpoch idx.EpochID } // Backend interface provides the common API services (that are provided by @@ -70,7 +70,7 @@ type Backend interface { HeaderByHash(ctx context.Context, hash common.Hash) (*evmcore.EvmHeader, error) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*evmcore.EvmBlock, error) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (state.StateDB, *evmcore.EvmHeader, error) - ResolveRpcBlockNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (idx.Block, error) + ResolveRpcBlockNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (idx.BlockID, error) BlockByHash(ctx context.Context, hash common.Hash) (*evmcore.EvmBlock, error) GetReceiptsByNumber(ctx context.Context, number rpc.BlockNumber) (types.Receipts, error) GetEVM(ctx context.Context, msg *core.Message, state vm.StateDB, header *evmcore.EvmHeader, vmConfig *vm.Config) (*vm.EVM, func() error, error) @@ -95,12 +95,12 @@ type Backend interface { GetEventPayload(ctx context.Context, shortEventID string) (*inter.EventPayload, error) GetEvent(ctx context.Context, shortEventID string) (*inter.Event, error) GetHeads(ctx context.Context, epoch rpc.BlockNumber) (hash.Events, error) - CurrentEpoch(ctx context.Context) idx.Epoch + CurrentEpoch(ctx context.Context) idx.EpochID SealedEpochTiming(ctx context.Context) (start inter.Timestamp, end inter.Timestamp) // Lachesis aBFT API GetEpochBlockState(ctx context.Context, epoch rpc.BlockNumber) (*iblockproc.BlockState, *iblockproc.EpochState, error) - GetDowntime(ctx context.Context, vid idx.ValidatorID) (idx.Block, 
inter.Timestamp, error) + GetDowntime(ctx context.Context, vid idx.ValidatorID) (idx.BlockID, inter.Timestamp, error) GetUptime(ctx context.Context, vid idx.ValidatorID) (*big.Int, error) GetOriginatedFee(ctx context.Context, vid idx.ValidatorID) (*big.Int, error) } diff --git a/ethapi/mock_backend.go b/ethapi/mock_backend.go index a53a224d2..3f73a1fac 100644 --- a/ethapi/mock_backend.go +++ b/ethapi/mock_backend.go @@ -142,10 +142,10 @@ func (mr *MockBackendMockRecorder) CurrentBlock() *gomock.Call { } // CurrentEpoch mocks base method. -func (m *MockBackend) CurrentEpoch(ctx context.Context) idx.Epoch { +func (m *MockBackend) CurrentEpoch(ctx context.Context) idx.EpochID { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CurrentEpoch", ctx) - ret0, _ := ret[0].(idx.Epoch) + ret0, _ := ret[0].(idx.EpochID) return ret0 } @@ -170,10 +170,10 @@ func (mr *MockBackendMockRecorder) ExtRPCEnabled() *gomock.Call { } // GetDowntime mocks base method. -func (m *MockBackend) GetDowntime(ctx context.Context, vid idx.ValidatorID) (idx.Block, inter.Timestamp, error) { +func (m *MockBackend) GetDowntime(ctx context.Context, vid idx.ValidatorID) (idx.BlockID, inter.Timestamp, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetDowntime", ctx, vid) - ret0, _ := ret[0].(idx.Block) + ret0, _ := ret[0].(idx.BlockID) ret1, _ := ret[1].(inter.Timestamp) ret2, _ := ret[2].(error) return ret0, ret1, ret2 @@ -483,10 +483,10 @@ func (mr *MockBackendMockRecorder) RPCTxFeeCap() *gomock.Call { } // ResolveRpcBlockNumberOrHash mocks base method. -func (m *MockBackend) ResolveRpcBlockNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (idx.Block, error) { +func (m *MockBackend) ResolveRpcBlockNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (idx.BlockID, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ResolveRpcBlockNumberOrHash", ctx, blockNrOrHash) - ret0, _ := ret[0].(idx.Block) + ret0, _ := ret[0].(idx.BlockID) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/eventcheck/epochcheck/epoch_check.go b/eventcheck/epochcheck/epoch_check.go index 620743893..9178384dd 100644 --- a/eventcheck/epochcheck/epoch_check.go +++ b/eventcheck/epochcheck/epoch_check.go @@ -26,7 +26,7 @@ var ( // Reader returns currents epoch and its validators group. 
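Reviewer note on the FeeHistory hunk: the window arithmetic only changes its cast target, and it clamps the oldest block of the requested window at genesis. A self-contained sketch, assuming idx.BlockID is an unsigned integer type (BlockID below is a local stand-in, oldestOfWindow is illustrative only):

package main

import "fmt"

type BlockID uint64 // stand-in for idx.BlockID

// oldestOfWindow returns the first block of a blockCount-sized window ending at
// last, clamped to 0 when the chain is shorter than the requested window.
func oldestOfWindow(last BlockID, blockCount uint64) BlockID {
	oldest := last
	if oldest > BlockID(blockCount) {
		oldest -= BlockID(blockCount - 1)
	} else {
		oldest = 0
	}
	return oldest
}

func main() {
	fmt.Println(oldestOfWindow(100, 10)) // 91: blocks 91..100 form the 10-block window
	fmt.Println(oldestOfWindow(5, 10))   // 0: window clipped at genesis
}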
type Reader interface { base.Reader - GetEpochRules() (opera.Rules, idx.Epoch) + GetEpochRules() (opera.Rules, idx.EpochID) } // Checker which require only current epoch info @@ -51,8 +51,8 @@ func CalcGasPowerUsed(e inter.EventPayloadI, rules opera.Rules) uint64 { gasCfg := rules.Economy.Gas parentsGas := uint64(0) - if idx.Event(len(e.Parents())) > rules.Dag.MaxFreeParents { - parentsGas = uint64(idx.Event(len(e.Parents()))-rules.Dag.MaxFreeParents) * gasCfg.ParentGas + if idx.EventID(len(e.Parents())) > rules.Dag.MaxFreeParents { + parentsGas = uint64(idx.EventID(len(e.Parents()))-rules.Dag.MaxFreeParents) * gasCfg.ParentGas } extraGas := uint64(len(e.Extra())) * gasCfg.ExtraDataGas @@ -110,7 +110,7 @@ func (v *Checker) Validate(e inter.EventPayloadI) error { if e.Epoch() != epoch { return base.ErrNotRelevant } - if idx.Event(len(e.Parents())) > rules.Dag.MaxParents { + if idx.EventID(len(e.Parents())) > rules.Dag.MaxParents { return ErrTooManyParents } if uint32(len(e.Extra())) > rules.Dag.MaxExtraData { diff --git a/eventcheck/gaspowercheck/gas_power_check.go b/eventcheck/gaspowercheck/gas_power_check.go index 737766690..14800221d 100644 --- a/eventcheck/gaspowercheck/gas_power_check.go +++ b/eventcheck/gaspowercheck/gas_power_check.go @@ -26,7 +26,7 @@ type ValidatorState struct { // ValidationContext for gaspower checking type ValidationContext struct { - Epoch idx.Epoch + Epoch idx.EpochID Configs [inter.GasPowerConfigs]Config EpochStart inter.Timestamp Validators *ltypes.Validators diff --git a/eventcheck/heavycheck/heavy_check.go b/eventcheck/heavycheck/heavy_check.go index cf9025467..caa00a4ca 100644 --- a/eventcheck/heavycheck/heavy_check.go +++ b/eventcheck/heavycheck/heavy_check.go @@ -26,9 +26,9 @@ var ( // Reader is accessed by the validator to get the current state. 
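Reviewer note on the epochcheck hunks: the parents-gas rule itself is unchanged, only the counting type is renamed; parents beyond MaxFreeParents are each charged ParentGas. A minimal sketch with local stand-ins (EventID as the parent-count type; the gas constant in main is hypothetical, not taken from opera.Rules):

package main

import "fmt"

type EventID uint32 // stand-in for idx.EventID, used here as a parent count

// parentsGas charges only the parents exceeding the free allowance.
func parentsGas(numParents, maxFreeParents EventID, parentGas uint64) uint64 {
	if numParents > maxFreeParents {
		return uint64(numParents-maxFreeParents) * parentGas
	}
	return 0
}

func main() {
	fmt.Println(parentsGas(5, 3, 2400)) // 2 chargeable parents -> 4800 gas
	fmt.Println(parentsGas(2, 3, 2400)) // within the free allowance -> 0
}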
type Reader interface { - GetEpochPubKeys() (map[idx.ValidatorID]validatorpk.PubKey, idx.Epoch) - GetEpochPubKeysOf(idx.Epoch) map[idx.ValidatorID]validatorpk.PubKey - GetEpochBlockStart(idx.Epoch) idx.Block + GetEpochPubKeys() (map[idx.ValidatorID]validatorpk.PubKey, idx.EpochID) + GetEpochPubKeysOf(idx.EpochID) map[idx.ValidatorID]validatorpk.PubKey + GetEpochBlockStart(idx.EpochID) idx.BlockID } // Checker which requires only parents list + current epoch info @@ -105,7 +105,7 @@ func verifySignature(signedHash hash.Hash, sig inter.Signature, pubkey validator return crypto.VerifySignature(pubkey.Raw, signedHash.Bytes(), sig.Bytes()) } -func (v *Checker) ValidateEventLocator(e inter.SignedEventLocator, authEpoch idx.Epoch, authErr error, checkPayload func() bool) error { +func (v *Checker) ValidateEventLocator(e inter.SignedEventLocator, authEpoch idx.EpochID, authErr error, checkPayload func() bool) error { pubkeys := v.reader.GetEpochPubKeysOf(authEpoch) if len(pubkeys) == 0 { return authErr diff --git a/evmcore/dummy_block.go b/evmcore/dummy_block.go index ef3112799..4409cab68 100644 --- a/evmcore/dummy_block.go +++ b/evmcore/dummy_block.go @@ -52,7 +52,7 @@ type ( PrevRandao common.Hash // == mixHash/mixDigest - Epoch idx.Epoch + Epoch idx.EpochID } EvmBlock struct { diff --git a/gossip/basiccheck_test.go b/gossip/basiccheck_test.go index 1106eca61..1c7569392 100644 --- a/gossip/basiccheck_test.go +++ b/gossip/basiccheck_test.go @@ -21,7 +21,7 @@ type LLRBasicCheckTestSuite struct { env *testEnv me *inter.MutableEventPayload - startEpoch idx.Epoch + startEpoch idx.EpochID } func (s *LLRBasicCheckTestSuite) SetupSuite() { @@ -41,7 +41,7 @@ func (s *LLRBasicCheckTestSuite) SetupSuite() { s.env = env s.me = mutableEventPayloadFromImmutable(e) - s.startEpoch = idx.Epoch(startEpoch) + s.startEpoch = idx.EpochID(startEpoch) } func (s *LLRBasicCheckTestSuite) TearDownSuite() { @@ -82,11 +82,11 @@ func (s *LLRBasicCheckTestSuite) TestBasicCheckValidate() { { "Validate checkInited ErrNoParents", func() { - s.me.SetEpoch(idx.Epoch(1)) - s.me.SetFrame(idx.Frame(1)) + s.me.SetEpoch(idx.EpochID(1)) + s.me.SetFrame(idx.FrameID(1)) s.me.SetLamport(idx.Lamport(1)) - s.me.SetSeq(idx.Event(2)) + s.me.SetSeq(idx.EventID(2)) parents := hash.Events{} s.me.SetParents(parents) }, @@ -95,9 +95,9 @@ func (s *LLRBasicCheckTestSuite) TestBasicCheckValidate() { { "Validate ErrHugeValue-1", func() { - s.me.SetSeq(idx.Event(1)) - s.me.SetEpoch(idx.Epoch(1)) - s.me.SetFrame(idx.Frame(1)) + s.me.SetSeq(idx.EventID(1)) + s.me.SetEpoch(idx.EpochID(1)) + s.me.SetFrame(idx.FrameID(1)) s.me.SetLamport(idx.Lamport(1)) s.me.SetGasPowerUsed(math.MaxInt64 - 1) @@ -107,9 +107,9 @@ func (s *LLRBasicCheckTestSuite) TestBasicCheckValidate() { { "Validate ErrHugeValue-2", func() { - s.me.SetSeq(idx.Event(1)) - s.me.SetEpoch(idx.Epoch(1)) - s.me.SetFrame(idx.Frame(1)) + s.me.SetSeq(idx.EventID(1)) + s.me.SetEpoch(idx.EpochID(1)) + s.me.SetFrame(idx.FrameID(1)) s.me.SetLamport(idx.Lamport(1)) s.me.SetGasPowerLeft(inter.GasPowerLeft{Gas: [2]uint64{math.MaxInt64 - 1, math.MaxInt64}}) @@ -119,9 +119,9 @@ func (s *LLRBasicCheckTestSuite) TestBasicCheckValidate() { { "Validate ErrZeroTime-1", func() { - s.me.SetSeq(idx.Event(1)) - s.me.SetEpoch(idx.Epoch(1)) - s.me.SetFrame(idx.Frame(1)) + s.me.SetSeq(idx.EventID(1)) + s.me.SetEpoch(idx.EpochID(1)) + s.me.SetFrame(idx.FrameID(1)) s.me.SetLamport(idx.Lamport(1)) s.me.SetCreationTime(0) @@ -131,9 +131,9 @@ func (s *LLRBasicCheckTestSuite) TestBasicCheckValidate() { { "Validate 
ErrZeroTime-2", func() { - s.me.SetSeq(idx.Event(1)) - s.me.SetEpoch(idx.Epoch(1)) - s.me.SetFrame(idx.Frame(1)) + s.me.SetSeq(idx.EventID(1)) + s.me.SetEpoch(idx.EpochID(1)) + s.me.SetFrame(idx.FrameID(1)) s.me.SetLamport(idx.Lamport(1)) s.me.SetMedianTime(0) @@ -143,9 +143,9 @@ func (s *LLRBasicCheckTestSuite) TestBasicCheckValidate() { { "Validate checkTxs validateTx ErrNegativeValue-1", func() { - s.me.SetSeq(idx.Event(1)) - s.me.SetEpoch(idx.Epoch(1)) - s.me.SetFrame(idx.Frame(1)) + s.me.SetSeq(idx.EventID(1)) + s.me.SetEpoch(idx.EpochID(1)) + s.me.SetFrame(idx.FrameID(1)) s.me.SetLamport(idx.Lamport(1)) h := hash.BytesToEvent(bytes.Repeat([]byte{math.MaxUint8}, 32)) @@ -169,9 +169,9 @@ func (s *LLRBasicCheckTestSuite) TestBasicCheckValidate() { { "Validate checkTxs validateTx ErrNegativeValue-2", func() { - s.me.SetSeq(idx.Event(1)) - s.me.SetEpoch(idx.Epoch(1)) - s.me.SetFrame(idx.Frame(1)) + s.me.SetSeq(idx.EventID(1)) + s.me.SetEpoch(idx.EpochID(1)) + s.me.SetFrame(idx.FrameID(1)) s.me.SetLamport(idx.Lamport(1)) h := hash.BytesToEvent(bytes.Repeat([]byte{math.MaxUint8}, 32)) @@ -195,9 +195,9 @@ func (s *LLRBasicCheckTestSuite) TestBasicCheckValidate() { { "Validate checkTxs validateTx ErrIntrinsicGas", func() { - s.me.SetSeq(idx.Event(1)) - s.me.SetEpoch(idx.Epoch(1)) - s.me.SetFrame(idx.Frame(1)) + s.me.SetSeq(idx.EventID(1)) + s.me.SetEpoch(idx.EpochID(1)) + s.me.SetFrame(idx.FrameID(1)) s.me.SetLamport(idx.Lamport(1)) h := hash.BytesToEvent(bytes.Repeat([]byte{math.MaxUint8}, 32)) @@ -222,9 +222,9 @@ func (s *LLRBasicCheckTestSuite) TestBasicCheckValidate() { { "Validate checkTxs validateTx ErrTipAboveFeeCap", func() { - s.me.SetSeq(idx.Event(1)) - s.me.SetEpoch(idx.Epoch(1)) - s.me.SetFrame(idx.Frame(1)) + s.me.SetSeq(idx.EventID(1)) + s.me.SetEpoch(idx.EpochID(1)) + s.me.SetFrame(idx.FrameID(1)) s.me.SetLamport(idx.Lamport(1)) h := hash.BytesToEvent(bytes.Repeat([]byte{math.MaxUint8}, 32)) @@ -252,9 +252,9 @@ func (s *LLRBasicCheckTestSuite) TestBasicCheckValidate() { { "Validate returns nil", func() { - s.me.SetSeq(idx.Event(1)) - s.me.SetEpoch(idx.Epoch(1)) - s.me.SetFrame(idx.Frame(1)) + s.me.SetSeq(idx.EventID(1)) + s.me.SetEpoch(idx.EpochID(1)) + s.me.SetFrame(idx.FrameID(1)) s.me.SetLamport(idx.Lamport(1)) h := hash.BytesToEvent(bytes.Repeat([]byte{math.MaxUint8}, 32)) diff --git a/gossip/blockproc/drivermodule/driver_txs.go b/gossip/blockproc/drivermodule/driver_txs.go index 20f3a2d52..881da3998 100644 --- a/gossip/blockproc/drivermodule/driver_txs.go +++ b/gossip/blockproc/drivermodule/driver_txs.go @@ -72,7 +72,7 @@ func InternalTxBuilder(statedb state.StateDB) func(calldata []byte, addr common. 
} } -func maxBlockIdx(a, b idx.Block) idx.Block { +func maxBlockIdx(a, b idx.BlockID) idx.BlockID { if a > b { return a } @@ -225,7 +225,7 @@ func (p *DriverTxListener) OnNewLog(l *types.Log) { // epochsNum < 2^24 to avoid overflow epochsNum := new(big.Int).SetBytes(l.Data[29:32]).Uint64() - p.bs.AdvanceEpochs += idx.Epoch(epochsNum) + p.bs.AdvanceEpochs += idx.EpochID(epochsNum) if p.bs.AdvanceEpochs > maxAdvanceEpochs { p.bs.AdvanceEpochs = maxAdvanceEpochs } diff --git a/gossip/c_event_callbacks.go b/gossip/c_event_callbacks.go index 445ee813a..b1345d1f4 100644 --- a/gossip/c_event_callbacks.go +++ b/gossip/c_event_callbacks.go @@ -122,7 +122,7 @@ func processLastEvent(lasts *concurrent.ValidatorEventsSet, e *inter.EventPayloa return lasts } -func (s *Service) switchEpochTo(newEpoch idx.Epoch) { +func (s *Service) switchEpochTo(newEpoch idx.EpochID) { s.store.cache.EventIDs.Reset(newEpoch) s.store.SetHighestLamport(0) // reset dag indexer @@ -141,7 +141,7 @@ func (s *Service) switchEpochTo(newEpoch idx.Epoch) { s.feed.newEpoch.Send(newEpoch) } -func (s *Service) SwitchEpochTo(newEpoch idx.Epoch) error { +func (s *Service) SwitchEpochTo(newEpoch idx.EpochID) error { bs, es := s.store.GetHistoryBlockEpochState(newEpoch) if bs == nil { return errNonExistingEpoch @@ -162,7 +162,7 @@ func (s *Service) SwitchEpochTo(newEpoch idx.Epoch) error { return nil } -func (s *Service) processEventEpochIndex(e *inter.EventPayload, oldEpoch, newEpoch idx.Epoch) { +func (s *Service) processEventEpochIndex(e *inter.EventPayload, oldEpoch, newEpoch idx.EpochID) { // index DAG heads and last events s.store.SetHeads(oldEpoch, processEventHeads(s.store.GetHeads(oldEpoch), e)) s.store.SetLastEvents(oldEpoch, processLastEvent(s.store.GetLastEvents(oldEpoch), e)) diff --git a/gossip/c_llr_callbacks.go b/gossip/c_llr_callbacks.go index 65852790d..ec0a0cdd6 100644 --- a/gossip/c_llr_callbacks.go +++ b/gossip/c_llr_callbacks.go @@ -22,7 +22,7 @@ import ( // defaultBlobGasPrice Sonic does not support blobs, so this price is constant var defaultBlobGasPrice = big.NewInt(1) // TODO issue #147 -func indexRawReceipts(s *Store, receiptsForStorage []*types.ReceiptForStorage, txs types.Transactions, blockIdx idx.Block, blockHash common.Hash, config *params.ChainConfig, time uint64, baseFee *big.Int, blobGasPrice *big.Int) (types.Receipts, error) { +func indexRawReceipts(s *Store, receiptsForStorage []*types.ReceiptForStorage, txs types.Transactions, blockIdx idx.BlockID, blockHash common.Hash, config *params.ChainConfig, time uint64, baseFee *big.Int, blobGasPrice *big.Int) (types.Receipts, error) { s.evm.SetRawReceipts(blockIdx, receiptsForStorage) receipts, err := evmstore.UnwrapStorageReceipts(receiptsForStorage, blockIdx, config, blockHash, time, baseFee, blobGasPrice, txs) diff --git a/gossip/checker_helpers.go b/gossip/checker_helpers.go index 0b9cae857..15c5d895e 100644 --- a/gossip/checker_helpers.go +++ b/gossip/checker_helpers.go @@ -23,7 +23,7 @@ func (r *GasPowerCheckReader) GetValidationContext() *gaspowercheck.ValidationCo } // NewGasPowerContext reads current validation context for gaspowercheck -func NewGasPowerContext(s *Store, validators *ltypes.Validators, epoch idx.Epoch, cfg opera.EconomyRules) *gaspowercheck.ValidationContext { +func NewGasPowerContext(s *Store, validators *ltypes.Validators, epoch idx.EpochID, cfg opera.EconomyRules) *gaspowercheck.ValidationContext { // engineMu is locked here short := cfg.ShortGasPower @@ -67,7 +67,7 @@ func NewGasPowerContext(s *Store, validators 
*ltypes.Validators, epoch idx.Epoch // ValidatorsPubKeys stores info to authenticate validators type ValidatorsPubKeys struct { - Epoch idx.Epoch + Epoch idx.EpochID PubKeys map[idx.ValidatorID]validatorpk.PubKey } @@ -78,14 +78,14 @@ type HeavyCheckReader struct { } // GetEpochPubKeys is safe for concurrent use -func (r *HeavyCheckReader) GetEpochPubKeys() (map[idx.ValidatorID]validatorpk.PubKey, idx.Epoch) { +func (r *HeavyCheckReader) GetEpochPubKeys() (map[idx.ValidatorID]validatorpk.PubKey, idx.EpochID) { auth := r.Pubkeys.Load().(*ValidatorsPubKeys) return auth.PubKeys, auth.Epoch } // GetEpochPubKeysOf is safe for concurrent use -func (r *HeavyCheckReader) GetEpochPubKeysOf(epoch idx.Epoch) map[idx.ValidatorID]validatorpk.PubKey { +func (r *HeavyCheckReader) GetEpochPubKeysOf(epoch idx.EpochID) map[idx.ValidatorID]validatorpk.PubKey { auth := readEpochPubKeys(r.Store, epoch) if auth == nil { return nil @@ -94,7 +94,7 @@ func (r *HeavyCheckReader) GetEpochPubKeysOf(epoch idx.Epoch) map[idx.ValidatorI } // GetEpochBlockStart is safe for concurrent use -func (r *HeavyCheckReader) GetEpochBlockStart(epoch idx.Epoch) idx.Block { +func (r *HeavyCheckReader) GetEpochBlockStart(epoch idx.EpochID) idx.BlockID { bs, _ := r.Store.GetHistoryBlockEpochState(epoch) if bs == nil { return 0 @@ -103,7 +103,7 @@ func (r *HeavyCheckReader) GetEpochBlockStart(epoch idx.Epoch) idx.Block { } // readEpochPubKeys reads epoch pubkeys -func readEpochPubKeys(s *Store, epoch idx.Epoch) *ValidatorsPubKeys { +func readEpochPubKeys(s *Store, epoch idx.EpochID) *ValidatorsPubKeys { es := s.GetHistoryEpochState(epoch) if es == nil { return nil diff --git a/gossip/common_test.go b/gossip/common_test.go index 80cedab0b..0be4ae270 100644 --- a/gossip/common_test.go +++ b/gossip/common_test.go @@ -137,7 +137,7 @@ func (m testConfirmedEventsModule) Start(bs iblockproc.BlockState, es iblockproc return testConfirmedEventsProcessor{p, m.env} } -func newTestEnv(firstEpoch idx.Epoch, validatorsNum idx.Validator, tb testing.TB) *testEnv { +func newTestEnv(firstEpoch idx.EpochID, validatorsNum idx.Validator, tb testing.TB) *testEnv { rules := opera.FakeNetRules() rules.Epochs.MaxEpochDuration = inter.Timestamp(maxEpochDuration) rules.Blocks.MaxEmptyBlockSkipPeriod = 0 @@ -192,7 +192,7 @@ func newTestEnv(firstEpoch idx.Epoch, validatorsNum idx.Validator, tb testing.TB PubKey: pubkey, } cfg.EmitIntervals = emitter.EmitIntervals{} - cfg.MaxParents = idx.Event(validatorsNum/2 + 1) + cfg.MaxParents = idx.EventID(validatorsNum/2 + 1) cfg.MaxTxsPerAddress = 10000000 _ = valKeystore.Add(pubkey, crypto.FromECDSA(makefakegenesis.FakeKey(vid)), validatorpk.FakePassword) _ = valKeystore.Unlock(pubkey, validatorpk.FakePassword) @@ -243,7 +243,7 @@ func (env *testEnv) ApplyTxs(spent time.Duration, txs ...*types.Transaction) (ty baseFee := big.NewInt(0) blobGasPrice := big.NewInt(1) - receipts := env.store.evm.GetReceipts(idx.Block(b.Block.Number.Uint64()), config, b.Block.Hash, time, baseFee, blobGasPrice, b.Block.Transactions) + receipts := env.store.evm.GetReceipts(idx.BlockID(b.Block.Number.Uint64()), config, b.Block.Hash, time, baseFee, blobGasPrice, b.Block.Transactions) for i, tx := range b.Block.Transactions { if r, _, _ := tx.RawSignatureValues(); r.Sign() != 0 { mu.Lock() @@ -379,7 +379,7 @@ var ( // CodeAt returns the code of the given account. This is needed to differentiate // between contract internal errors and the local chain being out of sync. 
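Reviewer note on the HeavyCheckReader hunks: the lock-free pattern of publishing the current epoch's key set through an atomic.Value is preserved; only the epoch type changes. A simplified, self-contained sketch of that pattern (types below are stand-ins, not the go-opera definitions):

package main

import (
	"fmt"
	"sync/atomic"
)

type EpochID uint32
type ValidatorID uint32

type ValidatorsPubKeys struct {
	Epoch   EpochID
	PubKeys map[ValidatorID][]byte
}

type HeavyCheckReader struct {
	pubkeys atomic.Value // holds *ValidatorsPubKeys, swapped on epoch change
}

// GetEpochPubKeys is safe for concurrent use: readers load a consistent snapshot.
func (r *HeavyCheckReader) GetEpochPubKeys() (map[ValidatorID][]byte, EpochID) {
	auth := r.pubkeys.Load().(*ValidatorsPubKeys)
	return auth.PubKeys, auth.Epoch
}

func main() {
	r := &HeavyCheckReader{}
	r.pubkeys.Store(&ValidatorsPubKeys{Epoch: 7, PubKeys: map[ValidatorID][]byte{1: {0x02}}})
	keys, epoch := r.GetEpochPubKeys()
	fmt.Println(epoch, len(keys)) // 7 1
}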
func (env *testEnv) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { - if blockNumber != nil && idx.Block(blockNumber.Uint64()) != env.store.GetLatestBlockIndex() { + if blockNumber != nil && idx.BlockID(blockNumber.Uint64()) != env.store.GetLatestBlockIndex() { return nil, errBlockNumberUnsupported } @@ -390,7 +390,7 @@ func (env *testEnv) CodeAt(ctx context.Context, contract common.Address, blockNu // ContractCall executes an Ethereum contract call with the specified data as the // input. func (env *testEnv) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - if blockNumber != nil && idx.Block(blockNumber.Uint64()) != env.store.GetLatestBlockIndex() { + if blockNumber != nil && idx.BlockID(blockNumber.Uint64()) != env.store.GetLatestBlockIndex() { return nil, errBlockNumberUnsupported } diff --git a/gossip/config.go b/gossip/config.go index 521b8a0ec..0267579d8 100644 --- a/gossip/config.go +++ b/gossip/config.go @@ -129,7 +129,7 @@ type PeerCacheConfig struct { // MaxQueuedItems is the maximum number of items to queue up before // dropping broadcasts. This is a sensitive number as a transaction list might // contain a single transaction, or thousands. - MaxQueuedItems idx.Event + MaxQueuedItems idx.EventID MaxQueuedSize uint64 } @@ -207,8 +207,8 @@ func DefaultConfig(scale cachescale.Func) Config { StructLogLimit: 2000, } sessionCfg := cfg.Protocol.DagStreamLeecher.Session - cfg.Protocol.DagProcessor.EventsBufferLimit.Num = idx.Event(sessionCfg.ParallelChunksDownload)* - idx.Event(sessionCfg.DefaultChunkItemsNum) + softLimitItems + cfg.Protocol.DagProcessor.EventsBufferLimit.Num = idx.EventID(sessionCfg.ParallelChunksDownload)* + idx.EventID(sessionCfg.DefaultChunkItemsNum) + softLimitItems cfg.Protocol.DagProcessor.EventsBufferLimit.Size = uint64(sessionCfg.ParallelChunksDownload)*sessionCfg.DefaultChunkItemsSize + 8*opt.MiB cfg.Protocol.DagStreamLeecher.MaxSessionRestart = 4 * time.Minute cfg.Protocol.DagFetcher.ArriveTimeout = 4 * time.Second @@ -221,7 +221,7 @@ func DefaultConfig(scale cachescale.Func) Config { func (c *Config) Validate() error { p := c.Protocol defaultChunkSize := ltypes.Metric{ - Num: idx.Event(p.DagStreamLeecher.Session.DefaultChunkItemsNum), + Num: idx.EventID(p.DagStreamLeecher.Session.DefaultChunkItemsNum), Size: p.DagStreamLeecher.Session.DefaultChunkItemsSize, } if defaultChunkSize.Num > hardLimitItems-1 { diff --git a/gossip/emitter/config.go b/gossip/emitter/config.go index 53cb2bef1..6ff2b1114 100644 --- a/gossip/emitter/config.go +++ b/gossip/emitter/config.go @@ -40,7 +40,7 @@ type Config struct { MaxTxsPerAddress int - MaxParents idx.Event + MaxParents idx.EventID // thresholds on GasLeft LimitedTpsThreshold uint64 diff --git a/gossip/emitter/control.go b/gossip/emitter/control.go index e44ef22d9..f9e7670db 100644 --- a/gossip/emitter/control.go +++ b/gossip/emitter/control.go @@ -12,11 +12,11 @@ import ( "github.com/Fantom-foundation/go-opera/opera" ) -func scalarUpdMetric(diff idx.Event, weight ltypes.Weight, totalWeight ltypes.Weight) ancestor.Metric { +func scalarUpdMetric(diff idx.EventID, weight ltypes.Weight, totalWeight ltypes.Weight) ancestor.Metric { return ancestor.Metric(scalarUpdMetricF(uint64(diff)*piecefunc.DecimalUnit)) * ancestor.Metric(weight) / ancestor.Metric(totalWeight) } -func updMetric(median, cur, upd idx.Event, validatorIdx idx.Validator, validators *ltypes.Validators) ancestor.Metric { +func updMetric(median, cur, upd idx.EventID, 
validatorIdx idx.Validator, validators *ltypes.Validators) ancestor.Metric { if upd <= median || upd <= cur { return 0 } @@ -27,7 +27,7 @@ func updMetric(median, cur, upd idx.Event, validatorIdx idx.Validator, validator return scalarUpdMetric(upd-median, weight, validators.TotalWeight()) } -func kickStartMetric(metric ancestor.Metric, seq idx.Event) ancestor.Metric { +func kickStartMetric(metric ancestor.Metric, seq idx.EventID) ancestor.Metric { // kickstart metric in a beginning of epoch, when there's nothing to observe yet if seq <= 2 && metric < 0.9*piecefunc.DecimalUnit { metric += 0.1 * piecefunc.DecimalUnit @@ -38,7 +38,7 @@ func kickStartMetric(metric ancestor.Metric, seq idx.Event) ancestor.Metric { return metric } -func eventMetric(orig ancestor.Metric, seq idx.Event) ancestor.Metric { +func eventMetric(orig ancestor.Metric, seq idx.EventID) ancestor.Metric { return kickStartMetric(ancestor.Metric(eventMetricF(uint64(orig))), seq) } diff --git a/gossip/emitter/emitter.go b/gossip/emitter/emitter.go index f4806ad75..d7baa8a5f 100644 --- a/gossip/emitter/emitter.go +++ b/gossip/emitter/emitter.go @@ -61,14 +61,14 @@ type Emitter struct { prevIdleTime time.Time prevEmittedAtTime time.Time - prevEmittedAtBlock idx.Block + prevEmittedAtBlock idx.BlockID originatedTxs *originatedtxs.Buffer pendingGas uint64 // note: track validators and epoch internally to avoid referring to // validators of a future epoch inside OnEventConnected of last epoch event validators *ltypes.Validators - epoch idx.Epoch + epoch idx.EpochID // challenges is deadlines when each validator should emit an event challenges map[idx.ValidatorID]time.Time @@ -90,12 +90,12 @@ type Emitter struct { done chan struct{} wg sync.WaitGroup - maxParents idx.Event + maxParents idx.EventID cache struct { sortedTxs *transactionsByPriceAndNonce poolTime time.Time - poolBlock idx.Block + poolBlock idx.BlockID poolCount int } @@ -346,7 +346,7 @@ func (em *Emitter) createEvent(sortedTxs *transactionsByPriceAndNonce) (*inter.E } var ( - selfParentSeq idx.Event + selfParentSeq idx.EventID selfParentTime inter.Timestamp parents hash.Events maxLamport idx.Lamport diff --git a/gossip/emitter/emitter_test.go b/gossip/emitter/emitter_test.go index bf8e7d78d..d2bdd9a00 100644 --- a/gossip/emitter/emitter_test.go +++ b/gossip/emitter/emitter_test.go @@ -69,10 +69,10 @@ func TestEmitter(t *testing.T) { AnyTimes() external.EXPECT().GetEpochValidators(). - Return(validators, idx.Epoch(1)). + Return(validators, idx.EpochID(1)). AnyTimes() - external.EXPECT().GetLastEvent(idx.Epoch(1), cfg.Validator.ID). + external.EXPECT().GetLastEvent(idx.EpochID(1), cfg.Validator.ID). Return((*hash.Event)(nil)). 
AnyTimes() diff --git a/gossip/emitter/hooks.go b/gossip/emitter/hooks.go index d649cf3c4..06ca126bb 100644 --- a/gossip/emitter/hooks.go +++ b/gossip/emitter/hooks.go @@ -18,7 +18,7 @@ import ( ) // OnNewEpoch should be called after each epoch change, and on startup -func (em *Emitter) OnNewEpoch(newValidators *ltypes.Validators, newEpoch idx.Epoch) { +func (em *Emitter) OnNewEpoch(newValidators *ltypes.Validators, newEpoch idx.EpochID) { em.maxParents = em.config.MaxParents rules := em.world.GetRules() if em.maxParents == 0 { @@ -76,13 +76,13 @@ func (em *Emitter) OnNewEpoch(newValidators *ltypes.Validators, newEpoch idx.Epo em.fcIndexer = ancestor.NewFCIndexer(newValidators, em.world.DagIndex(), em.config.Validator.ID) } else { em.quorumIndexer = ancestor.NewQuorumIndexer(newValidators, vecmt2dagidx.Wrap(em.world.DagIndex()), - func(median, current, update idx.Event, validatorIdx idx.Validator) ancestor.Metric { + func(median, current, update idx.EventID, validatorIdx idx.Validator) ancestor.Metric { return updMetric(median, current, update, validatorIdx, newValidators) }) em.fcIndexer = nil } em.quorumIndexer = ancestor.NewQuorumIndexer(newValidators, vecmt2dagidx.Wrap(em.world.DagIndex()), - func(median, current, update idx.Event, validatorIdx idx.Validator) ancestor.Metric { + func(median, current, update idx.EventID, validatorIdx idx.Validator) ancestor.Metric { return updMetric(median, current, update, validatorIdx, newValidators) }) em.payloadIndexer = ancestor.NewPayloadIndexer(PayloadIndexerSize) diff --git a/gossip/emitter/mock/world.go b/gossip/emitter/mock/world.go index 25cb77a8a..a1b47abc0 100644 --- a/gossip/emitter/mock/world.go +++ b/gossip/emitter/mock/world.go @@ -99,11 +99,11 @@ func (mr *MockExternalMockRecorder) DagIndex() *gomock.Call { } // GetEpochValidators mocks base method. -func (m *MockExternal) GetEpochValidators() (*ltypes.Validators, idx.Epoch) { +func (m *MockExternal) GetEpochValidators() (*ltypes.Validators, idx.EpochID) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetEpochValidators") ret0, _ := ret[0].(*ltypes.Validators) - ret1, _ := ret[1].(idx.Epoch) + ret1, _ := ret[1].(idx.EpochID) return ret0, ret1 } @@ -156,7 +156,7 @@ func (mr *MockExternalMockRecorder) GetGenesisTime() *gomock.Call { } // GetHeads mocks base method. -func (m *MockExternal) GetHeads(arg0 idx.Epoch) hash.Events { +func (m *MockExternal) GetHeads(arg0 idx.EpochID) hash.Events { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetHeads", arg0) ret0, _ := ret[0].(hash.Events) @@ -170,7 +170,7 @@ func (mr *MockExternalMockRecorder) GetHeads(arg0 interface{}) *gomock.Call { } // GetLastEvent mocks base method. -func (m *MockExternal) GetLastEvent(arg0 idx.Epoch, arg1 idx.ValidatorID) *hash.Event { +func (m *MockExternal) GetLastEvent(arg0 idx.EpochID, arg1 idx.ValidatorID) *hash.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLastEvent", arg0, arg1) ret0, _ := ret[0].(*hash.Event) @@ -184,10 +184,10 @@ func (mr *MockExternalMockRecorder) GetLastEvent(arg0, arg1 interface{}) *gomock } // GetLatestBlockIndex mocks base method. 
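Reviewer note on the mock edits (ethapi/mock_backend.go, emitter/mock/world.go): generated mocks type-assert stored return values back to the interface's concrete return types, so the rename has to reach the generated code as well, otherwise the assertion silently yields a zero value. A stand-alone illustration of that failure mode (returnAs is illustrative, not gomock internals):

package main

import "fmt"

type BlockID uint64

// returnAs mimics the comma-ok assertion mockgen emits: a mismatched stored type
// degrades to T's zero value instead of failing loudly.
func returnAs[T any](ret any) T {
	v, _ := ret.(T)
	return v
}

func main() {
	stored := any(BlockID(42))
	fmt.Println(returnAs[BlockID](stored)) // 42
	fmt.Println(returnAs[uint64](stored))  // 0 - wrong target type, assertion fails
}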
-func (m *MockExternal) GetLatestBlockIndex() idx.Block { +func (m *MockExternal) GetLatestBlockIndex() idx.BlockID { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLatestBlockIndex") - ret0, _ := ret[0].(idx.Block) + ret0, _ := ret[0].(idx.BlockID) return ret0 } diff --git a/gossip/emitter/parents.go b/gossip/emitter/parents.go index d1f6357cd..739637255 100644 --- a/gossip/emitter/parents.go +++ b/gossip/emitter/parents.go @@ -9,27 +9,27 @@ import ( ) // buildSearchStrategies returns a strategy for each parent search -func (em *Emitter) buildSearchStrategies(maxParents idx.Event) []ancestor.SearchStrategy { +func (em *Emitter) buildSearchStrategies(maxParents idx.EventID) []ancestor.SearchStrategy { strategies := make([]ancestor.SearchStrategy, 0, maxParents) if maxParents == 0 { return strategies } payloadStrategy := em.payloadIndexer.SearchStrategy() - for idx.Event(len(strategies)) < 1 { + for idx.EventID(len(strategies)) < 1 { strategies = append(strategies, payloadStrategy) } randStrategy := ancestor.NewRandomStrategy(nil) - for idx.Event(len(strategies)) < maxParents/2 { + for idx.EventID(len(strategies)) < maxParents/2 { strategies = append(strategies, randStrategy) } if em.fcIndexer != nil { quorumStrategy := em.fcIndexer.SearchStrategy() - for idx.Event(len(strategies)) < maxParents { + for idx.EventID(len(strategies)) < maxParents { strategies = append(strategies, quorumStrategy) } } else if em.quorumIndexer != nil { quorumStrategy := em.quorumIndexer.SearchStrategy() - for idx.Event(len(strategies)) < maxParents { + for idx.EventID(len(strategies)) < maxParents { strategies = append(strategies, quorumStrategy) } } @@ -37,7 +37,7 @@ func (em *Emitter) buildSearchStrategies(maxParents idx.Event) []ancestor.Search } // chooseParents selects an "optimal" parents set for the validator -func (em *Emitter) chooseParents(epoch idx.Epoch, myValidatorID idx.ValidatorID) (*hash.Event, hash.Events, bool) { +func (em *Emitter) chooseParents(epoch idx.EpochID, myValidatorID idx.ValidatorID) (*hash.Event, hash.Events, bool) { selfParent := em.world.GetLastEvent(epoch, myValidatorID) if selfParent == nil { return nil, nil, true @@ -48,6 +48,6 @@ func (em *Emitter) chooseParents(epoch idx.Epoch, myValidatorID idx.ValidatorID) } parents := hash.Events{*selfParent} heads := em.world.GetHeads(epoch) // events with no descendants - parents = ancestor.ChooseParents(parents, heads, em.buildSearchStrategies(em.maxParents-idx.Event(len(parents)))) + parents = ancestor.ChooseParents(parents, heads, em.buildSearchStrategies(em.maxParents-idx.EventID(len(parents)))) return selfParent, parents, true } diff --git a/gossip/emitter/parents_test.go b/gossip/emitter/parents_test.go index 35011d6c1..79ccb7514 100644 --- a/gossip/emitter/parents_test.go +++ b/gossip/emitter/parents_test.go @@ -22,7 +22,7 @@ func TestChooseParents_NoParentsForGenesisEvent(t *testing.T) { fixedPriceBaseFeeSource{}, ) - epoch := idx.Epoch(1) + epoch := idx.EpochID(1) validatorId := idx.ValidatorID(1) external.EXPECT().GetLastEvent(epoch, validatorId) @@ -50,7 +50,7 @@ func TestChooseParents_NonGenesisEventMustHaveOneSelfParent(t *testing.T) { em.maxParents = 3 em.payloadIndexer = ancestor.NewPayloadIndexer(3) - epoch := idx.Epoch(1) + epoch := idx.EpochID(1) validatorId := idx.ValidatorID(1) validatorIndex := vecmt.NewIndex(nil, vecmt.LiteConfig()) diff --git a/gossip/emitter/txs.go b/gossip/emitter/txs.go index 22d8c5c3a..e0f296c3d 100644 --- a/gossip/emitter/txs.go +++ b/gossip/emitter/txs.go @@ -112,7 +112,7 @@ func 
getTxRoundIndex(now, txTime time.Time, validatorsNum idx.Validator) int { } // safe for concurrent use -func (em *Emitter) isMyTxTurn(txHash common.Hash, sender common.Address, accountNonce uint64, now time.Time, validators *ltypes.Validators, me idx.ValidatorID, epoch idx.Epoch) bool { +func (em *Emitter) isMyTxTurn(txHash common.Hash, sender common.Address, accountNonce uint64, now time.Time, validators *ltypes.Validators, me idx.ValidatorID, epoch idx.EpochID) bool { txTime := txtime.Of(txHash) roundIndex := getTxRoundIndex(now, txTime, validators.Len()) diff --git a/gossip/emitter/world.go b/gossip/emitter/world.go index 5c9c8343e..ad2e46a0a 100644 --- a/gossip/emitter/world.go +++ b/gossip/emitter/world.go @@ -55,12 +55,12 @@ type ( // Reader is a callback for getting events from an external storage. type Reader interface { - GetLatestBlockIndex() idx.Block - GetEpochValidators() (*ltypes.Validators, idx.Epoch) + GetLatestBlockIndex() idx.BlockID + GetEpochValidators() (*ltypes.Validators, idx.EpochID) GetEvent(hash.Event) *inter.Event GetEventPayload(hash.Event) *inter.EventPayload - GetLastEvent(epoch idx.Epoch, from idx.ValidatorID) *hash.Event - GetHeads(idx.Epoch) hash.Events + GetLastEvent(epoch idx.EpochID, from idx.ValidatorID) *hash.Event + GetHeads(idx.EpochID) hash.Events GetGenesisTime() inter.Timestamp GetRules() opera.Rules } diff --git a/gossip/emitter_world.go b/gossip/emitter_world.go index 78c84f9c7..09a18421e 100644 --- a/gossip/emitter_world.go +++ b/gossip/emitter_world.go @@ -77,14 +77,14 @@ func (ew *emitterWorldProc) PeersNum() int { return ew.s.handler.peers.Len() } -func (ew *emitterWorldRead) GetHeads(epoch idx.Epoch) hash.Events { +func (ew *emitterWorldRead) GetHeads(epoch idx.EpochID) hash.Events { return ew.Store.GetHeadsSlice(epoch) } -func (ew *emitterWorldRead) GetLastEvent(epoch idx.Epoch, from idx.ValidatorID) *hash.Event { +func (ew *emitterWorldRead) GetLastEvent(epoch idx.EpochID, from idx.ValidatorID) *hash.Event { return ew.Store.GetLastEvent(epoch, from) } -func (ew *emitterWorldRead) GetBlockEpoch(block idx.Block) idx.Epoch { +func (ew *emitterWorldRead) GetBlockEpoch(block idx.BlockID) idx.EpochID { return ew.Store.FindBlockEpoch(block) } diff --git a/gossip/ethapi_backend.go b/gossip/ethapi_backend.go index 87af859a2..66dd4cf1d 100644 --- a/gossip/ethapi_backend.go +++ b/gossip/ethapi_backend.go @@ -56,15 +56,15 @@ func (b *EthAPIBackend) CurrentBlock() *evmcore.EvmBlock { return b.state.CurrentBlock() } -func (b *EthAPIBackend) ResolveRpcBlockNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (idx.Block, error) { +func (b *EthAPIBackend) ResolveRpcBlockNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (idx.BlockID, error) { latest := b.svc.store.GetLatestBlockIndex() if number, ok := blockNrOrHash.Number(); ok && (number == rpc.LatestBlockNumber || number == rpc.PendingBlockNumber) { return latest, nil } else if number, ok := blockNrOrHash.Number(); ok { - if idx.Block(number) > latest { + if idx.BlockID(number) > latest { return 0, errors.New("block not found") } - return idx.Block(number), nil + return idx.BlockID(number), nil } else if h, ok := blockNrOrHash.Hash(); ok { index := b.svc.store.GetBlockIndex(hash.Event(h)) if index == nil { @@ -146,7 +146,7 @@ func (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockN // decodeShortEventID decodes ShortID // example of a ShortID: "5:26:a2395846", where 5 is epoch, 26 is lamport, a2395846 are first bytes of the hash // s is a 
string splitted by ":" separator -func decodeShortEventID(s []string) (idx.Epoch, idx.Lamport, []byte, error) { +func decodeShortEventID(s []string) (idx.EpochID, idx.Lamport, []byte, error) { if len(s) != 3 { return 0, 0, nil, errors.New("incorrect format of short event ID (need Epoch:Lamport:Hash") } @@ -158,7 +158,7 @@ func decodeShortEventID(s []string) (idx.Epoch, idx.Lamport, []byte, error) { if err != nil { return 0, 0, nil, errors.Wrap(err, "short hash parsing error (lamport)") } - return idx.Epoch(epoch), idx.Lamport(lamport), common.FromHex(s[2]), nil + return idx.EpochID(epoch), idx.Lamport(lamport), common.FromHex(s[2]), nil } // GetFullEventID "converts" ShortID to full event's hash, by searching in events DB. @@ -231,7 +231,7 @@ func (b *EthAPIBackend) GetHeads(ctx context.Context, epoch rpc.BlockNumber) (he return } -func (b *EthAPIBackend) epochWithDefault(ctx context.Context, epoch rpc.BlockNumber) (requested idx.Epoch, err error) { +func (b *EthAPIBackend) epochWithDefault(ctx context.Context, epoch rpc.BlockNumber) (requested idx.EpochID, err error) { current := b.svc.store.GetEpoch() switch { @@ -239,8 +239,8 @@ func (b *EthAPIBackend) epochWithDefault(ctx context.Context, epoch rpc.BlockNum requested = current case epoch == rpc.LatestBlockNumber: requested = current - 1 - case epoch >= 0 && idx.Epoch(epoch) <= current: - requested = idx.Epoch(epoch) + case epoch >= 0 && idx.EpochID(epoch) <= current: + requested = idx.EpochID(epoch) default: err = errors.New("epoch is not in range") return @@ -299,7 +299,7 @@ func (b *EthAPIBackend) GetReceiptsByNumber(ctx context.Context, number rpc.Bloc time := uint64(block.Time.Unix()) baseFee := block.BaseFee blobGasPrice := new(big.Int) // TODO issue #147 - receipts := b.svc.store.evm.GetReceipts(idx.Block(number), b.ChainConfig(), block.Hash, time, baseFee, blobGasPrice, block.Transactions) + receipts := b.svc.store.evm.GetReceipts(idx.BlockID(number), b.ChainConfig(), block.Hash, time, baseFee, blobGasPrice, block.Transactions) return receipts, nil } @@ -467,7 +467,7 @@ func (b *EthAPIBackend) EvmLogIndex() topicsdb.Index { } // CurrentEpoch returns current epoch number. 
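Reviewer note: decodeShortEventID keeps its "epoch:lamport:hashPrefix" format (e.g. "5:26:a2395846"); only the epoch's type changes. A simplified sketch of the format, using hex.DecodeString in place of common.FromHex and local stand-ins for the idx types:

package main

import (
	"encoding/hex"
	"errors"
	"fmt"
	"strconv"
	"strings"
)

type EpochID uint32
type Lamport uint32

func decodeShortEventID(s string) (EpochID, Lamport, []byte, error) {
	parts := strings.Split(s, ":")
	if len(parts) != 3 {
		return 0, 0, nil, errors.New("need Epoch:Lamport:Hash")
	}
	epoch, err := strconv.ParseUint(parts[0], 10, 32)
	if err != nil {
		return 0, 0, nil, err
	}
	lamport, err := strconv.ParseUint(parts[1], 10, 32)
	if err != nil {
		return 0, 0, nil, err
	}
	prefix, err := hex.DecodeString(parts[2])
	if err != nil {
		return 0, 0, nil, err
	}
	return EpochID(epoch), Lamport(lamport), prefix, nil
}

func main() {
	e, l, p, _ := decodeShortEventID("5:26:a2395846")
	fmt.Println(e, l, hex.EncodeToString(p)) // 5 26 a2395846
}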
-func (b *EthAPIBackend) CurrentEpoch(ctx context.Context) idx.Epoch { +func (b *EthAPIBackend) CurrentEpoch(ctx context.Context) idx.EpochID { return b.svc.store.GetEpoch() } @@ -497,14 +497,14 @@ func (b *EthAPIBackend) GetOriginatedFee(ctx context.Context, vid idx.ValidatorI return bs.GetValidatorState(vid, es.Validators).Originated, nil } -func (b *EthAPIBackend) GetDowntime(ctx context.Context, vid idx.ValidatorID) (idx.Block, inter.Timestamp, error) { +func (b *EthAPIBackend) GetDowntime(ctx context.Context, vid idx.ValidatorID) (idx.BlockID, inter.Timestamp, error) { // Note: loads bs and es atomically to avoid a race condition bs, es := b.svc.store.GetBlockEpochState() if !es.Validators.Exists(vid) { return 0, 0, nil } vs := bs.GetValidatorState(vid, es.Validators) - missedBlocks := idx.Block(0) + missedBlocks := idx.BlockID(0) if bs.LastBlock.Idx > vs.LastBlock { missedBlocks = bs.LastBlock.Idx - vs.LastBlock } @@ -526,7 +526,7 @@ func (b *EthAPIBackend) GetEpochBlockState(ctx context.Context, epoch rpc.BlockN if epoch == rpc.LatestBlockNumber { epoch = rpc.BlockNumber(b.svc.store.GetEpoch()) } - bs, es := b.svc.store.GetHistoryBlockEpochState(idx.Epoch(epoch)) + bs, es := b.svc.store.GetHistoryBlockEpochState(idx.EpochID(epoch)) return bs, es, nil } diff --git a/gossip/evm_state_reader.go b/gossip/evm_state_reader.go index 267a81f7a..7c0cd4b6b 100644 --- a/gossip/evm_state_reader.go +++ b/gossip/evm_state_reader.go @@ -63,8 +63,8 @@ func (r *EvmStateReader) LastHeaderWithArchiveState() (*evmcore.EvmHeader, error if err != nil { return nil, fmt.Errorf("failed to get latest archive block; %v", err) } - if !empty && idx.Block(latestArchiveBlock) < latestBlock { - latestBlock = idx.Block(latestArchiveBlock) + if !empty && idx.BlockID(latestArchiveBlock) < latestBlock { + latestBlock = idx.BlockID(latestArchiveBlock) } return r.getBlock(common.Hash{}, latestBlock, false).Header(), nil @@ -75,14 +75,14 @@ func (r *EvmStateReader) GetHeaderByNumber(n uint64) *evmcore.EvmHeader { } func (r *EvmStateReader) GetHeader(h common.Hash, n uint64) *evmcore.EvmHeader { - return r.getBlock(h, idx.Block(n), false).Header() + return r.getBlock(h, idx.BlockID(n), false).Header() } func (r *EvmStateReader) GetBlock(h common.Hash, n uint64) *evmcore.EvmBlock { - return r.getBlock(h, idx.Block(n), true) + return r.getBlock(h, idx.BlockID(n), true) } -func (r *EvmStateReader) getBlock(h common.Hash, n idx.Block, readTxs bool) *evmcore.EvmBlock { +func (r *EvmStateReader) getBlock(h common.Hash, n idx.BlockID, readTxs bool) *evmcore.EvmBlock { block := r.store.GetBlock(n) if block == nil { return nil diff --git a/gossip/evmstore/statedb.go b/gossip/evmstore/statedb.go index a2218d36e..87800a7ff 100644 --- a/gossip/evmstore/statedb.go +++ b/gossip/evmstore/statedb.go @@ -59,7 +59,7 @@ func (s *Store) GetRpcStateDb(blockNum *big.Int, stateRoot common.Hash) (state.S } // CheckLiveStateHash returns if the hash of the current live StateDB hash matches (and fullsync is possible) -func (s *Store) CheckLiveStateHash(blockNum idx.Block, root hash.Hash) error { +func (s *Store) CheckLiveStateHash(blockNum idx.BlockID, root hash.Hash) error { if s.liveStateDb == nil { return fmt.Errorf("unable to get live state - EvmStore is not open") } @@ -71,7 +71,7 @@ func (s *Store) CheckLiveStateHash(blockNum idx.Block, root hash.Hash) error { } // CheckArchiveStateHash returns if the hash of the given archive StateDB hash matches -func (s *Store) CheckArchiveStateHash(blockNum idx.Block, root hash.Hash) error { +func (s 
*Store) CheckArchiveStateHash(blockNum idx.BlockID, root hash.Hash) error { if s.carmenState == nil { return fmt.Errorf("unable to get live state - EvmStore is not open") } diff --git a/gossip/evmstore/store_block_cache.go b/gossip/evmstore/store_block_cache.go index fd95551bb..75466a19a 100644 --- a/gossip/evmstore/store_block_cache.go +++ b/gossip/evmstore/store_block_cache.go @@ -7,7 +7,7 @@ import ( "github.com/Fantom-foundation/go-opera/evmcore" ) -func (s *Store) GetCachedEvmBlock(n idx.Block) *evmcore.EvmBlock { +func (s *Store) GetCachedEvmBlock(n idx.BlockID) *evmcore.EvmBlock { c, ok := s.cache.EvmBlocks.Get(n) if !ok { return nil @@ -16,7 +16,7 @@ func (s *Store) GetCachedEvmBlock(n idx.Block) *evmcore.EvmBlock { return c.(*evmcore.EvmBlock) } -func (s *Store) SetCachedEvmBlock(n idx.Block, b *evmcore.EvmBlock) { +func (s *Store) SetCachedEvmBlock(n idx.BlockID, b *evmcore.EvmBlock) { var empty = common.Hash{} if b.EvmHeader.TxHash == empty { panic("You have to cache only completed blocks (with txs)") diff --git a/gossip/evmstore/store_receipts.go b/gossip/evmstore/store_receipts.go index d75581235..60c5f915b 100644 --- a/gossip/evmstore/store_receipts.go +++ b/gossip/evmstore/store_receipts.go @@ -15,7 +15,7 @@ import ( ) // SetReceipts stores transaction receipts. -func (s *Store) SetReceipts(n idx.Block, receipts types.Receipts) { +func (s *Store) SetReceipts(n idx.BlockID, receipts types.Receipts) { receiptsStorage := make([]*types.ReceiptForStorage, receipts.Len()) for i, r := range receipts { receiptsStorage[i] = (*types.ReceiptForStorage)(r) @@ -28,7 +28,7 @@ func (s *Store) SetReceipts(n idx.Block, receipts types.Receipts) { } // SetRawReceipts stores raw transaction receipts. -func (s *Store) SetRawReceipts(n idx.Block, receipts []*types.ReceiptForStorage) (size int) { +func (s *Store) SetRawReceipts(n idx.BlockID, receipts []*types.ReceiptForStorage) (size int) { buf, err := rlp.EncodeToBytes(receipts) if err != nil { s.Log.Crit("Failed to encode rlp", "err", err) @@ -44,7 +44,7 @@ func (s *Store) SetRawReceipts(n idx.Block, receipts []*types.ReceiptForStorage) return len(buf) } -func (s *Store) GetRawReceiptsRLP(n idx.Block) rlp.RawValue { +func (s *Store) GetRawReceiptsRLP(n idx.BlockID) rlp.RawValue { buf, err := s.table.Receipts.Get(n.Bytes()) if err != nil { s.Log.Crit("Failed to get key-value", "err", err) @@ -52,7 +52,7 @@ func (s *Store) GetRawReceiptsRLP(n idx.Block) rlp.RawValue { return buf } -func (s *Store) GetRawReceipts(n idx.Block) ([]*types.ReceiptForStorage, int) { +func (s *Store) GetRawReceipts(n idx.BlockID) ([]*types.ReceiptForStorage, int) { buf := s.GetRawReceiptsRLP(n) if buf == nil { return nil, 0 @@ -66,7 +66,7 @@ func (s *Store) GetRawReceipts(n idx.Block) ([]*types.ReceiptForStorage, int) { return receiptsStorage, len(buf) } -func UnwrapStorageReceipts(receiptsStorage []*types.ReceiptForStorage, n idx.Block, config *params.ChainConfig, hash common.Hash, time uint64, baseFee *big.Int, blobGasPrice *big.Int, txs types.Transactions) (types.Receipts, error) { +func UnwrapStorageReceipts(receiptsStorage []*types.ReceiptForStorage, n idx.BlockID, config *params.ChainConfig, hash common.Hash, time uint64, baseFee *big.Int, blobGasPrice *big.Int, txs types.Transactions) (types.Receipts, error) { receipts := make(types.Receipts, len(receiptsStorage)) for i, r := range receiptsStorage { receipts[i] = (*types.Receipt)(r) @@ -76,7 +76,7 @@ func UnwrapStorageReceipts(receiptsStorage []*types.ReceiptForStorage, n idx.Blo } // GetReceipts returns stored 
transaction receipts. -func (s *Store) GetReceipts(n idx.Block, config *params.ChainConfig, hash common.Hash, time uint64, baseFee *big.Int, blobGasPrice *big.Int, txs types.Transactions) types.Receipts { +func (s *Store) GetReceipts(n idx.BlockID, config *params.ChainConfig, hash common.Hash, time uint64, baseFee *big.Int, blobGasPrice *big.Int, txs types.Transactions) types.Receipts { // Get data from LRU cache first. if s.cache.Receipts != nil { if c, ok := s.cache.Receipts.Get(n); ok { diff --git a/gossip/evmstore/store_receipts_test.go b/gossip/evmstore/store_receipts_test.go index 479cb872c..997936774 100644 --- a/gossip/evmstore/store_receipts_test.go +++ b/gossip/evmstore/store_receipts_test.go @@ -85,8 +85,8 @@ func benchStoreSetRawReceipts(b *testing.B, store *Store) { } } -func fakeReceipts() (idx.Block, []*types.ReceiptForStorage) { - return idx.Block(1), +func fakeReceipts() (idx.BlockID, []*types.ReceiptForStorage) { + return idx.BlockID(1), []*types.ReceiptForStorage{ { PostState: nil, diff --git a/gossip/evmstore/store_tx_position.go b/gossip/evmstore/store_tx_position.go index be74c2af2..b5520780b 100644 --- a/gossip/evmstore/store_tx_position.go +++ b/gossip/evmstore/store_tx_position.go @@ -11,7 +11,7 @@ import ( ) type TxPosition struct { - Block idx.Block + Block idx.BlockID Event hash.Event EventOffset uint32 BlockOffset uint32 diff --git a/gossip/filters/api.go b/gossip/filters/api.go index f1ef98d82..618130e0e 100644 --- a/gossip/filters/api.go +++ b/gossip/filters/api.go @@ -52,9 +52,9 @@ type filter struct { // Config is a provided API params. type Config struct { // Block range limit for logs search (indexed). - IndexedLogsBlockRangeLimit idx.Block + IndexedLogsBlockRangeLimit idx.BlockID // Block range limit for logs search (unindexed). - UnindexedLogsBlockRangeLimit idx.Block + UnindexedLogsBlockRangeLimit idx.BlockID } func DefaultConfig() Config { diff --git a/gossip/filters/filter.go b/gossip/filters/filter.go index 595fcd8c4..5a7add4e3 100644 --- a/gossip/filters/filter.go +++ b/gossip/filters/filter.go @@ -117,13 +117,13 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { if header == nil { return nil, nil } - head := idx.Block(header.Number.Uint64()) + head := idx.BlockID(header.Number.Uint64()) - begin := idx.Block(f.begin) + begin := idx.BlockID(f.begin) if f.begin < 0 { begin = head } - end := idx.Block(f.end) + end := idx.BlockID(f.end) if f.end < 0 { end = head } @@ -139,7 +139,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { } // indexedLogs returns the logs matching the filter criteria based on topics index. -func (f *Filter) indexedLogs(ctx context.Context, begin, end idx.Block) ([]*types.Log, error) { +func (f *Filter) indexedLogs(ctx context.Context, begin, end idx.BlockID) ([]*types.Log, error) { if end-begin > f.config.IndexedLogsBlockRangeLimit { return nil, fmt.Errorf("too wide blocks range, the limit is %d", f.config.IndexedLogsBlockRangeLimit) } @@ -172,7 +172,7 @@ func (f *Filter) indexedLogs(ctx context.Context, begin, end idx.Block) ([]*type // indexedLogs returns the logs matching the filter criteria based on raw block // iteration. 
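Reviewer note on the filters hunks: the block-range guards are unchanged apart from the type. With an unsigned block index, end-begin on an inverted range wraps around to a huge value and also trips the limit; the illustrative checkRange below adds an explicit ordering check for a clearer error (BlockID is a stand-in, the helper is not part of the patch):

package main

import "fmt"

type BlockID uint64

func checkRange(begin, end, limit BlockID) error {
	if begin > end {
		return fmt.Errorf("invalid range: begin %d > end %d", begin, end)
	}
	if end-begin > limit {
		return fmt.Errorf("too wide blocks range, the limit is %d", limit)
	}
	return nil
}

func main() {
	fmt.Println(checkRange(100, 200, 1000))  // <nil>
	fmt.Println(checkRange(100, 5000, 1000)) // too wide blocks range, the limit is 1000
}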
-func (f *Filter) unindexedLogs(ctx context.Context, begin, end idx.Block) (logs []*types.Log, err error) {
+func (f *Filter) unindexedLogs(ctx context.Context, begin, end idx.BlockID) (logs []*types.Log, err error) {
 	if end-begin > f.config.UnindexedLogsBlockRangeLimit {
 		return nil, fmt.Errorf("too wide blocks range, the limit is %d", f.config.UnindexedLogsBlockRangeLimit)
 	}
diff --git a/gossip/gasprice/gasprice.go b/gossip/gasprice/gasprice.go
index efbe52a00..9462f3600 100644
--- a/gossip/gasprice/gasprice.go
+++ b/gossip/gasprice/gasprice.go
@@ -53,7 +53,7 @@ type Config struct {
 }
 
 type Reader interface {
-	GetLatestBlockIndex() idx.Block
+	GetLatestBlockIndex() idx.BlockID
 	TotalGasPowerLeft() uint64
 	GetRules() opera.Rules
 	GetPendingRules() opera.Rules
diff --git a/gossip/gasprice/gasprice_test.go b/gossip/gasprice/gasprice_test.go
index d249555b6..df3005307 100644
--- a/gossip/gasprice/gasprice_test.go
+++ b/gossip/gasprice/gasprice_test.go
@@ -20,14 +20,14 @@ type fakeTx struct {
 }
 
 type TestBackend struct {
-	block             idx.Block
+	block             idx.BlockID
 	totalGasPowerLeft uint64
 	rules             opera.Rules
 	pendingRules      opera.Rules
 	pendingTxs        []fakeTx
 }
 
-func (t TestBackend) GetLatestBlockIndex() idx.Block {
+func (t TestBackend) GetLatestBlockIndex() idx.BlockID {
 	return t.block
 }
 
diff --git a/gossip/gpo_backend.go b/gossip/gpo_backend.go
index e6ba8105e..e6a026181 100644
--- a/gossip/gpo_backend.go
+++ b/gossip/gpo_backend.go
@@ -18,7 +18,7 @@ type GPOBackend struct {
 	txpool TxPool
 }
 
-func (b *GPOBackend) GetLatestBlockIndex() idx.Block {
+func (b *GPOBackend) GetLatestBlockIndex() idx.BlockID {
 	return b.store.GetLatestBlockIndex()
 }
 
diff --git a/gossip/handler.go b/gossip/handler.go
index 3bbaf1e0b..7d93d056d 100644
--- a/gossip/handler.go
+++ b/gossip/handler.go
@@ -68,13 +68,13 @@ func checkLenLimits(size int, v interface{}) error {
 }
 
 type dagNotifier interface {
-	SubscribeNewEpoch(ch chan<- idx.Epoch) notify.Subscription
+	SubscribeNewEpoch(ch chan<- idx.EpochID) notify.Subscription
 	SubscribeNewEmitted(ch chan<- *inter.EventPayload) notify.Subscription
 }
 
 type processCallback struct {
 	Event         func(*inter.EventPayload) error
-	SwitchEpochTo func(idx.Epoch) error
+	SwitchEpochTo func(idx.EpochID) error
 }
 
 // handlerConfig is the collection of initialization parameters to create a full
@@ -129,7 +129,7 @@ type handler struct {
 	notifier         dagNotifier
 	emittedEventsCh  chan *inter.EventPayload
 	emittedEventsSub notify.Subscription
-	newEpochsCh      chan idx.Epoch
+	newEpochsCh      chan idx.EpochID
 	newEpochsSub     notify.Subscription
 
 	quitProgressBradcast chan struct{}
@@ -218,7 +218,7 @@ func newHandler(
 		Suspend: func(_ string) bool {
 			return h.dagFetcher.Overloaded() || h.dagProcessor.Overloaded()
 		},
-		PeerEpoch: func(peer string) idx.Epoch {
+		PeerEpoch: func(peer string) idx.EpochID {
 			p := h.peers.Peer(peer)
 			if p == nil || p.Useless() {
 				return 0
@@ -332,7 +332,7 @@ func (h *handler) makeDagProcessor(checkers *eventcheck.Checkers) *dagprocessor.
 	return newProcessor
 }
 
-func (h *handler) isEventInterested(id hash.Event, epoch idx.Epoch) bool {
+func (h *handler) isEventInterested(id hash.Event, epoch idx.EpochID) bool {
 	if id.Epoch() != epoch {
 		return false
 	}
@@ -400,7 +400,7 @@ func (h *handler) Start(maxPeers int) {
 		h.emittedEventsCh = make(chan *inter.EventPayload, 4)
 		h.emittedEventsSub = h.notifier.SubscribeNewEmitted(h.emittedEventsCh)
 		// epoch changes
-		h.newEpochsCh = make(chan idx.Epoch, 4)
+		h.newEpochsCh = make(chan idx.EpochID, 4)
 		h.newEpochsSub = h.notifier.SubscribeNewEpoch(h.newEpochsCh)
 
 	h.loopsWg.Add(3)
@@ -1176,8 +1176,8 @@ func (h *handler) peerInfoCollectionLoop(stop <-chan struct{}) {
 type NodeInfo struct {
 	Network     uint64      `json:"network"` // network ID
 	Genesis     common.Hash `json:"genesis"` // SHA3 hash of the host's genesis object
-	Epoch       idx.Epoch   `json:"epoch"`
-	NumOfBlocks idx.Block   `json:"blocks"`
+	Epoch       idx.EpochID `json:"epoch"`
+	NumOfBlocks idx.BlockID `json:"blocks"`
 	//Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules
 }
 
diff --git a/gossip/heavycheck_test.go b/gossip/heavycheck_test.go
index ae8252cd1..18363a16c 100644
--- a/gossip/heavycheck_test.go
+++ b/gossip/heavycheck_test.go
@@ -20,7 +20,7 @@ type LLRHeavyCheckTestSuite struct {
 	env *testEnv
 
 	me *inter.MutableEventPayload
-	startEpoch idx.Epoch
+	startEpoch idx.EpochID
 }
 
 func (s *LLRHeavyCheckTestSuite) SetupSuite() {
@@ -40,7 +40,7 @@ func (s *LLRHeavyCheckTestSuite) SetupSuite() {
 
 	s.env = env
 	s.me = mutableEventPayloadFromImmutable(e)
-	s.startEpoch = idx.Epoch(startEpoch)
+	s.startEpoch = idx.EpochID(startEpoch)
 }
 
 func (s *LLRHeavyCheckTestSuite) TearDownSuite() {
@@ -78,10 +78,10 @@ func (s *LLRHeavyCheckTestSuite) TestHeavyCheckValidateEvent() {
 			nil,
 			func() {
 				s.me.SetVersion(1)
-				s.me.SetEpoch(idx.Epoch(s.startEpoch))
+				s.me.SetEpoch(idx.EpochID(s.startEpoch))
 				s.me.SetCreator(3)
-				s.me.SetSeq(idx.Event(1))
-				s.me.SetFrame(idx.Frame(1))
+				s.me.SetSeq(idx.EventID(1))
+				s.me.SetFrame(idx.FrameID(1))
 				s.me.SetLamport(idx.Lamport(1))
 
 				s.me.SetPayloadHash(inter.CalcPayloadHash(s.me))
@@ -97,7 +97,7 @@ func (s *LLRHeavyCheckTestSuite) TestHeavyCheckValidateEvent() {
 			epochcheck.ErrNotRelevant,
 			func() {
 				s.me.SetVersion(1)
-				s.me.SetEpoch(idx.Epoch(s.startEpoch + 1))
+				s.me.SetEpoch(idx.EpochID(s.startEpoch + 1))
 				s.me.SetCreator(3)
 
 				s.me.SetPayloadHash(inter.CalcPayloadHash(s.me))
@@ -113,9 +113,9 @@ func (s *LLRHeavyCheckTestSuite) TestHeavyCheckValidateEvent() {
 			epochcheck.ErrAuth,
 			func() {
 				s.me.SetVersion(1)
-				s.me.SetEpoch(idx.Epoch(s.startEpoch))
-				s.me.SetSeq(idx.Event(1))
-				s.me.SetFrame(idx.Frame(1))
+				s.me.SetEpoch(idx.EpochID(s.startEpoch))
+				s.me.SetSeq(idx.EventID(1))
+				s.me.SetFrame(idx.FrameID(1))
 				s.me.SetLamport(idx.Lamport(1))
 				invalidCreator := idx.ValidatorID(100)
 				s.me.SetCreator(invalidCreator)
@@ -133,10 +133,10 @@ func (s *LLRHeavyCheckTestSuite) TestHeavyCheckValidateEvent() {
 			heavycheck.ErrWrongEventSig,
 			func() {
 				s.me.SetVersion(1)
-				s.me.SetEpoch(idx.Epoch(s.startEpoch))
+				s.me.SetEpoch(idx.EpochID(s.startEpoch))
 				s.me.SetCreator(3)
-				s.me.SetSeq(idx.Event(1))
-				s.me.SetFrame(idx.Frame(1))
+				s.me.SetSeq(idx.EventID(1))
+				s.me.SetFrame(idx.FrameID(1))
 				s.me.SetLamport(idx.Lamport(1))
 
 				s.me.SetPayloadHash(inter.CalcPayloadHash(s.me))
@@ -152,10 +152,10 @@ func (s *LLRHeavyCheckTestSuite) TestHeavyCheckValidateEvent() {
 			heavycheck.ErrMalformedTxSig,
 			func() {
 				s.me.SetVersion(1)
-				s.me.SetEpoch(idx.Epoch(s.startEpoch))
+				s.me.SetEpoch(idx.EpochID(s.startEpoch))
 				s.me.SetCreator(3)
-				s.me.SetSeq(idx.Event(1))
-				s.me.SetFrame(idx.Frame(1))
+				s.me.SetSeq(idx.EventID(1))
+				s.me.SetFrame(idx.FrameID(1))
 				s.me.SetLamport(idx.Lamport(1))
 				h := hash.BytesToEvent(bytes.Repeat([]byte{math.MaxUint8}, 32))
 				tx1 := types.NewTx(&types.LegacyTx{
@@ -183,9 +183,9 @@ func (s *LLRHeavyCheckTestSuite) TestHeavyCheckValidateEvent() {
 			heavycheck.ErrWrongPayloadHash,
 			func() {
 				s.me.SetVersion(1)
-				s.me.SetEpoch(idx.Epoch(s.startEpoch))
-				s.me.SetSeq(idx.Event(1))
-				s.me.SetFrame(idx.Frame(1))
+				s.me.SetEpoch(idx.EpochID(s.startEpoch))
+				s.me.SetSeq(idx.EventID(1))
+				s.me.SetFrame(idx.FrameID(1))
 				s.me.SetLamport(idx.Lamport(1))
 				s.me.SetCreator(3)
diff --git a/gossip/peer.go b/gossip/peer.go
index 06ccaa556..134f7f0dd 100644
--- a/gossip/peer.go
+++ b/gossip/peer.go
@@ -42,8 +42,8 @@ const (
 // about a connected peer.
 type PeerInfo struct {
 	Version     uint        `json:"version"` // protocol version negotiated
-	Epoch       idx.Epoch   `json:"epoch"`
-	NumOfBlocks idx.Block   `json:"blocks"`
+	Epoch       idx.EpochID `json:"epoch"`
+	NumOfBlocks idx.BlockID `json:"blocks"`
 }
 
 type broadcastItem struct {
diff --git a/gossip/proclogger/llr_logger.go b/gossip/proclogger/llr_logger.go
index 03047d285..572305f4f 100644
--- a/gossip/proclogger/llr_logger.go
+++ b/gossip/proclogger/llr_logger.go
@@ -12,15 +12,15 @@ import (
 )
 
 type dagSum struct {
-	connected       idx.Event
+	connected       idx.EventID
 	totalProcessing time.Duration
 }
 
 type llrSum struct {
-	bvs idx.Block
-	brs idx.Block
-	evs idx.Epoch
-	ers idx.Epoch
+	bvs idx.BlockID
+	brs idx.BlockID
+	evs idx.EpochID
+	ers idx.EpochID
 }
 
 type Logger struct {
@@ -29,8 +29,8 @@ type Logger struct {
 	llrSum llrSum
 
 	// latest logged data
-	lastEpoch     idx.Epoch
-	lastBlock     idx.Block
+	lastEpoch     idx.EpochID
+	lastBlock     idx.BlockID
 	lastID        hash.Event
 	lastEventTime inter.Timestamp
 	lastLlrTime   inter.Timestamp
diff --git a/gossip/protocol.go b/gossip/protocol.go
index 145118da3..7d25e11c5 100644
--- a/gossip/protocol.go
+++ b/gossip/protocol.go
@@ -142,8 +142,8 @@ type handshakeData struct {
 
 // PeerProgress is synchronization status of a peer
 type PeerProgress struct {
-	Epoch            idx.Epoch
-	LastBlockIdx     idx.Block
+	Epoch            idx.EpochID
+	LastBlockIdx     idx.BlockID
 	LastBlockAtropos hash.Event
 	// Currently unused
 	HighestLamport idx.Lamport
diff --git a/gossip/protocols/dag/dagstream/dagstreamleecher/leecher.go b/gossip/protocols/dag/dagstream/dagstreamleecher/leecher.go
index 8c57db00e..0f44e8b89 100644
--- a/gossip/protocols/dag/dagstream/dagstreamleecher/leecher.go
+++ b/gossip/protocols/dag/dagstream/dagstreamleecher/leecher.go
@@ -25,7 +25,7 @@ type Leecher struct {
 	// State
 	session sessionState
 
-	epoch idx.Epoch
+	epoch idx.EpochID
 
 	emptyState   bool
 	forceSyncing bool
@@ -33,7 +33,7 @@ type Leecher struct {
 }
 
 // New creates an events downloader to request events based on lexicographic event streams
-func New(epoch idx.Epoch, emptyState bool, cfg Config, callback Callbacks) *Leecher {
+func New(epoch idx.EpochID, emptyState bool, cfg Config, callback Callbacks) *Leecher {
 	l := &Leecher{
 		cfg:      cfg,
 		callback: callback,
@@ -60,7 +60,7 @@ type Callbacks struct {
 	RequestChunk func(peer string, r dagstream.Request) error
 
 	Suspend func(peer string) bool
-	PeerEpoch func(peer string) idx.Epoch
+	PeerEpoch func(peer string) idx.EpochID
 }
 
 type sessionState struct {
@@ -135,7 +135,7 @@ func (d *Leecher) selectSessionPeerCandidates() []string {
 	return selected
 }
 
-func getSessionID(epoch idx.Epoch, try uint32) uint32 {
+func getSessionID(epoch idx.EpochID, try uint32) uint32 {
 	return (uint32(epoch) << 12) ^ try
 }
 
@@ -167,7 +167,7 @@ func (d *Leecher) startSession(candidates []string) {
 		return d.callback.RequestChunk(peer, dagstream.Request{
 			Session:   session,
-			Limit:     ltypes.Metric{Num: idx.Event(maxNum), Size: maxSize},
+			Limit:     ltypes.Metric{Num: idx.EventID(maxNum), Size: maxSize},
 			Type:      typ,
 			MaxChunks: chunks,
 		})
@@ -192,7 +192,7 @@ func (d *Leecher) startSession(candidates []string) {
 	d.forceSyncing = false
 }
 
-func (d *Leecher) OnNewEpoch(myEpoch idx.Epoch) {
+func (d *Leecher) OnNewEpoch(myEpoch idx.EpochID) {
 	d.Mu.Lock()
 	defer d.Mu.Unlock()
 
diff --git a/gossip/protocols/dag/dagstream/dagstreamleecher/leecher_test.go b/gossip/protocols/dag/dagstream/dagstreamleecher/leecher_test.go
index 26a48cee9..e2a889bbb 100644
--- a/gossip/protocols/dag/dagstream/dagstreamleecher/leecher_test.go
+++ b/gossip/protocols/dag/dagstream/dagstreamleecher/leecher_test.go
@@ -33,7 +33,7 @@ func testLeecherNoDeadlocks(t *testing.T, maxPeers int) {
 	config.MaxSessionRestart = 5 * time.Millisecond * 5
 	config.BaseProgressWatchdog = 3 * time.Millisecond * 5
 	config.Session.RecheckInterval = time.Millisecond
-	epoch := idx.Epoch(1)
+	epoch := idx.EpochID(1)
 	leecher := New(epoch, rand.IntN(2) == 0, config, Callbacks{
 		IsProcessed: func(id hash.Event) bool {
 			return rand.IntN(2) == 0
@@ -45,8 +45,8 @@ func testLeecherNoDeadlocks(t *testing.T, maxPeers int) {
 		Suspend: func(peer string) bool {
 			return rand.IntN(10) == 0
 		},
-		PeerEpoch: func(peer string) idx.Epoch {
-			return 1 + epoch/2 + idx.Epoch(rand.IntN(int(epoch*2)))
+		PeerEpoch: func(peer string) idx.EpochID {
+			return 1 + epoch/2 + idx.EpochID(rand.IntN(int(epoch*2)))
 		},
 	})
 	terminated := false
diff --git a/gossip/service.go b/gossip/service.go
index 14073faee..2795f57dc 100644
--- a/gossip/service.go
+++ b/gossip/service.go
@@ -61,7 +61,7 @@ type ServiceFeed struct {
 	newLogs  notify.Feed
 }
 
-func (f *ServiceFeed) SubscribeNewEpoch(ch chan<- idx.Epoch) notify.Subscription {
+func (f *ServiceFeed) SubscribeNewEpoch(ch chan<- idx.EpochID) notify.Subscription {
 	return f.scope.Track(f.newEpoch.Subscribe(ch))
 }
 
@@ -145,7 +145,7 @@ type Service struct {
 	procLogger *proclogger.Logger
 
 	stopped bool
-	haltCheck func(oldEpoch, newEpoch idx.Epoch, time time.Time) bool
+	haltCheck func(oldEpoch, newEpoch idx.EpochID, time time.Time) bool
 
 	tflusher PeriodicFlusher
 
@@ -156,7 +156,7 @@ type Service struct {
 
 func NewService(stack *node.Node, config Config, store *Store, blockProc BlockProc, engine lachesis.Consensus, dagIndexer *vecmt.Index,
 	newTxPool func(evmcore.StateReader) TxPool,
-	haltCheck func(oldEpoch, newEpoch idx.Epoch, age time.Time) bool) (*Service, error) {
+	haltCheck func(oldEpoch, newEpoch idx.EpochID, age time.Time) bool) (*Service, error) {
 	if err := config.Validate(); err != nil {
 		return nil, err
 	}
diff --git a/gossip/store_block.go b/gossip/store_block.go
index 3e3caff78..fd3415b0b 100644
--- a/gossip/store_block.go
+++ b/gossip/store_block.go
@@ -47,7 +47,7 @@ func (s *Store) SetGenesisID(val hash.Hash) {
 }
 
 // SetBlock stores chain block.
-func (s *Store) SetBlock(n idx.Block, b *inter.Block) {
+func (s *Store) SetBlock(n idx.BlockID, b *inter.Block) {
 	s.rlp.Set(s.table.Blocks, n.Bytes(), b)
 
 	// Add to LRU cache.
@@ -55,7 +55,7 @@ func (s *Store) SetBlock(n idx.Block, b *inter.Block) {
 }
 
 // GetBlock returns stored block.
-func (s *Store) GetBlock(n idx.Block) *inter.Block {
+func (s *Store) GetBlock(n idx.BlockID) *inter.Block {
 	// Get block from LRU cache first.
 	if c, ok := s.cache.Blocks.Get(n); ok {
 		return c.(*inter.Block)
@@ -71,12 +71,12 @@ func (s *Store) GetBlock(n idx.Block) *inter.Block {
 	return block
 }
 
-func (s *Store) HasBlock(n idx.Block) bool {
+func (s *Store) HasBlock(n idx.BlockID) bool {
 	has, _ := s.table.Blocks.Has(n.Bytes())
 	return has
 }
 
-func (s *Store) ForEachBlock(fn func(index idx.Block, block *inter.Block)) {
+func (s *Store) ForEachBlock(fn func(index idx.BlockID, block *inter.Block)) {
 	it := s.table.Blocks.NewIterator(nil, nil)
 	defer it.Release()
 	for it.Next() {
@@ -90,7 +90,7 @@ func (s *Store) ForEachBlock(fn func(index idx.Block, block *inter.Block)) {
 }
 
 // SetBlockIndex stores chain block index.
-func (s *Store) SetBlockIndex(id common.Hash, n idx.Block) {
+func (s *Store) SetBlockIndex(id common.Hash, n idx.BlockID) {
 	if err := s.table.BlockHashes.Put(id.Bytes(), n.Bytes()); err != nil {
 		s.Log.Crit("Failed to put key-value", "err", err)
 	}
@@ -99,10 +99,10 @@ func (s *Store) SetBlockIndex(id common.Hash, n idx.Block) {
 }
 
 // GetBlockIndex returns stored block index.
-func (s *Store) GetBlockIndex(id hash.Event) *idx.Block {
+func (s *Store) GetBlockIndex(id hash.Event) *idx.BlockID {
 	nVal, ok := s.cache.BlockHashes.Get(id)
 	if ok {
-		n, ok := nVal.(idx.Block)
+		n, ok := nVal.(idx.BlockID)
 		if ok {
 			return &n
 		}
@@ -114,7 +114,7 @@ func (s *Store) GetBlockIndex(id hash.Event) *idx.Block {
 	}
 	if buf == nil {
 		if id == s.fakeGenesisHash() {
-			zero := idx.Block(0)
+			zero := idx.BlockID(0)
 			return &zero
 		}
 		return nil
@@ -127,14 +127,14 @@ func (s *Store) GetBlockIndex(id hash.Event) *idx.Block {
 }
 
 // SetGenesisBlockIndex stores genesis block index.
-func (s *Store) SetGenesisBlockIndex(n idx.Block) {
+func (s *Store) SetGenesisBlockIndex(n idx.BlockID) {
 	if err := s.table.Genesis.Put([]byte("i"), n.Bytes()); err != nil {
 		s.Log.Crit("Failed to put key-value", "err", err)
 	}
 }
 
 // GetGenesisBlockIndex returns stored genesis block index.
-func (s *Store) GetGenesisBlockIndex() *idx.Block {
+func (s *Store) GetGenesisBlockIndex() *idx.BlockID {
 	buf, err := s.table.Genesis.Get([]byte("i"))
 	if err != nil {
 		s.Log.Crit("Failed to get key-value", "err", err)
@@ -159,14 +159,14 @@ func (s *Store) GetGenesisTime() inter.Timestamp {
 	return block.Time
 }
 
-func (s *Store) SetEpochBlock(b idx.Block, e idx.Epoch) {
+func (s *Store) SetEpochBlock(b idx.BlockID, e idx.EpochID) {
 	err := s.table.EpochBlocks.Put((math.MaxUint64 - b).Bytes(), e.Bytes())
 	if err != nil {
 		s.Log.Crit("Failed to set key-value", "err", err)
 	}
 }
 
-func (s *Store) FindBlockEpoch(b idx.Block) idx.Epoch {
+func (s *Store) FindBlockEpoch(b idx.BlockID) idx.EpochID {
 	if c, ok := s.cache.Blocks.Get(b); ok {
 		return c.(*inter.Block).Epoch
 	}
@@ -179,7 +179,7 @@ func (s *Store) FindBlockEpoch(b idx.Block) idx.Epoch {
 	return idx.BytesToEpoch(it.Value())
 }
 
-func (s *Store) GetBlockTxs(n idx.Block, block *inter.Block) types.Transactions {
+func (s *Store) GetBlockTxs(n idx.BlockID, block *inter.Block) types.Transactions {
 	if cached := s.evm.GetCachedEvmBlock(n); cached != nil {
 		return cached.Transactions
 	}
diff --git a/gossip/store_decided_state.go b/gossip/store_decided_state.go
index 8d57cef65..af4575a8e 100644
--- a/gossip/store_decided_state.go
+++ b/gossip/store_decided_state.go
@@ -19,7 +19,7 @@ type BlockEpochState struct {
 }
 
 // TODO propose to pass bs, es arguments by pointer
-func (s *Store) SetHistoryBlockEpochState(epoch idx.Epoch, bs iblockproc.BlockState, es iblockproc.EpochState) {
+func (s *Store) SetHistoryBlockEpochState(epoch idx.EpochID, bs iblockproc.BlockState, es iblockproc.EpochState) {
 	bs, es = bs.Copy(), es.Copy()
 	bes := &BlockEpochState{
 		BlockState: &bs,
@@ -31,7 +31,7 @@ func (s *Store) SetHistoryBlockEpochState(epoch idx.Epoch, bs iblockproc.BlockSt
 	s.cache.BlockEpochStateHistory.Add(epoch, bes, nominalSize)
 }
 
-func (s *Store) GetHistoryBlockEpochState(epoch idx.Epoch) (*iblockproc.BlockState, *iblockproc.EpochState) {
+func (s *Store) GetHistoryBlockEpochState(epoch idx.EpochID) (*iblockproc.BlockState, *iblockproc.EpochState) {
 	// Get HistoryBlockEpochState from LRU cache first.
 	if v, ok := s.cache.BlockEpochStateHistory.Get(epoch); ok {
 		bes := v.(*BlockEpochState)
@@ -68,7 +68,7 @@ func (s *Store) ForEachHistoryBlockEpochState(fn func(iblockproc.BlockState, ibl
 	}
 }
 
-func (s *Store) GetHistoryEpochState(epoch idx.Epoch) *iblockproc.EpochState {
+func (s *Store) GetHistoryEpochState(epoch idx.EpochID) *iblockproc.EpochState {
 	// check current BlockEpochState as a cache
 	if v := s.cache.BlockEpochState.Load(); v != nil {
 		bes := v.(*BlockEpochState)
@@ -81,7 +81,7 @@ func (s *Store) GetHistoryEpochState(epoch idx.Epoch) *iblockproc.EpochState {
 	return es
 }
 
-func (s *Store) HasHistoryBlockEpochState(epoch idx.Epoch) bool {
+func (s *Store) HasHistoryBlockEpochState(epoch idx.EpochID) bool {
 	has, _ := s.table.BlockEpochStateHistory.Has(epoch.Bytes())
 	return has
 }
@@ -130,7 +130,7 @@ func (s *Store) GetBlockEpochState() (iblockproc.BlockState, iblockproc.EpochSta
 }
 
 // GetEpoch retrieves the current epoch
-func (s *Store) GetEpoch() idx.Epoch {
+func (s *Store) GetEpoch() idx.EpochID {
 	return s.GetEpochState().Epoch
 }
 
@@ -140,13 +140,13 @@ func (s *Store) GetValidators() *ltypes.Validators {
 }
 
 // GetEpochValidators retrieves the current epoch and validators atomically
-func (s *Store) GetEpochValidators() (*ltypes.Validators, idx.Epoch) {
+func (s *Store) GetEpochValidators() (*ltypes.Validators, idx.EpochID) {
 	es := s.GetEpochState()
 	return es.Validators, es.Epoch
 }
 
 // GetLatestBlockIndex retrieves the current block number
-func (s *Store) GetLatestBlockIndex() idx.Block {
+func (s *Store) GetLatestBlockIndex() idx.BlockID {
 	return s.GetBlockState().LastBlock.Idx
 }
 
@@ -161,7 +161,7 @@ func (s *Store) GetEvmChainConfig() *ethparams.ChainConfig {
 }
 
 // GetEpochRules retrieves current network rules and epoch atomically
-func (s *Store) GetEpochRules() (opera.Rules, idx.Epoch) {
+func (s *Store) GetEpochRules() (opera.Rules, idx.EpochID) {
 	es := s.GetEpochState()
 	return es.Rules, es.Epoch
 }
diff --git a/gossip/store_epoch.go b/gossip/store_epoch.go
index d11809b15..5e71eeb0d 100644
--- a/gossip/store_epoch.go
+++ b/gossip/store_epoch.go
@@ -23,7 +23,7 @@ var (
 
 type (
 	epochStore struct {
-		epoch idx.Epoch
+		epoch idx.EpochID
 		db    kvdb.Store
 		table struct {
 			LastEvents kvdb.Store `table:"t"`
@@ -39,7 +39,7 @@ type (
 	}
 )
 
-func newEpochStore(epoch idx.Epoch, db kvdb.Store) *epochStore {
+func newEpochStore(epoch idx.EpochID, db kvdb.Store) *epochStore {
 	es := &epochStore{
 		epoch: epoch,
 		db:    db,
@@ -67,7 +67,7 @@ func (s *Store) getAnyEpochStore() *epochStore {
 }
 
 // getEpochStore is safe for concurrent use.
-func (s *Store) getEpochStore(epoch idx.Epoch) *epochStore {
+func (s *Store) getEpochStore(epoch idx.EpochID) *epochStore {
 	es := s.getAnyEpochStore()
 	if es.epoch != epoch {
 		return nil
@@ -75,7 +75,7 @@ func (s *Store) getEpochStore(epoch idx.Epoch) *epochStore {
 	return es
 }
 
-func (s *Store) resetEpochStore(newEpoch idx.Epoch) {
+func (s *Store) resetEpochStore(newEpoch idx.EpochID) {
 	oldEs := s.epochStore.Load()
 	// create new DB
 	s.createEpochStore(newEpoch)
@@ -91,7 +91,7 @@ func (s *Store) resetEpochStore(newEpoch idx.Epoch) {
 	}
 }
 
-func (s *Store) loadEpochStore(epoch idx.Epoch) {
+func (s *Store) loadEpochStore(epoch idx.EpochID) {
 	if s.epochStore.Load() != nil {
 		return
 	}
@@ -106,7 +106,7 @@ func (s *Store) closeEpochStore() error {
 	return es.db.Close()
 }
 
-func (s *Store) createEpochStore(epoch idx.Epoch) {
+func (s *Store) createEpochStore(epoch idx.EpochID) {
 	// create new DB
 	name := fmt.Sprintf("gossip-%d", epoch)
 	db, err := s.dbs.OpenDB(name)
diff --git a/gossip/store_event.go b/gossip/store_event.go
index 29afa6f32..c9959b491 100644
--- a/gossip/store_event.go
+++ b/gossip/store_event.go
@@ -99,13 +99,13 @@ func (s *Store) forEachEvent(it ethdb.Iterator, onEvent func(event *inter.EventP
 	}
 }
 
-func (s *Store) ForEachEpochEvent(epoch idx.Epoch, onEvent func(event *inter.EventPayload) bool) {
+func (s *Store) ForEachEpochEvent(epoch idx.EpochID, onEvent func(event *inter.EventPayload) bool) {
 	it := s.table.Events.NewIterator(epoch.Bytes(), nil)
 	defer it.Release()
 	s.forEachEvent(it, onEvent)
 }
 
-func (s *Store) ForEachEvent(start idx.Epoch, onEvent func(event *inter.EventPayload) bool) {
+func (s *Store) ForEachEvent(start idx.EpochID, onEvent func(event *inter.EventPayload) bool) {
 	it := s.table.Events.NewIterator(nil, start.Bytes())
 	defer it.Release()
 	s.forEachEvent(it, onEvent)
@@ -121,7 +121,7 @@ func (s *Store) ForEachEventRLP(start []byte, onEvent func(key hash.Event, event
 	}
 }
 
-func (s *Store) FindEventHashes(epoch idx.Epoch, lamport idx.Lamport, hashPrefix []byte) hash.Events {
+func (s *Store) FindEventHashes(epoch idx.EpochID, lamport idx.Lamport, hashPrefix []byte) hash.Events {
 	prefix := bytes.NewBuffer(epoch.Bytes())
 	prefix.Write(lamport.Bytes())
 	prefix.Write(hashPrefix)
diff --git a/gossip/store_heads.go b/gossip/store_heads.go
index 36ed82122..f89017bf0 100644
--- a/gossip/store_heads.go
+++ b/gossip/store_heads.go
@@ -81,7 +81,7 @@ func (es *epochStore) FlushHeads() {
 }
 
 // GetHeadsSlice returns IDs of all the epoch events with no descendants
-func (s *Store) GetHeadsSlice(epoch idx.Epoch) hash.Events {
+func (s *Store) GetHeadsSlice(epoch idx.EpochID) hash.Events {
 	heads := s.GetHeads(epoch)
 	heads.RLock()
 	defer heads.RUnlock()
@@ -89,7 +89,7 @@ func (s *Store) GetHeadsSlice(epoch idx.Epoch) hash.Events {
 }
 
 // GetHeads returns set of all the epoch event IDs with no descendants
-func (s *Store) GetHeads(epoch idx.Epoch) *concurrent.EventsSet {
+func (s *Store) GetHeads(epoch idx.EpochID) *concurrent.EventsSet {
 	es := s.getEpochStore(epoch)
 	if es == nil {
 		return nil
@@ -98,7 +98,7 @@ func (s *Store) GetHeads(epoch idx.Epoch) *concurrent.EventsSet {
 	return es.GetHeads()
 }
 
-func (s *Store) SetHeads(epoch idx.Epoch, ids *concurrent.EventsSet) {
+func (s *Store) SetHeads(epoch idx.EpochID, ids *concurrent.EventsSet) {
 	es := s.getEpochStore(epoch)
 	if es == nil {
 		return
diff --git a/gossip/store_last_events.go b/gossip/store_last_events.go
index 11f4bc9f8..05a38a4ed 100644
--- a/gossip/store_last_events.go
+++ b/gossip/store_last_events.go
@@ -82,7 +82,7 @@ func (es *epochStore) FlushLastEvents() {
 }
 
 // GetLastEvents returns latest connected epoch events from each validator
-func (s *Store) GetLastEvents(epoch idx.Epoch) *concurrent.ValidatorEventsSet {
+func (s *Store) GetLastEvents(epoch idx.EpochID) *concurrent.ValidatorEventsSet {
 	es := s.getEpochStore(epoch)
 	if es == nil {
 		return nil
@@ -92,7 +92,7 @@ func (s *Store) GetLastEvents(epoch idx.Epoch) *concurrent.ValidatorEventsSet {
 }
 
 // GetLastEvent returns latest connected epoch event from specified validator
-func (s *Store) GetLastEvent(epoch idx.Epoch, vid idx.ValidatorID) *hash.Event {
+func (s *Store) GetLastEvent(epoch idx.EpochID, vid idx.ValidatorID) *hash.Event {
 	es := s.getEpochStore(epoch)
 	if es == nil {
 		return nil
@@ -108,7 +108,7 @@ func (s *Store) GetLastEvent(epoch idx.Epoch, vid idx.ValidatorID) *hash.Event {
 	return &last
 }
 
-func (s *Store) SetLastEvents(epoch idx.Epoch, ids *concurrent.ValidatorEventsSet) {
+func (s *Store) SetLastEvents(epoch idx.EpochID, ids *concurrent.ValidatorEventsSet) {
 	es := s.getEpochStore(epoch)
 	if es == nil {
 		return
diff --git a/gossip/store_llr_block.go b/gossip/store_llr_block.go
index ac5d385b9..f864797e1 100644
--- a/gossip/store_llr_block.go
+++ b/gossip/store_llr_block.go
@@ -7,7 +7,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 )
 
-func (s *Store) GetFullBlockRecord(n idx.Block) *ibr.LlrFullBlockRecord {
+func (s *Store) GetFullBlockRecord(n idx.BlockID) *ibr.LlrFullBlockRecord {
 	block := s.GetBlock(n)
 	if block == nil {
 		return nil
@@ -20,7 +20,7 @@ func (s *Store) GetFullBlockRecord(n idx.Block) *ibr.LlrFullBlockRecord {
 	return ibr.FullBlockRecordFor(block, txs, receipts)
 }
 
-func (s *Store) GetFullEpochRecord(epoch idx.Epoch) *ier.LlrFullEpochRecord {
+func (s *Store) GetFullEpochRecord(epoch idx.EpochID) *ier.LlrFullEpochRecord {
 	// Use current state if current epoch is requested.
 	if epoch == s.GetEpoch() {
 		state := s.getBlockEpochState()
diff --git a/gossip/store_migration.go b/gossip/store_migration.go
index 85288deb1..0f252368d 100644
--- a/gossip/store_migration.go
+++ b/gossip/store_migration.go
@@ -50,7 +50,7 @@ func unsupportedMigration() error {
 
 type legacyUpgradeHeight struct {
 	Upgrades opera.Upgrades
-	Height   idx.Block
+	Height   idx.BlockID
 }
 
 func (s *Store) addTimeIntoUpgradeHeights() error {
diff --git a/integration/assembly.go b/integration/assembly.go
index ff420beac..90e5e7cf2 100644
--- a/integration/assembly.go
+++ b/integration/assembly.go
@@ -49,7 +49,7 @@ func getStores(producer kvdb.FlushableDBProducer, cfg Configs) (*gossip.Store, *
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to open lachesis database: %w", err)
 	}
-	cGetEpochDB := func(epoch idx.Epoch) kvdb.Store {
+	cGetEpochDB := func(epoch idx.EpochID) kvdb.Store {
 		cEpochDb, err := producer.OpenDB(fmt.Sprintf("lachesis-%d", epoch))
 		if err != nil {
 			panic(fmt.Errorf("failed to open lachesis-%d database: %w", epoch, err))
diff --git a/integration/makefakegenesis/genesis.go b/integration/makefakegenesis/genesis.go
index 7ecd2e62f..eae1c388c 100644
--- a/integration/makefakegenesis/genesis.go
+++ b/integration/makefakegenesis/genesis.go
@@ -50,7 +50,7 @@ func FakeGenesisStoreWithRules(num idx.Validator, balance, stake *big.Int, rules
 	return FakeGenesisStoreWithRulesAndStart(num, balance, stake, rules, 2, 1)
 }
 
-func FakeGenesisStoreWithRulesAndStart(num idx.Validator, balance, stake *big.Int, rules opera.Rules, epoch idx.Epoch, block idx.Block) *genesisstore.Store {
+func FakeGenesisStoreWithRulesAndStart(num idx.Validator, balance, stake *big.Int, rules opera.Rules, epoch idx.EpochID, block idx.BlockID) *genesisstore.Store {
 	builder := makegenesis.NewGenesisBuilder()
 
 	validators := GetFakeValidators(num)
@@ -153,7 +153,7 @@ func txBuilder() func(calldata []byte, addr common.Address) *types.Transaction {
 	}
 }
 
-func GetGenesisTxs(sealedEpoch idx.Epoch, validators gpos.Validators, totalSupply *big.Int, delegations []drivercall.Delegation, driverOwner common.Address) types.Transactions {
+func GetGenesisTxs(sealedEpoch idx.EpochID, validators gpos.Validators, totalSupply *big.Int, delegations []drivercall.Delegation, driverOwner common.Address) types.Transactions {
 	buildTx := txBuilder()
 	internalTxs := make(types.Transactions, 0, 15)
 	// initialization
diff --git a/inter/block.go b/inter/block.go
index 6e5c007fb..46af06f6a 100644
--- a/inter/block.go
+++ b/inter/block.go
@@ -44,7 +44,7 @@ type Block struct {
 	TransactionHashes []common.Hash
 
 	// Fields required for linking the block internally to a lachesis epoch.
-	Epoch idx.Epoch
+	Epoch idx.EpochID
 
 	// The duration of this block, being the difference between the predecessor
 	// block's timestamp and this block's timestamp, in nanoseconds.
@@ -211,7 +211,7 @@ func (b *BlockBuilder) WithPrevRandao(prevRandao common.Hash) *BlockBuilder {
 	return b
 }
 
-func (b *BlockBuilder) WithEpoch(epoch idx.Epoch) *BlockBuilder {
+func (b *BlockBuilder) WithEpoch(epoch idx.EpochID) *BlockBuilder {
 	b.block.Epoch = epoch
 	return b
 }
diff --git a/inter/event.go b/inter/event.go
index 3f675baf3..fef85ef69 100644
--- a/inter/event.go
+++ b/inter/event.go
@@ -38,8 +38,8 @@ type EventI interface {
 type EventLocator struct {
 	BaseHash    hash.Hash
 	NetForkID   uint16
-	Epoch       idx.Epoch
-	Seq         idx.Event
+	Epoch       idx.EpochID
+	Seq         idx.EventID
 	Lamport     idx.Lamport
 	Creator     idx.ValidatorID
 	PayloadHash hash.Hash
diff --git a/inter/event_serializer.go b/inter/event_serializer.go
index ca3aadd19..72aaf4398 100644
--- a/inter/event_serializer.go
+++ b/inter/event_serializer.go
@@ -129,7 +129,7 @@ func eventUnmarshalCSER(r *cser.Reader, e *MutableEventPayload) (err error) {
 		h := [24]byte{}
 		r.FixedBytes(h[:])
 		eID := ltypes.MutableBaseEvent{}
-		eID.SetEpoch(idx.Epoch(epoch))
+		eID.SetEpoch(idx.EpochID(epoch))
 		eID.SetLamport(idx.Lamport(lamport - lamportDiff))
 		eID.SetID(h)
 		parents.Add(eID.ID())
@@ -163,11 +163,11 @@ func eventUnmarshalCSER(r *cser.Reader, e *MutableEventPayload) (err error) {
 
 	e.SetVersion(version)
 	e.SetNetForkID(netForkID)
-	e.SetEpoch(idx.Epoch(epoch))
+	e.SetEpoch(idx.EpochID(epoch))
 	e.SetLamport(idx.Lamport(lamport))
 	e.SetCreator(idx.ValidatorID(creator))
-	e.SetSeq(idx.Event(seq))
-	e.SetFrame(idx.Frame(frame))
+	e.SetSeq(idx.EventID(seq))
+	e.SetFrame(idx.FrameID(frame))
 	e.SetCreationTime(Timestamp(creationTime))
 	e.SetMedianTime(Timestamp(int64(creationTime) - medianTimeDiff))
 	e.SetGasPowerUsed(gasPowerUsed)
@@ -217,8 +217,8 @@ func (bvs *LlrBlockVotes) UnmarshalCSER(r *cser.Reader) error {
 	for i := range records {
 		r.FixedBytes(records[i][:])
 	}
-	bvs.Start = idx.Block(start)
-	bvs.Epoch = idx.Epoch(epoch)
+	bvs.Start = idx.BlockID(start)
+	bvs.Epoch = idx.EpochID(epoch)
 	bvs.Votes = records
 	return nil
 }
@@ -233,7 +233,7 @@ func (ers *LlrEpochVote) UnmarshalCSER(r *cser.Reader) error {
 	epoch := r.U32()
 	record := hash.Hash{}
 	r.FixedBytes(record[:])
-	ers.Epoch = idx.Epoch(epoch)
+	ers.Epoch = idx.EpochID(epoch)
 	ers.Vote = record
 	return nil
 }
diff --git a/inter/event_serializer_test.go b/inter/event_serializer_test.go
index 317459943..d1f000028 100644
--- a/inter/event_serializer_test.go
+++ b/inter/event_serializer_test.go
@@ -33,7 +33,7 @@ func TestEventPayloadSerialization(t *testing.T) {
 	max := MutableEventPayload{}
 	max.SetVersion(2)
 	max.SetEpoch(math.MaxUint32)
-	max.SetSeq(idx.Event(math.MaxUint32))
+	max.SetSeq(idx.EventID(math.MaxUint32))
 	max.SetLamport(idx.Lamport(math.MaxUint32))
 	h := hash.BytesToEvent(bytes.Repeat([]byte{math.MaxUint8}, 32))
 	max.SetParents(hash.Events{hash.Event(h), hash.Event(h), hash.Event(h)})
@@ -280,10 +280,10 @@ func FakeEvent(version uint8, txsNum, mpsNum, bvsNum int, ersNum bool) *EventPay
 	random.SetNetForkID(uint16(r.Uint32() >> 16))
 	random.SetLamport(1000)
 	random.SetExtra([]byte{byte(r.Uint32())})
-	random.SetSeq(idx.Event(r.Uint32() >> 8))
-	random.SetEpoch(idx.Epoch(1234))
+	random.SetSeq(idx.EventID(r.Uint32() >> 8))
+	random.SetEpoch(idx.EpochID(1234))
 	random.SetCreator(idx.ValidatorID(r.Uint32()))
-	random.SetFrame(idx.Frame(r.Uint32() >> 16))
+	random.SetFrame(idx.FrameID(r.Uint32() >> 16))
 	random.SetCreationTime(Timestamp(r.Uint64()))
 	random.SetMedianTime(Timestamp(r.Uint64()))
 	random.SetGasPowerUsed(r.Uint64())
@@ -360,8 +360,8 @@ func FakeEvent(version uint8, txsNum, mpsNum, bvsNum int, ersNum bool) *EventPay
 
 	bvs := LlrBlockVotes{}
 	if bvsNum > 0 {
-		bvs.Start = 1 + idx.Block(rand.IntN(1000))
-		bvs.Epoch = 1 + idx.Epoch(rand.IntN(1000))
+		bvs.Start = 1 + idx.BlockID(rand.IntN(1000))
+		bvs.Epoch = 1 + idx.EpochID(rand.IntN(1000))
 	}
 	for i := 0; i < bvsNum; i++ {
 		bvs.Votes = append(bvs.Votes, randHash(r))
@@ -370,7 +370,7 @@ func FakeEvent(version uint8, txsNum, mpsNum, bvsNum int, ersNum bool) *EventPay
 
 	ers := LlrEpochVote{}
 	if ersNum {
-		ers.Epoch = 1 + idx.Epoch(rand.IntN(1000))
+		ers.Epoch = 1 + idx.EpochID(rand.IntN(1000))
 		ers.Vote = randHash(r)
 	}
 	random.SetEpochVote(ers)
diff --git a/inter/iblockproc/decided_state.go b/inter/iblockproc/decided_state.go
index eb873458b..5b2dc53fb 100644
--- a/inter/iblockproc/decided_state.go
+++ b/inter/iblockproc/decided_state.go
@@ -19,7 +19,7 @@ type ValidatorBlockState struct {
 	Uptime           inter.Timestamp
 	LastOnlineTime   inter.Timestamp
 	LastGasPowerLeft inter.GasPowerLeft
-	LastBlock        idx.Block
+	LastBlock        idx.BlockID
 	DirtyGasRefund   uint64
 	Originated       *big.Int
 }
@@ -36,7 +36,7 @@ type ValidatorEpochState struct {
 }
 
 type BlockCtx struct {
-	Idx     idx.Block
+	Idx     idx.BlockID
 	Time    inter.Timestamp
 	Atropos hash.Event
 }
@@ -54,7 +54,7 @@ type BlockState struct {
 
 	DirtyRules *opera.Rules `rlp:"nil"` // nil means that there's no changes compared to epoch rules
 
-	AdvanceEpochs idx.Epoch
+	AdvanceEpochs idx.EpochID
 }
 
 func (bs BlockState) Copy() BlockState {
@@ -89,7 +89,7 @@ func (bs BlockState) Hash() hash.Hash {
 }
 
 type EpochStateV1 struct {
-	Epoch          idx.Epoch
+	Epoch          idx.EpochID
 	EpochStart     inter.Timestamp
 	PrevEpochStart inter.Timestamp
diff --git a/inter/iblockproc/legacy.go b/inter/iblockproc/legacy.go
index 57270183b..74e585400 100644
--- a/inter/iblockproc/legacy.go
+++ b/inter/iblockproc/legacy.go
@@ -15,7 +15,7 @@ type ValidatorEpochStateV0 struct {
 }
 
 type EpochStateV0 struct {
-	Epoch          idx.Epoch
+	Epoch          idx.EpochID
 	EpochStart     inter.Timestamp
 	PrevEpochStart inter.Timestamp
diff --git a/inter/ibr/inter_block_records.go b/inter/ibr/inter_block_records.go
index 3e21a0bd8..f24c4039b 100644
--- a/inter/ibr/inter_block_records.go
+++ b/inter/ibr/inter_block_records.go
@@ -20,14 +20,14 @@ type LlrFullBlockRecord struct {
 	GasUsed    uint64
 	BaseFee    *big.Int
 	PrevRandao hash.Hash
-	Epoch      idx.Epoch
+	Epoch      idx.EpochID
 	Txs        types.Transactions
 	Receipts   []*types.ReceiptForStorage
 }
 
 type LlrIdxFullBlockRecord struct {
 	LlrFullBlockRecord
-	Idx idx.Block
+	Idx idx.BlockID
 }
 
 // FullBlockRecordFor returns the full block record used in Genesis processing
diff --git a/inter/ier/inter_epoch_records.go b/inter/ier/inter_epoch_records.go
index 1395867c3..55701bd96 100644
--- a/inter/ier/inter_epoch_records.go
+++ b/inter/ier/inter_epoch_records.go
@@ -14,7 +14,7 @@ type LlrFullEpochRecord struct {
 
 type LlrIdxFullEpochRecord struct {
 	LlrFullEpochRecord
-	Idx idx.Epoch
+	Idx idx.EpochID
 }
 
 func (er LlrFullEpochRecord) Hash() hash.Hash {
diff --git a/inter/inter_llr.go b/inter/inter_llr.go
index 23328dfc4..86b374cb5 100644
--- a/inter/inter_llr.go
+++ b/inter/inter_llr.go
@@ -9,17 +9,17 @@ import (
 )
 
 type LlrBlockVotes struct {
-	Start idx.Block
-	Epoch idx.Epoch
+	Start idx.BlockID
+	Epoch idx.EpochID
 	Votes []hash.Hash
 }
 
-func (bvs LlrBlockVotes) LastBlock() idx.Block {
-	return bvs.Start + idx.Block(len(bvs.Votes)) - 1
+func (bvs LlrBlockVotes) LastBlock() idx.BlockID {
+	return bvs.Start + idx.BlockID(len(bvs.Votes)) - 1
 }
 
 type LlrEpochVote struct {
-	Epoch idx.Epoch
+	Epoch idx.EpochID
 	Vote  hash.Hash
 }
 
diff --git a/inter/inter_mps.go b/inter/inter_mps.go
index 88fe76139..f58267710 100644
--- a/inter/inter_mps.go
+++ b/inter/inter_mps.go
@@ -16,7 +16,7 @@ type EventsDoublesign struct {
 }
 
 type BlockVoteDoublesign struct {
-	Block idx.Block
+	Block idx.BlockID
 	Pair  [2]LlrSignedBlockVotes
 }
 
@@ -25,7 +25,7 @@ func (p BlockVoteDoublesign) GetVote(i int) hash.Hash {
 }
 
 type WrongBlockVote struct {
-	Block      idx.Block
+	Block      idx.BlockID
 	Pals       [MinAccomplicesForProof]LlrSignedBlockVotes
 	WrongEpoch bool
 }
diff --git a/opera/contracts/driver/drivercall/driver_calls.go b/opera/contracts/driver/drivercall/driver_calls.go
index 61021f22b..7be6e6c96 100644
--- a/opera/contracts/driver/drivercall/driver_calls.go
+++ b/opera/contracts/driver/drivercall/driver_calls.go
@@ -27,8 +27,8 @@ type Delegation struct {
 	ValidatorID        idx.ValidatorID
 	Stake              *big.Int
 	LockedStake        *big.Int
-	LockupFromEpoch    idx.Epoch
-	LockupEndTime      idx.Epoch
+	LockupFromEpoch    idx.EpochID
+	LockupEndTime      idx.EpochID
 	LockupDuration     uint64
 	EarlyUnlockPenalty *big.Int
 	Rewards            *big.Int
diff --git a/opera/contracts/netinit/netinitcalls/network_initializer_calls.go b/opera/contracts/netinit/netinitcalls/network_initializer_calls.go
index 21705da4f..9e1c908a2 100644
--- a/opera/contracts/netinit/netinitcalls/network_initializer_calls.go
+++ b/opera/contracts/netinit/netinitcalls/network_initializer_calls.go
@@ -21,7 +21,7 @@ var (
 
 // Methods
 
-func InitializeAll(sealedEpoch idx.Epoch, totalSupply *big.Int, sfcAddr common.Address, driverAuthAddr common.Address, driverAddr common.Address, evmWriterAddr common.Address, owner common.Address) []byte {
+func InitializeAll(sealedEpoch idx.EpochID, totalSupply *big.Int, sfcAddr common.Address, driverAuthAddr common.Address, driverAddr common.Address, evmWriterAddr common.Address, owner common.Address) []byte {
 	data, _ := sAbi.Pack("initializeAll", utils.U64toBig(uint64(sealedEpoch)), totalSupply, sfcAddr, driverAuthAddr, driverAddr, evmWriterAddr, owner)
 	return data
 }
diff --git a/opera/genesis/gpos/validators.go b/opera/genesis/gpos/validators.go
index 501deb5a5..98b9a816d 100644
--- a/opera/genesis/gpos/validators.go
+++ b/opera/genesis/gpos/validators.go
@@ -15,9 +15,9 @@ type (
 		Address          common.Address
 		PubKey           validatorpk.PubKey
 		CreationTime     inter.Timestamp
-		CreationEpoch    idx.Epoch
+		CreationEpoch    idx.EpochID
 		DeactivatedTime  inter.Timestamp
-		DeactivatedEpoch idx.Epoch
+		DeactivatedEpoch idx.EpochID
 		Status           uint64
 	}
 
diff --git a/opera/rules.go b/opera/rules.go
index 7220c427d..b31f9aa1d 100644
--- a/opera/rules.go
+++ b/opera/rules.go
@@ -115,8 +115,8 @@ type EpochsRules struct {
 
 // DagRules of Lachesis DAG (directed acyclic graph).
 type DagRules struct {
-	MaxParents     idx.Event
-	MaxFreeParents idx.Event // maximum number of parents with no gas cost
+	MaxParents     idx.EventID
+	MaxFreeParents idx.EventID // maximum number of parents with no gas cost
 	MaxExtraData   uint32
 }
 
@@ -160,13 +160,13 @@ type EmitterRules struct {
 
 // BlocksMissed is information about missed blocks from a staker
 type BlocksMissed struct {
-	BlocksNum idx.Block
+	BlocksNum idx.BlockID
 	Period    inter.Timestamp
 }
 
 // EconomyRules contains economy constants
 type EconomyRules struct {
-	BlockMissedSlack idx.Block
+	BlockMissedSlack idx.BlockID
 
 	Gas GasRules
 
@@ -216,7 +216,7 @@ type Upgrades struct {
 
 type UpgradeHeight struct {
 	Upgrades Upgrades
-	Height   idx.Block
+	Height   idx.BlockID
 	Time     inter.Timestamp
 }
 
diff --git a/topicsdb/dummy.go b/topicsdb/dummy.go
index e61a22a38..f143a1113 100644
--- a/topicsdb/dummy.go
+++ b/topicsdb/dummy.go
@@ -10,7 +10,7 @@ import (
 
 // dummyIndex is empty implementation of Index
 type dummyIndex struct{}
 
-func (n dummyIndex) FindInBlocks(ctx context.Context, from, to idx.Block, pattern [][]common.Hash) (logs []*types.Log, err error) {
+func (n dummyIndex) FindInBlocks(ctx context.Context, from, to idx.BlockID, pattern [][]common.Hash) (logs []*types.Log, err error) {
 	return nil, ErrLogsNotRecorded
 }
 
diff --git a/topicsdb/index.go b/topicsdb/index.go
index 74a10d85c..b74ebe12b 100644
--- a/topicsdb/index.go
+++ b/topicsdb/index.go
@@ -41,7 +41,7 @@ func (tt *index) WrapTablesAsBatched() (unwrap func()) {
 }
 
 // FindInBlocks returns all log records of block range by pattern. 1st pattern element is an address.
-func (tt *index) FindInBlocks(ctx context.Context, from, to idx.Block, pattern [][]common.Hash) (logs []*types.Log, err error) {
+func (tt *index) FindInBlocks(ctx context.Context, from, to idx.BlockID, pattern [][]common.Hash) (logs []*types.Log, err error) {
 	err = tt.ForEachInBlocks(
 		ctx,
 		from, to,
@@ -55,7 +55,7 @@ func (tt *index) FindInBlocks(ctx context.Context, from, to idx.Block, pattern [
 }
 
 // ForEachInBlocks matches log records of block range by pattern. 1st pattern element is an address.
-func (tt *index) ForEachInBlocks(ctx context.Context, from, to idx.Block, pattern [][]common.Hash, onLog func(*types.Log) (gonext bool)) error {
+func (tt *index) ForEachInBlocks(ctx context.Context, from, to idx.BlockID, pattern [][]common.Hash, onLog func(*types.Log) (gonext bool)) error {
 	if 0 < to && to < from {
 		return nil
 	}
diff --git a/topicsdb/search_test.go b/topicsdb/search_test.go
index 96b3a8db8..927783edf 100644
--- a/topicsdb/search_test.go
+++ b/topicsdb/search_test.go
@@ -35,7 +35,7 @@ func BenchmarkSearch(b *testing.B) {
 
 	pooled := withThreadPool{index}
 
-	for dsc, method := range map[string]func(context.Context, idx.Block, idx.Block, [][]common.Hash) ([]*types.Log, error){
+	for dsc, method := range map[string]func(context.Context, idx.BlockID, idx.BlockID, [][]common.Hash) ([]*types.Log, error){
 		"index":  index.FindInBlocks,
 		"pooled": pooled.FindInBlocks,
 	} {
diff --git a/topicsdb/thread_pool.go b/topicsdb/thread_pool.go
index eb7ad417a..d865e046d 100644
--- a/topicsdb/thread_pool.go
+++ b/topicsdb/thread_pool.go
@@ -17,7 +17,7 @@ type withThreadPool struct {
 }
 
 // FindInBlocks returns all log records of block range by pattern. 1st pattern element is an address.
-func (tt *withThreadPool) FindInBlocks(ctx context.Context, from, to idx.Block, pattern [][]common.Hash) (logs []*types.Log, err error) {
+func (tt *withThreadPool) FindInBlocks(ctx context.Context, from, to idx.BlockID, pattern [][]common.Hash) (logs []*types.Log, err error) {
 	err = tt.ForEachInBlocks(
 		ctx,
 		from, to,
@@ -31,7 +31,7 @@ func (tt *withThreadPool) FindInBlocks(ctx context.Context, from, to idx.Block,
 }
 
 // ForEachInBlocks matches log records of block range by pattern. 1st pattern element is an address.
-func (tt *withThreadPool) ForEachInBlocks(ctx context.Context, from, to idx.Block, pattern [][]common.Hash, onLog func(*types.Log) (gonext bool)) error {
+func (tt *withThreadPool) ForEachInBlocks(ctx context.Context, from, to idx.BlockID, pattern [][]common.Hash, onLog func(*types.Log) (gonext bool)) error {
 	if 0 < to && to < from {
 		return nil
 	}
diff --git a/topicsdb/topicsdb.go b/topicsdb/topicsdb.go
index db1b4743b..373ec780f 100644
--- a/topicsdb/topicsdb.go
+++ b/topicsdb/topicsdb.go
@@ -19,7 +19,7 @@ var (
 )
 
 type Index interface {
-	FindInBlocks(ctx context.Context, from, to idx.Block, pattern [][]common.Hash) (logs []*types.Log, err error)
+	FindInBlocks(ctx context.Context, from, to idx.BlockID, pattern [][]common.Hash) (logs []*types.Log, err error)
 
 	Push(recs ...*types.Log) error
 	Close()
diff --git a/topicsdb/topicsdb_test.go b/topicsdb/topicsdb_test.go
index b8c61d7cd..92ad486ba 100644
--- a/topicsdb/topicsdb_test.go
+++ b/topicsdb/topicsdb_test.go
@@ -84,7 +84,7 @@ func TestIndexSearchMultyVariants(t *testing.T) {
 
 	pooled := withThreadPool{index}
 
-	for dsc, method := range map[string]func(context.Context, idx.Block, idx.Block, [][]common.Hash) ([]*types.Log, error){
+	for dsc, method := range map[string]func(context.Context, idx.BlockID, idx.BlockID, [][]common.Hash) ([]*types.Log, error){
 		"index":  index.FindInBlocks,
 		"pooled": pooled.FindInBlocks,
 	} {
@@ -194,7 +194,7 @@ func TestIndexSearchShortCircuits(t *testing.T) {
 
 	pooled := withThreadPool{index}
 
-	for dsc, method := range map[string]func(context.Context, idx.Block, idx.Block, [][]common.Hash) ([]*types.Log, error){
+	for dsc, method := range map[string]func(context.Context, idx.BlockID, idx.BlockID, [][]common.Hash) ([]*types.Log, error){
 		"index":  index.FindInBlocks,
 		"pooled": pooled.FindInBlocks,
 	} {
@@ -254,7 +254,7 @@ func TestIndexSearchSingleVariant(t *testing.T) {
 
 	pooled := withThreadPool{index}
 
-	for dsc, method := range map[string]func(context.Context, idx.Block, idx.Block, [][]common.Hash) ([]*types.Log, error){
+	for dsc, method := range map[string]func(context.Context, idx.BlockID, idx.BlockID, [][]common.Hash) ([]*types.Log, error){
 		"index":  index.FindInBlocks,
 		"pooled": pooled.FindInBlocks,
 	} {
@@ -331,7 +331,7 @@ func TestIndexSearchSimple(t *testing.T) {
 
 	pooled := withThreadPool{index}
 
-	for dsc, method := range map[string]func(context.Context, idx.Block, idx.Block, [][]common.Hash) ([]*types.Log, error){
+	for dsc, method := range map[string]func(context.Context, idx.BlockID, idx.BlockID, [][]common.Hash) ([]*types.Log, error){
 		"index":  index.FindInBlocks,
 		"pooled": pooled.FindInBlocks,
 	} {
@@ -385,7 +385,7 @@ func TestMaxTopicsCount(t *testing.T) {
 
 	pooled := withThreadPool{index}
 
-	for dsc, method := range map[string]func(context.Context, idx.Block, idx.Block, [][]common.Hash) ([]*types.Log, error){
+	for dsc, method := range map[string]func(context.Context, idx.BlockID, idx.BlockID, [][]common.Hash) ([]*types.Log, error){
 		"index":  index.FindInBlocks,
 		"pooled": pooled.FindInBlocks,
 	} {
@@ -467,7 +467,7 @@ func TestKvdbThreadsPoolLimit(t *testing.T) {
 
 	pooled := withThreadPool{index}
 
-	for dsc, method := range map[string]func(context.Context, idx.Block, idx.Block, [][]common.Hash) ([]*types.Log, error){
+	for dsc, method := range map[string]func(context.Context, idx.BlockID, idx.BlockID, [][]common.Hash) ([]*types.Log, error){
 		"index":  index.FindInBlocks,
 		"pooled": pooled.FindInBlocks,
 	} {
diff --git a/utils/adapters/vecmt2dagidx/vecmt2lachesis.go b/utils/adapters/vecmt2dagidx/vecmt2lachesis.go
index 641d08329..1e2899f78 100644
--- a/utils/adapters/vecmt2dagidx/vecmt2lachesis.go
+++ b/utils/adapters/vecmt2dagidx/vecmt2lachesis.go
@@ -25,12 +25,12 @@ type BranchSeq struct {
 }
 
 // Seq is a maximum observed e.Seq in the branch
-func (b *BranchSeq) Seq() idx.Event {
+func (b *BranchSeq) Seq() idx.EventID {
 	return b.BranchSeq.Seq
 }
 
 // MinSeq is a minimum observed e.Seq in the branch
-func (b *BranchSeq) MinSeq() idx.Event {
+func (b *BranchSeq) MinSeq() idx.EventID {
 	return b.BranchSeq.MinSeq
 }
 
diff --git a/utils/eventid/eventid.go b/utils/eventid/eventid.go
index 82424dcc4..02e5ca506 100644
--- a/utils/eventid/eventid.go
+++ b/utils/eventid/eventid.go
@@ -11,7 +11,7 @@ type Cache struct {
 	ids     map[hash.Event]bool
 	mu      sync.RWMutex
 	maxSize int
-	epoch   idx.Epoch
+	epoch   idx.EpochID
 }
 
 func NewCache(maxSize int) *Cache {
@@ -20,7 +20,7 @@ func NewCache(maxSize int) *Cache {
 	}
 }
 
-func (c *Cache) Reset(epoch idx.Epoch) {
+func (c *Cache) Reset(epoch idx.EpochID) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
 	c.ids = make(map[hash.Event]bool)
diff --git a/vecmt/vector_ops.go b/vecmt/vector_ops.go
index aa6f863d2..d939fb8aa 100644
--- a/vecmt/vector_ops.go
+++ b/vecmt/vector_ops.go
@@ -26,11 +26,11 @@ func (b *HighestBefore) IsForkDetected(i idx.Validator) bool {
 	return b.VSeq.IsForkDetected(i)
 }
 
-func (b *HighestBefore) Seq(i idx.Validator) idx.Event {
+func (b *HighestBefore) Seq(i idx.Validator) idx.EventID {
 	return b.VSeq.Seq(i)
 }
 
-func (b *HighestBefore) MinSeq(i idx.Validator) idx.Event {
+func (b *HighestBefore) MinSeq(i idx.Validator) idx.EventID {
 	return b.VSeq.MinSeq(i)
 }
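The patch above is a mechanical rename of the index types (idx.Epoch to idx.EpochID, idx.Block to idx.BlockID, idx.Event to idx.EventID, idx.Frame to idx.FrameID). As an illustration only of what distinct defined types buy here, the following self-contained Go sketch mirrors the pattern with local stand-ins; it is not part of the patch, the integer widths of EpochID and BlockID are assumptions, and the lastBlockOf helper is hypothetical rather than an API of the repository.

package main

import "fmt"

// Local stand-ins that mirror the renamed index types; the real
// definitions live in the consensus library's idx package.
type EpochID uint32 // assumed width
type BlockID uint64 // assumed width

// lastBlockOf is a hypothetical helper used only to show that the two
// index kinds cannot be mixed by accident: passing a BlockID where an
// EpochID is expected is a compile-time error.
func lastBlockOf(epoch EpochID) BlockID {
	return BlockID(epoch) * 1000 // toy mapping for the example
}

func main() {
	e := EpochID(3)
	b := lastBlockOf(e)
	fmt.Println(e, b)
	// lastBlockOf(b) // would not compile: BlockID is not an EpochID
}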